diff --git a/.dockerignore b/.dockerignore index 3a8e436d515..f24c490e9ad 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,11 @@ .git .worktrees + +# Sensitive files – docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN +# into the project root; keep it out of the build context. +.env +.env.* + .bun-cache .bun .tmp diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2761a7b0d3b..00670107d00 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: concurrency: group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + cancel-in-progress: true env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" @@ -38,9 +38,8 @@ jobs: id: check uses: ./.github/actions/detect-docs-changes - # Detect which heavy areas are touched so PRs can skip unrelated expensive jobs. - # Push to main keeps broad coverage, but this job still needs to run so - # downstream jobs that list it in `needs` are not skipped. + # Detect which heavy areas are touched so CI can skip unrelated expensive jobs. + # Fail-safe: if detection fails, downstream jobs run. changed-scope: needs: [docs-scope] if: needs.docs-scope.outputs.docs_only != 'true' @@ -82,7 +81,7 @@ jobs: # Build dist once for Node-relevant changes and share it with downstream jobs. 
build-artifacts: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -141,7 +140,7 @@ jobs: checks: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false @@ -149,6 +148,13 @@ jobs: include: - runtime: node task: test + shard_index: 1 + shard_count: 2 + command: pnpm canvas:a2ui:bundle && pnpm test + - runtime: node + task: test + shard_index: 2 + shard_count: 2 command: pnpm canvas:a2ui:bundle && pnpm test - runtime: node task: extensions @@ -160,40 +166,47 @@ jobs: task: test command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts steps: - - name: Skip bun lane on push - if: github.event_name == 'push' && matrix.runtime == 'bun' - run: echo "Skipping bun test lane on push events." + - name: Skip bun lane on pull requests + if: github.event_name == 'pull_request' && matrix.runtime == 'bun' + run: echo "Skipping Bun compatibility lane on pull requests." 
- name: Checkout - if: github.event_name != 'push' || matrix.runtime != 'bun' + if: github.event_name != 'pull_request' || matrix.runtime != 'bun' uses: actions/checkout@v6 with: submodules: false - name: Setup Node environment - if: matrix.runtime != 'bun' || github.event_name != 'push' + if: matrix.runtime != 'bun' || github.event_name != 'pull_request' uses: ./.github/actions/setup-node-env with: install-bun: "${{ matrix.runtime == 'bun' }}" use-sticky-disk: "false" - name: Configure Node test resources - if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + env: + SHARD_COUNT: ${{ matrix.shard_count || '' }} + SHARD_INDEX: ${{ matrix.shard_index || '' }} run: | # `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes. # Default heap limits have been too low on Linux CI (V8 OOM near 4GB). echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV" echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV" + if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then + echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV" + echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV" + fi - name: Run ${{ matrix.task }} (${{ matrix.runtime }}) - if: matrix.runtime != 'bun' || github.event_name != 'push' + if: matrix.runtime != 'bun' || github.event_name != 'pull_request' run: ${{ matrix.command }} # Types, lint, and format check. 
check: name: "check" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -239,7 +252,7 @@ jobs: compat-node22: name: "compat-node22" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -272,7 +285,7 @@ jobs: skills-python: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -365,7 +378,7 @@ jobs: checks-windows: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true' runs-on: blacksmith-32vcpu-windows-2025 timeout-minutes: 45 env: @@ -727,7 +740,7 @@ jobs: android: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false diff --git 
a/.github/workflows/openclaw-npm-release.yml b/.github/workflows/openclaw-npm-release.yml index ac0a8f728e3..903bba74706 100644 --- a/.github/workflows/openclaw-npm-release.yml +++ b/.github/workflows/openclaw-npm-release.yml @@ -69,8 +69,13 @@ jobs: run: pnpm release:check - name: Publish + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} run: | set -euo pipefail + if [[ -n "${NODE_AUTH_TOKEN:-}" ]]; then + printf '//registry.npmjs.org/:_authToken=%s\n' "$NODE_AUTH_TOKEN" > "$HOME/.npmrc" + fi PACKAGE_VERSION=$(node -p "require('./package.json').version") if [[ "$PACKAGE_VERSION" == *-beta.* ]]; then diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 00000000000..777b025b0c8 --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,16 @@ +{ + "gitignore": true, + "noSymlinks": true, + "ignore": [ + "**/node_modules/**", + "**/dist/**", + "dist/**", + "**/.git/**", + "**/coverage/**", + "**/build/**", + "**/.build/**", + "**/.artifacts/**", + "docs/zh-CN/**", + "**/CHANGELOG.md" + ] +} diff --git a/AGENTS.md b/AGENTS.md index 45eed9ec2ad..28d1b9cc2a6 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -132,6 +132,7 @@ - Framework: Vitest with V8 coverage thresholds (70% lines/branches/functions/statements). - Naming: match source names with `*.test.ts`; e2e in `*.e2e.test.ts`. - Run `pnpm test` (or `pnpm test:coverage`) before pushing when you touch logic. +- For targeted/local debugging, keep using the wrapper: `pnpm test -- [vitest args...]` (for example `pnpm test -- src/commands/onboard-search.test.ts -t "shows registered plugin providers"`); do not default to raw `pnpm vitest run ...` because it bypasses wrapper config/profile/pool routing. - Do not set test workers above 16; tried already. - If local Vitest runs cause memory pressure (common on non-Mac-Studio hosts), use `OPENCLAW_TEST_PROFILE=low OPENCLAW_TEST_SERIAL_GATEWAY=1 pnpm test` for land/gate runs. 
- Live tests (real keys): `CLAWDBOT_LIVE_TEST=1 pnpm test:live` (OpenClaw-only) or `LIVE=1 pnpm test:live` (includes provider live tests). Docker: `pnpm test:docker:live-models`, `pnpm test:docker:live-gateway`. Onboarding Docker E2E: `pnpm test:docker:onboard`. @@ -201,6 +202,42 @@ ## Agent-Specific Notes - Vocabulary: "makeup" = "mac app". +- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested. +- Parallels macOS smoke playbook: + - `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`. + - Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed. + - Preferred automation entrypoint: `pnpm test:parallels:macos`. It restores the snapshot most closely matching `macOS 26.3.1 fresh`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes. + - Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero. + - Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded. + - Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-smoke.*`. + - All-OS parallel runs should share the host `dist` build via `/tmp/openclaw-parallels-build.lock` instead of rebuilding three times. 
+ - Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails. + - Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`. + - For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green. + - Don’t run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially. + - Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading. +- Parallels Windows smoke playbook: + - Preferred automation entrypoint: `pnpm test:parallels:windows`. It restores the snapshot most closely matching `pre-openclaw-native-e2e-2026-03-12`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes. + - Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero. + - Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded. + - Always use `prlctl exec --current-user` for Windows guest runs; plain `prlctl exec` lands in `NT AUTHORITY\SYSTEM` and does not match the real desktop-user install path. + - Prefer explicit `npm.cmd` / `openclaw.cmd`. 
Bare `npm` / `openclaw` in PowerShell can hit the `.ps1` shim and fail under restrictive execution policy. + - Use PowerShell only as the transport (`powershell.exe -NoProfile -ExecutionPolicy Bypass`) and call the `.cmd` shims explicitly from inside it. + - Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-windows.*`. + - Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails. + - Keep Windows onboarding/status text ASCII-clean in logs. Fancy punctuation in banners shows up as mojibake through the current guest PowerShell capture path. +- Parallels Linux smoke playbook: + - Preferred automation entrypoint: `pnpm test:parallels:linux`. It restores the snapshot most closely matching `fresh` on `Ubuntu 24.04.3 ARM64`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes. + - Use plain `prlctl exec` on this snapshot. `--current-user` is not the right transport there. + - Fresh snapshot reality: `curl` is missing and `apt-get update` can fail on clock skew. Bootstrap with `apt-get -o Acquire::Check-Date=false update` and install `curl ca-certificates` before testing installer paths. + - Fresh `main` tgz smoke on Linux still needs the latest-release installer first, because this snapshot has no Node/npm before bootstrap. The harness does stable bootstrap first, then overlays current `main`. + - This snapshot does not have a usable `systemd --user` session. Treat managed daemon install as unsupported here; use `--skip-health`, then verify with direct `openclaw gateway run --bind loopback --port 18789 --force`. 
+ - Env-backed auth refs are still fine, but any direct shell launch (`openclaw gateway run`, `openclaw agent --local`, Linux `gateway status --deep` against that direct run) must inherit the referenced env vars in the same shell. + - `prlctl exec` reaps detached Linux child processes on this snapshot, so a background `openclaw gateway run` launched from automation is not a trustworthy smoke path. The harness verifies installer + `agent --local`; do direct gateway checks only from an interactive guest shell when needed. + - When you do run Linux gateway checks manually from an interactive guest shell, use `openclaw gateway status --deep --require-rpc` so an RPC miss is a hard failure. + - Prefer direct argv guest commands for fetch/install steps (`curl`, `npm install -g`, `openclaw ...`) over nested `bash -lc` quoting; Linux guest quoting through Parallels was the flaky part. + - Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-linux.*`. + - Current expected outcome on Linux smoke: fresh + upgrade should pass installer and `agent --local`; gateway remains `skipped-no-detached-linux-gateway` on this snapshot and should not be treated as a regression by itself. - Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`. - When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`). - Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`. 
diff --git a/CHANGELOG.md b/CHANGELOG.md index f7679f4c5b0..9cb7ca5ee9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,32 +7,64 @@ Docs: https://docs.openclaw.ai ### Changes - Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus. -- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei. - iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman. +- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome’s own setup guides. +- Browser/agents: add built-in `profile="user"` for the logged-in host browser and `profile="chrome-relay"` for the extension relay, so agent browser calls can prefer the real signed-in browser without the extra `browserSession` selector. +- Browser/act automation: add batched actions, selector targeting, and delayed clicks for browser act requests with normalized batch dispatch. Thanks @vincentkoc. +- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei. +- Dependencies/pi: bump `@mariozechner/pi-agent-core`, `@mariozechner/pi-ai`, `@mariozechner/pi-coding-agent`, and `@mariozechner/pi-tui` to `0.58.0`. ### Fixes +- Dashboard/chat UI: stop reloading full chat history on every live tool result in dashboard v2 so tool-heavy runs no longer trigger UI freeze/re-render storms while the final event still refreshes persisted history. 
(#45541) Thanks @BunsDev. - Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang. -- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups. -- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding. -- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus. -- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv. -- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman. +- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus. +- Browser/existing-session: harden driver validation and session lifecycle so transport errors trigger reconnects while tool-level errors preserve the session, and extract shared ARIA role sets to deduplicate Playwright and Chrome MCP snapshot paths. (#45682) Thanks @odysseus0. 
+- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata. +- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark. - Gateway/session reset: preserve `lastAccountId` and `lastThreadId` across gateway session resets so replies keep routing back to the same account and thread after `/reset`. (#44773) Thanks @Lanfei. -- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei. +- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots. +- Gateway/status: add `openclaw gateway status --require-rpc` and clearer Linux non-interactive daemon-install failure reporting so automation can fail hard on probe misses instead of treating a printed RPC error as green. +- macOS/exec approvals: respect per-agent exec approval settings in the gateway prompter, including allowlist fallback when the native prompt cannot be shown, so gateway-triggered `system.run` requests follow configured policy instead of always prompting or denying unexpectedly. (#13707) Thanks @sliekens. +- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus. 
+- Telegram/inbound media IPv4 fallback: retry SSRF-guarded Telegram file downloads once with the same IPv4 fallback policy as Bot API calls so fresh installs on IPv6-broken hosts no longer fail to download inbound images. +- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups. +- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart. +- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`. +- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding. +- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman. +- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss. +- Dashboard/chat UI: render oversized plain-text replies as normal paragraphs instead of capped gray code blocks, so long desktop chat responses stay readable without tab-switching refreshes. +- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han. 
+- Gateway/Control UI: restore the operator-only device-auth bypass and classify browser connect failures so origin and device-identity problems no longer show up as auth errors in the Control UI and web chat. (#45512) thanks @sallyom. +- macOS/voice wake: stop crashing wake-word command extraction when speech segment ranges come from a different transcript instance. +- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks. +- macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo. +- Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu. +- Models/google-vertex Gemini flash-lite normalization: apply existing bare-ID preview normalization to `google-vertex` model refs and provider configs so `google-vertex/gemini-3.1-flash-lite` resolves as `gemini-3.1-flash-lite-preview`. (#42435) thanks @scoootscooob. +- iMessage/remote attachments: reject unsafe remote attachment paths before spawning SCP, so sender-controlled filenames can no longer inject shell metacharacters into remote media staging. Thanks @lintsinghua. +- Telegram/webhook auth: validate the Telegram webhook secret before reading or parsing request bodies, so unauthenticated requests are rejected immediately instead of consuming up to 1 MB first. Thanks @space08. +- Security/device pairing: make bootstrap setup codes single-use so pending device pairing requests cannot be silently replayed and widened to admin before approval. Thanks @tdjackey. 
+- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization. +- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates. +- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path. +- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants. +- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token. +- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks. +- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins. +- Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn. +- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc. 
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference. - Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97. +- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei. +- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv. +- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello. +- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin. - Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179. -- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus. 
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec. - Signal/config validation: add `channels.signal.groups` schema support so per-group `requireMention`, `tools`, and `toolsBySender` overrides no longer get rejected during config validation. (#27199) Thanks @unisone. - Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh. -- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates. -- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path. -- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark. -- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots. -- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello. 
-- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin. +- Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08. ## 2026.3.12 @@ -45,6 +77,7 @@ Docs: https://docs.openclaw.ai - Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi - Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff - Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc. +- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc. ### Fixes @@ -101,13 +134,16 @@ Docs: https://docs.openclaw.ai - Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) thanks @jalehman. - Context engine/session routing: forward optional `sessionKey` through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) thanks @jalehman. - Agents/failover: classify z.ai `network_error` stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev. 
+- Config/Anthropic startup: inline Anthropic alias normalization during config load so gateway startup no longer crashes on dated Anthropic model refs like `anthropic/claude-sonnet-4-20250514`. (#45520) Thanks @BunsDev. - Memory/session sync: add mode-aware post-compaction session reindexing with `agents.defaults.compaction.postIndexSync` plus `agents.defaults.memorySearch.sync.sessions.postCompactionForce`, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) thanks @rodrigouroz. - Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb. - Telegram/native command sync: suppress expected `BOT_COMMANDS_TOO_MUCH` retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures. - Mattermost/reply media delivery: pass agent-scoped `mediaLocalRoots` through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666. - Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process `HOME`/`OPENCLAW_HOME` changes no longer reuse stale plugin state or misreport `~/...` plugins as untracked. (#44046) thanks @gumadeiras. - Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated `session.store` roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) thanks @gumadeiras. 
+- Browser/existing-session: stop reporting fake CDP ports/URLs for live attached Chrome sessions, render `transport: chrome-mcp` in CLI/status output instead of `port: 0`, and keep timeout diagnostics transport-aware when no direct CDP URL exists. - Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and `models list --plain`, and migrate legacy duplicated `openrouter/openrouter/...` config entries forward on write. +- Feishu/event dedupe: keep early duplicate suppression aligned with the shared Feishu message-id contract and release the pre-queue dedupe marker after failed dispatch so retried events can recover instead of being dropped until the short TTL expires. (#43762) Thanks @yunweibang. - Gateway/hooks: bucket hook auth failures by forwarded client IP behind trusted proxies and warn when `hooks.allowedAgentIds` leaves hook routing unrestricted. - Agents/compaction: skip the post-compaction `cache-ttl` marker write when a compaction completed in the same attempt, preventing the next turn from immediately triggering a second tiny compaction. (#28548) thanks @MoerAI. - Native chat/macOS: add `/new`, `/reset`, and `/clear` reset triggers, keep shared main-session aliases aligned, and ignore stale model-selection completions so native chat state stays in sync across reset and fast model changes. (#10898) Thanks @Nachx639. @@ -118,6 +154,8 @@ Docs: https://docs.openclaw.ai - Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn. - CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621. 
- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte. +- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh. +- Feishu/file uploads: preserve literal UTF-8 filenames in `im.file.create` so Chinese and other non-ASCII filenames no longer appear percent-encoded in chat. (#34262) Thanks @fabiaodemianyang and @KangShuaiFu. ## 2026.3.11 @@ -258,6 +296,7 @@ Docs: https://docs.openclaw.ai - Agents/failover: classify ZenMux quota-refresh `402` responses as `rate_limit` so model fallback retries continue instead of stopping on a temporary subscription window. (#43917) thanks @bwjoke. - Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode. - Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows. +- macOS/remote gateway: stop PortGuardian from killing Docker Desktop and other external listeners on the gateway port in remote mode, so containerized and tunneled gateway setups no longer lose their port-forward owner on app startup. (#6755) Thanks @teslamint. ## 2026.3.8 @@ -3261,7 +3300,7 @@ Docs: https://docs.openclaw.ai - Agents: add CLI log hint to "agent failed before reply" messages. (#1550) Thanks @sweepies. - Agents: warn and ignore tool allowlists that only reference unknown or unloaded plugin tools. (#1566) - Agents: treat plugin-only tool allowlists as opt-ins; keep core tools enabled. (#1467) -- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. 
(commit 084002998) +- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (#45459) Thanks @LyttonFeng and @vincentkoc. - Slack: honor open groupPolicy for unlisted channels in message + slash gating. (#1563) Thanks @itsjaydesu. - Discord: limit autoThread mention bypass to bot-owned threads; keep ack reactions mention-gated. (#1511) Thanks @pvoo. - Discord: retry rate-limited allowlist resolution + command deploy to avoid gateway crashes. (commit f70ac0c7c) diff --git a/Dockerfile b/Dockerfile index 72c413ebe7b..57a3440f385 100644 --- a/Dockerfile +++ b/Dockerfile @@ -132,6 +132,7 @@ WORKDIR /app RUN --mount=type=cache,id=openclaw-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends && \ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ procps hostname curl git openssl diff --git a/Dockerfile.sandbox b/Dockerfile.sandbox index 8b50c7a6745..37cdab5fcd2 100644 --- a/Dockerfile.sandbox +++ b/Dockerfile.sandbox @@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/Dockerfile.sandbox-browser b/Dockerfile.sandbox-browser index f04e4a82a62..e8e8bb59f84 100644 --- a/Dockerfile.sandbox-browser +++ b/Dockerfile.sandbox-browser @@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && 
apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/Dockerfile.sandbox-common b/Dockerfile.sandbox-common index 39eaa3692b4..fba29a5df3d 100644 --- a/Dockerfile.sandbox-common +++ b/Dockerfile.sandbox-common @@ -24,6 +24,7 @@ ENV PATH=${BUN_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/sbin RUN --mount=type=cache,id=openclaw-sandbox-common-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-common-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends ${PACKAGES} RUN if [ "${INSTALL_PNPM}" = "1" ]; then npm install -g pnpm; fi diff --git a/Swabble/Sources/SwabbleKit/WakeWordGate.swift b/Swabble/Sources/SwabbleKit/WakeWordGate.swift index 27c952a8d1b..1a1479b630b 100644 --- a/Swabble/Sources/SwabbleKit/WakeWordGate.swift +++ b/Swabble/Sources/SwabbleKit/WakeWordGate.swift @@ -101,25 +101,19 @@ public enum WakeWordGate { } public static func commandText( - transcript: String, + transcript _: String, segments: [WakeWordSegment], triggerEndTime: TimeInterval) -> String { let threshold = triggerEndTime + 0.001 + var commandWords: [String] = [] + commandWords.reserveCapacity(segments.count) for segment in segments where segment.start >= threshold { - if normalizeToken(segment.text).isEmpty { continue } - if let range = segment.range { - let slice = transcript[range.lowerBound...] 
- return String(slice).trimmingCharacters(in: Self.whitespaceAndPunctuation) - } - break + let normalized = normalizeToken(segment.text) + if normalized.isEmpty { continue } + commandWords.append(segment.text) } - - let text = segments - .filter { $0.start >= threshold && !normalizeToken($0.text).isEmpty } - .map(\.text) - .joined(separator: " ") - return text.trimmingCharacters(in: Self.whitespaceAndPunctuation) + return commandWords.joined(separator: " ").trimmingCharacters(in: Self.whitespaceAndPunctuation) } public static func matchesTextOnly(text: String, triggers: [String]) -> Bool { diff --git a/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift b/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift index 5cc283c35ae..7e5b4abdd74 100644 --- a/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift +++ b/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift @@ -46,6 +46,25 @@ import Testing let match = WakeWordGate.match(transcript: transcript, segments: segments, config: config) #expect(match?.command == "do it") } + + @Test func commandTextHandlesForeignRangeIndices() { + let transcript = "hey clawd do thing" + let other = "do thing" + let foreignRange = other.range(of: "do") + let segments = [ + WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")), + WakeWordSegment(text: "clawd", start: 0.2, duration: 0.1, range: transcript.range(of: "clawd")), + WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange), + WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil), + ] + + let command = WakeWordGate.commandText( + transcript: transcript, + segments: segments, + triggerEndTime: 0.3) + + #expect(command == "do thing") + } } private func makeSegments( diff --git a/apps/android/README.md b/apps/android/README.md index 0a92e4c8ec5..9c6baf807c9 100644 --- a/apps/android/README.md +++ b/apps/android/README.md @@ -30,8 +30,12 @@ cd apps/android ./gradlew :app:assembleDebug ./gradlew 
:app:installDebug ./gradlew :app:testDebugUnitTest +cd ../.. +bun run android:bundle:release ``` +`bun run android:bundle:release` auto-bumps Android `versionName`/`versionCode` in `apps/android/app/build.gradle.kts`, then builds a signed release `.aab`. + ## Kotlin Lint + Format ```bash diff --git a/apps/android/app/build.gradle.kts b/apps/android/app/build.gradle.kts index b187e131048..46afccbc3bf 100644 --- a/apps/android/app/build.gradle.kts +++ b/apps/android/app/build.gradle.kts @@ -1,5 +1,7 @@ import com.android.build.api.variant.impl.VariantOutputImpl +val dnsjavaInetAddressResolverService = "META-INF/services/java.net.spi.InetAddressResolverProvider" + val androidStoreFile = providers.gradleProperty("OPENCLAW_ANDROID_STORE_FILE").orNull?.takeIf { it.isNotBlank() } val androidStorePassword = providers.gradleProperty("OPENCLAW_ANDROID_STORE_PASSWORD").orNull?.takeIf { it.isNotBlank() } val androidKeyAlias = providers.gradleProperty("OPENCLAW_ANDROID_KEY_ALIAS").orNull?.takeIf { it.isNotBlank() } @@ -63,8 +65,8 @@ android { applicationId = "ai.openclaw.app" minSdk = 31 targetSdk = 36 - versionCode = 202603130 - versionName = "2026.3.13" + versionCode = 2026031400 + versionName = "2026.3.14" ndk { // Support all major ABIs — native libs are tiny (~47 KB per ABI) abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64") @@ -78,6 +80,9 @@ android { } isMinifyEnabled = true isShrinkResources = true + ndk { + debugSymbolLevel = "SYMBOL_TABLE" + } proguardFiles(getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro") } debug { @@ -104,6 +109,10 @@ android { "/META-INF/LICENSE*.txt", "DebugProbesKt.bin", "kotlin-tooling-metadata.json", + "org/bouncycastle/pqc/crypto/picnic/lowmcL1.bin.properties", + "org/bouncycastle/pqc/crypto/picnic/lowmcL3.bin.properties", + "org/bouncycastle/pqc/crypto/picnic/lowmcL5.bin.properties", + "org/bouncycastle/x509/CertPathReviewerMessages*.properties", ) } } @@ -168,7 +177,6 @@ dependencies { // 
material-icons-extended pulled in full icon set (~20 MB DEX). Only ~18 icons used. // R8 will tree-shake unused icons when minify is enabled on release builds. implementation("androidx.compose.material:material-icons-extended") - implementation("androidx.navigation:navigation-compose:2.9.7") debugImplementation("androidx.compose.ui:ui-tooling") @@ -193,7 +201,6 @@ dependencies { implementation("androidx.camera:camera-camera2:1.5.2") implementation("androidx.camera:camera-lifecycle:1.5.2") implementation("androidx.camera:camera-video:1.5.2") - implementation("androidx.camera:camera-view:1.5.2") implementation("com.google.android.gms:play-services-code-scanner:16.1.0") // Unicast DNS-SD (Wide-Area Bonjour) for tailnet discovery domains. @@ -211,3 +218,45 @@ dependencies { tasks.withType().configureEach { useJUnitPlatform() } + +val stripReleaseDnsjavaServiceDescriptor = + tasks.register("stripReleaseDnsjavaServiceDescriptor") { + val mergedJar = + layout.buildDirectory.file( + "intermediates/merged_java_res/release/mergeReleaseJavaResource/base.jar", + ) + + inputs.file(mergedJar) + outputs.file(mergedJar) + + doLast { + val jarFile = mergedJar.get().asFile + if (!jarFile.exists()) { + return@doLast + } + + val unpackDir = temporaryDir.resolve("merged-java-res") + delete(unpackDir) + copy { + from(zipTree(jarFile)) + into(unpackDir) + exclude(dnsjavaInetAddressResolverService) + } + delete(jarFile) + ant.invokeMethod( + "zip", + mapOf( + "destfile" to jarFile.absolutePath, + "basedir" to unpackDir.absolutePath, + ), + ) + } + } + +tasks.matching { it.name == "stripReleaseDnsjavaServiceDescriptor" }.configureEach { + dependsOn("mergeReleaseJavaResource") +} + +tasks.matching { it.name == "minifyReleaseWithR8" }.configureEach { + dependsOn(stripReleaseDnsjavaServiceDescriptor) +} diff --git a/apps/android/app/proguard-rules.pro b/apps/android/app/proguard-rules.pro index 78e4a363919..7c04b96833a 100644 --- a/apps/android/app/proguard-rules.pro +++ 
b/apps/android/app/proguard-rules.pro @@ -1,26 +1,6 @@ -# ── App classes ─────────────────────────────────────────────────── --keep class ai.openclaw.app.** { *; } - -# ── Bouncy Castle ───────────────────────────────────────────────── --keep class org.bouncycastle.** { *; } -dontwarn org.bouncycastle.** - -# ── CameraX ─────────────────────────────────────────────────────── --keep class androidx.camera.** { *; } - -# ── kotlinx.serialization ──────────────────────────────────────── --keep class kotlinx.serialization.** { *; } --keepclassmembers class * { - @kotlinx.serialization.Serializable *; -} --keepattributes *Annotation*, InnerClasses - -# ── OkHttp ──────────────────────────────────────────────────────── -dontwarn okhttp3.** -dontwarn okio.** --keep class okhttp3.internal.platform.** { *; } - -# ── Misc suppressions ──────────────────────────────────────────── -dontwarn com.sun.jna.** -dontwarn javax.naming.** -dontwarn lombok.Generated diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt index 9ca5687e594..3416900ed5b 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt @@ -97,7 +97,7 @@ internal fun parseGatewayEndpoint(rawInput: String): GatewayEndpointConfig? 
{ "wss", "https" -> true else -> true } - val port = uri.port.takeIf { it in 1..65535 } ?: 18789 + val port = uri.port.takeIf { it in 1..65535 } ?: if (tls) 443 else 18789 val displayUrl = "${if (tls) "https" else "http"}://$host:$port" return GatewayEndpointConfig(host = host, port = port, tls = tls, displayUrl = displayUrl) diff --git a/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt b/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt index a4eef3b9b09..5c24631cf0b 100644 --- a/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt +++ b/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt @@ -92,6 +92,30 @@ class GatewayConfigResolverTest { assertNull(resolved?.password?.takeIf { it.isNotEmpty() }) } + @Test + fun resolveGatewayConnectConfigDefaultsPortlessWssSetupCodeTo443() { + val setupCode = + encodeSetupCode("""{"url":"wss://gateway.example","bootstrapToken":"bootstrap-1"}""") + + val resolved = + resolveGatewayConnectConfig( + useSetupCode = true, + setupCode = setupCode, + manualHost = "", + manualPort = "", + manualTls = true, + fallbackToken = "shared-token", + fallbackPassword = "shared-password", + ) + + assertEquals("gateway.example", resolved?.host) + assertEquals(443, resolved?.port) + assertEquals(true, resolved?.tls) + assertEquals("bootstrap-1", resolved?.bootstrapToken) + assertNull(resolved?.token?.takeIf { it.isNotEmpty() }) + assertNull(resolved?.password?.takeIf { it.isNotEmpty() }) + } + private fun encodeSetupCode(payloadJson: String): String { return Base64.getUrlEncoder().withoutPadding().encodeToString(payloadJson.toByteArray(Charsets.UTF_8)) } diff --git a/apps/android/scripts/build-release-aab.ts b/apps/android/scripts/build-release-aab.ts new file mode 100644 index 00000000000..30e4bb0390b --- /dev/null +++ b/apps/android/scripts/build-release-aab.ts @@ -0,0 +1,125 @@ +#!/usr/bin/env bun + +import { $ } from "bun"; +import { 
dirname, join } from "node:path"; +import { fileURLToPath } from "node:url"; + +const scriptDir = dirname(fileURLToPath(import.meta.url)); +const androidDir = join(scriptDir, ".."); +const buildGradlePath = join(androidDir, "app", "build.gradle.kts"); +const bundlePath = join(androidDir, "app", "build", "outputs", "bundle", "release", "app-release.aab"); + +type VersionState = { + versionName: string; + versionCode: number; +}; + +type ParsedVersionMatches = { + versionNameMatch: RegExpMatchArray; + versionCodeMatch: RegExpMatchArray; +}; + +function formatVersionName(date: Date): string { + const year = date.getFullYear(); + const month = date.getMonth() + 1; + const day = date.getDate(); + return `${year}.${month}.${day}`; +} + +function formatVersionCodePrefix(date: Date): string { + const year = date.getFullYear().toString(); + const month = (date.getMonth() + 1).toString().padStart(2, "0"); + const day = date.getDate().toString().padStart(2, "0"); + return `${year}${month}${day}`; +} + +function parseVersionMatches(buildGradleText: string): ParsedVersionMatches { + const versionCodeMatch = buildGradleText.match(/versionCode = (\d+)/); + const versionNameMatch = buildGradleText.match(/versionName = "([^"]+)"/); + if (!versionCodeMatch || !versionNameMatch) { + throw new Error(`Couldn't parse versionName/versionCode from ${buildGradlePath}`); + } + return { versionCodeMatch, versionNameMatch }; +} + +function resolveNextVersionCode(currentVersionCode: number, todayPrefix: string): number { + const currentRaw = currentVersionCode.toString(); + let nextSuffix = 0; + + if (currentRaw.startsWith(todayPrefix)) { + const suffixRaw = currentRaw.slice(todayPrefix.length); + nextSuffix = (suffixRaw ? 
Number.parseInt(suffixRaw, 10) : 0) + 1; + } + + if (!Number.isInteger(nextSuffix) || nextSuffix < 0 || nextSuffix > 99) { + throw new Error( + `Can't auto-bump Android versionCode for ${todayPrefix}: next suffix ${nextSuffix} is invalid`, + ); + } + + return Number.parseInt(`${todayPrefix}${nextSuffix.toString().padStart(2, "0")}`, 10); +} + +function resolveNextVersion(buildGradleText: string, date: Date): VersionState { + const { versionCodeMatch } = parseVersionMatches(buildGradleText); + const currentVersionCode = Number.parseInt(versionCodeMatch[1] ?? "", 10); + if (!Number.isInteger(currentVersionCode)) { + throw new Error(`Invalid Android versionCode in ${buildGradlePath}`); + } + + const versionName = formatVersionName(date); + const versionCode = resolveNextVersionCode(currentVersionCode, formatVersionCodePrefix(date)); + return { versionName, versionCode }; +} + +function updateBuildGradleVersions(buildGradleText: string, nextVersion: VersionState): string { + return buildGradleText + .replace(/versionCode = \d+/, `versionCode = ${nextVersion.versionCode}`) + .replace(/versionName = "[^"]+"/, `versionName = "${nextVersion.versionName}"`); +} + +async function sha256Hex(path: string): Promise { + const buffer = await Bun.file(path).arrayBuffer(); + const digest = await crypto.subtle.digest("SHA-256", buffer); + return Array.from(new Uint8Array(digest), (byte) => byte.toString(16).padStart(2, "0")).join(""); +} + +async function verifyBundleSignature(path: string): Promise { + await $`jarsigner -verify ${path}`.quiet(); +} + +async function main() { + const buildGradleFile = Bun.file(buildGradlePath); + const originalText = await buildGradleFile.text(); + const nextVersion = resolveNextVersion(originalText, new Date()); + const updatedText = updateBuildGradleVersions(originalText, nextVersion); + + if (updatedText === originalText) { + throw new Error("Android version bump produced no change"); + } + + console.log(`Android versionName -> 
${nextVersion.versionName}`); + console.log(`Android versionCode -> ${nextVersion.versionCode}`); + + await Bun.write(buildGradlePath, updatedText); + + try { + await $`./gradlew :app:bundleRelease`.cwd(androidDir); + } catch (error) { + await Bun.write(buildGradlePath, originalText); + throw error; + } + + const bundleFile = Bun.file(bundlePath); + if (!(await bundleFile.exists())) { + throw new Error(`Signed bundle missing at ${bundlePath}`); + } + + await verifyBundleSignature(bundlePath); + const hash = await sha256Hex(bundlePath); + + console.log(`Signed AAB: ${bundlePath}`); + console.log(`SHA-256: ${hash}`); +} + +await main(); diff --git a/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift b/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift index c7d9d0928e1..a36e58db1d8 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift @@ -45,8 +45,8 @@ enum ExecApprovalEvaluator { let skillAllow: Bool if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty { - let bins = await SkillBinsCache.shared.currentBins() - skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) } + let bins = await SkillBinsCache.shared.currentTrust() + skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins) } else { skillAllow = false } @@ -65,4 +65,26 @@ enum ExecApprovalEvaluator { allowlistMatch: allowlistSatisfied ? 
allowlistMatches.first : nil, skillAllow: skillAllow) } + + static func isSkillAutoAllowed( + _ resolutions: [ExecCommandResolution], + trustedBinsByName: [String: Set]) -> Bool + { + guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false } + return resolutions.allSatisfy { resolution in + guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName), + let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath) + else { + return false + } + return trustedBinsByName[executableName]?.contains(resolvedPath) == true + } + } + + static func _testIsSkillAutoAllowed( + _ resolutions: [ExecCommandResolution], + trustedBinsByName: [String: Set]) -> Bool + { + self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName) + } } diff --git a/apps/macos/Sources/OpenClaw/ExecApprovals.swift b/apps/macos/Sources/OpenClaw/ExecApprovals.swift index ba49b37cd9f..141da33ad48 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovals.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovals.swift @@ -370,6 +370,17 @@ enum ExecApprovalsStore { static func resolve(agentId: String?) -> ExecApprovalsResolved { let file = self.ensureFile() + return self.resolveFromFile(file, agentId: agentId) + } + + /// Read-only resolve: loads file without writing (no ensureFile side effects). + /// Safe to call from background threads / off MainActor. + static func resolveReadOnly(agentId: String?) -> ExecApprovalsResolved { + let file = self.loadFile() + return self.resolveFromFile(file, agentId: agentId) + } + + private static func resolveFromFile(_ file: ExecApprovalsFile, agentId: String?) -> ExecApprovalsResolved { let defaults = file.defaults ?? ExecApprovalsDefaults() let resolvedDefaults = ExecApprovalsResolvedDefaults( security: defaults.security ?? 
self.defaultSecurity, @@ -777,6 +788,7 @@ actor SkillBinsCache { static let shared = SkillBinsCache() private var bins: Set = [] + private var trustByName: [String: Set] = [:] private var lastRefresh: Date? private let refreshInterval: TimeInterval = 90 @@ -787,27 +799,90 @@ actor SkillBinsCache { return self.bins } + func currentTrust(force: Bool = false) async -> [String: Set] { + if force || self.isStale() { + await self.refresh() + } + return self.trustByName + } + func refresh() async { do { let report = try await GatewayConnection.shared.skillsStatus() - var next = Set() - for skill in report.skills { - for bin in skill.requirements.bins { - let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines) - if !trimmed.isEmpty { next.insert(trimmed) } - } - } - self.bins = next + let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths()) + self.bins = trust.names + self.trustByName = trust.pathsByName self.lastRefresh = Date() } catch { if self.lastRefresh == nil { self.bins = [] + self.trustByName = [:] } } } + static func normalizeSkillBinName(_ value: String) -> String? { + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased() + return trimmed.isEmpty ? nil : trimmed + } + + static func normalizeResolvedPath(_ value: String?) -> String? { + let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" + guard !trimmed.isEmpty else { return nil } + return URL(fileURLWithPath: trimmed).standardizedFileURL.path + } + + static func buildTrustIndex( + report: SkillsStatusReport, + searchPaths: [String]) -> SkillBinTrustIndex + { + var names = Set() + var pathsByName: [String: Set] = [:] + + for skill in report.skills { + for bin in skill.requirements.bins { + let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { continue } + names.insert(trimmed) + + guard let name = self.normalizeSkillBinName(trimmed), + let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths), + let normalizedPath = self.normalizeResolvedPath(resolvedPath) + else { + continue + } + + var paths = pathsByName[name] ?? Set() + paths.insert(normalizedPath) + pathsByName[name] = paths + } + } + + return SkillBinTrustIndex(names: names, pathsByName: pathsByName) + } + + private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? { + let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin + if expanded.contains("/") || expanded.contains("\\") { + return FileManager().isExecutableFile(atPath: expanded) ? 
expanded : nil + } + return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths) + } + private func isStale() -> Bool { guard let lastRefresh else { return true } return Date().timeIntervalSince(lastRefresh) > self.refreshInterval } + + static func _testBuildTrustIndex( + report: SkillsStatusReport, + searchPaths: [String]) -> SkillBinTrustIndex + { + self.buildTrustIndex(report: report, searchPaths: searchPaths) + } +} + +struct SkillBinTrustIndex { + let names: Set + let pathsByName: [String: Set] } diff --git a/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift b/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift index 379e8c0f559..08e60b84d2b 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift @@ -43,7 +43,33 @@ final class ExecApprovalsGatewayPrompter { do { let data = try JSONEncoder().encode(payload) let request = try JSONDecoder().decode(GatewayApprovalRequest.self, from: data) - guard self.shouldPresent(request: request) else { return } + let presentation = self.shouldPresent(request: request) + guard presentation.shouldAsk else { + // Ask policy says no prompt needed – resolve based on security policy + let decision: ExecApprovalDecision = presentation.security == .full ? 
.allowOnce : .deny + try await GatewayConnection.shared.requestVoid( + method: .execApprovalResolve, + params: [ + "id": AnyCodable(request.id), + "decision": AnyCodable(decision.rawValue), + ], + timeoutMs: 10000) + return + } + guard presentation.canPresent else { + let decision = Self.fallbackDecision( + request: request.request, + askFallback: presentation.askFallback, + allowlist: presentation.allowlist) + try await GatewayConnection.shared.requestVoid( + method: .execApprovalResolve, + params: [ + "id": AnyCodable(request.id), + "decision": AnyCodable(decision.rawValue), + ], + timeoutMs: 10000) + return + } let decision = ExecApprovalsPromptPresenter.prompt(request.request) try await GatewayConnection.shared.requestVoid( method: .execApprovalResolve, @@ -57,16 +83,89 @@ final class ExecApprovalsGatewayPrompter { } } - private func shouldPresent(request: GatewayApprovalRequest) -> Bool { + /// Whether the ask policy requires prompting the user. + /// Note: this only determines if a prompt is shown, not whether the action is allowed. + /// The security policy (full/deny/allowlist) decides the actual outcome. + private static func shouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool { + switch ask { + case .always: + return true + case .onMiss: + return security == .allowlist + case .off: + return false + } + } + + struct PresentationDecision { + /// Whether the ask policy requires prompting the user (not whether the action is allowed). + var shouldAsk: Bool + /// Whether the prompt can actually be shown (session match, recent activity, etc.). + var canPresent: Bool + /// The resolved security policy, used to determine allow/deny when no prompt is shown. + var security: ExecSecurity + /// Fallback security policy when a prompt is needed but can't be presented. 
+ var askFallback: ExecSecurity + var allowlist: [ExecAllowlistEntry] + } + + private func shouldPresent(request: GatewayApprovalRequest) -> PresentationDecision { let mode = AppStateStore.shared.connectionMode let activeSession = WebChatManager.shared.activeSessionKey?.trimmingCharacters(in: .whitespacesAndNewlines) let requestSession = request.request.sessionKey?.trimmingCharacters(in: .whitespacesAndNewlines) - return Self.shouldPresent( + + // Read-only resolve to avoid disk writes on the MainActor + let approvals = ExecApprovalsStore.resolveReadOnly(agentId: request.request.agentId) + let security = approvals.agent.security + let ask = approvals.agent.ask + + let shouldAsk = Self.shouldAsk(security: security, ask: ask) + + let canPresent = shouldAsk && Self.shouldPresent( mode: mode, activeSession: activeSession, requestSession: requestSession, lastInputSeconds: Self.lastInputSeconds(), thresholdSeconds: 120) + + return PresentationDecision( + shouldAsk: shouldAsk, + canPresent: canPresent, + security: security, + askFallback: approvals.agent.askFallback, + allowlist: approvals.allowlist) + } + + private static func fallbackDecision( + request: ExecApprovalPromptRequest, + askFallback: ExecSecurity, + allowlist: [ExecAllowlistEntry]) -> ExecApprovalDecision + { + guard askFallback == .allowlist else { + return askFallback == .full ? .allowOnce : .deny + } + let resolution = self.fallbackResolution(for: request) + let match = ExecAllowlistMatcher.match(entries: allowlist, resolution: resolution) + return match == nil ? .deny : .allowOnce + } + + private static func fallbackResolution(for request: ExecApprovalPromptRequest) -> ExecCommandResolution? { + let resolvedPath = request.resolvedPath?.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedResolvedPath = (resolvedPath?.isEmpty == false) ? resolvedPath : nil + let rawExecutable = self.firstToken(from: request.command) ?? trimmedResolvedPath ?? 
"" + guard !rawExecutable.isEmpty || trimmedResolvedPath != nil else { return nil } + let executableName = trimmedResolvedPath.map { URL(fileURLWithPath: $0).lastPathComponent } ?? rawExecutable + return ExecCommandResolution( + rawExecutable: rawExecutable, + resolvedPath: trimmedResolvedPath, + executableName: executableName, + cwd: request.cwd) + } + + private static func firstToken(from command: String) -> String? { + let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return nil } + return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init) } private static func shouldPresent( @@ -117,5 +216,29 @@ extension ExecApprovalsGatewayPrompter { lastInputSeconds: lastInputSeconds, thresholdSeconds: thresholdSeconds) } + + static func _testShouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool { + self.shouldAsk(security: security, ask: ask) + } + + static func _testFallbackDecision( + command: String, + resolvedPath: String?, + askFallback: ExecSecurity, + allowlistPatterns: [String]) -> ExecApprovalDecision + { + self.fallbackDecision( + request: ExecApprovalPromptRequest( + command: command, + cwd: nil, + host: nil, + security: nil, + ask: nil, + agentId: nil, + resolvedPath: resolvedPath, + sessionKey: nil), + askFallback: askFallback, + allowlist: allowlistPatterns.map { ExecAllowlistEntry(pattern: $0) }) + } } #endif diff --git a/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift b/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift index 91a22153f3c..f89293a81aa 100644 --- a/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift +++ b/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift @@ -37,8 +37,7 @@ struct ExecCommandResolution { var resolutions: [ExecCommandResolution] = [] resolutions.reserveCapacity(segments.count) for segment in segments { - guard let token = self.parseFirstToken(segment), - let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: 
env) + guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env) else { return [] } @@ -88,6 +87,20 @@ struct ExecCommandResolution { cwd: cwd) } + private static func resolveShellSegmentExecutable( + _ segment: String, + cwd: String?, + env: [String: String]?) -> ExecCommandResolution? + { + let tokens = self.tokenizeShellWords(segment) + guard !tokens.isEmpty else { return nil } + let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens) + guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else { + return nil + } + return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env) + } + private static func parseFirstToken(_ command: String) -> String? { let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) guard !trimmed.isEmpty else { return nil } @@ -102,6 +115,59 @@ struct ExecCommandResolution { return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init) } + private static func tokenizeShellWords(_ command: String) -> [String] { + let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return [] } + + var tokens: [String] = [] + var current = "" + var inSingle = false + var inDouble = false + var escaped = false + + func appendCurrent() { + guard !current.isEmpty else { return } + tokens.append(current) + current.removeAll(keepingCapacity: true) + } + + for ch in trimmed { + if escaped { + current.append(ch) + escaped = false + continue + } + + if ch == "\\", !inSingle { + escaped = true + continue + } + + if ch == "'", !inDouble { + inSingle.toggle() + continue + } + + if ch == "\"", !inSingle { + inDouble.toggle() + continue + } + + if ch.isWhitespace, !inSingle, !inDouble { + appendCurrent() + continue + } + + current.append(ch) + } + + if escaped { + current.append("\\") + } + appendCurrent() + return tokens + } + private enum ShellTokenContext { case unquoted case 
doubleQuoted @@ -148,8 +214,14 @@ struct ExecCommandResolution { while idx < chars.count { let ch = chars[idx] let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil + let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle) if escaped { + if ch == "\n" { + escaped = false + idx += 1 + continue + } current.append(ch) escaped = false idx += 1 @@ -157,6 +229,10 @@ struct ExecCommandResolution { } if ch == "\\", !inSingle { + if next == "\n" { + idx += 2 + continue + } current.append(ch) escaped = true idx += 1 @@ -177,7 +253,7 @@ struct ExecCommandResolution { continue } - if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) { + if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) { // Fail closed on command/process substitution in allowlist mode, // including command substitution inside double-quoted shell strings. return nil @@ -201,6 +277,25 @@ struct ExecCommandResolution { return segments } + private static func nextShellSignificantCharacter( + chars: [Character], + after idx: Int, + inSingle: Bool) -> Character? + { + guard !inSingle else { + return idx + 1 < chars.count ? chars[idx + 1] : nil + } + var cursor = idx + 1 + while cursor < chars.count { + if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" { + cursor += 2 + continue + } + return chars[cursor] + } + return nil + } + private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool { let context: ShellTokenContext = inDouble ? 
.doubleQuoted : .unquoted guard let rules = self.shellFailClosedRules[context] else { diff --git a/apps/macos/Sources/OpenClaw/PortGuardian.swift b/apps/macos/Sources/OpenClaw/PortGuardian.swift index dfae5c3bcaa..7d8837415ff 100644 --- a/apps/macos/Sources/OpenClaw/PortGuardian.swift +++ b/apps/macos/Sources/OpenClaw/PortGuardian.swift @@ -47,7 +47,7 @@ actor PortGuardian { let listeners = await self.listeners(on: port) guard !listeners.isEmpty else { continue } for listener in listeners { - if self.isExpected(listener, port: port, mode: mode) { + if Self.isExpected(listener, port: port, mode: mode) { let message = """ port \(port) already served by expected \(listener.command) (pid \(listener.pid)) — keeping @@ -55,6 +55,14 @@ actor PortGuardian { self.logger.info("\(message, privacy: .public)") continue } + if mode == .remote { + let message = """ + port \(port) held by \(listener.command) + (pid \(listener.pid)) in remote mode — not killing + """ + self.logger.warning(message) + continue + } let killed = await self.kill(listener.pid) if killed { let message = """ @@ -271,8 +279,8 @@ actor PortGuardian { switch mode { case .remote: - expectedDesc = "SSH tunnel to remote gateway" - okPredicate = { $0.command.lowercased().contains("ssh") } + expectedDesc = "Remote gateway (SSH tunnel, Docker, or direct)" + okPredicate = { _ in true } case .local: expectedDesc = "Gateway websocket (node/tsx)" okPredicate = { listener in @@ -352,13 +360,12 @@ actor PortGuardian { return sigkill.ok } - private func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool { + private static func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool { let cmd = listener.command.lowercased() let full = listener.fullCommand.lowercased() switch mode { case .remote: - // Remote mode expects an SSH tunnel for the gateway WebSocket port. 
- if port == GatewayEnvironment.gatewayPort() { return cmd.contains("ssh") } + if port == GatewayEnvironment.gatewayPort() { return true } return false case .local: // The gateway daemon may listen as `openclaw` or as its runtime (`node`, `bun`, etc). @@ -406,6 +413,16 @@ extension PortGuardian { self.parseListeners(from: text).map { ($0.pid, $0.command, $0.fullCommand, $0.user) } } + static func _testIsExpected( + command: String, + fullCommand: String, + port: Int, + mode: AppState.ConnectionMode) -> Bool + { + let listener = Listener(pid: 0, command: command, fullCommand: fullCommand, user: nil) + return Self.isExpected(listener, port: port, mode: mode) + } + static func _testBuildReport( port: Int, mode: AppState.ConnectionMode, diff --git a/apps/macos/Sources/OpenClaw/RuntimeLocator.swift b/apps/macos/Sources/OpenClaw/RuntimeLocator.swift index 3112f57879b..6f1ef2b723d 100644 --- a/apps/macos/Sources/OpenClaw/RuntimeLocator.swift +++ b/apps/macos/Sources/OpenClaw/RuntimeLocator.swift @@ -54,7 +54,7 @@ enum RuntimeResolutionError: Error { enum RuntimeLocator { private static let logger = Logger(subsystem: "ai.openclaw", category: "runtime") - private static let minNode = RuntimeVersion(major: 22, minor: 0, patch: 0) + private static let minNode = RuntimeVersion(major: 22, minor: 16, patch: 0) static func resolve( searchPaths: [String] = CommandResolver.preferredPaths()) -> Result @@ -91,7 +91,7 @@ enum RuntimeLocator { switch error { case let .notFound(searchPaths): [ - "openclaw needs Node >=22.0.0 but found no runtime.", + "openclaw needs Node >=22.16.0 but found no runtime.", "PATH searched: \(searchPaths.joined(separator: ":"))", "Install Node: https://nodejs.org/en/download", ].joined(separator: "\n") @@ -105,7 +105,7 @@ enum RuntimeLocator { [ "Could not parse \(kind.rawValue) version output \"\(raw)\" from \(path).", "PATH searched: \(searchPaths.joined(separator: ":"))", - "Try reinstalling or pinning a supported version (Node >=22.0.0).", + "Try 
reinstalling or pinning a supported version (Node >=22.16.0).", ].joined(separator: "\n") } } diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift index f12b8f717dc..fa92cc81ef5 100644 --- a/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift @@ -141,6 +141,26 @@ struct ExecAllowlistTests { #expect(resolutions.isEmpty) } + @Test func `resolve for allowlist fails closed on line-continued command substitution`() { + let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.isEmpty) + } + + @Test func `resolve for allowlist fails closed on chained line-continued command substitution`() { + let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.isEmpty) + } + @Test func `resolve for allowlist fails closed on quoted backticks`() { let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""] let resolutions = ExecCommandResolution.resolveForAllowlist( @@ -208,6 +228,30 @@ struct ExecAllowlistTests { #expect(resolutions[1].executableName == "touch") } + @Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() { + let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: 
"env /usr/bin/touch /tmp/openclaw-allowlist-test", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.count == 1) + #expect(resolutions[0].resolvedPath == "/usr/bin/touch") + #expect(resolutions[0].executableName == "touch") + } + + @Test func `resolve for allowlist unwraps env assignments inside shell segments`() { + let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.count == 1) + #expect(resolutions[0].resolvedPath == "/usr/bin/touch") + #expect(resolutions[0].executableName == "touch") + } + @Test func `resolve for allowlist unwraps env to effective direct executable`() { let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"] let resolutions = ExecCommandResolution.resolveForAllowlist( diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift index cd4e234ed66..03b17b42ab2 100644 --- a/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift @@ -52,4 +52,51 @@ struct ExecApprovalsGatewayPrompterTests { lastInputSeconds: 400) #expect(!remote) } + + // MARK: - shouldAsk + + @Test func askAlwaysPromptsRegardlessOfSecurity() { + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .always)) + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .always)) + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .always)) + } + + @Test func askOnMissPromptsOnlyForAllowlist() { + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .onMiss)) + 
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .onMiss)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .onMiss)) + } + + @Test func askOffNeverPrompts() { + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .off)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .off)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .off)) + } + + @Test func fallbackAllowlistAllowsMatchingResolvedPath() { + let decision = ExecApprovalsGatewayPrompter._testFallbackDecision( + command: "git status", + resolvedPath: "/usr/bin/git", + askFallback: .allowlist, + allowlistPatterns: ["/usr/bin/git"]) + #expect(decision == .allowOnce) + } + + @Test func fallbackAllowlistDeniesAllowlistMiss() { + let decision = ExecApprovalsGatewayPrompter._testFallbackDecision( + command: "git status", + resolvedPath: "/usr/bin/git", + askFallback: .allowlist, + allowlistPatterns: ["/usr/bin/rg"]) + #expect(decision == .deny) + } + + @Test func fallbackFullAllowsWhenPromptCannotBeShown() { + let decision = ExecApprovalsGatewayPrompter._testFallbackDecision( + command: "git status", + resolvedPath: "/usr/bin/git", + askFallback: .full, + allowlistPatterns: []) + #expect(decision == .allowOnce) + } } diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift new file mode 100644 index 00000000000..779b59a3499 --- /dev/null +++ b/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift @@ -0,0 +1,90 @@ +import Foundation +import Testing +@testable import OpenClaw + +struct ExecSkillBinTrustTests { + @Test func `build trust index resolves skill bin paths`() throws { + let fixture = try Self.makeExecutable(named: "jq") + defer { try? 
FileManager.default.removeItem(at: fixture.root) } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [fixture.root.path]) + + #expect(trust.names == ["jq"]) + #expect(trust.pathsByName["jq"] == [fixture.path]) + } + + @Test func `skill auto allow accepts trusted resolved skill bin path`() throws { + let fixture = try Self.makeExecutable(named: "jq") + defer { try? FileManager.default.removeItem(at: fixture.root) } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [fixture.root.path]) + let resolution = ExecCommandResolution( + rawExecutable: "jq", + resolvedPath: fixture.path, + executableName: "jq", + cwd: nil) + + #expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName)) + } + + @Test func `skill auto allow rejects same basename at different path`() throws { + let trusted = try Self.makeExecutable(named: "jq") + let untrusted = try Self.makeExecutable(named: "jq") + defer { + try? FileManager.default.removeItem(at: trusted.root) + try? 
FileManager.default.removeItem(at: untrusted.root) + } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [trusted.root.path]) + let resolution = ExecCommandResolution( + rawExecutable: "jq", + resolvedPath: untrusted.path, + executableName: "jq", + cwd: nil) + + #expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName)) + } + + private static func makeExecutable(named name: String) throws -> (root: URL, path: String) { + let root = FileManager.default.temporaryDirectory + .appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true) + try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true) + let file = root.appendingPathComponent(name) + try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8) + try FileManager.default.setAttributes( + [.posixPermissions: NSNumber(value: Int16(0o755))], + ofItemAtPath: file.path) + return (root, file.path) + } + + private static func makeReport(bins: [String]) -> SkillsStatusReport { + SkillsStatusReport( + workspaceDir: "/tmp/workspace", + managedSkillsDir: "/tmp/skills", + skills: [ + SkillStatus( + name: "test-skill", + description: "test", + source: "local", + filePath: "/tmp/skills/test-skill/SKILL.md", + baseDir: "/tmp/skills/test-skill", + skillKey: "test-skill", + primaryEnv: nil, + emoji: nil, + homepage: nil, + always: false, + disabled: false, + eligible: true, + requirements: SkillRequirements(bins: bins, env: [], config: []), + missing: SkillMissing(bins: [], env: [], config: []), + configChecks: [], + install: []) + ]) + } +} diff --git a/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift b/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift index c8928978f74..a37135ff490 100644 --- a/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift @@ 
-139,6 +139,54 @@ struct LowCoverageHelperTests { #expect(emptyReport.summary.contains("Nothing is listening")) } + @Test func `port guardian remote mode does not kill docker`() { + #expect(PortGuardian._testIsExpected( + command: "com.docker.backend", + fullCommand: "com.docker.backend", + port: 18789, mode: .remote) == true) + + #expect(PortGuardian._testIsExpected( + command: "ssh", + fullCommand: "ssh -L 18789:localhost:18789 user@host", + port: 18789, mode: .remote) == true) + + #expect(PortGuardian._testIsExpected( + command: "podman", + fullCommand: "podman", + port: 18789, mode: .remote) == true) + } + + @Test func `port guardian local mode still rejects unexpected`() { + #expect(PortGuardian._testIsExpected( + command: "com.docker.backend", + fullCommand: "com.docker.backend", + port: 18789, mode: .local) == false) + + #expect(PortGuardian._testIsExpected( + command: "python", + fullCommand: "python server.py", + port: 18789, mode: .local) == false) + + #expect(PortGuardian._testIsExpected( + command: "node", + fullCommand: "node /path/to/gateway-daemon", + port: 18789, mode: .local) == true) + } + + @Test func `port guardian remote mode report accepts any listener`() { + let dockerReport = PortGuardian._testBuildReport( + port: 18789, mode: .remote, + listeners: [(pid: 99, command: "com.docker.backend", + fullCommand: "com.docker.backend", user: "me")]) + #expect(dockerReport.offenders.isEmpty) + + let localDockerReport = PortGuardian._testBuildReport( + port: 18789, mode: .local, + listeners: [(pid: 99, command: "com.docker.backend", + fullCommand: "com.docker.backend", user: "me")]) + #expect(!localDockerReport.offenders.isEmpty) + } + @Test @MainActor func `canvas scheme handler resolves files and errors`() throws { let root = FileManager().temporaryDirectory .appendingPathComponent("canvas-\(UUID().uuidString)", isDirectory: true) diff --git a/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift 
b/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift index 990c033445f..782dbd77212 100644 --- a/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift @@ -16,7 +16,7 @@ struct RuntimeLocatorTests { @Test func `resolve succeeds with valid node`() throws { let script = """ #!/bin/sh - echo v22.5.0 + echo v22.16.0 """ let node = try self.makeTempExecutable(contents: script) let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path]) @@ -25,7 +25,23 @@ struct RuntimeLocatorTests { return } #expect(res.path == node.path) - #expect(res.version == RuntimeVersion(major: 22, minor: 5, patch: 0)) + #expect(res.version == RuntimeVersion(major: 22, minor: 16, patch: 0)) + } + + @Test func `resolve fails on boundary below minimum`() throws { + let script = """ + #!/bin/sh + echo v22.15.9 + """ + let node = try self.makeTempExecutable(contents: script) + let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path]) + guard case let .failure(.unsupported(_, found, required, path, _)) = result else { + Issue.record("Expected unsupported error, got \(result)") + return + } + #expect(found == RuntimeVersion(major: 22, minor: 15, patch: 9)) + #expect(required == RuntimeVersion(major: 22, minor: 16, patch: 0)) + #expect(path == node.path) } @Test func `resolve fails when too old`() throws { @@ -60,7 +76,17 @@ struct RuntimeLocatorTests { @Test func `describe failure includes paths`() { let msg = RuntimeLocator.describeFailure(.notFound(searchPaths: ["/tmp/a", "/tmp/b"])) + #expect(msg.contains("Node >=22.16.0")) #expect(msg.contains("PATH searched: /tmp/a:/tmp/b")) + + let parseMsg = RuntimeLocator.describeFailure( + .versionParse( + kind: .node, + raw: "garbage", + path: "/usr/local/bin/node", + searchPaths: ["/usr/local/bin"], + )) + #expect(parseMsg.contains("Node >=22.16.0")) } @Test func `runtime version parses with leading V and metadata`() { 
diff --git a/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift b/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift index eac7ceea37d..fcf3f3b1158 100644 --- a/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift @@ -74,4 +74,22 @@ struct VoiceWakeRuntimeTests { let config = WakeWordGateConfig(triggers: ["openclaw"], minPostTriggerGap: 0.3) #expect(WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command == "do thing") } + + @Test func `gate command text handles foreign string ranges`() { + let transcript = "hey openclaw do thing" + let other = "do thing" + let foreignRange = other.range(of: "do") + let segments = [ + WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")), + WakeWordSegment(text: "openclaw", start: 0.2, duration: 0.1, range: transcript.range(of: "openclaw")), + WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange), + WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil), + ] + + #expect( + WakeWordGate.commandText( + transcript: transcript, + segments: segments, + triggerEndTime: 0.3) == "do thing") + } } diff --git a/docs/brave-search.md b/docs/brave-search.md index a8bba5c3e91..4a541690431 100644 --- a/docs/brave-search.md +++ b/docs/brave-search.md @@ -73,7 +73,7 @@ await web_search({ ## Notes - OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits. -- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. 
+- Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. - The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service). - Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`). diff --git a/docs/channels/slack.md b/docs/channels/slack.md index 7fe44cc611b..aa9127ea630 100644 --- a/docs/channels/slack.md +++ b/docs/channels/slack.md @@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. For wr - if encoded option values exceed Slack limits, the flow falls back to buttons - For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value. +## Interactive replies + +Slack can render agent-authored interactive reply controls, but this feature is disabled by default. + +Enable it globally: + +```json5 +{ + channels: { + slack: { + capabilities: { + interactiveReplies: true, + }, + }, + }, +} +``` + +Or enable it for one Slack account only: + +```json5 +{ + channels: { + slack: { + accounts: { + ops: { + capabilities: { + interactiveReplies: true, + }, + }, + }, + }, + }, +} +``` + +When enabled, agents can emit Slack-only reply directives: + +- `[[slack_buttons: Approve:approve, Reject:reject]]` +- `[[slack_select: Choose a target | Canary:canary, Production:production]]` + +These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path. + +Notes: + +- This is Slack-specific UI. 
Other channels do not translate Slack Block Kit directives into their own button systems. +- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values. +- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload. + Default slash command settings: - `enabled: false` diff --git a/docs/ci.md b/docs/ci.md index 16a7e670964..e8710b87cb1 100644 --- a/docs/ci.md +++ b/docs/ci.md @@ -9,32 +9,32 @@ read_when: # CI Pipeline -The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed. +The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed. ## Job Overview -| Job | Purpose | When it runs | -| ----------------- | ------------------------------------------------------- | ------------------------------------------------- | -| `docs-scope` | Detect docs-only changes | Always | -| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs | -| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes | -| `check-docs` | Markdown lint + broken link check | Docs changed | -| `code-analysis` | LOC threshold check (1000 lines) | PRs only | -| `secrets` | Detect leaked secrets | Always | -| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes | -| `release-check` | Validate npm pack contents | After build | -| `checks` | Node/Bun tests + protocol check | Non-docs, node changes | -| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | -| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | -| `android` | Gradle build + tests | Non-docs, android changes | +| Job | Purpose | When it runs | +| ----------------- | 
------------------------------------------------------- | ---------------------------------- | +| `docs-scope` | Detect docs-only changes | Always | +| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes | +| `check` | TypeScript types, lint, format | Non-docs, node changes | +| `check-docs` | Markdown lint + broken link check | Docs changed | +| `secrets` | Detect leaked secrets | Always | +| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes | +| `release-check` | Validate npm pack contents | Pushes to `main` after build | +| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes | +| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes | +| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | +| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | +| `android` | Gradle build + tests | Non-docs, android changes | ## Fail-Fast Order Jobs are ordered so cheap checks fail before expensive ones run: -1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min) -2. `build-artifacts` (blocked on above) -3. `checks`, `checks-windows`, `macos`, `android` (blocked on build) +1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first) +2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android` +3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22` Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`. 
diff --git a/docs/cli/browser.md b/docs/cli/browser.md index 8e0ddad92ef..f9ddc151717 100644 --- a/docs/cli/browser.md +++ b/docs/cli/browser.md @@ -27,7 +27,7 @@ Related: ## Quick start (local) ```bash -openclaw browser --browser-profile chrome tabs +openclaw browser profiles openclaw browser --browser-profile openclaw start openclaw browser --browser-profile openclaw open https://example.com openclaw browser --browser-profile openclaw snapshot @@ -38,7 +38,8 @@ openclaw browser --browser-profile openclaw snapshot Profiles are named browser routing configs. In practice: - `openclaw`: launches/attaches to a dedicated OpenClaw-managed Chrome instance (isolated user data dir). -- `chrome`: controls your existing Chrome tab(s) via the Chrome extension relay. +- `user`: controls your existing signed-in Chrome session via Chrome DevTools MCP. +- `chrome-relay`: controls your existing Chrome tab(s) via the Chrome extension relay. ```bash openclaw browser profiles diff --git a/docs/cli/gateway.md b/docs/cli/gateway.md index 95c20e3aa7c..96367774948 100644 --- a/docs/cli/gateway.md +++ b/docs/cli/gateway.md @@ -126,6 +126,23 @@ openclaw gateway probe openclaw gateway probe --json ``` +Interpretation: + +- `Reachable: yes` means at least one target accepted a WebSocket connect. +- `RPC: ok` means detail RPC calls (`health`/`status`/`system-presence`/`config.get`) also succeeded. +- `RPC: limited - missing scope: operator.read` means connect succeeded but detail RPC is scope-limited. This is reported as **degraded** reachability, not full failure. +- Exit code is non-zero only when no probed target is reachable. + +JSON notes (`--json`): + +- Top level: + - `ok`: at least one target is reachable. + - `degraded`: at least one target had scope-limited detail RPC. +- Per target (`targets[].connect`): + - `ok`: reachability after connect + degraded classification. + - `rpcOk`: full detail RPC success. + - `scopeLimited`: detail RPC failed due to missing operator scope. 
+ #### Remote over SSH (Mac app parity) The macOS app “Remote over SSH” mode uses a local port-forward so the remote gateway (which may be bound to loopback only) becomes reachable at `ws://127.0.0.1:`. diff --git a/docs/gateway/configuration-reference.md b/docs/gateway/configuration-reference.md index b4a697d5a5a..658a3084437 100644 --- a/docs/gateway/configuration-reference.md +++ b/docs/gateway/configuration-reference.md @@ -2342,7 +2342,7 @@ See [Plugins](/tools/plugin). browser: { enabled: true, evaluateEnabled: true, - defaultProfile: "chrome", + defaultProfile: "user", ssrfPolicy: { dangerouslyAllowPrivateNetwork: true, // default trusted-network mode // allowPrivateNetwork: true, // legacy alias diff --git a/docs/gateway/openresponses-http-api.md b/docs/gateway/openresponses-http-api.md index bcba166db9d..fa86f912ef5 100644 --- a/docs/gateway/openresponses-http-api.md +++ b/docs/gateway/openresponses-http-api.md @@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first. Under the hood, requests are executed as a normal Gateway agent run (same codepath as `openclaw agent`), so routing/permissions/config match your Gateway. -## Authentication +## Authentication, security, and routing -Uses the Gateway auth configuration. Send a bearer token: +Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api): -- `Authorization: Bearer ` +- use `Authorization: Bearer ` with the normal Gateway auth config +- treat the endpoint as full operator access for the gateway instance +- select agents with `model: "openclaw:"`, `model: "agent:"`, or `x-openclaw-agent-id` +- use `x-openclaw-session-key` for explicit session routing -Notes: - -- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`). -- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`). 
-- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`. - -## Security boundary (important) - -Treat this endpoint as a **full operator-access** surface for the gateway instance. - -- HTTP bearer auth here is not a narrow per-user scope model. -- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential. -- Requests run through the same control-plane agent path as trusted operator actions. -- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway. -- If the target agent policy allows sensitive tools, this endpoint can use them. -- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet. - -See [Security](/gateway/security) and [Remote access](/gateway/remote). - -## Choosing an agent - -No custom headers required: encode the agent id in the OpenResponses `model` field: - -- `model: "openclaw:"` (example: `"openclaw:main"`, `"openclaw:beta"`) -- `model: "agent:"` (alias) - -Or target a specific OpenClaw agent by header: - -- `x-openclaw-agent-id: ` (default: `main`) - -Advanced: - -- `x-openclaw-session-key: ` to fully control session routing. - -## Enabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `true`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: true }, - }, - }, - }, -} -``` - -## Disabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `false`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: false }, - }, - }, - }, -} -``` +Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`. 
## Session behavior diff --git a/docs/gateway/troubleshooting.md b/docs/gateway/troubleshooting.md index ebea28a6541..f5829454e57 100644 --- a/docs/gateway/troubleshooting.md +++ b/docs/gateway/troubleshooting.md @@ -289,7 +289,7 @@ Look for: - Valid browser executable path. - CDP profile reachability. -- Extension relay tab attachment for `profile="chrome"`. +- Extension relay tab attachment for `profile="chrome-relay"`. Common signatures: diff --git a/docs/help/testing.md b/docs/help/testing.md index db374bb03da..b2057e8a1da 100644 --- a/docs/help/testing.md +++ b/docs/help/testing.md @@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost): - No real keys required - Should be fast and stable - Pool note: - - OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards. - - On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`). + - OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards. + - On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there. - Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`). ### E2E (gateway smoke) diff --git a/docs/help/troubleshooting.md b/docs/help/troubleshooting.md index 951e1a480d7..a3988c4ea58 100644 --- a/docs/help/troubleshooting.md +++ b/docs/help/troubleshooting.md @@ -28,7 +28,7 @@ Good output in one line: - `openclaw status` → shows configured channels and no obvious auth errors. - `openclaw status --all` → full report is present and shareable. -- `openclaw gateway probe` → expected gateway target is reachable. +- `openclaw gateway probe` → expected gateway target is reachable (`Reachable: yes`). `RPC: limited - missing scope: operator.read` is degraded diagnostics, not a connect failure. 
- `openclaw gateway status` → `Runtime: running` and `RPC probe: ok`. - `openclaw doctor` → no blocking config/service errors. - `openclaw channels status --probe` → channels report `connected` or `ready`. diff --git a/docs/install/docker-vm-runtime.md b/docs/install/docker-vm-runtime.md new file mode 100644 index 00000000000..77436f44486 --- /dev/null +++ b/docs/install/docker-vm-runtime.md @@ -0,0 +1,138 @@ +--- +summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts" +read_when: + - You are deploying OpenClaw on a cloud VM with Docker + - You need the shared binary bake, persistence, and update flow +title: "Docker VM Runtime" +--- + +# Docker VM Runtime + +Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers. + +## Bake required binaries into the image + +Installing binaries inside a running container is a trap. +Anything installed at runtime will be lost on restart. + +All external binaries required by skills must be installed at image build time. + +The examples below show three common binaries only: + +- `gog` for Gmail access +- `goplaces` for Google Places +- `wacli` for WhatsApp + +These are examples, not a complete list. +You may install as many binaries as needed using the same pattern. + +If you add new skills later that depend on additional binaries, you must: + +1. Update the Dockerfile +2. Rebuild the image +3. 
Restart the containers + +**Example Dockerfile** + +```dockerfile +FROM node:24-bookworm + +RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* + +# Example binary 1: Gmail CLI +RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog + +# Example binary 2: Google Places CLI +RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces + +# Example binary 3: WhatsApp CLI +RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli + +# Add more binaries below using the same pattern + +WORKDIR /app +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ +COPY ui/package.json ./ui/package.json +COPY scripts ./scripts + +RUN corepack enable +RUN pnpm install --frozen-lockfile + +COPY . . +RUN pnpm build +RUN pnpm ui:install +RUN pnpm ui:build + +ENV NODE_ENV=production + +CMD ["node","dist/index.js"] +``` + +## Build and launch + +```bash +docker compose build +docker compose up -d openclaw-gateway +``` + +If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. +Use a larger machine class before retrying. + +Verify binaries: + +```bash +docker compose exec openclaw-gateway which gog +docker compose exec openclaw-gateway which goplaces +docker compose exec openclaw-gateway which wacli +``` + +Expected output: + +``` +/usr/local/bin/gog +/usr/local/bin/goplaces +/usr/local/bin/wacli +``` + +Verify Gateway: + +```bash +docker compose logs -f openclaw-gateway +``` + +Expected output: + +``` +[gateway] listening on ws://0.0.0.0:18789 +``` + +## What persists where + +OpenClaw runs in Docker, but Docker is not the source of truth. 
+All long-lived state must survive restarts, rebuilds, and reboots. + +| Component | Location | Persistence mechanism | Notes | +| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | +| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | +| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | +| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | +| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | +| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | +| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | +| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | +| Node runtime | Container filesystem | Docker image | Rebuilt every image build | +| OS packages | Container filesystem | Docker image | Do not install at runtime | +| Docker container | Ephemeral | Restartable | Safe to destroy | + +## Updates + +To update OpenClaw on the VM: + +```bash +git pull +docker compose build +docker compose up -d +``` diff --git a/docs/install/gcp.md b/docs/install/gcp.md index dfedfe4ba38..7ff4a00d087 100644 --- a/docs/install/gcp.md +++ b/docs/install/gcp.md @@ -281,77 +281,20 @@ services: --- -## 10) Bake required binaries into the image (critical) +## 10) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. - -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. 
-You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:24-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . -RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 11) Build and launch +## 11) GCP-specific launch notes -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. 
+On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing: @@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins If you changed the gateway port, replace `18789` with your configured port. -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 12) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - ---- - -## 13) Access from your laptop +## 12) Access from your laptop Create an SSH tunnel to forward the Gateway port: @@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list docker compose run --rm openclaw-cli devices approve ``` ---- - -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- - -## Updates - -To update OpenClaw on the VM: - -```bash -cd ~/openclaw -git pull -docker compose build -docker compose up -d -``` +Need the shared persistence and update reference again? +See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates). --- diff --git a/docs/install/hetzner.md b/docs/install/hetzner.md index 4c27840cee0..46bc76d6243 100644 --- a/docs/install/hetzner.md +++ b/docs/install/hetzner.md @@ -202,107 +202,20 @@ services: --- -## 7) Bake required binaries into the image (critical) +## 7) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. 
- -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. -You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:24-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . 
-RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 8) Build and launch +## 8) Hetzner-specific access -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 9) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - -From your laptop: +After the shared build and launch steps, tunnel from your laptop: ```bash ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP @@ -316,25 +229,7 @@ Paste your gateway token. --- -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- +The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where). ## Infrastructure as Code (Terraform) diff --git a/docs/platforms/android.md b/docs/platforms/android.md index 4df71b83e73..6bd5effb361 100644 --- a/docs/platforms/android.md +++ b/docs/platforms/android.md @@ -9,6 +9,8 @@ title: "Android App" # Android App (Node) +> **Note:** The Android app has not been publicly released yet. The source code is available in the [OpenClaw repository](https://github.com/openclaw/openclaw) under `apps/android`. You can build it yourself using Java 17 and the Android SDK (`./gradlew :app:assembleDebug`). See [apps/android/README.md](https://github.com/openclaw/openclaw/blob/main/apps/android/README.md) for build instructions. + ## Support snapshot - Role: companion node app (Android does not host the Gateway). 
diff --git a/docs/plugins/voice-call.md b/docs/plugins/voice-call.md index 17263ca0509..14198fdba36 100644 --- a/docs/plugins/voice-call.md +++ b/docs/plugins/voice-call.md @@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set: } ``` +`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin +normalizes the provider-supplied `From` value and compares it to `allowFrom`. +Webhook verification authenticates provider delivery and payload integrity, but +it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as +caller-ID filtering, not strong caller identity. + Auto-responses use the agent system. Tune with: - `responseModel` diff --git a/docs/reference/api-usage-costs.md b/docs/reference/api-usage-costs.md index baf4302ac0d..bbb1d90de87 100644 --- a/docs/reference/api-usage-costs.md +++ b/docs/reference/api-usage-costs.md @@ -85,8 +85,8 @@ See [Memory](/concepts/memory). - **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey` - **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey` -**Brave Search free credit:** Each Brave plan includes $5/month in renewing -free credit. The Search plan costs $5 per 1,000 requests, so the credit covers +**Brave Search free credit:** Each Brave plan includes \$5/month in renewing +free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 requests/month at no charge. Set your usage limit in the Brave dashboard to avoid unexpected charges. diff --git a/docs/reference/test.md b/docs/reference/test.md index 6d5c5535a83..378789f6d6e 100644 --- a/docs/reference/test.md +++ b/docs/reference/test.md @@ -11,7 +11,7 @@ title: "Tests" - `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don’t collide with a running instance. 
Use this when a prior gateway run left port 18789 occupied. - `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic. -- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. +- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. - `pnpm test`: runs the fast core unit lane by default for quick local feedback. - `pnpm test:channels`: runs channel-heavy suites. - `pnpm test:extensions`: runs extension/plugin suites. diff --git a/docs/reference/wizard.md b/docs/reference/wizard.md index 60e88fe4226..bbaebbdc84f 100644 --- a/docs/reference/wizard.md +++ b/docs/reference/wizard.md @@ -167,93 +167,8 @@ openclaw onboard --non-interactive \ `--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts. 
- - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice gemini-api-key \ - --gemini-api-key "$GEMINI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice zai-api-key \ - --zai-api-key "$ZAI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ai-gateway-api-key \ - --ai-gateway-api-key "$AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice cloudflare-ai-gateway-api-key \ - --cloudflare-ai-gateway-account-id "your-account-id" \ - --cloudflare-ai-gateway-gateway-id "your-gateway-id" \ - --cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice moonshot-api-key \ - --moonshot-api-key "$MOONSHOT_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice synthetic-api-key \ - --synthetic-api-key "$SYNTHETIC_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice opencode-zen \ - --opencode-zen-api-key "$OPENCODE_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog. - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ollama \ - --custom-model-id "qwen3.5:27b" \ - --accept-risk \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance. 
- - +Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples). +Use this reference page for flag semantics and step ordering. ### Add agent (non-interactive) diff --git a/docs/tools/browser-linux-troubleshooting.md b/docs/tools/browser-linux-troubleshooting.md index 01e6cbc3ff9..1ab51657044 100644 --- a/docs/tools/browser-linux-troubleshooting.md +++ b/docs/tools/browser-linux-troubleshooting.md @@ -123,7 +123,7 @@ curl -s http://127.0.0.1:18791/tabs ### Problem: "Chrome extension relay is running, but no tab is connected" -You’re using the `chrome` profile (extension relay). It expects the OpenClaw +You’re using the `chrome-relay` profile (extension relay). It expects the OpenClaw browser extension to be attached to a live tab. Fix options: @@ -135,5 +135,5 @@ Fix options: Notes: -- The `chrome` profile uses your **system default Chromium browser** when possible. +- The `chrome-relay` profile uses your **system default Chromium browser** when possible. - Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl`; only set those for remote CDP. diff --git a/docs/tools/browser-login.md b/docs/tools/browser-login.md index 910c21ca218..d570b3b2e87 100644 --- a/docs/tools/browser-login.md +++ b/docs/tools/browser-login.md @@ -20,6 +20,13 @@ Back to the main browser docs: [Browser](/tools/browser). OpenClaw controls a **dedicated Chrome profile** (named `openclaw`, orange‑tinted UI). This is separate from your daily browser profile. +For agent browser tool calls: + +- Default choice: the agent should use its isolated `openclaw` browser. +- Use `profile="user"` only when existing logged-in sessions matter and the user is at the computer to click/approve any attach prompt. +- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow. +- If you have multiple user-browser profiles, specify the profile explicitly instead of guessing. + Two easy ways to access it: 1. 
**Ask the agent to open the browser** and then log in yourself. diff --git a/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md b/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md index d63bb891c48..2e7844860aa 100644 --- a/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md +++ b/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md @@ -33,7 +33,7 @@ Choose this when: ### Option 2: Chrome extension relay -Use the built-in `chrome` profile plus the OpenClaw Chrome extension. +Use the built-in `chrome-relay` profile plus the OpenClaw Chrome extension. Choose this when: @@ -155,7 +155,7 @@ Example: { browser: { enabled: true, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", relayBindHost: "0.0.0.0", }, } @@ -197,7 +197,7 @@ openclaw browser tabs --browser-profile remote For the extension relay: ```bash -openclaw browser tabs --browser-profile chrome +openclaw browser tabs --browser-profile chrome-relay ``` Good result: diff --git a/docs/tools/browser.md b/docs/tools/browser.md index d632e713068..ebe352036c5 100644 --- a/docs/tools/browser.md +++ b/docs/tools/browser.md @@ -18,8 +18,8 @@ Beginner view: - Think of it as a **separate, agent-only browser**. - The `openclaw` profile does **not** touch your personal browser profile. - The agent can **open tabs, read pages, click, and type** in a safe lane. -- The default `chrome` profile uses the **system default Chromium browser** via the - extension relay; switch to `openclaw` for the isolated managed browser. +- The built-in `user` profile attaches to your real signed-in Chrome session; + `chrome-relay` is the explicit extension-relay profile. ## What you get @@ -43,11 +43,22 @@ openclaw browser --browser-profile openclaw snapshot If you get “Browser disabled”, enable it in config (see below) and restart the Gateway. 
-## Profiles: `openclaw` vs `chrome` +## Profiles: `openclaw` vs `user` vs `chrome-relay` - `openclaw`: managed, isolated browser (no extension required). -- `chrome`: extension relay to your **system browser** (requires the OpenClaw - extension to be attached to a tab). +- `user`: built-in Chrome MCP attach profile for your **real signed-in Chrome** + session. +- `chrome-relay`: extension relay to your **system browser** (requires the + OpenClaw extension to be attached to a tab). + +For agent browser tool calls: + +- Default: use the isolated `openclaw` browser. +- Prefer `profile="user"` when existing logged-in sessions matter and the user + is at the computer to click/approve any attach prompt. +- Use `profile="chrome-relay"` only when the user explicitly wants the Chrome + extension / toolbar-button attach flow. +- `profile` is the explicit override when you want a specific browser mode. Set `browser.defaultProfile: "openclaw"` if you want managed mode by default. @@ -68,7 +79,7 @@ Browser settings live in `~/.openclaw/openclaw.json`. // cdpUrl: "http://127.0.0.1:18792", // legacy single-profile override remoteCdpTimeoutMs: 1500, // remote CDP HTTP timeout (ms) remoteCdpHandshakeTimeoutMs: 3000, // remote CDP WebSocket handshake timeout (ms) - defaultProfile: "chrome", + defaultProfile: "openclaw", color: "#FF4500", headless: false, noSandbox: false, @@ -77,6 +88,16 @@ Browser settings live in `~/.openclaw/openclaw.json`. profiles: { openclaw: { cdpPort: 18800, color: "#FF4500" }, work: { cdpPort: 18801, color: "#0066CC" }, + user: { + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }, + "chrome-relay": { + driver: "extension", + cdpUrl: "http://127.0.0.1:18792", + color: "#00AA00", + }, remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" }, }, }, @@ -97,9 +118,11 @@ Notes: - `browser.ssrfPolicy.allowPrivateNetwork` remains supported as a legacy alias for compatibility. 
- `attachOnly: true` means “never launch a local browser; only attach if it is already running.” - `color` + per-profile `color` tint the browser UI so you can see which profile is active. -- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay. +- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "user"` to opt into the signed-in user browser, or `defaultProfile: "chrome-relay"` for the extension relay. - Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary. - Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP. +- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do + not set `cdpUrl` for that driver. ## Use Brave (or another Chromium-based browser) @@ -264,11 +287,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be: - **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port - **remote**: an explicit CDP URL (Chromium-based browser running elsewhere) - **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension +- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect Defaults: - The `openclaw` profile is auto-created if missing. -- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). +- The `chrome-relay` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). +- Existing-session profiles are opt-in; create them with `--driver existing-session`. - Local CDP ports allocate from **18800–18899** by default. - Deleting a profile moves its local data directory to Trash. @@ -311,8 +336,8 @@ openclaw browser extension install 2. 
Use it: -- CLI: `openclaw browser --browser-profile chrome tabs` -- Agent tool: `browser` with `profile="chrome"` +- CLI: `openclaw browser --browser-profile chrome-relay tabs` +- Agent tool: `browser` with `profile="chrome-relay"` Optional: if you want a different name or relay port, create your own profile: @@ -328,6 +353,81 @@ Notes: - This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions). - Detach by clicking the extension icon again. +- Agent use: prefer `profile="user"` for logged-in sites. Use `profile="chrome-relay"` + only when you specifically want the extension flow. The user must be present + to click the extension and attach the tab. + +## Chrome existing-session via MCP + +OpenClaw can also attach to a running Chrome profile through the official +Chrome DevTools MCP server. This reuses the tabs and login state already open in +that Chrome profile. + +Official background and setup references: + +- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp) + +Built-in profile: + +- `user` + +Optional: create your own custom existing-session profile if you want a +different name or color. + +Then in Chrome: + +1. Open `chrome://inspect/#remote-debugging` +2. Enable remote debugging +3. 
Keep Chrome running and approve the connection prompt when OpenClaw attaches + +Live attach smoke test: + +```bash +openclaw browser --browser-profile user start +openclaw browser --browser-profile user status +openclaw browser --browser-profile user tabs +openclaw browser --browser-profile user snapshot --format ai +``` + +What success looks like: + +- `status` shows `driver: existing-session` +- `status` shows `transport: chrome-mcp` +- `status` shows `running: true` +- `tabs` lists your already-open Chrome tabs +- `snapshot` returns refs from the selected live tab + +What to check if attach does not work: + +- Chrome is version `144+` +- remote debugging is enabled at `chrome://inspect/#remote-debugging` +- Chrome showed and you accepted the attach consent prompt + +Agent use: + +- Use `profile="user"` when you need the user’s logged-in browser state. +- If you use a custom existing-session profile, pass that explicit profile name. +- Prefer `profile="user"` over `profile="chrome-relay"` unless the user + explicitly wants the extension / attach-tab flow. +- Only choose this mode when the user is at the computer to approve the attach + prompt. +- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect` + +Notes: + +- This path is higher-risk than the isolated `openclaw` profile because it can + act inside your signed-in browser session. +- OpenClaw does not launch Chrome for this driver; it attaches to an existing + session only. +- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not + the legacy default-profile remote debugging port workflow. +- Existing-session screenshots support page captures and `--ref` element + captures from snapshots, but not CSS `--element` selectors. +- Existing-session `wait --url` supports exact, substring, and glob patterns + like other browser drivers. `wait --load networkidle` is not supported yet. 
+- Some features still require the extension relay or managed browser path, such + as PDF export and download interception. - Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated. WSL2 / cross-namespace example: @@ -337,7 +437,7 @@ WSL2 / cross-namespace example: browser: { enabled: true, relayBindHost: "0.0.0.0", - defaultProfile: "chrome", + defaultProfile: "chrome-relay", }, } ``` diff --git a/docs/tools/chrome-extension.md b/docs/tools/chrome-extension.md index ce4b271ae9c..91a6c1240f1 100644 --- a/docs/tools/chrome-extension.md +++ b/docs/tools/chrome-extension.md @@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs Attach/detach happens via a **single Chrome toolbar button**. +If you want Chrome’s official DevTools MCP attach flow instead of the OpenClaw +extension relay, use an `existing-session` browser profile instead. See +[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome’s own +setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your +browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp). + ## What it is (concept) There are three parts: @@ -55,7 +62,7 @@ After upgrading OpenClaw: ## Use it (set gateway token once) -OpenClaw ships with a built-in browser profile named `chrome` that targets the extension relay on the default port. +OpenClaw ships with a built-in browser profile named `chrome-relay` that targets the extension relay on the default port. 
Before first attach, open extension Options and set: @@ -64,8 +71,8 @@ Before first attach, open extension Options and set: Use it: -- CLI: `openclaw browser --browser-profile chrome tabs` -- Agent tool: `browser` with `profile="chrome"` +- CLI: `openclaw browser --browser-profile chrome-relay tabs` +- Agent tool: `browser` with `profile="chrome-relay"` If you want a different name or a different relay port, create your own profile: diff --git a/docs/tools/index.md b/docs/tools/index.md index 6552d6f9118..bdd9b78456f 100644 --- a/docs/tools/index.md +++ b/docs/tools/index.md @@ -316,7 +316,11 @@ Common parameters: Notes: - Requires `browser.enabled=true` (default is `true`; set `false` to disable). - All actions accept optional `profile` parameter for multi-instance support. -- When `profile` is omitted, uses `browser.defaultProfile` (defaults to "chrome"). +- Omit `profile` for the safe default: isolated OpenClaw-managed browser (`openclaw`). +- Use `profile="user"` for the real local host browser when existing logins/cookies matter and the user is present to click/approve any attach prompt. +- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow. +- `profile="user"` and `profile="chrome-relay"` are host-only; do not combine them with sandbox/node targets. +- When `profile` is omitted, uses `browser.defaultProfile` (defaults to `openclaw`). - Profile names: lowercase alphanumeric + hyphens only (max 64 chars). - Port range: 18800-18899 (~100 profiles max). - Remote profiles are attach-only (no start/stop/reset). diff --git a/docs/tools/web.md b/docs/tools/web.md index e77d046ce5b..a2aa1d37bfd 100644 --- a/docs/tools/web.md +++ b/docs/tools/web.md @@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi 2. In the dashboard, choose the **Search** plan and generate an API key. 3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment. 
-Each Brave plan includes **$5/month in free credit** (renewing). The Search -plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set +Each Brave plan includes **\$5/month in free credit** (renewing). The Search +plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans and pricing. diff --git a/extensions/acpx/src/ensure.test.ts b/extensions/acpx/src/ensure.test.ts index cae52f29f9b..c0bb5469b29 100644 --- a/extensions/acpx/src/ensure.test.ts +++ b/extensions/acpx/src/ensure.test.ts @@ -54,6 +54,49 @@ describe("acpx ensure", () => { } }); + function mockEnsureInstallFlow() { + spawnAndCollectMock + .mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: "added 1 package\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: `acpx ${ACPX_PINNED_VERSION}\n`, + stderr: "", + code: 0, + error: null, + }); + } + + function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) { + expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ + command: "npm", + args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + } + it("accepts the pinned acpx version", async () => { spawnAndCollectMock.mockResolvedValueOnce({ stdout: `acpx ${ACPX_PINNED_VERSION}\n`, @@ -177,25 +220,7 @@ describe("acpx ensure", () => { }); it("installs and 
verifies pinned acpx when precheck fails", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: "/plugin/node_modules/.bin/acpx", @@ -204,33 +229,11 @@ describe("acpx ensure", () => { }); expect(spawnAndCollectMock).toHaveBeenCalledTimes(3); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", - }); + expectEnsureInstallCalls(); }); it("threads stripProviderAuthEnvVars through version probes and install", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: "/plugin/node_modules/.bin/acpx", @@ -239,24 +242,7 @@ describe("acpx ensure", () => { stripProviderAuthEnvVars: true, }); - expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ - command: "/plugin/node_modules/.bin/acpx", - args: ["--version"], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); - expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ - command: "/plugin/node_modules/.bin/acpx", - args: ["--version"], - cwd: "/plugin", - 
stripProviderAuthEnvVars: true, - }); + expectEnsureInstallCalls(true); }); it("fails with actionable error when npm install fails", async () => { diff --git a/extensions/acpx/src/runtime-internals/process.test.ts b/extensions/acpx/src/runtime-internals/process.test.ts index ba6ad923d3b..ef0492308ae 100644 --- a/extensions/acpx/src/runtime-internals/process.test.ts +++ b/extensions/acpx/src/runtime-internals/process.test.ts @@ -254,6 +254,44 @@ describe("waitForExit", () => { }); describe("spawnAndCollect", () => { + type SpawnedEnvSnapshot = { + openai?: string; + github?: string; + hf?: string; + openclaw?: string; + shell?: string; + }; + + function stubProviderAuthEnv(env: Record) { + for (const [key, value] of Object.entries(env)) { + vi.stubEnv(key, value); + } + } + + async function collectSpawnedEnvSnapshot(options?: { + stripProviderAuthEnvVars?: boolean; + openAiEnvKey?: string; + githubEnvKey?: string; + hfEnvKey?: string; + }): Promise { + const openAiEnvKey = options?.openAiEnvKey ?? "OPENAI_API_KEY"; + const githubEnvKey = options?.githubEnvKey ?? "GITHUB_TOKEN"; + const hfEnvKey = options?.hfEnvKey ?? 
"HF_TOKEN"; + const result = await spawnAndCollect({ + command: process.execPath, + args: [ + "-e", + `process.stdout.write(JSON.stringify({openai:process.env.${openAiEnvKey},github:process.env.${githubEnvKey},hf:process.env.${hfEnvKey},openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))`, + ], + cwd: process.cwd(), + stripProviderAuthEnvVars: options?.stripProviderAuthEnvVars, + }); + + expect(result.code).toBe(0); + expect(result.error).toBeNull(); + return JSON.parse(result.stdout) as SpawnedEnvSnapshot; + } + it("returns abort error immediately when signal is already aborted", async () => { const controller = new AbortController(); controller.abort(); @@ -292,31 +330,15 @@ describe("spawnAndCollect", () => { }); it("strips shared provider auth env vars from spawned acpx children", async () => { - vi.stubEnv("OPENAI_API_KEY", "openai-secret"); - vi.stubEnv("GITHUB_TOKEN", "gh-secret"); - vi.stubEnv("HF_TOKEN", "hf-secret"); - vi.stubEnv("OPENCLAW_API_KEY", "keep-me"); - - const result = await spawnAndCollect({ - command: process.execPath, - args: [ - "-e", - "process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))", - ], - cwd: process.cwd(), + stubProviderAuthEnv({ + OPENAI_API_KEY: "openai-secret", + GITHUB_TOKEN: "gh-secret", + HF_TOKEN: "hf-secret", + OPENCLAW_API_KEY: "keep-me", + }); + const parsed = await collectSpawnedEnvSnapshot({ stripProviderAuthEnvVars: true, }); - - expect(result.code).toBe(0); - expect(result.error).toBeNull(); - - const parsed = JSON.parse(result.stdout) as { - openai?: string; - github?: string; - hf?: string; - openclaw?: string; - shell?: string; - }; expect(parsed.openai).toBeUndefined(); expect(parsed.github).toBeUndefined(); expect(parsed.hf).toBeUndefined(); @@ -325,29 +347,16 @@ describe("spawnAndCollect", () => { }); it("strips provider auth env vars 
case-insensitively", async () => { - vi.stubEnv("OpenAI_Api_Key", "openai-secret"); - vi.stubEnv("Github_Token", "gh-secret"); - vi.stubEnv("OPENCLAW_API_KEY", "keep-me"); - - const result = await spawnAndCollect({ - command: process.execPath, - args: [ - "-e", - "process.stdout.write(JSON.stringify({openai:process.env.OpenAI_Api_Key,github:process.env.Github_Token,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))", - ], - cwd: process.cwd(), - stripProviderAuthEnvVars: true, + stubProviderAuthEnv({ + OpenAI_Api_Key: "openai-secret", + Github_Token: "gh-secret", + OPENCLAW_API_KEY: "keep-me", + }); + const parsed = await collectSpawnedEnvSnapshot({ + stripProviderAuthEnvVars: true, + openAiEnvKey: "OpenAI_Api_Key", + githubEnvKey: "Github_Token", }); - - expect(result.code).toBe(0); - expect(result.error).toBeNull(); - - const parsed = JSON.parse(result.stdout) as { - openai?: string; - github?: string; - openclaw?: string; - shell?: string; - }; expect(parsed.openai).toBeUndefined(); expect(parsed.github).toBeUndefined(); expect(parsed.openclaw).toBe("keep-me"); @@ -355,30 +364,13 @@ describe("spawnAndCollect", () => { }); it("preserves provider auth env vars for explicit custom commands by default", async () => { - vi.stubEnv("OPENAI_API_KEY", "openai-secret"); - vi.stubEnv("GITHUB_TOKEN", "gh-secret"); - vi.stubEnv("HF_TOKEN", "hf-secret"); - vi.stubEnv("OPENCLAW_API_KEY", "keep-me"); - - const result = await spawnAndCollect({ - command: process.execPath, - args: [ - "-e", - "process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))", - ], - cwd: process.cwd(), + stubProviderAuthEnv({ + OPENAI_API_KEY: "openai-secret", + GITHUB_TOKEN: "gh-secret", + HF_TOKEN: "hf-secret", + OPENCLAW_API_KEY: "keep-me", }); - - expect(result.code).toBe(0); - expect(result.error).toBeNull(); - - const parsed = 
JSON.parse(result.stdout) as { - openai?: string; - github?: string; - hf?: string; - openclaw?: string; - shell?: string; - }; + const parsed = await collectSpawnedEnvSnapshot(); expect(parsed.openai).toBe("openai-secret"); expect(parsed.github).toBe("gh-secret"); expect(parsed.hf).toBe("hf-secret"); diff --git a/extensions/bluebubbles/src/attachments.test.ts b/extensions/bluebubbles/src/attachments.test.ts index 8ef94cf08ae..704b907eb8b 100644 --- a/extensions/bluebubbles/src/attachments.test.ts +++ b/extensions/bluebubbles/src/attachments.test.ts @@ -82,6 +82,15 @@ describe("downloadBlueBubblesAttachment", () => { ).rejects.toThrow("too large"); } + function mockSuccessfulAttachmentDownload(buffer = new Uint8Array([1])) { + mockFetch.mockResolvedValueOnce({ + ok: true, + headers: new Headers(), + arrayBuffer: () => Promise.resolve(buffer.buffer), + }); + return buffer; + } + it("throws when guid is missing", async () => { const attachment: BlueBubblesAttachment = {}; await expect( @@ -159,12 +168,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("encodes guid in URL", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att/with/special chars" }; await downloadBlueBubblesAttachment(attachment, { @@ -244,12 +248,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("resolves credentials from config when opts not provided", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-config" }; const result = await downloadBlueBubblesAttachment(attachment, { @@ -270,12 +269,7 @@ 
describe("downloadBlueBubblesAttachment", () => { }); it("passes ssrfPolicy with allowPrivateNetwork when config enables it", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-ssrf" }; await downloadBlueBubblesAttachment(attachment, { @@ -295,12 +289,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-no-ssrf" }; await downloadBlueBubblesAttachment(attachment, { @@ -313,12 +302,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-private-ip" }; await downloadBlueBubblesAttachment(attachment, { @@ -352,6 +336,14 @@ describe("sendBlueBubblesAttachment", () => { return Buffer.from(body).toString("utf8"); } + function expectVoiceAttachmentBody() { + const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; + const bodyText = decodeBody(body); + expect(bodyText).toContain('name="isAudioMessage"'); + expect(bodyText).toContain("true"); + return bodyText; + } + it("marks voice memos when asVoice is true and mp3 is provided", async () => { mockFetch.mockResolvedValueOnce({ ok: true, @@ -367,10 +359,7 @@ 
describe("sendBlueBubblesAttachment", () => { opts: { serverUrl: "http://localhost:1234", password: "test" }, }); - const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; - const bodyText = decodeBody(body); - expect(bodyText).toContain('name="isAudioMessage"'); - expect(bodyText).toContain("true"); + const bodyText = expectVoiceAttachmentBody(); expect(bodyText).toContain('filename="voice.mp3"'); }); @@ -389,8 +378,7 @@ describe("sendBlueBubblesAttachment", () => { opts: { serverUrl: "http://localhost:1234", password: "test" }, }); - const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; - const bodyText = decodeBody(body); + const bodyText = expectVoiceAttachmentBody(); expect(bodyText).toContain('filename="voice.mp3"'); expect(bodyText).toContain('name="voice.mp3"'); }); diff --git a/extensions/bluebubbles/src/attachments.ts b/extensions/bluebubbles/src/attachments.ts index cbd8a74d807..c5392fd2595 100644 --- a/extensions/bluebubbles/src/attachments.ts +++ b/extensions/bluebubbles/src/attachments.ts @@ -2,7 +2,7 @@ import crypto from "node:crypto"; import path from "node:path"; import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles"; import { resolveBlueBubblesServerAccount } from "./account-resolve.js"; -import { postMultipartFormData } from "./multipart.js"; +import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js"; import { getCachedBlueBubblesPrivateApiStatus, isBlueBubblesPrivateApiStatusEnabled, @@ -262,12 +262,7 @@ export async function sendBlueBubblesAttachment(params: { timeoutMs: opts.timeoutMs ?? 
60_000, // longer timeout for file uploads }); - if (!res.ok) { - const errorText = await res.text(); - throw new Error( - `BlueBubbles attachment send failed (${res.status}): ${errorText || "unknown"}`, - ); - } + await assertMultipartActionOk(res, "attachment send"); const responseBody = await res.text(); if (!responseBody) { diff --git a/extensions/bluebubbles/src/chat.test.ts b/extensions/bluebubbles/src/chat.test.ts index cc37829bc9d..f8adc9b86fd 100644 --- a/extensions/bluebubbles/src/chat.test.ts +++ b/extensions/bluebubbles/src/chat.test.ts @@ -29,6 +29,11 @@ describe("chat", () => { }); } + function mockTwoOkTextResponses() { + mockOkTextResponse(); + mockOkTextResponse(); + } + async function expectCalledUrlIncludesPassword(params: { password: string; invoke: () => Promise; @@ -198,15 +203,7 @@ describe("chat", () => { }); it("uses POST for start and DELETE for stop", async () => { - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }) - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); + mockTwoOkTextResponses(); await sendBlueBubblesTyping("iMessage;-;+15551234567", true, { serverUrl: "http://localhost:1234", @@ -442,15 +439,7 @@ describe("chat", () => { }); it("adds and removes participant using matching endpoint", async () => { - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }) - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); + mockTwoOkTextResponses(); await addBlueBubblesParticipant("chat-guid", "+15551234567", { serverUrl: "http://localhost:1234", diff --git a/extensions/bluebubbles/src/chat.ts b/extensions/bluebubbles/src/chat.ts index b63f09272f2..17340b7f980 100644 --- a/extensions/bluebubbles/src/chat.ts +++ b/extensions/bluebubbles/src/chat.ts @@ -2,7 +2,7 @@ import crypto from "node:crypto"; import path from "node:path"; import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles"; import { 
resolveBlueBubblesServerAccount } from "./account-resolve.js"; -import { postMultipartFormData } from "./multipart.js"; +import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js"; import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js"; import { blueBubblesFetchWithTimeout, buildBlueBubblesApiUrl } from "./types.js"; @@ -55,12 +55,7 @@ async function sendBlueBubblesChatEndpointRequest(params: { { method: params.method }, params.opts.timeoutMs, ); - if (!res.ok) { - const errorText = await res.text().catch(() => ""); - throw new Error( - `BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`, - ); - } + await assertMultipartActionOk(res, params.action); } async function sendPrivateApiJsonRequest(params: { @@ -86,12 +81,7 @@ async function sendPrivateApiJsonRequest(params: { } const res = await blueBubblesFetchWithTimeout(url, request, params.opts.timeoutMs); - if (!res.ok) { - const errorText = await res.text().catch(() => ""); - throw new Error( - `BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`, - ); - } + await assertMultipartActionOk(res, params.action); } export async function markBlueBubblesChatRead( @@ -329,8 +319,5 @@ export async function setGroupIconBlueBubbles( timeoutMs: opts.timeoutMs ?? 
60_000, // longer timeout for file uploads }); - if (!res.ok) { - const errorText = await res.text().catch(() => ""); - throw new Error(`BlueBubbles setGroupIcon failed (${res.status}): ${errorText || "unknown"}`); - } + await assertMultipartActionOk(res, "setGroupIcon"); } diff --git a/extensions/bluebubbles/src/media-send.test.ts b/extensions/bluebubbles/src/media-send.test.ts index 9f065599bfb..59fe82cbeae 100644 --- a/extensions/bluebubbles/src/media-send.test.ts +++ b/extensions/bluebubbles/src/media-send.test.ts @@ -70,6 +70,70 @@ async function makeTempDir(): Promise { return dir; } +async function makeTempFile( + fileName: string, + contents: string, + dir?: string, +): Promise<{ dir: string; filePath: string }> { + const resolvedDir = dir ?? (await makeTempDir()); + const filePath = path.join(resolvedDir, fileName); + await fs.writeFile(filePath, contents, "utf8"); + return { dir: resolvedDir, filePath }; +} + +async function sendLocalMedia(params: { + cfg: OpenClawConfig; + mediaPath: string; + accountId?: string; +}) { + return sendBlueBubblesMedia({ + cfg: params.cfg, + to: "chat:123", + accountId: params.accountId, + mediaPath: params.mediaPath, + }); +} + +async function expectRejectedLocalMedia(params: { + cfg: OpenClawConfig; + mediaPath: string; + error: RegExp; + accountId?: string; +}) { + await expect( + sendLocalMedia({ + cfg: params.cfg, + mediaPath: params.mediaPath, + accountId: params.accountId, + }), + ).rejects.toThrow(params.error); + + expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled(); +} + +async function expectAllowedLocalMedia(params: { + cfg: OpenClawConfig; + mediaPath: string; + expectedAttachment: Record; + accountId?: string; + expectMimeDetection?: boolean; +}) { + const result = await sendLocalMedia({ + cfg: params.cfg, + mediaPath: params.mediaPath, + accountId: params.accountId, + }); + + expect(result).toEqual({ messageId: "msg-1" }); + expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1); + 
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual( + expect.objectContaining(params.expectedAttachment), + ); + if (params.expectMimeDetection) { + expect(runtimeMocks.detectMime).toHaveBeenCalled(); + } +} + beforeEach(() => { const runtime = createMockRuntime(); runtimeMocks = runtime.mocks; @@ -110,57 +174,43 @@ describe("sendBlueBubblesMedia local-path hardening", () => { const outsideFile = path.join(outsideDir, "outside.txt"); await fs.writeFile(outsideFile, "not allowed", "utf8"); - await expect( - sendBlueBubblesMedia({ - cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), - to: "chat:123", - mediaPath: outsideFile, - }), - ).rejects.toThrow(/not under any configured mediaLocalRoots/i); - - expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled(); + await expectRejectedLocalMedia({ + cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), + mediaPath: outsideFile, + error: /not under any configured mediaLocalRoots/i, + }); }); it("allows local paths that are explicitly configured", async () => { - const allowedRoot = await makeTempDir(); - const allowedFile = path.join(allowedRoot, "allowed.txt"); - await fs.writeFile(allowedFile, "allowed", "utf8"); + const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile( + "allowed.txt", + "allowed", + ); - const result = await sendBlueBubblesMedia({ + await expectAllowedLocalMedia({ cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), - to: "chat:123", mediaPath: allowedFile, - }); - - expect(result).toEqual({ messageId: "msg-1" }); - expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1); - expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual( - expect.objectContaining({ + expectedAttachment: { filename: "allowed.txt", contentType: "text/plain", - }), - ); - expect(runtimeMocks.detectMime).toHaveBeenCalled(); + }, + expectMimeDetection: true, + }); }); it("allows file:// media paths and file:// local roots", async () => { - const allowedRoot = await 
makeTempDir(); - const allowedFile = path.join(allowedRoot, "allowed.txt"); - await fs.writeFile(allowedFile, "allowed", "utf8"); - - const result = await sendBlueBubblesMedia({ - cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }), - to: "chat:123", - mediaPath: pathToFileURL(allowedFile).toString(), - }); - - expect(result).toEqual({ messageId: "msg-1" }); - expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1); - expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual( - expect.objectContaining({ - filename: "allowed.txt", - }), + const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile( + "allowed.txt", + "allowed", ); + + await expectAllowedLocalMedia({ + cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }), + mediaPath: pathToFileURL(allowedFile).toString(), + expectedAttachment: { + filename: "allowed.txt", + }, + }); }); it("uses account-specific mediaLocalRoots over top-level roots", async () => { @@ -213,15 +263,11 @@ describe("sendBlueBubblesMedia local-path hardening", () => { return; } - await expect( - sendBlueBubblesMedia({ - cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), - to: "chat:123", - mediaPath: linkPath, - }), - ).rejects.toThrow(/not under any configured mediaLocalRoots/i); - - expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled(); + await expectRejectedLocalMedia({ + cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), + mediaPath: linkPath, + error: /not under any configured mediaLocalRoots/i, + }); }); it("rejects relative mediaLocalRoots entries", async () => { diff --git a/extensions/bluebubbles/src/monitor-normalize.test.ts b/extensions/bluebubbles/src/monitor-normalize.test.ts index 3e06302593c..62651279237 100644 --- a/extensions/bluebubbles/src/monitor-normalize.test.ts +++ b/extensions/bluebubbles/src/monitor-normalize.test.ts @@ -1,18 +1,24 @@ import { describe, expect, it } from "vitest"; import { normalizeWebhookMessage, 
normalizeWebhookReaction } from "./monitor-normalize.js"; +function createFallbackDmPayload(overrides: Record = {}) { + return { + guid: "msg-1", + isGroup: false, + isFromMe: false, + handle: null, + chatGuid: "iMessage;-;+15551234567", + ...overrides, + }; +} + describe("normalizeWebhookMessage", () => { it("falls back to DM chatGuid handle when sender handle is missing", () => { const result = normalizeWebhookMessage({ type: "new-message", - data: { - guid: "msg-1", + data: createFallbackDmPayload({ text: "hello", - isGroup: false, - isFromMe: false, - handle: null, - chatGuid: "iMessage;-;+15551234567", - }, + }), }); expect(result).not.toBeNull(); @@ -78,15 +84,11 @@ describe("normalizeWebhookReaction", () => { it("falls back to DM chatGuid handle when reaction sender handle is missing", () => { const result = normalizeWebhookReaction({ type: "updated-message", - data: { + data: createFallbackDmPayload({ guid: "msg-2", associatedMessageGuid: "p:0/msg-1", associatedMessageType: 2000, - isGroup: false, - isFromMe: false, - handle: null, - chatGuid: "iMessage;-;+15551234567", - }, + }), }); expect(result).not.toBeNull(); diff --git a/extensions/bluebubbles/src/monitor-normalize.ts b/extensions/bluebubbles/src/monitor-normalize.ts index 83454602d4c..085bd8923e1 100644 --- a/extensions/bluebubbles/src/monitor-normalize.ts +++ b/extensions/bluebubbles/src/monitor-normalize.ts @@ -582,6 +582,29 @@ export function parseTapbackText(params: { return null; } + const parseLeadingReactionAction = ( + prefix: "reacted" | "removed", + defaultAction: "added" | "removed", + ) => { + if (!lower.startsWith(prefix)) { + return null; + } + const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; + if (!emoji) { + return null; + } + const quotedText = extractQuotedTapbackText(trimmed); + if (params.requireQuoted && !quotedText) { + return null; + } + const fallback = trimmed.slice(prefix.length).trim(); + return { + emoji, + action: params.actionHint ?? 
defaultAction, + quotedText: quotedText ?? fallback, + }; + }; + for (const [pattern, { emoji, action }] of TAPBACK_TEXT_MAP) { if (lower.startsWith(pattern)) { // Extract quoted text if present (e.g., 'Loved "hello"' -> "hello") @@ -599,30 +622,14 @@ export function parseTapbackText(params: { } } - if (lower.startsWith("reacted")) { - const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; - if (!emoji) { - return null; - } - const quotedText = extractQuotedTapbackText(trimmed); - if (params.requireQuoted && !quotedText) { - return null; - } - const fallback = trimmed.slice("reacted".length).trim(); - return { emoji, action: params.actionHint ?? "added", quotedText: quotedText ?? fallback }; + const reacted = parseLeadingReactionAction("reacted", "added"); + if (reacted) { + return reacted; } - if (lower.startsWith("removed")) { - const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; - if (!emoji) { - return null; - } - const quotedText = extractQuotedTapbackText(trimmed); - if (params.requireQuoted && !quotedText) { - return null; - } - const fallback = trimmed.slice("removed".length).trim(); - return { emoji, action: params.actionHint ?? "removed", quotedText: quotedText ?? 
fallback }; + const removed = parseLeadingReactionAction("removed", "removed"); + if (removed) { + return removed; } return null; } diff --git a/extensions/bluebubbles/src/monitor.webhook-auth.test.ts b/extensions/bluebubbles/src/monitor.webhook-auth.test.ts index 7a6a29353bd..f6826ac510b 100644 --- a/extensions/bluebubbles/src/monitor.webhook-auth.test.ts +++ b/extensions/bluebubbles/src/monitor.webhook-auth.test.ts @@ -302,65 +302,102 @@ describe("BlueBubbles webhook monitor", () => { }; } - describe("webhook parsing + auth handling", () => { - it("rejects non-POST requests", async () => { - const account = createMockAccount(); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); + async function dispatchWebhook(req: IncomingMessage) { + const res = createMockResponse(); + const handled = await handleBlueBubblesWebhookRequest(req, res); + return { handled, res }; + } - unregister = registerBlueBubblesWebhookTarget({ + function createWebhookRequestForTest(params?: { + method?: string; + url?: string; + body?: unknown; + headers?: Record; + remoteAddress?: string; + }) { + const req = createMockRequest( + params?.method ?? "POST", + params?.url ?? "/bluebubbles-webhook", + params?.body ?? 
{}, + params?.headers, + ); + if (params?.remoteAddress) { + setRequestRemoteAddress(req, params.remoteAddress); + } + return req; + } + + function createHangingWebhookRequest(url = "/bluebubbles-webhook?password=test-password") { + const req = new EventEmitter() as IncomingMessage; + const destroyMock = vi.fn(); + req.method = "POST"; + req.url = url; + req.headers = {}; + req.destroy = destroyMock as unknown as IncomingMessage["destroy"]; + setRequestRemoteAddress(req, "127.0.0.1"); + return { req, destroyMock }; + } + + function registerWebhookTargets( + params: Array<{ + account: ResolvedBlueBubblesAccount; + statusSink?: (event: unknown) => void; + }>, + ) { + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const unregisterFns = params.map(({ account, statusSink }) => + registerBlueBubblesWebhookTarget({ account, config, runtime: { log: vi.fn(), error: vi.fn() }, core, path: "/bluebubbles-webhook", - }); + statusSink, + }), + ); - const req = createMockRequest("GET", "/bluebubbles-webhook", {}); - const res = createMockResponse(); + unregister = () => { + for (const unregisterFn of unregisterFns) { + unregisterFn(); + } + }; + } - const handled = await handleBlueBubblesWebhookRequest(req, res); + async function expectWebhookStatus( + req: IncomingMessage, + expectedStatus: number, + expectedBody?: string, + ) { + const { handled, res } = await dispatchWebhook(req); + expect(handled).toBe(true); + expect(res.statusCode).toBe(expectedStatus); + if (expectedBody !== undefined) { + expect(res.body).toBe(expectedBody); + } + return res; + } - expect(handled).toBe(true); - expect(res.statusCode).toBe(405); + describe("webhook parsing + auth handling", () => { + it("rejects non-POST requests", async () => { + setupWebhookTarget(); + const req = createWebhookRequestForTest({ method: "GET" }); + await expectWebhookStatus(req, 405); }); it("accepts POST requests with valid JSON payload", async () => { 
setupWebhookTarget(); const payload = createNewMessagePayload({ date: Date.now() }); - - const req = createMockRequest("POST", "/bluebubbles-webhook", payload); - const res = createMockResponse(); - - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("ok"); + const req = createWebhookRequestForTest({ body: payload }); + await expectWebhookStatus(req, 200, "ok"); }); it("rejects requests with invalid JSON", async () => { - const account = createMockAccount(); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); - - const req = createMockRequest("POST", "/bluebubbles-webhook", "invalid json {{"); - const res = createMockResponse(); - - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(400); + setupWebhookTarget(); + const req = createWebhookRequestForTest({ body: "invalid json {{" }); + await expectWebhookStatus(req, 400); }); it("accepts URL-encoded payload wrappers", async () => { @@ -369,42 +406,17 @@ describe("BlueBubbles webhook monitor", () => { const encodedBody = new URLSearchParams({ payload: JSON.stringify(payload), }).toString(); - - const req = createMockRequest("POST", "/bluebubbles-webhook", encodedBody); - const res = createMockResponse(); - - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("ok"); + const req = createWebhookRequestForTest({ body: encodedBody }); + await expectWebhookStatus(req, 200, "ok"); }); it("returns 408 when request body times out (Slow-Loris protection)", async () => { vi.useFakeTimers(); try { - const account = 
createMockAccount(); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); + setupWebhookTarget(); // Create a request that never sends data or ends (simulates slow-loris) - const req = new EventEmitter() as IncomingMessage; - req.method = "POST"; - req.url = "/bluebubbles-webhook?password=test-password"; - req.headers = {}; - (req as unknown as { socket: { remoteAddress: string } }).socket = { - remoteAddress: "127.0.0.1", - }; - req.destroy = vi.fn(); + const { req, destroyMock } = createHangingWebhookRequest(); const res = createMockResponse(); @@ -416,7 +428,7 @@ describe("BlueBubbles webhook monitor", () => { const handled = await handledPromise; expect(handled).toBe(true); expect(res.statusCode).toBe(408); - expect(req.destroy).toHaveBeenCalled(); + expect(destroyMock).toHaveBeenCalled(); } finally { vi.useRealTimers(); } @@ -424,140 +436,62 @@ describe("BlueBubbles webhook monitor", () => { it("rejects unauthorized requests before reading the body", async () => { const account = createMockAccount({ password: "secret-token" }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); - - const req = new EventEmitter() as IncomingMessage; - req.method = "POST"; - req.url = "/bluebubbles-webhook?password=wrong-token"; - req.headers = {}; + setupWebhookTarget({ account }); + const { req } = createHangingWebhookRequest("/bluebubbles-webhook?password=wrong-token"); const onSpy = vi.spyOn(req, "on"); - (req as unknown as { socket: { remoteAddress: string } }).socket = { - remoteAddress: "127.0.0.1", - }; - - const res = createMockResponse(); - 
const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + await expectWebhookStatus(req, 401); expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function)); }); it("authenticates via password query parameter", async () => { const account = createMockAccount({ password: "secret-token" }); - - // Mock non-localhost request - const req = createMockRequest( - "POST", - "/bluebubbles-webhook?password=secret-token", - createNewMessagePayload(), - ); - setRequestRemoteAddress(req, "192.168.1.100"); setupWebhookTarget({ account }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=secret-token", + body: createNewMessagePayload(), + remoteAddress: "192.168.1.100", + }); + await expectWebhookStatus(req, 200); }); it("authenticates via x-password header", async () => { const account = createMockAccount({ password: "secret-token" }); - - const req = createMockRequest( - "POST", - "/bluebubbles-webhook", - createNewMessagePayload(), - { "x-password": "secret-token" }, // pragma: allowlist secret - ); - setRequestRemoteAddress(req, "192.168.1.100"); setupWebhookTarget({ account }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); + const req = createWebhookRequestForTest({ + body: createNewMessagePayload(), + headers: { "x-password": "secret-token" }, // pragma: allowlist secret + remoteAddress: "192.168.1.100", + }); + await expectWebhookStatus(req, 200); }); it("rejects unauthorized requests with wrong password", async () => { const account = createMockAccount({ password: "secret-token" }); - const req = createMockRequest( - "POST", - 
"/bluebubbles-webhook?password=wrong-token", - createNewMessagePayload(), - ); - setRequestRemoteAddress(req, "192.168.1.100"); setupWebhookTarget({ account }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=wrong-token", + body: createNewMessagePayload(), + remoteAddress: "192.168.1.100", + }); + await expectWebhookStatus(req, 401); }); it("rejects ambiguous routing when multiple targets match the same password", async () => { const accountA = createMockAccount({ password: "secret-token" }); const accountB = createMockAccount({ password: "secret-token" }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - const sinkA = vi.fn(); const sinkB = vi.fn(); + registerWebhookTargets([ + { account: accountA, statusSink: sinkA }, + { account: accountB, statusSink: sinkB }, + ]); - const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }); - (req as unknown as { socket: { remoteAddress: string } }).socket = { + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=secret-token", + body: createNewMessagePayload(), remoteAddress: "192.168.1.100", - }; - - const unregisterA = registerBlueBubblesWebhookTarget({ - account: accountA, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkA, }); - const unregisterB = registerBlueBubblesWebhookTarget({ - account: accountB, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkB, - }); - unregister = () => { - unregisterA(); - unregisterB(); - }; - - const 
res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + await expectWebhookStatus(req, 401); expect(sinkA).not.toHaveBeenCalled(); expect(sinkB).not.toHaveBeenCalled(); }); @@ -565,107 +499,38 @@ describe("BlueBubbles webhook monitor", () => { it("ignores targets without passwords when a password-authenticated target matches", async () => { const accountStrict = createMockAccount({ password: "secret-token" }); const accountWithoutPassword = createMockAccount({ password: undefined }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - const sinkStrict = vi.fn(); const sinkWithoutPassword = vi.fn(); + registerWebhookTargets([ + { account: accountStrict, statusSink: sinkStrict }, + { account: accountWithoutPassword, statusSink: sinkWithoutPassword }, + ]); - const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }); - (req as unknown as { socket: { remoteAddress: string } }).socket = { + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=secret-token", + body: createNewMessagePayload(), remoteAddress: "192.168.1.100", - }; - - const unregisterStrict = registerBlueBubblesWebhookTarget({ - account: accountStrict, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkStrict, }); - const unregisterNoPassword = registerBlueBubblesWebhookTarget({ - account: accountWithoutPassword, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkWithoutPassword, - }); - unregister = () => { - unregisterStrict(); - unregisterNoPassword(); - }; - - const res = createMockResponse(); - const handled = await 
handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); + await expectWebhookStatus(req, 200); expect(sinkStrict).toHaveBeenCalledTimes(1); expect(sinkWithoutPassword).not.toHaveBeenCalled(); }); it("requires authentication for loopback requests when password is configured", async () => { const account = createMockAccount({ password: "secret-token" }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); + setupWebhookTarget({ account }); for (const remoteAddress of ["127.0.0.1", "::1", "::ffff:127.0.0.1"]) { - const req = createMockRequest("POST", "/bluebubbles-webhook", { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }); - (req as unknown as { socket: { remoteAddress: string } }).socket = { + const req = createWebhookRequestForTest({ + body: createNewMessagePayload(), remoteAddress, - }; - - const loopbackUnregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); - - loopbackUnregister(); + await expectWebhookStatus(req, 401); } }); it("rejects targets without passwords for loopback and proxied-looking requests", async () => { const account = createMockAccount({ password: undefined }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); + setupWebhookTarget({ account }); const headerVariants: Record[] = [ { host: "localhost" }, @@ -673,28 +538,12 @@ describe("BlueBubbles webhook 
monitor", () => { { host: "localhost", forwarded: "for=203.0.113.10;proto=https;host=example.com" }, ]; for (const headers of headerVariants) { - const req = createMockRequest( - "POST", - "/bluebubbles-webhook", - { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }, + const req = createWebhookRequestForTest({ + body: createNewMessagePayload(), headers, - ); - (req as unknown as { socket: { remoteAddress: string } }).socket = { remoteAddress: "127.0.0.1", - }; - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + }); + await expectWebhookStatus(req, 401); } }); diff --git a/extensions/bluebubbles/src/multipart.ts b/extensions/bluebubbles/src/multipart.ts index 851cca016b7..e7c840745bb 100644 --- a/extensions/bluebubbles/src/multipart.ts +++ b/extensions/bluebubbles/src/multipart.ts @@ -30,3 +30,11 @@ export async function postMultipartFormData(params: { params.timeoutMs, ); } + +export async function assertMultipartActionOk(response: Response, action: string): Promise { + if (response.ok) { + return; + } + const errorText = await response.text().catch(() => ""); + throw new Error(`BlueBubbles ${action} failed (${response.status}): ${errorText || "unknown"}`); +} diff --git a/extensions/bluebubbles/src/reactions.test.ts b/extensions/bluebubbles/src/reactions.test.ts index 419ccc81e45..0b55337b35c 100644 --- a/extensions/bluebubbles/src/reactions.test.ts +++ b/extensions/bluebubbles/src/reactions.test.ts @@ -19,7 +19,7 @@ describe("reactions", () => { }); describe("sendBlueBubblesReaction", () => { - async function expectRemovedReaction(emoji: string) { + async function expectRemovedReaction(emoji: string, expectedReaction = "-love") { mockFetch.mockResolvedValueOnce({ ok: true, text: () => Promise.resolve(""), @@ -37,7 +37,7 @@ describe("reactions", 
() => { }); const body = JSON.parse(mockFetch.mock.calls[0][1].body); - expect(body.reaction).toBe("-love"); + expect(body.reaction).toBe(expectedReaction); } it("throws when chatGuid is empty", async () => { @@ -327,45 +327,11 @@ describe("reactions", () => { describe("reaction removal aliases", () => { it("handles emoji-based removal", async () => { - mockFetch.mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); - - await sendBlueBubblesReaction({ - chatGuid: "chat-123", - messageGuid: "msg-123", - emoji: "👍", - remove: true, - opts: { - serverUrl: "http://localhost:1234", - password: "test", - }, - }); - - const body = JSON.parse(mockFetch.mock.calls[0][1].body); - expect(body.reaction).toBe("-like"); + await expectRemovedReaction("👍", "-like"); }); it("handles text alias removal", async () => { - mockFetch.mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); - - await sendBlueBubblesReaction({ - chatGuid: "chat-123", - messageGuid: "msg-123", - emoji: "haha", - remove: true, - opts: { - serverUrl: "http://localhost:1234", - password: "test", - }, - }); - - const body = JSON.parse(mockFetch.mock.calls[0][1].body); - expect(body.reaction).toBe("-laugh"); + await expectRemovedReaction("haha", "-laugh"); }); }); }); diff --git a/extensions/device-pair/index.ts b/extensions/device-pair/index.ts index 825d1668ac0..7ba88842a7a 100644 --- a/extensions/device-pair/index.ts +++ b/extensions/device-pair/index.ts @@ -108,13 +108,21 @@ function resolveScheme( return cfg.gateway?.tls?.enabled === true ? 
"wss" : "ws"; } -function isPrivateIPv4(address: string): boolean { +function parseIPv4Octets(address: string): [number, number, number, number] | null { const parts = address.split("."); - if (parts.length != 4) { - return false; + if (parts.length !== 4) { + return null; } const octets = parts.map((part) => Number.parseInt(part, 10)); if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) { + return null; + } + return octets as [number, number, number, number]; +} + +function isPrivateIPv4(address: string): boolean { + const octets = parseIPv4Octets(address); + if (!octets) { return false; } const [a, b] = octets; @@ -131,12 +139,8 @@ function isPrivateIPv4(address: string): boolean { } function isTailnetIPv4(address: string): boolean { - const parts = address.split("."); - if (parts.length !== 4) { - return false; - } - const octets = parts.map((part) => Number.parseInt(part, 10)); - if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) { + const octets = parseIPv4Octets(address); + if (!octets) { return false; } const [a, b] = octets; diff --git a/extensions/diffs/index.test.ts b/extensions/diffs/index.test.ts index df0a0a79192..c38da12bfcd 100644 --- a/extensions/diffs/index.test.ts +++ b/extensions/diffs/index.test.ts @@ -1,6 +1,8 @@ import type { IncomingMessage } from "node:http"; +import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs"; import { describe, expect, it, vi } from "vitest"; import { createMockServerResponse } from "../../src/test-utils/mock-http-response.js"; +import { createTestPluginApi } from "../test-utils/plugin-api.js"; import plugin from "./index.js"; describe("diffs plugin registration", () => { @@ -9,33 +11,19 @@ describe("diffs plugin registration", () => { const registerHttpRoute = vi.fn(); const on = vi.fn(); - plugin.register?.({ - id: "diffs", - name: "Diffs", - description: "Diffs", - source: "test", - config: {}, - runtime: {} as never, - logger: { - info() {}, - 
warn() {}, - error() {}, - }, - registerTool, - registerHook() {}, - registerHttpRoute, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerCommand() {}, - registerContextEngine() {}, - resolvePath(input: string) { - return input; - }, - on, - }); + plugin.register?.( + createTestPluginApi({ + id: "diffs", + name: "Diffs", + description: "Diffs", + source: "test", + config: {}, + runtime: {} as never, + registerTool, + registerHttpRoute, + on, + }), + ); expect(registerTool).toHaveBeenCalledTimes(1); expect(registerHttpRoute).toHaveBeenCalledTimes(1); @@ -55,17 +43,15 @@ describe("diffs plugin registration", () => { }); it("applies plugin-config defaults through registered tool and viewer handler", async () => { - let registeredTool: - | { execute?: (toolCallId: string, params: Record) => Promise } - | undefined; - let registeredHttpRouteHandler: - | (( - req: IncomingMessage, - res: ReturnType, - ) => Promise) - | undefined; + type RegisteredTool = { + execute?: (toolCallId: string, params: Record) => Promise; + }; + type RegisteredHttpRouteParams = Parameters[0]; - plugin.register?.({ + let registeredTool: RegisteredTool | undefined; + let registeredHttpRouteHandler: RegisteredHttpRouteParams["handler"] | undefined; + + const api = createTestPluginApi({ id: "diffs", name: "Diffs", description: "Diffs", @@ -88,31 +74,16 @@ describe("diffs plugin registration", () => { }, }, runtime: {} as never, - logger: { - info() {}, - warn() {}, - error() {}, - }, - registerTool(tool) { + registerTool(tool: Parameters[0]) { registeredTool = typeof tool === "function" ? 
undefined : tool; }, - registerHook() {}, - registerHttpRoute(params) { - registeredHttpRouteHandler = params.handler as typeof registeredHttpRouteHandler; + registerHttpRoute(params: RegisteredHttpRouteParams) { + registeredHttpRouteHandler = params.handler; }, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerCommand() {}, - registerContextEngine() {}, - resolvePath(input: string) { - return input; - }, - on() {}, }); + plugin.register?.(api as unknown as OpenClawPluginApi); + const result = await registeredTool?.execute?.("tool-1", { before: "one\n", after: "two\n", diff --git a/extensions/diffs/package.json b/extensions/diffs/package.json index bb5f232517a..391a6893173 100644 --- a/extensions/diffs/package.json +++ b/extensions/diffs/package.json @@ -8,7 +8,7 @@ "build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js" }, "dependencies": { - "@pierre/diffs": "1.0.11", + "@pierre/diffs": "1.1.0", "@sinclair/typebox": "0.34.48", "playwright-core": "1.58.2" }, diff --git a/extensions/diffs/src/http.test.ts b/extensions/diffs/src/http.test.ts index 43216580379..a1caef018e4 100644 --- a/extensions/diffs/src/http.test.ts +++ b/extensions/diffs/src/http.test.ts @@ -9,6 +9,19 @@ describe("createDiffsHttpHandler", () => { let store: DiffArtifactStore; let cleanupRootDir: () => Promise; + async function handleLocalGet(url: string) { + const handler = createDiffsHttpHandler({ store }); + const res = createMockServerResponse(); + const handled = await handler( + localReq({ + method: "GET", + url, + }), + res, + ); + return { handled, res }; + } + beforeEach(async () => { ({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-http-")); }); @@ -19,16 +32,7 @@ describe("createDiffsHttpHandler", () => { it("serves a stored diff document", async () => { const artifact = await createViewerArtifact(store); 
- - const handler = createDiffsHttpHandler({ store }); - const res = createMockServerResponse(); - const handled = await handler( - localReq({ - method: "GET", - url: artifact.viewerPath, - }), - res, - ); + const { handled, res } = await handleLocalGet(artifact.viewerPath); expect(handled).toBe(true); expect(res.statusCode).toBe(200); @@ -38,15 +42,8 @@ describe("createDiffsHttpHandler", () => { it("rejects invalid tokens", async () => { const artifact = await createViewerArtifact(store); - - const handler = createDiffsHttpHandler({ store }); - const res = createMockServerResponse(); - const handled = await handler( - localReq({ - method: "GET", - url: artifact.viewerPath.replace(artifact.token, "bad-token"), - }), - res, + const { handled, res } = await handleLocalGet( + artifact.viewerPath.replace(artifact.token, "bad-token"), ); expect(handled).toBe(true); diff --git a/extensions/diffs/src/render.ts b/extensions/diffs/src/render.ts index fb3d089c90a..ce01091eea6 100644 --- a/extensions/diffs/src/render.ts +++ b/extensions/diffs/src/render.ts @@ -1,5 +1,12 @@ -import type { FileContents, FileDiffMetadata, SupportedLanguages } from "@pierre/diffs"; -import { parsePatchFiles } from "@pierre/diffs"; +import fs from "node:fs/promises"; +import { createRequire } from "node:module"; +import type { + FileContents, + FileDiffMetadata, + SupportedLanguages, + ThemeRegistrationResolved, +} from "@pierre/diffs"; +import { RegisteredCustomThemes, parsePatchFiles } from "@pierre/diffs"; import { preloadFileDiff, preloadMultiFileDiff } from "@pierre/diffs/ssr"; import type { DiffInput, @@ -13,6 +20,45 @@ import { VIEWER_LOADER_PATH } from "./viewer-assets.js"; const DEFAULT_FILE_NAME = "diff.txt"; const MAX_PATCH_FILE_COUNT = 128; const MAX_PATCH_TOTAL_LINES = 120_000; +const diffsRequire = createRequire(import.meta.resolve("@pierre/diffs")); + +let pierreThemesPatched = false; + +function createThemeLoader( + themeName: "pierre-dark" | "pierre-light", + themePath: string, 
+): () => Promise { + let cachedTheme: ThemeRegistrationResolved | undefined; + return async () => { + if (cachedTheme) { + return cachedTheme; + } + const raw = await fs.readFile(themePath, "utf8"); + const parsed = JSON.parse(raw) as Record; + cachedTheme = { + ...parsed, + name: themeName, + } as ThemeRegistrationResolved; + return cachedTheme; + }; +} + +function patchPierreThemeLoadersForNode24(): void { + if (pierreThemesPatched) { + return; + } + try { + const darkThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-dark.json"); + const lightThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-light.json"); + RegisteredCustomThemes.set("pierre-dark", createThemeLoader("pierre-dark", darkThemePath)); + RegisteredCustomThemes.set("pierre-light", createThemeLoader("pierre-light", lightThemePath)); + pierreThemesPatched = true; + } catch { + // Keep upstream loaders if theme files cannot be resolved. + } +} + +patchPierreThemeLoadersForNode24(); function escapeCssString(value: string): string { return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); diff --git a/extensions/diffs/src/tool.test.ts b/extensions/diffs/src/tool.test.ts index 416bdf8dc14..056b10c0643 100644 --- a/extensions/diffs/src/tool.test.ts +++ b/extensions/diffs/src/tool.test.ts @@ -2,6 +2,7 @@ import fs from "node:fs/promises"; import path from "node:path"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { createTestPluginApi } from "../../test-utils/plugin-api.js"; import type { DiffScreenshotter } from "./browser.js"; import { DEFAULT_DIFFS_TOOL_DEFAULTS } from "./config.js"; import { DiffArtifactStore } from "./store.js"; @@ -135,9 +136,7 @@ describe("diffs tool", () => { mode: "file", }); - expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); - expect((result?.details as Record).mode).toBe("file"); - expect((result?.details as 
Record).viewerUrl).toBeUndefined(); + expectArtifactOnlyFileResult(screenshotter, result); }); it("honors ttlSeconds for artifact-only file output", async () => { @@ -227,9 +226,7 @@ describe("diffs tool", () => { after: "two\n", }); - expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); - expect((result?.details as Record).mode).toBe("file"); - expect((result?.details as Record).viewerUrl).toBeUndefined(); + expectArtifactOnlyFileResult(screenshotter, result); }); it("falls back to view output when both mode cannot render an image", async () => { @@ -387,7 +384,7 @@ describe("diffs tool", () => { }); function createApi(): OpenClawPluginApi { - return { + return createTestPluginApi({ id: "diffs", name: "Diffs", description: "Diffs", @@ -399,26 +396,7 @@ function createApi(): OpenClawPluginApi { }, }, runtime: {} as OpenClawPluginApi["runtime"], - logger: { - info() {}, - warn() {}, - error() {}, - }, - registerTool() {}, - registerHook() {}, - registerHttpRoute() {}, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerCommand() {}, - registerContextEngine() {}, - resolvePath(input: string) { - return input; - }, - on() {}, - }; + }) as OpenClawPluginApi; } function createToolWithScreenshotter( @@ -434,6 +412,15 @@ function createToolWithScreenshotter( }); } +function expectArtifactOnlyFileResult( + screenshotter: DiffScreenshotter, + result: { details?: unknown } | null | undefined, +) { + expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); + expect((result?.details as Record).mode).toBe("file"); + expect((result?.details as Record).viewerUrl).toBeUndefined(); +} + function createPngScreenshotter( params: { assertHtml?: (html: string) => void; diff --git a/extensions/discord/src/subagent-hooks.test.ts b/extensions/discord/src/subagent-hooks.test.ts index d58f07c1314..6d5824f69ae 100644 --- a/extensions/discord/src/subagent-hooks.test.ts +++ 
b/extensions/discord/src/subagent-hooks.test.ts @@ -75,6 +75,27 @@ function getRequiredHandler( return handler; } +function resolveSubagentDeliveryTargetForTest(requesterOrigin: { + channel: string; + accountId: string; + to: string; + threadId?: string; +}) { + const handlers = registerHandlersForTest(); + const handler = getRequiredHandler(handlers, "subagent_delivery_target"); + return handler( + { + childSessionKey: "agent:main:subagent:child", + requesterSessionKey: "agent:main:main", + requesterOrigin, + childRunId: "run-1", + spawnMode: "session", + expectsCompletionMessage: true, + }, + {}, + ); +} + function createSpawnEvent(overrides?: { childSessionKey?: string; agentId?: string; @@ -324,25 +345,12 @@ describe("discord subagent hook handlers", () => { hookMocks.listThreadBindingsBySessionKey.mockReturnValueOnce([ { accountId: "work", threadId: "777" }, ]); - const handlers = registerHandlersForTest(); - const handler = getRequiredHandler(handlers, "subagent_delivery_target"); - - const result = handler( - { - childSessionKey: "agent:main:subagent:child", - requesterSessionKey: "agent:main:main", - requesterOrigin: { - channel: "discord", - accountId: "work", - to: "channel:123", - threadId: "777", - }, - childRunId: "run-1", - spawnMode: "session", - expectsCompletionMessage: true, - }, - {}, - ); + const result = resolveSubagentDeliveryTargetForTest({ + channel: "discord", + accountId: "work", + to: "channel:123", + threadId: "777", + }); expect(hookMocks.listThreadBindingsBySessionKey).toHaveBeenCalledWith({ targetSessionKey: "agent:main:subagent:child", @@ -364,24 +372,11 @@ describe("discord subagent hook handlers", () => { { accountId: "work", threadId: "777" }, { accountId: "work", threadId: "888" }, ]); - const handlers = registerHandlersForTest(); - const handler = getRequiredHandler(handlers, "subagent_delivery_target"); - - const result = handler( - { - childSessionKey: "agent:main:subagent:child", - requesterSessionKey: "agent:main:main", - 
requesterOrigin: { - channel: "discord", - accountId: "work", - to: "channel:123", - }, - childRunId: "run-1", - spawnMode: "session", - expectsCompletionMessage: true, - }, - {}, - ); + const result = resolveSubagentDeliveryTargetForTest({ + channel: "discord", + accountId: "work", + to: "channel:123", + }); expect(result).toBeUndefined(); }); diff --git a/extensions/feishu/src/accounts.test.ts b/extensions/feishu/src/accounts.test.ts index 56783bbd29d..cfe8d0abcdc 100644 --- a/extensions/feishu/src/accounts.test.ts +++ b/extensions/feishu/src/accounts.test.ts @@ -9,6 +9,23 @@ import type { FeishuConfig } from "./types.js"; const asConfig = (value: Partial) => value as FeishuConfig; +function makeDefaultAndRouterAccounts() { + return { + default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret + "router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret + }; +} + +function expectExplicitDefaultAccountSelection( + account: ReturnType, + appId: string, +) { + expect(account.accountId).toBe("router-d"); + expect(account.selectionSource).toBe("explicit-default"); + expect(account.configured).toBe(true); + expect(account.appId).toBe(appId); +} + function withEnvVar(key: string, value: string | undefined, run: () => void) { const prev = process.env[key]; if (value === undefined) { @@ -44,10 +61,7 @@ describe("resolveDefaultFeishuAccountId", () => { channels: { feishu: { defaultAccount: "router-d", - accounts: { - default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret - "router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret - }, + accounts: makeDefaultAndRouterAccounts(), }, }, }; @@ -278,10 +292,7 @@ describe("resolveFeishuAccount", () => { }; const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined }); - expect(account.accountId).toBe("router-d"); - expect(account.selectionSource).toBe("explicit-default"); 
- expect(account.configured).toBe(true); - expect(account.appId).toBe("top_level_app"); + expectExplicitDefaultAccountSelection(account, "top_level_app"); }); it("uses configured default account when accountId is omitted", () => { @@ -298,10 +309,7 @@ describe("resolveFeishuAccount", () => { }; const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined }); - expect(account.accountId).toBe("router-d"); - expect(account.selectionSource).toBe("explicit-default"); - expect(account.configured).toBe(true); - expect(account.appId).toBe("cli_router"); + expectExplicitDefaultAccountSelection(account, "cli_router"); }); it("keeps explicit accountId selection", () => { @@ -309,10 +317,7 @@ describe("resolveFeishuAccount", () => { channels: { feishu: { defaultAccount: "router-d", - accounts: { - default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret - "router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret - }, + accounts: makeDefaultAndRouterAccounts(), }, }, }; diff --git a/extensions/feishu/src/bot.ts b/extensions/feishu/src/bot.ts index 13a130b3d79..815f935ed94 100644 --- a/extensions/feishu/src/bot.ts +++ b/extensions/feishu/src/bot.ts @@ -15,7 +15,7 @@ import { } from "openclaw/plugin-sdk/feishu"; import { resolveFeishuAccount } from "./accounts.js"; import { createFeishuClient } from "./client.js"; -import { tryRecordMessage, tryRecordMessagePersistent } from "./dedup.js"; +import { finalizeFeishuMessageProcessing, tryRecordMessagePersistent } from "./dedup.js"; import { maybeCreateDynamicAgent } from "./dynamic-agent.js"; import { normalizeFeishuExternalKey } from "./external-keys.js"; import { downloadMessageResourceFeishu } from "./media.js"; @@ -867,8 +867,18 @@ export async function handleFeishuMessage(params: { runtime?: RuntimeEnv; chatHistories?: Map; accountId?: string; + processingClaimHeld?: boolean; }): Promise { - const { cfg, event, botOpenId, botName, runtime, 
chatHistories, accountId } = params; + const { + cfg, + event, + botOpenId, + botName, + runtime, + chatHistories, + accountId, + processingClaimHeld = false, + } = params; // Resolve account with merged config const account = resolveFeishuAccount({ cfg, accountId }); @@ -877,16 +887,15 @@ export async function handleFeishuMessage(params: { const log = runtime?.log ?? console.log; const error = runtime?.error ?? console.error; - // Dedup: synchronous memory guard prevents concurrent duplicate dispatch - // before the async persistent check completes. const messageId = event.message.message_id; - const memoryDedupeKey = `${account.accountId}:${messageId}`; - if (!tryRecordMessage(memoryDedupeKey)) { - log(`feishu: skipping duplicate message ${messageId} (memory dedup)`); - return; - } - // Persistent dedup survives restarts and reconnects. - if (!(await tryRecordMessagePersistent(messageId, account.accountId, log))) { + if ( + !(await finalizeFeishuMessageProcessing({ + messageId, + namespace: account.accountId, + log, + claimHeld: processingClaimHeld, + })) + ) { log(`feishu: skipping duplicate message ${messageId}`); return; } diff --git a/extensions/feishu/src/config-schema.test.ts b/extensions/feishu/src/config-schema.test.ts index 0e0881c849f..aacbac85062 100644 --- a/extensions/feishu/src/config-schema.test.ts +++ b/extensions/feishu/src/config-schema.test.ts @@ -1,6 +1,16 @@ import { describe, expect, it } from "vitest"; import { FeishuConfigSchema, FeishuGroupSchema } from "./config-schema.js"; +function expectSchemaIssue( + result: ReturnType, + issuePath: string, +) { + expect(result.success).toBe(false); + if (!result.success) { + expect(result.error.issues.some((issue) => issue.path.join(".") === issuePath)).toBe(true); + } +} + describe("FeishuConfigSchema webhook validation", () => { it("applies top-level defaults", () => { const result = FeishuConfigSchema.parse({}); @@ -39,12 +49,7 @@ describe("FeishuConfigSchema webhook validation", () => { 
appSecret: "secret_top", // pragma: allowlist secret }); - expect(result.success).toBe(false); - if (!result.success) { - expect( - result.error.issues.some((issue) => issue.path.join(".") === "verificationToken"), - ).toBe(true); - } + expectSchemaIssue(result, "verificationToken"); }); it("rejects top-level webhook mode without encryptKey", () => { @@ -55,10 +60,7 @@ describe("FeishuConfigSchema webhook validation", () => { appSecret: "secret_top", // pragma: allowlist secret }); - expect(result.success).toBe(false); - if (!result.success) { - expect(result.error.issues.some((issue) => issue.path.join(".") === "encryptKey")).toBe(true); - } + expectSchemaIssue(result, "encryptKey"); }); it("accepts top-level webhook mode with verificationToken and encryptKey", () => { @@ -84,14 +86,7 @@ describe("FeishuConfigSchema webhook validation", () => { }, }); - expect(result.success).toBe(false); - if (!result.success) { - expect( - result.error.issues.some( - (issue) => issue.path.join(".") === "accounts.main.verificationToken", - ), - ).toBe(true); - } + expectSchemaIssue(result, "accounts.main.verificationToken"); }); it("rejects account webhook mode without encryptKey", () => { @@ -106,12 +101,7 @@ describe("FeishuConfigSchema webhook validation", () => { }, }); - expect(result.success).toBe(false); - if (!result.success) { - expect( - result.error.issues.some((issue) => issue.path.join(".") === "accounts.main.encryptKey"), - ).toBe(true); - } + expectSchemaIssue(result, "accounts.main.encryptKey"); }); it("accepts account webhook mode inheriting top-level verificationToken and encryptKey", () => { diff --git a/extensions/feishu/src/dedup.ts b/extensions/feishu/src/dedup.ts index 35f95d5c76b..fc3e9baad65 100644 --- a/extensions/feishu/src/dedup.ts +++ b/extensions/feishu/src/dedup.ts @@ -10,9 +10,15 @@ import { const DEDUP_TTL_MS = 24 * 60 * 60 * 1000; const MEMORY_MAX_SIZE = 1_000; const FILE_MAX_ENTRIES = 10_000; +const EVENT_DEDUP_TTL_MS = 5 * 60 * 1000; +const 
EVENT_MEMORY_MAX_SIZE = 2_000; type PersistentDedupeData = Record; const memoryDedupe = createDedupeCache({ ttlMs: DEDUP_TTL_MS, maxSize: MEMORY_MAX_SIZE }); +const processingClaims = createDedupeCache({ + ttlMs: EVENT_DEDUP_TTL_MS, + maxSize: EVENT_MEMORY_MAX_SIZE, +}); function resolveStateDirFromEnv(env: NodeJS.ProcessEnv = process.env): string { const stateOverride = env.OPENCLAW_STATE_DIR?.trim() || env.CLAWDBOT_STATE_DIR?.trim(); @@ -37,6 +43,103 @@ const persistentDedupe = createPersistentDedupe({ resolveFilePath: resolveNamespaceFilePath, }); +function resolveEventDedupeKey( + namespace: string, + messageId: string | undefined | null, +): string | null { + const trimmed = messageId?.trim(); + if (!trimmed) { + return null; + } + return `${namespace}:${trimmed}`; +} + +function normalizeMessageId(messageId: string | undefined | null): string | null { + const trimmed = messageId?.trim(); + return trimmed ? trimmed : null; +} + +function resolveMemoryDedupeKey( + namespace: string, + messageId: string | undefined | null, +): string | null { + const trimmed = normalizeMessageId(messageId); + if (!trimmed) { + return null; + } + return `${namespace}:${trimmed}`; +} + +export function tryBeginFeishuMessageProcessing( + messageId: string | undefined | null, + namespace = "global", +): boolean { + return !processingClaims.check(resolveEventDedupeKey(namespace, messageId)); +} + +export function releaseFeishuMessageProcessing( + messageId: string | undefined | null, + namespace = "global", +): void { + processingClaims.delete(resolveEventDedupeKey(namespace, messageId)); +} + +export async function finalizeFeishuMessageProcessing(params: { + messageId: string | undefined | null; + namespace?: string; + log?: (...args: unknown[]) => void; + claimHeld?: boolean; +}): Promise { + const { messageId, namespace = "global", log, claimHeld = false } = params; + const normalizedMessageId = normalizeMessageId(messageId); + const memoryKey = resolveMemoryDedupeKey(namespace, 
messageId); + if (!memoryKey || !normalizedMessageId) { + return false; + } + if (!claimHeld && !tryBeginFeishuMessageProcessing(normalizedMessageId, namespace)) { + return false; + } + if (!tryRecordMessage(memoryKey)) { + releaseFeishuMessageProcessing(normalizedMessageId, namespace); + return false; + } + if (!(await tryRecordMessagePersistent(normalizedMessageId, namespace, log))) { + releaseFeishuMessageProcessing(normalizedMessageId, namespace); + return false; + } + return true; +} + +export async function recordProcessedFeishuMessage( + messageId: string | undefined | null, + namespace = "global", + log?: (...args: unknown[]) => void, +): Promise { + const normalizedMessageId = normalizeMessageId(messageId); + const memoryKey = resolveMemoryDedupeKey(namespace, messageId); + if (!memoryKey || !normalizedMessageId) { + return false; + } + tryRecordMessage(memoryKey); + return await tryRecordMessagePersistent(normalizedMessageId, namespace, log); +} + +export async function hasProcessedFeishuMessage( + messageId: string | undefined | null, + namespace = "global", + log?: (...args: unknown[]) => void, +): Promise { + const normalizedMessageId = normalizeMessageId(messageId); + const memoryKey = resolveMemoryDedupeKey(namespace, messageId); + if (!memoryKey || !normalizedMessageId) { + return false; + } + if (hasRecordedMessage(memoryKey)) { + return true; + } + return hasRecordedMessagePersistent(normalizedMessageId, namespace, log); +} + /** * Synchronous dedup — memory only. * Kept for backward compatibility; prefer {@link tryRecordMessagePersistent}. 
diff --git a/extensions/feishu/src/media.test.ts b/extensions/feishu/src/media.test.ts index 813e5090292..80555c294ae 100644 --- a/extensions/feishu/src/media.test.ts +++ b/extensions/feishu/src/media.test.ts @@ -64,18 +64,21 @@ function expectMediaTimeoutClientConfigured(): void { ); } +function mockResolvedFeishuAccount() { + resolveFeishuAccountMock.mockReturnValue({ + configured: true, + accountId: "main", + config: {}, + appId: "app_id", + appSecret: "app_secret", + domain: "feishu", + }); +} + describe("sendMediaFeishu msg_type routing", () => { beforeEach(() => { vi.clearAllMocks(); - - resolveFeishuAccountMock.mockReturnValue({ - configured: true, - accountId: "main", - config: {}, - appId: "app_id", - appSecret: "app_secret", - domain: "feishu", - }); + mockResolvedFeishuAccount(); normalizeFeishuTargetMock.mockReturnValue("ou_target"); resolveReceiveIdTypeMock.mockReturnValue("open_id"); @@ -381,7 +384,7 @@ describe("sendMediaFeishu msg_type routing", () => { expect(messageResourceGetMock).not.toHaveBeenCalled(); }); - it("encodes Chinese filenames for file uploads", async () => { + it("preserves Chinese filenames for file uploads", async () => { await sendMediaFeishu({ cfg: {} as any, to: "user:ou_target", @@ -390,8 +393,7 @@ describe("sendMediaFeishu msg_type routing", () => { }); const createCall = fileCreateMock.mock.calls[0][0]; - expect(createCall.data.file_name).not.toBe("测试文档.pdf"); - expect(createCall.data.file_name).toBe(encodeURIComponent("测试文档") + ".pdf"); + expect(createCall.data.file_name).toBe("测试文档.pdf"); }); it("preserves ASCII filenames unchanged for file uploads", async () => { @@ -406,7 +408,7 @@ describe("sendMediaFeishu msg_type routing", () => { expect(createCall.data.file_name).toBe("report-2026.pdf"); }); - it("encodes special characters (em-dash, full-width brackets) in filenames", async () => { + it("preserves special Unicode characters (em-dash, full-width brackets) in filenames", async () => { await sendMediaFeishu({ cfg: {} 
as any, to: "user:ou_target", @@ -415,9 +417,7 @@ describe("sendMediaFeishu msg_type routing", () => { }); const createCall = fileCreateMock.mock.calls[0][0]; - expect(createCall.data.file_name).toMatch(/\.md$/); - expect(createCall.data.file_name).not.toContain("—"); - expect(createCall.data.file_name).not.toContain("("); + expect(createCall.data.file_name).toBe("报告—详情(2026).md"); }); }); @@ -427,71 +427,48 @@ describe("sanitizeFileNameForUpload", () => { expect(sanitizeFileNameForUpload("my-file_v2.txt")).toBe("my-file_v2.txt"); }); - it("encodes Chinese characters in basename, preserves extension", () => { - const result = sanitizeFileNameForUpload("测试文件.md"); - expect(result).toBe(encodeURIComponent("测试文件") + ".md"); - expect(result).toMatch(/\.md$/); + it("preserves Chinese characters", () => { + expect(sanitizeFileNameForUpload("测试文件.md")).toBe("测试文件.md"); + expect(sanitizeFileNameForUpload("武汉15座山登山信息汇总.csv")).toBe( + "武汉15座山登山信息汇总.csv", + ); }); - it("encodes em-dash and full-width brackets", () => { - const result = sanitizeFileNameForUpload("文件—说明(v2).pdf"); - expect(result).toMatch(/\.pdf$/); - expect(result).not.toContain("—"); - expect(result).not.toContain("("); - expect(result).not.toContain(")"); + it("preserves em-dash and full-width brackets", () => { + expect(sanitizeFileNameForUpload("文件—说明(v2).pdf")).toBe("文件—说明(v2).pdf"); }); - it("encodes single quotes and parentheses per RFC 5987", () => { - const result = sanitizeFileNameForUpload("文件'(test).txt"); - expect(result).toContain("%27"); - expect(result).toContain("%28"); - expect(result).toContain("%29"); - expect(result).toMatch(/\.txt$/); + it("preserves single quotes and parentheses", () => { + expect(sanitizeFileNameForUpload("文件'(test).txt")).toBe("文件'(test).txt"); }); - it("handles filenames without extension", () => { - const result = sanitizeFileNameForUpload("测试文件"); - expect(result).toBe(encodeURIComponent("测试文件")); + it("preserves filenames without extension", () => { + 
expect(sanitizeFileNameForUpload("测试文件")).toBe("测试文件"); }); - it("handles mixed ASCII and non-ASCII", () => { - const result = sanitizeFileNameForUpload("Report_报告_2026.xlsx"); - expect(result).toMatch(/\.xlsx$/); - expect(result).not.toContain("报告"); + it("preserves mixed ASCII and non-ASCII", () => { + expect(sanitizeFileNameForUpload("Report_报告_2026.xlsx")).toBe("Report_报告_2026.xlsx"); }); - it("encodes non-ASCII extensions", () => { - const result = sanitizeFileNameForUpload("报告.文档"); - expect(result).toContain("%E6%96%87%E6%A1%A3"); - expect(result).not.toContain("文档"); + it("preserves emoji filenames", () => { + expect(sanitizeFileNameForUpload("report_😀.txt")).toBe("report_😀.txt"); }); - it("encodes emoji filenames", () => { - const result = sanitizeFileNameForUpload("report_😀.txt"); - expect(result).toContain("%F0%9F%98%80"); - expect(result).toMatch(/\.txt$/); + it("strips control characters", () => { + expect(sanitizeFileNameForUpload("bad\x00file.txt")).toBe("bad_file.txt"); + expect(sanitizeFileNameForUpload("inject\r\nheader.txt")).toBe("inject__header.txt"); }); - it("encodes mixed ASCII and non-ASCII extensions", () => { - const result = sanitizeFileNameForUpload("notes_总结.v测试"); - expect(result).toContain("notes_"); - expect(result).toContain("%E6%B5%8B%E8%AF%95"); - expect(result).not.toContain("测试"); + it("strips quotes and backslashes to prevent header injection", () => { + expect(sanitizeFileNameForUpload('file"name.txt')).toBe("file_name.txt"); + expect(sanitizeFileNameForUpload("file\\name.txt")).toBe("file_name.txt"); }); }); describe("downloadMessageResourceFeishu", () => { beforeEach(() => { vi.clearAllMocks(); - - resolveFeishuAccountMock.mockReturnValue({ - configured: true, - accountId: "main", - config: {}, - appId: "app_id", - appSecret: "app_secret", - domain: "feishu", - }); + mockResolvedFeishuAccount(); createFeishuClientMock.mockReturnValue({ im: { diff --git a/extensions/feishu/src/media.ts b/extensions/feishu/src/media.ts index 
41438c570f2..45596fe45ed 100644 --- a/extensions/feishu/src/media.ts +++ b/extensions/feishu/src/media.ts @@ -226,21 +226,17 @@ export async function uploadImageFeishu(params: { } /** - * Encode a filename for safe use in Feishu multipart/form-data uploads. - * Non-ASCII characters (Chinese, em-dash, full-width brackets, etc.) cause - * the upload to silently fail when passed raw through the SDK's form-data - * serialization. RFC 5987 percent-encoding keeps headers 7-bit clean while - * Feishu's server decodes and preserves the original display name. + * Sanitize a filename for safe use in Feishu multipart/form-data uploads. + * Strips control characters and multipart-injection vectors (CWE-93) while + * preserving the original UTF-8 display name (Chinese, emoji, etc.). + * + * Previous versions percent-encoded non-ASCII characters, but the Feishu + * `im.file.create` API uses `file_name` as a literal display name — it does + * NOT decode percent-encoding — so encoded filenames appeared as garbled text + * in chat (regression in v2026.3.2). 
*/ export function sanitizeFileNameForUpload(fileName: string): string { - const ASCII_ONLY = /^[\x20-\x7E]+$/; - if (ASCII_ONLY.test(fileName)) { - return fileName; - } - return encodeURIComponent(fileName) - .replace(/'/g, "%27") - .replace(/\(/g, "%28") - .replace(/\)/g, "%29"); + return fileName.replace(/[\x00-\x1F\x7F\r\n"\\]/g, "_"); } /** diff --git a/extensions/feishu/src/monitor.account.ts b/extensions/feishu/src/monitor.account.ts index f7d40d8e280..3f3cad8ddc3 100644 --- a/extensions/feishu/src/monitor.account.ts +++ b/extensions/feishu/src/monitor.account.ts @@ -12,10 +12,10 @@ import { import { handleFeishuCardAction, type FeishuCardActionEvent } from "./card-action.js"; import { createEventDispatcher } from "./client.js"; import { - hasRecordedMessage, - hasRecordedMessagePersistent, - tryRecordMessage, - tryRecordMessagePersistent, + hasProcessedFeishuMessage, + recordProcessedFeishuMessage, + releaseFeishuMessageProcessing, + tryBeginFeishuMessageProcessing, warmupDedupFromDisk, } from "./dedup.js"; import { isMentionForwardRequest } from "./mention.js"; @@ -264,6 +264,7 @@ function registerEventHandlers( runtime, chatHistories, accountId, + processingClaimHeld: true, }); await enqueue(chatId, task); }; @@ -291,10 +292,8 @@ function registerEventHandlers( return; } for (const messageId of suppressedIds) { - // Keep in-memory dedupe in sync with handleFeishuMessage's keying. 
- tryRecordMessage(`${accountId}:${messageId}`); try { - await tryRecordMessagePersistent(messageId, accountId, log); + await recordProcessedFeishuMessage(messageId, accountId, log); } catch (err) { error( `feishu[${accountId}]: failed to record merged dedupe id ${messageId}: ${String(err)}`, @@ -303,15 +302,7 @@ function registerEventHandlers( } }; const isMessageAlreadyProcessed = async (entry: FeishuMessageEvent): Promise => { - const messageId = entry.message.message_id?.trim(); - if (!messageId) { - return false; - } - const memoryKey = `${accountId}:${messageId}`; - if (hasRecordedMessage(memoryKey)) { - return true; - } - return hasRecordedMessagePersistent(messageId, accountId, log); + return await hasProcessedFeishuMessage(entry.message.message_id, accountId, log); }; const inboundDebouncer = core.channel.debounce.createInboundDebouncer({ debounceMs: inboundDebounceMs, @@ -384,19 +375,28 @@ function registerEventHandlers( }, }); }, - onError: (err) => { + onError: (err, entries) => { + for (const entry of entries) { + releaseFeishuMessageProcessing(entry.message.message_id, accountId); + } error(`feishu[${accountId}]: inbound debounce flush failed: ${String(err)}`); }, }); eventDispatcher.register({ "im.message.receive_v1": async (data) => { + const event = data as unknown as FeishuMessageEvent; + const messageId = event.message?.message_id?.trim(); + if (!tryBeginFeishuMessageProcessing(messageId, accountId)) { + log(`feishu[${accountId}]: dropping duplicate event for message ${messageId}`); + return; + } const processMessage = async () => { - const event = data as unknown as FeishuMessageEvent; await inboundDebouncer.enqueue(event); }; if (fireAndForget) { void processMessage().catch((err) => { + releaseFeishuMessageProcessing(messageId, accountId); error(`feishu[${accountId}]: error handling message: ${String(err)}`); }); return; @@ -404,6 +404,7 @@ function registerEventHandlers( try { await processMessage(); } catch (err) { + 
releaseFeishuMessageProcessing(messageId, accountId); error(`feishu[${accountId}]: error handling message: ${String(err)}`); } }, diff --git a/extensions/feishu/src/monitor.reaction.test.ts b/extensions/feishu/src/monitor.reaction.test.ts index e17859d0531..49da928ea3b 100644 --- a/extensions/feishu/src/monitor.reaction.test.ts +++ b/extensions/feishu/src/monitor.reaction.test.ts @@ -78,6 +78,25 @@ async function resolveReactionWithLookup(params: { }); } +async function resolveNonBotReaction(params?: { cfg?: ClawdbotConfig; uuid?: () => string }) { + return await resolveReactionSyntheticEvent({ + cfg: params?.cfg ?? cfg, + accountId: "default", + event: makeReactionEvent(), + botOpenId: "ou_bot", + fetchMessage: async () => ({ + messageId: "om_msg1", + chatId: "oc_group", + chatType: "group", + senderOpenId: "ou_other", + senderType: "user", + content: "hello", + contentType: "text", + }), + ...(params?.uuid ? { uuid: params.uuid } : {}), + }); +} + type FeishuMention = NonNullable[number]; function buildDebounceConfig(): ClawdbotConfig { @@ -179,11 +198,23 @@ function getFirstDispatchedEvent(): FeishuMessageEvent { return firstParams.event; } +function expectSingleDispatchedEvent(): FeishuMessageEvent { + expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); + return getFirstDispatchedEvent(); +} + +function expectParsedFirstDispatchedEvent(botOpenId = "ou_bot") { + const dispatched = expectSingleDispatchedEvent(); + return { + dispatched, + parsed: parseFeishuMessageEvent(dispatched, botOpenId), + }; +} + function setDedupPassThroughMocks(): void { - vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + vi.spyOn(dedup, 
"recordProcessedFeishuMessage").mockResolvedValue(true); + vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false); } function createMention(params: { openId: string; name: string; key?: string }): FeishuMention { @@ -203,6 +234,12 @@ async function enqueueDebouncedMessage( await Promise.resolve(); } +function setStaleRetryMocks(messageId = "om_old") { + vi.spyOn(dedup, "hasProcessedFeishuMessage").mockImplementation( + async (currentMessageId) => currentMessageId === messageId, + ); +} + describe("resolveReactionSyntheticEvent", () => { it("filters app self-reactions", async () => { const event = makeReactionEvent({ operator_type: "app" }); @@ -262,28 +299,12 @@ describe("resolveReactionSyntheticEvent", () => { }); it("filters reactions on non-bot messages", async () => { - const event = makeReactionEvent(); - const result = await resolveReactionSyntheticEvent({ - cfg, - accountId: "default", - event, - botOpenId: "ou_bot", - fetchMessage: async () => ({ - messageId: "om_msg1", - chatId: "oc_group", - chatType: "group", - senderOpenId: "ou_other", - senderType: "user", - content: "hello", - contentType: "text", - }), - }); + const result = await resolveNonBotReaction(); expect(result).toBeNull(); }); it("allows non-bot reactions when reactionNotifications is all", async () => { - const event = makeReactionEvent(); - const result = await resolveReactionSyntheticEvent({ + const result = await resolveNonBotReaction({ cfg: { channels: { feishu: { @@ -291,18 +312,6 @@ describe("resolveReactionSyntheticEvent", () => { }, }, } as ClawdbotConfig, - accountId: "default", - event, - botOpenId: "ou_bot", - fetchMessage: async () => ({ - messageId: "om_msg1", - chatId: "oc_group", - chatType: "group", - senderOpenId: "ou_other", - senderType: "user", - content: "hello", - contentType: "text", - }), uuid: () => "fixed-uuid", }); expect(result?.message.message_id).toBe("om_msg1:reaction:THUMBSUP:fixed-uuid"); @@ -457,18 +466,16 @@ describe("Feishu inbound debounce 
regressions", () => { ); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); + const dispatched = expectSingleDispatchedEvent(); const mergedMentions = dispatched.message.mentions ?? []; expect(mergedMentions.some((mention) => mention.id.open_id === "ou_bot")).toBe(true); expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false); }); it("passes prefetched botName through to handleFeishuMessage", async () => { - vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false); const onMessage = await setupDebounceMonitor({ botName: "OpenClaw Bot" }); await onMessage( @@ -517,9 +524,7 @@ describe("Feishu inbound debounce regressions", () => { ); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); - const parsed = parseFeishuMessageEvent(dispatched, "ou_bot"); + const { dispatched, parsed } = expectParsedFirstDispatchedEvent(); expect(parsed.mentionedBot).toBe(true); expect(parsed.mentionTargets).toBeUndefined(); const mergedMentions = dispatched.message.mentions ?? 
[]; @@ -547,19 +552,14 @@ describe("Feishu inbound debounce regressions", () => { ); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); - const parsed = parseFeishuMessageEvent(dispatched, "ou_bot"); + const { parsed } = expectParsedFirstDispatchedEvent(); expect(parsed.mentionedBot).toBe(true); }); it("excludes previously processed retries from combined debounce text", async () => { - vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old")); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation( - async (messageId) => messageId === "om_old", - ); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + setStaleRetryMocks(); const onMessage = await setupDebounceMonitor(); await onMessage(createTextEvent({ messageId: "om_old", text: "stale" })); @@ -576,20 +576,16 @@ describe("Feishu inbound debounce regressions", () => { await Promise.resolve(); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); + const dispatched = expectSingleDispatchedEvent(); expect(dispatched.message.message_id).toBe("om_new_2"); const combined = JSON.parse(dispatched.message.content) as { text?: string }; expect(combined.text).toBe("first\nsecond"); }); it("uses latest fresh message id when debounce batch ends with stale retry", async () => { - const recordSpy = vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old")); - vi.spyOn(dedup, 
"hasRecordedMessagePersistent").mockImplementation( - async (messageId) => messageId === "om_old", - ); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + const recordSpy = vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + setStaleRetryMocks(); const onMessage = await setupDebounceMonitor(); await onMessage(createTextEvent({ messageId: "om_new", text: "fresh" })); @@ -600,12 +596,58 @@ describe("Feishu inbound debounce regressions", () => { await Promise.resolve(); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); + const dispatched = expectSingleDispatchedEvent(); expect(dispatched.message.message_id).toBe("om_new"); const combined = JSON.parse(dispatched.message.content) as { text?: string }; expect(combined.text).toBe("fresh"); - expect(recordSpy).toHaveBeenCalledWith("default:om_old"); - expect(recordSpy).not.toHaveBeenCalledWith("default:om_new"); + expect(recordSpy).toHaveBeenCalledWith("om_old", "default", expect.any(Function)); + expect(recordSpy).not.toHaveBeenCalledWith("om_new", "default", expect.any(Function)); + }); + + it("releases early event dedupe when debounced dispatch fails", async () => { + setDedupPassThroughMocks(); + const enqueueMock = vi.fn(); + setFeishuRuntime( + createPluginRuntimeMock({ + channel: { + debounce: { + createInboundDebouncer: (params: { + onError?: (err: unknown, items: T[]) => void; + }) => ({ + enqueue: async (item: T) => { + enqueueMock(item); + params.onError?.(new Error("dispatch failed"), [item]); + }, + flushKey: async () => {}, + }), + resolveInboundDebounceMs, + }, + text: { + hasControlCommand, + }, + }, + }), + ); + const onMessage = await setupDebounceMonitor(); + const event = createTextEvent({ messageId: "om_retryable", text: "hello" }); + + await enqueueDebouncedMessage(onMessage, event); + expect(enqueueMock).toHaveBeenCalledTimes(1); + + await 
enqueueDebouncedMessage(onMessage, event); + expect(enqueueMock).toHaveBeenCalledTimes(2); + expect(handleFeishuMessageMock).not.toHaveBeenCalled(); + }); + + it("drops duplicate inbound events before they re-enter the debounce pipeline", async () => { + const onMessage = await setupDebounceMonitor(); + const event = createTextEvent({ messageId: "om_duplicate", text: "hello" }); + + await enqueueDebouncedMessage(onMessage, event); + await vi.advanceTimersByTimeAsync(25); + await enqueueDebouncedMessage(onMessage, event); + await vi.advanceTimersByTimeAsync(25); + + expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); }); }); diff --git a/extensions/feishu/src/monitor.startup.test.ts b/extensions/feishu/src/monitor.startup.test.ts index f5e19159f0a..96dbd52b8ef 100644 --- a/extensions/feishu/src/monitor.startup.test.ts +++ b/extensions/feishu/src/monitor.startup.test.ts @@ -3,33 +3,19 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; const probeFeishuMock = vi.hoisted(() => vi.fn()); -const feishuClientMockModule = vi.hoisted(() => ({ - createFeishuWSClient: vi.fn(() => ({ start: vi.fn() })), - createEventDispatcher: vi.fn(() => ({ register: vi.fn() })), -})); -const feishuRuntimeMockModule = vi.hoisted(() => ({ - getFeishuRuntime: () => ({ - channel: { - debounce: { - resolveInboundDebounceMs: () => 0, - createInboundDebouncer: () => ({ - enqueue: async () => {}, - flushKey: async () => {}, - }), - }, - text: { - hasControlCommand: () => false, - }, - }, - }), -})); vi.mock("./probe.js", () => ({ probeFeishu: probeFeishuMock, })); -vi.mock("./client.js", () => feishuClientMockModule); -vi.mock("./runtime.js", () => feishuRuntimeMockModule); +vi.mock("./client.js", async () => { + const { createFeishuClientMockModule } = await import("./monitor.test-mocks.js"); + return createFeishuClientMockModule(); +}); +vi.mock("./runtime.js", async () => { + const { 
createFeishuRuntimeMockModule } = await import("./monitor.test-mocks.js"); + return createFeishuRuntimeMockModule(); +}); function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig { return { @@ -52,6 +38,12 @@ function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig } as ClawdbotConfig; } +async function waitForStartedAccount(started: string[], accountId: string) { + for (let i = 0; i < 10 && !started.includes(accountId); i += 1) { + await Promise.resolve(); + } +} + afterEach(() => { stopFeishuMonitor(); }); @@ -116,10 +108,7 @@ describe("Feishu monitor startup preflight", () => { }); try { - for (let i = 0; i < 10 && !started.includes("beta"); i += 1) { - await Promise.resolve(); - } - + await waitForStartedAccount(started, "beta"); expect(started).toEqual(["alpha", "beta"]); expect(started.filter((accountId) => accountId === "alpha")).toHaveLength(1); } finally { @@ -153,10 +142,7 @@ describe("Feishu monitor startup preflight", () => { }); try { - for (let i = 0; i < 10 && !started.includes("beta"); i += 1) { - await Promise.resolve(); - } - + await waitForStartedAccount(started, "beta"); expect(started).toEqual(["alpha", "beta"]); expect(runtime.error).toHaveBeenCalledWith( expect.stringContaining("bot info probe timed out"), diff --git a/extensions/feishu/src/monitor.webhook-e2e.test.ts b/extensions/feishu/src/monitor.webhook-e2e.test.ts index 2e73f973408..a11957e3393 100644 --- a/extensions/feishu/src/monitor.webhook-e2e.test.ts +++ b/extensions/feishu/src/monitor.webhook-e2e.test.ts @@ -1,9 +1,7 @@ import crypto from "node:crypto"; -import { createServer } from "node:http"; -import type { AddressInfo } from "node:net"; -import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; import { afterEach, describe, expect, it, vi } from "vitest"; import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js"; +import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js"; const 
probeFeishuMock = vi.hoisted(() => vi.fn()); @@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule()); import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; -async function getFreePort(): Promise { - const server = createServer(); - await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); - const address = server.address() as AddressInfo | null; - if (!address) { - throw new Error("missing server address"); - } - await new Promise((resolve) => server.close(() => resolve())); - return address.port; -} - -async function waitUntilServerReady(url: string): Promise { - for (let i = 0; i < 50; i += 1) { - try { - const response = await fetch(url, { method: "GET" }); - if (response.status >= 200 && response.status < 500) { - return; - } - } catch { - // retry - } - await new Promise((resolve) => setTimeout(resolve, 20)); - } - throw new Error(`server did not start: ${url}`); -} - -function buildConfig(params: { - accountId: string; - path: string; - port: number; - verificationToken?: string; - encryptKey?: string; -}): ClawdbotConfig { - return { - channels: { - feishu: { - enabled: true, - accounts: { - [params.accountId]: { - enabled: true, - appId: "cli_test", - appSecret: "secret_test", // pragma: allowlist secret - connectionMode: "webhook", - webhookHost: "127.0.0.1", - webhookPort: params.port, - webhookPath: params.path, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }, - }, - }, - }, - } as ClawdbotConfig; -} - function signFeishuPayload(params: { encryptKey: string; payload: Record; @@ -107,41 +50,12 @@ function encryptFeishuPayload(encryptKey: string, payload: Record Promise, -) { - const port = await getFreePort(); - const cfg = buildConfig({ - accountId: params.accountId, - path: params.path, - port, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, +async function postSignedPayload(url: string, payload: Record) { + return await 
fetch(url, { + method: "POST", + headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }), + body: JSON.stringify(payload), }); - - const abortController = new AbortController(); - const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - const monitorPromise = monitorFeishuProvider({ - config: cfg, - runtime, - abortSignal: abortController.signal, - }); - - const url = `http://127.0.0.1:${port}${params.path}`; - await waitUntilServerReady(url); - - try { - await run(url); - } finally { - abortController.abort(); - await monitorPromise; - } } afterEach(() => { @@ -159,6 +73,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { type: "url_verification", challenge: "challenge-token" }; const response = await fetch(url, { @@ -185,6 +100,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -208,6 +124,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -231,13 +148,10 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { type: "url_verification", challenge: "challenge-token" }; - const response = await fetch(url, { - method: "POST", - headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }), - body: JSON.stringify(payload), - }); + const response = await postSignedPayload(url, payload); expect(response.status).toBe(200); await expect(response.json()).resolves.toEqual({ challenge: "challenge-token" }); @@ -255,17 +169,14 @@ describe("Feishu webhook 
signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { schema: "2.0", header: { event_type: "unknown.event" }, event: {}, }; - const response = await fetch(url, { - method: "POST", - headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }), - body: JSON.stringify(payload), - }); + const response = await postSignedPayload(url, payload); expect(response.status).toBe(200); expect(await response.text()).toContain("no unknown.event event handle"); @@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { encrypt: encryptFeishuPayload("encrypt_key", { @@ -290,11 +202,7 @@ describe("Feishu webhook signed-request e2e", () => { challenge: "encrypted-challenge-token", }), }; - const response = await fetch(url, { - method: "POST", - headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }), - body: JSON.stringify(payload), - }); + const response = await postSignedPayload(url, payload); expect(response.status).toBe(200); await expect(response.json()).resolves.toEqual({ diff --git a/extensions/feishu/src/monitor.webhook-security.test.ts b/extensions/feishu/src/monitor.webhook-security.test.ts index e9bfa8bf008..957d874cc3a 100644 --- a/extensions/feishu/src/monitor.webhook-security.test.ts +++ b/extensions/feishu/src/monitor.webhook-security.test.ts @@ -1,11 +1,13 @@ -import { createServer } from "node:http"; -import type { AddressInfo } from "node:net"; -import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; import { afterEach, describe, expect, it, vi } from "vitest"; import { createFeishuClientMockModule, createFeishuRuntimeMockModule, } from "./monitor.test-mocks.js"; +import { + buildWebhookConfig, + getFreePort, + withRunningWebhookMonitor, +} from "./monitor.webhook.test-helpers.js"; const probeFeishuMock 
= vi.hoisted(() => vi.fn()); @@ -33,98 +35,6 @@ import { stopFeishuMonitor, } from "./monitor.js"; -async function getFreePort(): Promise { - const server = createServer(); - await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); - const address = server.address() as AddressInfo | null; - if (!address) { - throw new Error("missing server address"); - } - await new Promise((resolve) => server.close(() => resolve())); - return address.port; -} - -async function waitUntilServerReady(url: string): Promise { - for (let i = 0; i < 50; i += 1) { - try { - const response = await fetch(url, { method: "GET" }); - if (response.status >= 200 && response.status < 500) { - return; - } - } catch { - // retry - } - await new Promise((resolve) => setTimeout(resolve, 20)); - } - throw new Error(`server did not start: ${url}`); -} - -function buildConfig(params: { - accountId: string; - path: string; - port: number; - verificationToken?: string; - encryptKey?: string; -}): ClawdbotConfig { - return { - channels: { - feishu: { - enabled: true, - accounts: { - [params.accountId]: { - enabled: true, - appId: "cli_test", - appSecret: "secret_test", // pragma: allowlist secret - connectionMode: "webhook", - webhookHost: "127.0.0.1", - webhookPort: params.port, - webhookPath: params.path, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }, - }, - }, - }, - } as ClawdbotConfig; -} - -async function withRunningWebhookMonitor( - params: { - accountId: string; - path: string; - verificationToken: string; - encryptKey: string; - }, - run: (url: string) => Promise, -) { - const port = await getFreePort(); - const cfg = buildConfig({ - accountId: params.accountId, - path: params.path, - port, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }); - - const abortController = new AbortController(); - const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - const monitorPromise = 
monitorFeishuProvider({ - config: cfg, - runtime, - abortSignal: abortController.signal, - }); - - const url = `http://127.0.0.1:${port}${params.path}`; - await waitUntilServerReady(url); - - try { - await run(url); - } finally { - abortController.abort(); - await monitorPromise; - } -} - afterEach(() => { clearFeishuWebhookRateLimitStateForTest(); stopFeishuMonitor(); @@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => { it("rejects webhook mode without verificationToken", async () => { probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); - const cfg = buildConfig({ + const cfg = buildWebhookConfig({ accountId: "missing-token", path: "/hook-missing-token", port: await getFreePort(), @@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => { it("rejects webhook mode without encryptKey", async () => { probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); - const cfg = buildConfig({ + const cfg = buildWebhookConfig({ accountId: "missing-encrypt-key", path: "/hook-missing-encrypt", port: await getFreePort(), @@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { let saw429 = false; for (let i = 0; i < 130; i += 1) { diff --git a/extensions/feishu/src/monitor.webhook.test-helpers.ts b/extensions/feishu/src/monitor.webhook.test-helpers.ts new file mode 100644 index 00000000000..b9de2150bd4 --- /dev/null +++ b/extensions/feishu/src/monitor.webhook.test-helpers.ts @@ -0,0 +1,98 @@ +import { createServer } from "node:http"; +import type { AddressInfo } from "node:net"; +import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; +import { vi 
} from "vitest"; +import type { monitorFeishuProvider } from "./monitor.js"; + +export async function getFreePort(): Promise { + const server = createServer(); + await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); + const address = server.address() as AddressInfo | null; + if (!address) { + throw new Error("missing server address"); + } + await new Promise((resolve) => server.close(() => resolve())); + return address.port; +} + +async function waitUntilServerReady(url: string): Promise { + for (let i = 0; i < 50; i += 1) { + try { + const response = await fetch(url, { method: "GET" }); + if (response.status >= 200 && response.status < 500) { + return; + } + } catch { + // retry + } + await new Promise((resolve) => setTimeout(resolve, 20)); + } + throw new Error(`server did not start: ${url}`); +} + +export function buildWebhookConfig(params: { + accountId: string; + path: string; + port: number; + verificationToken?: string; + encryptKey?: string; +}): ClawdbotConfig { + return { + channels: { + feishu: { + enabled: true, + accounts: { + [params.accountId]: { + enabled: true, + appId: "cli_test", + appSecret: "secret_test", // pragma: allowlist secret + connectionMode: "webhook", + webhookHost: "127.0.0.1", + webhookPort: params.port, + webhookPath: params.path, + encryptKey: params.encryptKey, + verificationToken: params.verificationToken, + }, + }, + }, + }, + } as ClawdbotConfig; +} + +export async function withRunningWebhookMonitor( + params: { + accountId: string; + path: string; + verificationToken: string; + encryptKey: string; + }, + monitor: typeof monitorFeishuProvider, + run: (url: string) => Promise, +) { + const port = await getFreePort(); + const cfg = buildWebhookConfig({ + accountId: params.accountId, + path: params.path, + port, + encryptKey: params.encryptKey, + verificationToken: params.verificationToken, + }); + + const abortController = new AbortController(); + const runtime = { log: vi.fn(), error: vi.fn(), exit: 
vi.fn() }; + const monitorPromise = monitor({ + config: cfg, + runtime, + abortSignal: abortController.signal, + }); + + const url = `http://127.0.0.1:${port}${params.path}`; + await waitUntilServerReady(url); + + try { + await run(url); + } finally { + abortController.abort(); + await monitorPromise; + } +} diff --git a/extensions/feishu/src/outbound.test.ts b/extensions/feishu/src/outbound.test.ts index 11cfc957e80..39b7c1e4a63 100644 --- a/extensions/feishu/src/outbound.test.ts +++ b/extensions/feishu/src/outbound.test.ts @@ -29,12 +29,16 @@ vi.mock("./runtime.js", () => ({ import { feishuOutbound } from "./outbound.js"; const sendText = feishuOutbound.sendText!; +function resetOutboundMocks() { + vi.clearAllMocks(); + sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); + sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); + sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); +} + describe("feishuOutbound.sendText local-image auto-convert", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); async function createTmpImage(ext = ".png"): Promise<{ dir: string; file: string }> { @@ -181,10 +185,7 @@ describe("feishuOutbound.sendText local-image auto-convert", () => { describe("feishuOutbound.sendText replyToId forwarding", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); it("forwards replyToId as replyToMessageId to sendMessageFeishu", async () => { @@ -249,10 +250,7 @@ describe("feishuOutbound.sendText replyToId forwarding", () => { 
describe("feishuOutbound.sendMedia replyToId forwarding", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); it("forwards replyToId to sendMediaFeishu", async () => { @@ -292,10 +290,7 @@ describe("feishuOutbound.sendMedia replyToId forwarding", () => { describe("feishuOutbound.sendMedia renderMode", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); it("uses markdown cards for captions when renderMode=card", async () => { diff --git a/extensions/feishu/src/probe.test.ts b/extensions/feishu/src/probe.test.ts index b93935cccc6..bfc270a4459 100644 --- a/extensions/feishu/src/probe.test.ts +++ b/extensions/feishu/src/probe.test.ts @@ -8,6 +8,22 @@ vi.mock("./client.js", () => ({ import { FEISHU_PROBE_REQUEST_TIMEOUT_MS, probeFeishu, clearProbeCache } from "./probe.js"; +const DEFAULT_CREDS = { appId: "cli_123", appSecret: "secret" } as const; // pragma: allowlist secret +const DEFAULT_SUCCESS_RESPONSE = { + code: 0, + bot: { bot_name: "TestBot", open_id: "ou_abc123" }, +} as const; +const DEFAULT_SUCCESS_RESULT = { + ok: true, + appId: "cli_123", + botName: "TestBot", + botOpenId: "ou_abc123", +} as const; +const BOT1_RESPONSE = { + code: 0, + bot: { bot_name: "Bot1", open_id: "ou_1" }, +} as const; + function makeRequestFn(response: Record) { return vi.fn().mockResolvedValue(response); } @@ -18,6 +34,64 @@ function setupClient(response: Record) { return requestFn; } +function setupSuccessClient() { + return setupClient(DEFAULT_SUCCESS_RESPONSE); +} + +async function expectDefaultSuccessResult( + 
creds = DEFAULT_CREDS, + expected: Awaited> = DEFAULT_SUCCESS_RESULT, +) { + const result = await probeFeishu(creds); + expect(result).toEqual(expected); +} + +async function withFakeTimers(run: () => Promise) { + vi.useFakeTimers(); + try { + await run(); + } finally { + vi.useRealTimers(); + } +} + +async function expectErrorResultCached(params: { + requestFn: ReturnType; + expectedError: string; + ttlMs: number; +}) { + createFeishuClientMock.mockReturnValue({ request: params.requestFn }); + + const first = await probeFeishu(DEFAULT_CREDS); + const second = await probeFeishu(DEFAULT_CREDS); + expect(first).toMatchObject({ ok: false, error: params.expectedError }); + expect(second).toMatchObject({ ok: false, error: params.expectedError }); + expect(params.requestFn).toHaveBeenCalledTimes(1); + + vi.advanceTimersByTime(params.ttlMs + 1); + + await probeFeishu(DEFAULT_CREDS); + expect(params.requestFn).toHaveBeenCalledTimes(2); +} + +async function expectFreshDefaultProbeAfter( + requestFn: ReturnType, + invalidate: () => void, +) { + await probeFeishu(DEFAULT_CREDS); + expect(requestFn).toHaveBeenCalledTimes(1); + + invalidate(); + + await probeFeishu(DEFAULT_CREDS); + expect(requestFn).toHaveBeenCalledTimes(2); +} + +async function readSequentialDefaultProbePair() { + const first = await probeFeishu(DEFAULT_CREDS); + return { first, second: await probeFeishu(DEFAULT_CREDS) }; +} + describe("probeFeishu", () => { beforeEach(() => { clearProbeCache(); @@ -44,28 +118,16 @@ describe("probeFeishu", () => { }); it("returns bot info on successful probe", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, - }); + const requestFn = setupSuccessClient(); - const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret - expect(result).toEqual({ - ok: true, - appId: "cli_123", - botName: "TestBot", - botOpenId: "ou_abc123", - }); + await expectDefaultSuccessResult(); 
expect(requestFn).toHaveBeenCalledTimes(1); }); it("passes the probe timeout to the Feishu request", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, - }); + const requestFn = setupSuccessClient(); - await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret + await probeFeishu(DEFAULT_CREDS); expect(requestFn).toHaveBeenCalledWith( expect.objectContaining({ @@ -77,19 +139,16 @@ describe("probeFeishu", () => { }); it("returns timeout error when request exceeds timeout", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const requestFn = vi.fn().mockImplementation(() => new Promise(() => {})); createFeishuClientMock.mockReturnValue({ request: requestFn }); - const promise = probeFeishu({ appId: "cli_123", appSecret: "secret" }, { timeoutMs: 1_000 }); + const promise = probeFeishu(DEFAULT_CREDS, { timeoutMs: 1_000 }); await vi.advanceTimersByTimeAsync(1_000); const result = await promise; expect(result).toMatchObject({ ok: false, error: "probe timed out after 1000ms" }); - } finally { - vi.useRealTimers(); - } + }); }); it("returns aborted when abort signal is already aborted", async () => { @@ -106,14 +165,9 @@ describe("probeFeishu", () => { expect(createFeishuClientMock).not.toHaveBeenCalled(); }); it("returns cached result on subsequent calls within TTL", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, - }); + const requestFn = setupSuccessClient(); - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - const first = await probeFeishu(creds); - const second = await probeFeishu(creds); + const { first, second } = await readSequentialDefaultProbePair(); expect(first).toEqual(second); // Only one API call should have been made @@ -121,76 +175,37 @@ describe("probeFeishu", () => { }); it("makes a fresh API call after cache expires", async () => { 
- vi.useFakeTimers(); - try { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, + await withFakeTimers(async () => { + const requestFn = setupSuccessClient(); + + await expectFreshDefaultProbeAfter(requestFn, () => { + vi.advanceTimersByTime(10 * 60 * 1000 + 1); }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(1); - - // Advance time past the success TTL - vi.advanceTimersByTime(10 * 60 * 1000 + 1); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); - } finally { - vi.useRealTimers(); - } + }); }); it("caches failed probe results (API error) for the error TTL", async () => { - vi.useFakeTimers(); - try { - const requestFn = makeRequestFn({ code: 99, msg: "token expired" }); - createFeishuClientMock.mockReturnValue({ request: requestFn }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - const first = await probeFeishu(creds); - const second = await probeFeishu(creds); - expect(first).toMatchObject({ ok: false, error: "API error: token expired" }); - expect(second).toMatchObject({ ok: false, error: "API error: token expired" }); - expect(requestFn).toHaveBeenCalledTimes(1); - - vi.advanceTimersByTime(60 * 1000 + 1); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); - } finally { - vi.useRealTimers(); - } + await withFakeTimers(async () => { + await expectErrorResultCached({ + requestFn: makeRequestFn({ code: 99, msg: "token expired" }), + expectedError: "API error: token expired", + ttlMs: 60 * 1000, + }); + }); }); it("caches thrown request errors for the error TTL", async () => { - vi.useFakeTimers(); - try { - const requestFn = vi.fn().mockRejectedValue(new Error("network error")); - createFeishuClientMock.mockReturnValue({ request: requestFn }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // 
pragma: allowlist secret - const first = await probeFeishu(creds); - const second = await probeFeishu(creds); - expect(first).toMatchObject({ ok: false, error: "network error" }); - expect(second).toMatchObject({ ok: false, error: "network error" }); - expect(requestFn).toHaveBeenCalledTimes(1); - - vi.advanceTimersByTime(60 * 1000 + 1); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); - } finally { - vi.useRealTimers(); - } + await withFakeTimers(async () => { + await expectErrorResultCached({ + requestFn: vi.fn().mockRejectedValue(new Error("network error")), + expectedError: "network error", + ttlMs: 60 * 1000, + }); + }); }); it("caches per account independently", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "Bot1", open_id: "ou_1" }, - }); + const requestFn = setupClient(BOT1_RESPONSE); await probeFeishu({ appId: "cli_aaa", appSecret: "s1" }); // pragma: allowlist secret expect(requestFn).toHaveBeenCalledTimes(1); @@ -205,10 +220,7 @@ describe("probeFeishu", () => { }); it("does not share cache between accounts with same appId but different appSecret", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "Bot1", open_id: "ou_1" }, - }); + const requestFn = setupClient(BOT1_RESPONSE); // First account with appId + secret A await probeFeishu({ appId: "cli_shared", appSecret: "secret_aaa" }); // pragma: allowlist secret @@ -221,10 +233,7 @@ describe("probeFeishu", () => { }); it("uses accountId for cache key when available", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "Bot1", open_id: "ou_1" }, - }); + const requestFn = setupClient(BOT1_RESPONSE); // Two accounts with same appId+appSecret but different accountIds are cached separately await probeFeishu({ accountId: "acct-1", appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret @@ -239,19 +248,11 @@ describe("probeFeishu", () => { }); it("clearProbeCache forces fresh API call", 
async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, + const requestFn = setupSuccessClient(); + + await expectFreshDefaultProbeAfter(requestFn, () => { + clearProbeCache(); }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(1); - - clearProbeCache(); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); }); it("handles response.data.bot fallback path", async () => { @@ -260,10 +261,8 @@ describe("probeFeishu", () => { data: { bot: { bot_name: "DataBot", open_id: "ou_data" } }, }); - const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret - expect(result).toEqual({ - ok: true, - appId: "cli_123", + await expectDefaultSuccessResult(DEFAULT_CREDS, { + ...DEFAULT_SUCCESS_RESULT, botName: "DataBot", botOpenId: "ou_data", }); diff --git a/extensions/feishu/src/reply-dispatcher.test.ts b/extensions/feishu/src/reply-dispatcher.test.ts index 744532320de..10b829857a1 100644 --- a/extensions/feishu/src/reply-dispatcher.test.ts +++ b/extensions/feishu/src/reply-dispatcher.test.ts @@ -25,44 +25,33 @@ vi.mock("./typing.js", () => ({ addTypingIndicator: addTypingIndicatorMock, removeTypingIndicator: removeTypingIndicatorMock, })); -vi.mock("./streaming-card.js", () => ({ - mergeStreamingText: (previousText: string | undefined, nextText: string | undefined) => { - const previous = typeof previousText === "string" ? previousText : ""; - const next = typeof nextText === "string" ? 
nextText : ""; - if (!next) { - return previous; - } - if (!previous || next === previous) { - return next; - } - if (next.startsWith(previous)) { - return next; - } - if (previous.startsWith(next)) { - return previous; - } - return `${previous}${next}`; - }, - FeishuStreamingSession: class { - active = false; - start = vi.fn(async () => { - this.active = true; - }); - update = vi.fn(async () => {}); - close = vi.fn(async () => { - this.active = false; - }); - isActive = vi.fn(() => this.active); +vi.mock("./streaming-card.js", async () => { + const actual = await vi.importActual("./streaming-card.js"); + return { + mergeStreamingText: actual.mergeStreamingText, + FeishuStreamingSession: class { + active = false; + start = vi.fn(async () => { + this.active = true; + }); + update = vi.fn(async () => {}); + close = vi.fn(async () => { + this.active = false; + }); + isActive = vi.fn(() => this.active); - constructor() { - streamingInstances.push(this); - } - }, -})); + constructor() { + streamingInstances.push(this); + } + }, + }; +}); import { createFeishuReplyDispatcher } from "./reply-dispatcher.js"; describe("createFeishuReplyDispatcher streaming behavior", () => { + type ReplyDispatcherArgs = Parameters[0]; + beforeEach(() => { vi.clearAllMocks(); streamingInstances.length = 0; @@ -128,6 +117,25 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { return createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; } + function createRuntimeLogger() { + return { log: vi.fn(), error: vi.fn() } as never; + } + + function createDispatcherHarness(overrides: Partial = {}) { + const result = createFeishuReplyDispatcher({ + cfg: {} as never, + agentId: "agent", + runtime: {} as never, + chatId: "oc_chat", + ...overrides, + }); + + return { + result, + options: createReplyDispatcherWithTypingMock.mock.calls.at(-1)?.[0], + }; + } + it("skips typing indicator when account typingIndicator is disabled", async () => { resolveFeishuAccountMock.mockReturnValue({ 
accountId: "main", @@ -209,14 +217,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("keeps auto mode plain text on non-streaming send path", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver({ text: "plain text" }, { kind: "final" }); expect(streamingInstances).toHaveLength(0); @@ -225,14 +226,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("suppresses internal block payload delivery", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver({ text: "internal reasoning chunk" }, { kind: "block" }); expect(streamingInstances).toHaveLength(0); @@ -253,15 +247,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("uses streaming session for auto mode markdown payloads", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), rootId: "om_root_topic", }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); expect(streamingInstances).toHaveLength(1); @@ -277,14 +266,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("closes streaming with block text when final reply is missing", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - 
chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```md\npartial answer\n```" }, { kind: "block" }); await options.onIdle?.(); @@ -295,14 +279,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("delivers distinct final payloads after streaming close", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```md\n完整回复第一段\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n完整回复第一段 + 第二段\n```" }, { kind: "final" }); @@ -316,14 +295,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("skips exact duplicate final text after streaming close", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" }); @@ -383,14 +357,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }, }); - const result = createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { result, options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await 
options.onReplyStart?.(); await result.replyOptions.onPartialReply?.({ text: "hello" }); await options.deliver({ text: "lo world" }, { kind: "block" }); @@ -402,14 +371,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("sends media-only payloads as attachments", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" }); expect(sendMediaFeishuMock).toHaveBeenCalledTimes(1); @@ -424,14 +386,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("falls back to legacy mediaUrl when mediaUrls is an empty array", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver( { text: "caption", mediaUrl: "https://example.com/a.png", mediaUrls: [] }, { kind: "final" }, @@ -447,14 +402,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("sends attachments after streaming final markdown replies", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver( { text: "```ts\nconst x = 1\n```", mediaUrls: ["https://example.com/a.png"] }, { kind: "final" }, @@ -472,16 +422,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("passes replyInThread to sendMessageFeishu for plain text", async () => 
{ - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "plain text" }, { kind: "final" }); expect(sendMessageFeishuMock).toHaveBeenCalledWith( @@ -504,16 +448,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }, }); - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "card text" }, { kind: "final" }); expect(sendMarkdownCardFeishuMock).toHaveBeenCalledWith( @@ -525,16 +463,11 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("passes replyToMessageId and replyInThread to streaming.start()", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); expect(streamingInstances).toHaveLength(1); @@ -545,18 +478,13 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("disables streaming for thread replies and keeps reply metadata", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), 
replyToMessageId: "om_msg", replyInThread: false, threadReply: true, rootId: "om_root_topic", }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); expect(streamingInstances).toHaveLength(0); @@ -569,16 +497,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("passes replyInThread to media attachments", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" }); expect(sendMediaFeishuMock).toHaveBeenCalledWith( diff --git a/extensions/feishu/src/reply-dispatcher.ts b/extensions/feishu/src/reply-dispatcher.ts index 3bd1353825d..6f66ffffa58 100644 --- a/extensions/feishu/src/reply-dispatcher.ts +++ b/extensions/feishu/src/reply-dispatcher.ts @@ -224,6 +224,41 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP lastPartial = ""; }; + const sendChunkedTextReply = async (params: { + text: string; + useCard: boolean; + infoKind?: string; + }) => { + let first = true; + const chunkSource = params.useCard + ? params.text + : core.channel.text.convertMarkdownTables(params.text, tableMode); + for (const chunk of core.channel.text.chunkTextWithMode( + chunkSource, + textChunkLimit, + chunkMode, + )) { + const message = { + cfg, + to: chatId, + text: chunk, + replyToMessageId: sendReplyToMessageId, + replyInThread: effectiveReplyInThread, + mentions: first ? 
mentionTargets : undefined, + accountId, + }; + if (params.useCard) { + await sendMarkdownCardFeishu(message); + } else { + await sendMessageFeishu(message); + } + first = false; + } + if (params.infoKind === "final") { + deliveredFinalTexts.add(params.text); + } + }; + const { dispatcher, replyOptions, markDispatchIdle } = core.channel.reply.createReplyDispatcherWithTyping({ responsePrefix: prefixContext.responsePrefix, @@ -303,48 +338,10 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP return; } - let first = true; if (useCard) { - for (const chunk of core.channel.text.chunkTextWithMode( - text, - textChunkLimit, - chunkMode, - )) { - await sendMarkdownCardFeishu({ - cfg, - to: chatId, - text: chunk, - replyToMessageId: sendReplyToMessageId, - replyInThread: effectiveReplyInThread, - mentions: first ? mentionTargets : undefined, - accountId, - }); - first = false; - } - if (info?.kind === "final") { - deliveredFinalTexts.add(text); - } + await sendChunkedTextReply({ text, useCard: true, infoKind: info?.kind }); } else { - const converted = core.channel.text.convertMarkdownTables(text, tableMode); - for (const chunk of core.channel.text.chunkTextWithMode( - converted, - textChunkLimit, - chunkMode, - )) { - await sendMessageFeishu({ - cfg, - to: chatId, - text: chunk, - replyToMessageId: sendReplyToMessageId, - replyInThread: effectiveReplyInThread, - mentions: first ? 
mentionTargets : undefined, - accountId, - }); - first = false; - } - if (info?.kind === "final") { - deliveredFinalTexts.add(text); - } + await sendChunkedTextReply({ text, useCard: false, infoKind: info?.kind }); } } diff --git a/extensions/feishu/src/send.reply-fallback.test.ts b/extensions/feishu/src/send.reply-fallback.test.ts index 75dda353bbe..610ded167fd 100644 --- a/extensions/feishu/src/send.reply-fallback.test.ts +++ b/extensions/feishu/src/send.reply-fallback.test.ts @@ -25,6 +25,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { const replyMock = vi.fn(); const createMock = vi.fn(); + async function expectFallbackResult( + send: () => Promise<{ messageId?: string }>, + expectedMessageId: string, + ) { + const result = await send(); + expect(replyMock).toHaveBeenCalledTimes(1); + expect(createMock).toHaveBeenCalledTimes(1); + expect(result.messageId).toBe(expectedMessageId); + } + beforeEach(() => { vi.clearAllMocks(); resolveFeishuSendTargetMock.mockReturnValue({ @@ -51,16 +61,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_new" }, }); - const result = await sendMessageFeishu({ - cfg: {} as never, - to: "user:ou_target", - text: "hello", - replyToMessageId: "om_parent", - }); - - expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_new"); + await expectFallbackResult( + () => + sendMessageFeishu({ + cfg: {} as never, + to: "user:ou_target", + text: "hello", + replyToMessageId: "om_parent", + }), + "om_new", + ); }); it("falls back to create for withdrawn card replies", async () => { @@ -73,16 +83,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_card_new" }, }); - const result = await sendCardFeishu({ - cfg: {} as never, - to: "user:ou_target", - card: { schema: "2.0" }, - replyToMessageId: "om_parent", - }); - - 
expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_card_new"); + await expectFallbackResult( + () => + sendCardFeishu({ + cfg: {} as never, + to: "user:ou_target", + card: { schema: "2.0" }, + replyToMessageId: "om_parent", + }), + "om_card_new", + ); }); it("still throws for non-withdrawn reply failures", async () => { @@ -111,16 +121,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_thrown_fallback" }, }); - const result = await sendMessageFeishu({ - cfg: {} as never, - to: "user:ou_target", - text: "hello", - replyToMessageId: "om_parent", - }); - - expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_thrown_fallback"); + await expectFallbackResult( + () => + sendMessageFeishu({ + cfg: {} as never, + to: "user:ou_target", + text: "hello", + replyToMessageId: "om_parent", + }), + "om_thrown_fallback", + ); }); it("falls back to create when card reply throws a not-found AxiosError", async () => { @@ -133,16 +143,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_axios_fallback" }, }); - const result = await sendCardFeishu({ - cfg: {} as never, - to: "user:ou_target", - card: { schema: "2.0" }, - replyToMessageId: "om_parent", - }); - - expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_axios_fallback"); + await expectFallbackResult( + () => + sendCardFeishu({ + cfg: {} as never, + to: "user:ou_target", + card: { schema: "2.0" }, + replyToMessageId: "om_parent", + }), + "om_axios_fallback", + ); }); it("re-throws non-withdrawn thrown errors for text messages", async () => { diff --git a/extensions/feishu/src/send.ts b/extensions/feishu/src/send.ts index 5bfa836e0a6..5692edd32ff 100644 --- a/extensions/feishu/src/send.ts +++ 
b/extensions/feishu/src/send.ts @@ -55,6 +55,30 @@ type FeishuCreateMessageClient = { }; }; +type FeishuMessageSender = { + id?: string; + id_type?: string; + sender_type?: string; +}; + +type FeishuMessageGetItem = { + message_id?: string; + chat_id?: string; + chat_type?: FeishuChatType; + msg_type?: string; + body?: { content?: string }; + sender?: FeishuMessageSender; + create_time?: string; +}; + +type FeishuGetMessageResponse = { + code?: number; + msg?: string; + data?: FeishuMessageGetItem & { + items?: FeishuMessageGetItem[]; + }; +}; + /** Send a direct message as a fallback when a reply target is unavailable. */ async function sendFallbackDirect( client: FeishuCreateMessageClient, @@ -214,36 +238,7 @@ export async function getMessageFeishu(params: { try { const response = (await client.im.message.get({ path: { message_id: messageId }, - })) as { - code?: number; - msg?: string; - data?: { - items?: Array<{ - message_id?: string; - chat_id?: string; - chat_type?: FeishuChatType; - msg_type?: string; - body?: { content?: string }; - sender?: { - id?: string; - id_type?: string; - sender_type?: string; - }; - create_time?: string; - }>; - message_id?: string; - chat_id?: string; - chat_type?: FeishuChatType; - msg_type?: string; - body?: { content?: string }; - sender?: { - id?: string; - id_type?: string; - sender_type?: string; - }; - create_time?: string; - }; - }; + })) as FeishuGetMessageResponse; if (response.code !== 0) { return null; diff --git a/extensions/google-gemini-cli-auth/oauth.test.ts b/extensions/google-gemini-cli-auth/oauth.test.ts index 1471f804771..02100b73b1f 100644 --- a/extensions/google-gemini-cli-auth/oauth.test.ts +++ b/extensions/google-gemini-cli-auth/oauth.test.ts @@ -144,6 +144,13 @@ describe("extractGeminiCliCredentials", () => { } } + function expectFakeCliCredentials(result: unknown) { + expect(result).toEqual({ + clientId: FAKE_CLIENT_ID, + clientSecret: FAKE_CLIENT_SECRET, + }); + } + beforeEach(async () => { 
vi.clearAllMocks(); originalPath = process.env.PATH; @@ -169,10 +176,7 @@ describe("extractGeminiCliCredentials", () => { clearCredentialsCache(); const result = extractGeminiCliCredentials(); - expect(result).toEqual({ - clientId: FAKE_CLIENT_ID, - clientSecret: FAKE_CLIENT_SECRET, - }); + expectFakeCliCredentials(result); }); it("extracts credentials when PATH entry is an npm global shim", async () => { @@ -182,10 +186,7 @@ describe("extractGeminiCliCredentials", () => { clearCredentialsCache(); const result = extractGeminiCliCredentials(); - expect(result).toEqual({ - clientId: FAKE_CLIENT_ID, - clientSecret: FAKE_CLIENT_SECRET, - }); + expectFakeCliCredentials(result); }); it("returns null when oauth2.js cannot be found", async () => { @@ -274,16 +275,16 @@ describe("loginGeminiCliOAuth", () => { }); } - async function runRemoteLoginWithCapturedAuthUrl( - loginGeminiCliOAuth: (options: { - isRemote: boolean; - openUrl: () => Promise; - log: (msg: string) => void; - note: () => Promise; - prompt: () => Promise; - progress: { update: () => void; stop: () => void }; - }) => Promise<{ projectId: string }>, - ) { + type LoginGeminiCliOAuthFn = (options: { + isRemote: boolean; + openUrl: () => Promise; + log: (msg: string) => void; + note: () => Promise; + prompt: () => Promise; + progress: { update: () => void; stop: () => void }; + }) => Promise<{ projectId: string }>; + + async function runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth: LoginGeminiCliOAuthFn) { let authUrl = ""; const result = await loginGeminiCliOAuth({ isRemote: true, @@ -304,6 +305,14 @@ describe("loginGeminiCliOAuth", () => { return { result, authUrl }; } + async function runRemoteLoginExpectingProjectId( + loginGeminiCliOAuth: LoginGeminiCliOAuthFn, + projectId: string, + ) { + const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); + expect(result.projectId).toBe(projectId); + } + let envSnapshot: Partial>; beforeEach(() => { envSnapshot = 
Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]])); @@ -357,9 +366,7 @@ describe("loginGeminiCliOAuth", () => { vi.stubGlobal("fetch", fetchMock); const { loginGeminiCliOAuth } = await import("./oauth.js"); - const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); - - expect(result.projectId).toBe("daily-project"); + await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "daily-project"); const loadRequests = requests.filter((request) => request.url.includes("v1internal:loadCodeAssist"), ); @@ -414,9 +421,7 @@ describe("loginGeminiCliOAuth", () => { vi.stubGlobal("fetch", fetchMock); const { loginGeminiCliOAuth } = await import("./oauth.js"); - const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); - - expect(result.projectId).toBe("env-project"); + await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "env-project"); expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3); expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false); }); diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index a942ed3d673..8b6f42e371c 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -7,6 +7,9 @@ "dependencies": { "google-auth-library": "^10.6.1" }, + "devDependencies": { + "openclaw": "workspace:*" + }, "peerDependencies": { "openclaw": ">=2026.3.11" }, diff --git a/extensions/googlechat/src/api.test.ts b/extensions/googlechat/src/api.test.ts index fc011268ec2..81312d39820 100644 --- a/extensions/googlechat/src/api.test.ts +++ b/extensions/googlechat/src/api.test.ts @@ -13,6 +13,21 @@ const account = { config: {}, } as ResolvedGoogleChatAccount; +function stubSuccessfulSend(name: string) { + const fetchMock = vi + .fn() + .mockResolvedValue(new Response(JSON.stringify({ name }), { status: 200 })); + vi.stubGlobal("fetch", fetchMock); + return fetchMock; +} + +async 
function expectDownloadToRejectForResponse(response: Response) { + vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); + await expect( + downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }), + ).rejects.toThrow(/max bytes/i); +} + describe("downloadGoogleChatMedia", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -29,11 +44,7 @@ describe("downloadGoogleChatMedia", () => { status: 200, headers: { "content-length": "50", "content-type": "application/octet-stream" }, }); - vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); - - await expect( - downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }), - ).rejects.toThrow(/max bytes/i); + await expectDownloadToRejectForResponse(response); }); it("rejects when streamed payload exceeds max bytes", async () => { @@ -52,11 +63,7 @@ describe("downloadGoogleChatMedia", () => { status: 200, headers: { "content-type": "application/octet-stream" }, }); - vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); - - await expect( - downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }), - ).rejects.toThrow(/max bytes/i); + await expectDownloadToRejectForResponse(response); }); }); @@ -66,12 +73,7 @@ describe("sendGoogleChatMessage", () => { }); it("adds messageReplyOption when sending to an existing thread", async () => { - const fetchMock = vi - .fn() - .mockResolvedValue( - new Response(JSON.stringify({ name: "spaces/AAA/messages/123" }), { status: 200 }), - ); - vi.stubGlobal("fetch", fetchMock); + const fetchMock = stubSuccessfulSend("spaces/AAA/messages/123"); await sendGoogleChatMessage({ account, @@ -89,12 +91,7 @@ describe("sendGoogleChatMessage", () => { }); it("does not set messageReplyOption for non-thread sends", async () => { - const fetchMock = vi - .fn() - .mockResolvedValue( - new Response(JSON.stringify({ name: "spaces/AAA/messages/124" }), { status: 200 }), - ); - vi.stubGlobal("fetch", fetchMock); + const fetchMock = 
stubSuccessfulSend("spaces/AAA/messages/124"); await sendGoogleChatMessage({ account, diff --git a/extensions/googlechat/src/api.ts b/extensions/googlechat/src/api.ts index 7c4f26b8db9..d9c7b666ff0 100644 --- a/extensions/googlechat/src/api.ts +++ b/extensions/googlechat/src/api.ts @@ -14,70 +14,24 @@ const headersToObject = (headers?: HeadersInit): Record => ? Object.fromEntries(headers) : headers || {}; -async function fetchJson( - account: ResolvedGoogleChatAccount, - url: string, - init: RequestInit, -): Promise { - const token = await getGoogleChatAccessToken(account); - const { response: res, release } = await fetchWithSsrFGuard({ +async function withGoogleChatResponse(params: { + account: ResolvedGoogleChatAccount; + url: string; + init?: RequestInit; + auditContext: string; + errorPrefix?: string; + handleResponse: (response: Response) => Promise; +}): Promise { + const { + account, url, - init: { - ...init, - headers: { - ...headersToObject(init.headers), - Authorization: `Bearer ${token}`, - "Content-Type": "application/json", - }, - }, - auditContext: "googlechat.api.json", - }); - try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); - } - return (await res.json()) as T; - } finally { - await release(); - } -} - -async function fetchOk( - account: ResolvedGoogleChatAccount, - url: string, - init: RequestInit, -): Promise { + init, + auditContext, + errorPrefix = "Google Chat API", + handleResponse, + } = params; const token = await getGoogleChatAccessToken(account); - const { response: res, release } = await fetchWithSsrFGuard({ - url, - init: { - ...init, - headers: { - ...headersToObject(init.headers), - Authorization: `Bearer ${token}`, - }, - }, - auditContext: "googlechat.api.ok", - }); - try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); - } - } finally 
{ - await release(); - } -} - -async function fetchBuffer( - account: ResolvedGoogleChatAccount, - url: string, - init?: RequestInit, - options?: { maxBytes?: number }, -): Promise<{ buffer: Buffer; contentType?: string }> { - const token = await getGoogleChatAccessToken(account); - const { response: res, release } = await fetchWithSsrFGuard({ + const { response, release } = await fetchWithSsrFGuard({ url, init: { ...init, @@ -86,52 +40,103 @@ async function fetchBuffer( Authorization: `Bearer ${token}`, }, }, - auditContext: "googlechat.api.buffer", + auditContext, }); try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); + if (!response.ok) { + const text = await response.text().catch(() => ""); + throw new Error(`${errorPrefix} ${response.status}: ${text || response.statusText}`); } - const maxBytes = options?.maxBytes; - const lengthHeader = res.headers.get("content-length"); - if (maxBytes && lengthHeader) { - const length = Number(lengthHeader); - if (Number.isFinite(length) && length > maxBytes) { - throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); - } - } - if (!maxBytes || !res.body) { - const buffer = Buffer.from(await res.arrayBuffer()); - const contentType = res.headers.get("content-type") ?? undefined; - return { buffer, contentType }; - } - const reader = res.body.getReader(); - const chunks: Buffer[] = []; - let total = 0; - while (true) { - const { done, value } = await reader.read(); - if (done) { - break; - } - if (!value) { - continue; - } - total += value.length; - if (total > maxBytes) { - await reader.cancel(); - throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); - } - chunks.push(Buffer.from(value)); - } - const buffer = Buffer.concat(chunks, total); - const contentType = res.headers.get("content-type") ?? 
undefined; - return { buffer, contentType }; + return await handleResponse(response); } finally { await release(); } } +async function fetchJson( + account: ResolvedGoogleChatAccount, + url: string, + init: RequestInit, +): Promise { + return await withGoogleChatResponse({ + account, + url, + init: { + ...init, + headers: { + ...headersToObject(init.headers), + "Content-Type": "application/json", + }, + }, + auditContext: "googlechat.api.json", + handleResponse: async (response) => (await response.json()) as T, + }); +} + +async function fetchOk( + account: ResolvedGoogleChatAccount, + url: string, + init: RequestInit, +): Promise { + await withGoogleChatResponse({ + account, + url, + init, + auditContext: "googlechat.api.ok", + handleResponse: async () => undefined, + }); +} + +async function fetchBuffer( + account: ResolvedGoogleChatAccount, + url: string, + init?: RequestInit, + options?: { maxBytes?: number }, +): Promise<{ buffer: Buffer; contentType?: string }> { + return await withGoogleChatResponse({ + account, + url, + init, + auditContext: "googlechat.api.buffer", + handleResponse: async (res) => { + const maxBytes = options?.maxBytes; + const lengthHeader = res.headers.get("content-length"); + if (maxBytes && lengthHeader) { + const length = Number(lengthHeader); + if (Number.isFinite(length) && length > maxBytes) { + throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); + } + } + if (!maxBytes || !res.body) { + const buffer = Buffer.from(await res.arrayBuffer()); + const contentType = res.headers.get("content-type") ?? 
undefined; + return { buffer, contentType }; + } + const reader = res.body.getReader(); + const chunks: Buffer[] = []; + let total = 0; + while (true) { + const { done, value } = await reader.read(); + if (done) { + break; + } + if (!value) { + continue; + } + total += value.length; + if (total > maxBytes) { + await reader.cancel(); + throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); + } + chunks.push(Buffer.from(value)); + } + const buffer = Buffer.concat(chunks, total); + const contentType = res.headers.get("content-type") ?? undefined; + return { buffer, contentType }; + }, + }); +} + export async function sendGoogleChatMessage(params: { account: ResolvedGoogleChatAccount; space: string; @@ -208,34 +213,29 @@ export async function uploadGoogleChatAttachment(params: { Buffer.from(footer, "utf8"), ]); - const token = await getGoogleChatAccessToken(account); const url = `${CHAT_UPLOAD_BASE}/${space}/attachments:upload?uploadType=multipart`; - const { response: res, release } = await fetchWithSsrFGuard({ + const payload = await withGoogleChatResponse<{ + attachmentDataRef?: { attachmentUploadToken?: string }; + }>({ + account, url, init: { method: "POST", headers: { - Authorization: `Bearer ${token}`, "Content-Type": `multipart/related; boundary=${boundary}`, }, body, }, auditContext: "googlechat.upload", + errorPrefix: "Google Chat upload", + handleResponse: async (response) => + (await response.json()) as { + attachmentDataRef?: { attachmentUploadToken?: string }; + }, }); - try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat upload ${res.status}: ${text || res.statusText}`); - } - const payload = (await res.json()) as { - attachmentDataRef?: { attachmentUploadToken?: string }; - }; - return { - attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken, - }; - } finally { - await release(); - } + return { + attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken, + 
}; } export async function downloadGoogleChatMedia(params: { diff --git a/extensions/googlechat/src/channel.startup.test.ts b/extensions/googlechat/src/channel.startup.test.ts index 521cbb94c5f..11c46aa663a 100644 --- a/extensions/googlechat/src/channel.startup.test.ts +++ b/extensions/googlechat/src/channel.startup.test.ts @@ -1,6 +1,10 @@ import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/googlechat"; import { afterEach, describe, expect, it, vi } from "vitest"; -import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + abortStartedAccount, + expectPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedGoogleChatAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -39,29 +43,25 @@ describe("googlechatPlugin gateway.startAccount", () => { }, }; - const patches: ChannelAccountSnapshot[] = []; - const abort = new AbortController(); - const task = googlechatPlugin.gateway!.startAccount!( - createStartAccountContext({ - account, - abortSignal: abort.signal, - statusPatchSink: (next) => patches.push({ ...next }), - }), - ); - let settled = false; - void task.then(() => { - settled = true; + const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: googlechatPlugin.gateway!.startAccount!, + account, }); - await vi.waitFor(() => { - expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce(); + await expectPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, + assertBeforeAbort: () => { + expect(unregister).not.toHaveBeenCalled(); + }, + assertAfterAbort: () => { + expect(unregister).toHaveBeenCalledOnce(); + }, }); - expect(settled).toBe(false); - expect(unregister).not.toHaveBeenCalled(); - - abort.abort(); - await task; - - expect(unregister).toHaveBeenCalledOnce(); 
expect(patches.some((entry) => entry.running === true)).toBe(true); expect(patches.some((entry) => entry.running === false)).toBe(true); }); diff --git a/extensions/googlechat/src/channel.ts b/extensions/googlechat/src/channel.ts index 47980f97d92..3ae992d3e9e 100644 --- a/extensions/googlechat/src/channel.ts +++ b/extensions/googlechat/src/channel.ts @@ -30,6 +30,7 @@ import { type OpenClawConfig, } from "openclaw/plugin-sdk/googlechat"; import { GoogleChatConfigSchema } from "openclaw/plugin-sdk/googlechat"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { listGoogleChatAccountIds, resolveDefaultGoogleChatAccountId, @@ -473,20 +474,14 @@ export const googlechatPlugin: ChannelPlugin = { } return issues; }), - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - credentialSource: snapshot.credentialSource ?? "none", - audienceType: snapshot.audienceType ?? null, - audience: snapshot.audience ?? null, - webhookPath: snapshot.webhookPath ?? null, - webhookUrl: snapshot.webhookUrl ?? null, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + credentialSource: snapshot.credentialSource ?? "none", + audienceType: snapshot.audienceType ?? null, + audience: snapshot.audience ?? null, + webhookPath: snapshot.webhookPath ?? null, + webhookUrl: snapshot.webhookUrl ?? 
null, + }), probeAccount: async ({ account }) => probeGoogleChat(account), buildAccountSnapshot: ({ account, runtime, probe }) => { const base = buildComputedAccountStatusSnapshot({ diff --git a/extensions/googlechat/src/monitor.webhook-routing.test.ts b/extensions/googlechat/src/monitor.webhook-routing.test.ts index 812883f1b4c..9896efce645 100644 --- a/extensions/googlechat/src/monitor.webhook-routing.test.ts +++ b/extensions/googlechat/src/monitor.webhook-routing.test.ts @@ -117,6 +117,34 @@ function registerTwoTargets() { }; } +async function dispatchWebhookRequest(req: IncomingMessage) { + const res = createMockServerResponse(); + const handled = await handleGoogleChatWebhookRequest(req, res); + expect(handled).toBe(true); + return res; +} + +async function expectVerifiedRoute(params: { + request: IncomingMessage; + expectedStatus: number; + sinkA: ReturnType; + sinkB: ReturnType; + expectedSink: "none" | "A" | "B"; +}) { + const res = await dispatchWebhookRequest(params.request); + expect(res.statusCode).toBe(params.expectedStatus); + const expectedCounts = + params.expectedSink === "A" ? [1, 0] : params.expectedSink === "B" ? 
[0, 1] : [0, 0]; + expect(params.sinkA).toHaveBeenCalledTimes(expectedCounts[0]); + expect(params.sinkB).toHaveBeenCalledTimes(expectedCounts[1]); +} + +function mockSecondVerifierSuccess() { + vi.mocked(verifyGoogleChatRequest) + .mockResolvedValueOnce({ ok: false, reason: "invalid" }) + .mockResolvedValueOnce({ ok: true }); +} + describe("Google Chat webhook routing", () => { afterEach(() => { setActivePluginRegistry(createEmptyPluginRegistry()); @@ -165,45 +193,37 @@ describe("Google Chat webhook routing", () => { const { sinkA, sinkB, unregister } = registerTwoTargets(); try { - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest( - createWebhookRequest({ + await expectVerifiedRoute({ + request: createWebhookRequest({ authorization: "Bearer test-token", payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/AAA" } }, }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); - expect(sinkA).not.toHaveBeenCalled(); - expect(sinkB).not.toHaveBeenCalled(); + expectedStatus: 401, + sinkA, + sinkB, + expectedSink: "none", + }); } finally { unregister(); } }); it("routes to the single verified target when earlier targets fail verification", async () => { - vi.mocked(verifyGoogleChatRequest) - .mockResolvedValueOnce({ ok: false, reason: "invalid" }) - .mockResolvedValueOnce({ ok: true }); + mockSecondVerifierSuccess(); const { sinkA, sinkB, unregister } = registerTwoTargets(); try { - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest( - createWebhookRequest({ + await expectVerifiedRoute({ + request: createWebhookRequest({ authorization: "Bearer test-token", payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/BBB" } }, }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(sinkA).not.toHaveBeenCalled(); - expect(sinkB).toHaveBeenCalledTimes(1); + expectedStatus: 200, + sinkA, + sinkB, + expectedSink: "B", + }); 
} finally { unregister(); } @@ -218,10 +238,7 @@ describe("Google Chat webhook routing", () => { authorization: "Bearer invalid-token", }); const onSpy = vi.spyOn(req, "on"); - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest(req, res); - - expect(handled).toBe(true); + const res = await dispatchWebhookRequest(req); expect(res.statusCode).toBe(401); expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function)); } finally { @@ -230,15 +247,12 @@ describe("Google Chat webhook routing", () => { }); it("supports add-on requests that provide systemIdToken in the body", async () => { - vi.mocked(verifyGoogleChatRequest) - .mockResolvedValueOnce({ ok: false, reason: "invalid" }) - .mockResolvedValueOnce({ ok: true }); + mockSecondVerifierSuccess(); const { sinkA, sinkB, unregister } = registerTwoTargets(); try { - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest( - createWebhookRequest({ + await expectVerifiedRoute({ + request: createWebhookRequest({ payload: { commonEventObject: { hostApp: "CHAT" }, authorizationEventObject: { systemIdToken: "addon-token" }, @@ -252,13 +266,11 @@ describe("Google Chat webhook routing", () => { }, }, }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(sinkA).not.toHaveBeenCalled(); - expect(sinkB).toHaveBeenCalledTimes(1); + expectedStatus: 200, + sinkA, + sinkB, + expectedSink: "B", + }); } finally { unregister(); } diff --git a/extensions/imessage/src/channel.ts b/extensions/imessage/src/channel.ts index 22c45cf6072..17023599eb1 100644 --- a/extensions/imessage/src/channel.ts +++ b/extensions/imessage/src/channel.ts @@ -29,6 +29,7 @@ import { type ChannelPlugin, type ResolvedIMessageAccount, } from "openclaw/plugin-sdk/imessage"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { getIMessageRuntime } from "./runtime.js"; const meta = 
getChatChannelMeta("imessage"); @@ -264,17 +265,11 @@ export const imessagePlugin: ChannelPlugin = { dbPath: null, }, collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("imessage", accounts), - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - cliPath: snapshot.cliPath ?? null, - dbPath: snapshot.dbPath ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + cliPath: snapshot.cliPath ?? null, + dbPath: snapshot.dbPath ?? null, + }), probeAccount: async ({ timeoutMs }) => getIMessageRuntime().channel.imessage.probeIMessage(timeoutMs), buildAccountSnapshot: ({ account, runtime, probe }) => ({ diff --git a/extensions/irc/src/accounts.test.ts b/extensions/irc/src/accounts.test.ts index afd1b597b81..5b4685795c6 100644 --- a/extensions/irc/src/accounts.test.ts +++ b/extensions/irc/src/accounts.test.ts @@ -81,6 +81,32 @@ describe("resolveDefaultIrcAccountId", () => { }); describe("resolveIrcAccount", () => { + it("parses delimited IRC_CHANNELS env values for the default account", () => { + const previousChannels = process.env.IRC_CHANNELS; + process.env.IRC_CHANNELS = "alpha, beta\ngamma; delta"; + + try { + const account = resolveIrcAccount({ + cfg: asConfig({ + channels: { + irc: { + host: "irc.example.com", + nick: "claw", + }, + }, + }), + }); + + expect(account.config.channels).toEqual(["alpha", "beta", "gamma", "delta"]); + } finally { + if (previousChannels === undefined) { + delete process.env.IRC_CHANNELS; + } else { + process.env.IRC_CHANNELS = previousChannels; + } + } + }); + it.runIf(process.platform !== "win32")("rejects symlinked password files", () => { const dir = fs.mkdtempSync(path.join(os.tmpdir(), 
"openclaw-irc-account-")); const passwordFile = path.join(dir, "password.txt"); diff --git a/extensions/irc/src/accounts.ts b/extensions/irc/src/accounts.ts index 13d48fffdb7..9367a7d2123 100644 --- a/extensions/irc/src/accounts.ts +++ b/extensions/irc/src/accounts.ts @@ -3,6 +3,7 @@ import { tryReadSecretFileSync } from "openclaw/plugin-sdk/core"; import { createAccountListHelpers, normalizeResolvedSecretInputString, + parseOptionalDelimitedEntries, } from "openclaw/plugin-sdk/irc"; import type { CoreConfig, IrcAccountConfig, IrcNickServConfig } from "./types.js"; @@ -42,17 +43,6 @@ function parseIntEnv(value?: string): number | undefined { return parsed; } -function parseListEnv(value?: string): string[] | undefined { - if (!value?.trim()) { - return undefined; - } - const parsed = value - .split(/[\n,;]+/g) - .map((entry) => entry.trim()) - .filter(Boolean); - return parsed.length > 0 ? parsed : undefined; -} - const { listAccountIds: listIrcAccountIds, resolveDefaultAccountId: resolveDefaultIrcAccountId } = createAccountListHelpers("irc", { normalizeAccountId }); export { listIrcAccountIds, resolveDefaultIrcAccountId }; @@ -174,7 +164,9 @@ export function resolveIrcAccount(params: { accountId === DEFAULT_ACCOUNT_ID ? parseIntEnv(process.env.IRC_PORT) : undefined; const port = merged.port ?? envPort ?? (tls ? 6697 : 6667); const envChannels = - accountId === DEFAULT_ACCOUNT_ID ? parseListEnv(process.env.IRC_CHANNELS) : undefined; + accountId === DEFAULT_ACCOUNT_ID + ? 
parseOptionalDelimitedEntries(process.env.IRC_CHANNELS) + : undefined; const host = ( merged.host?.trim() || diff --git a/extensions/irc/src/channel.startup.test.ts b/extensions/irc/src/channel.startup.test.ts index ef972f64c0e..7b4416d1892 100644 --- a/extensions/irc/src/channel.startup.test.ts +++ b/extensions/irc/src/channel.startup.test.ts @@ -1,5 +1,8 @@ import { afterEach, describe, expect, it, vi } from "vitest"; -import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + expectStopPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedIrcAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -41,27 +44,20 @@ describe("ircPlugin gateway.startAccount", () => { config: {} as ResolvedIrcAccount["config"], }; - const abort = new AbortController(); - const task = ircPlugin.gateway!.startAccount!( - createStartAccountContext({ - account, - abortSignal: abort.signal, - }), - ); - let settled = false; - void task.then(() => { - settled = true; + const { abort, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: ircPlugin.gateway!.startAccount!, + account, }); - await vi.waitFor(() => { - expect(hoisted.monitorIrcProvider).toHaveBeenCalledOnce(); + await expectStopPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.monitorIrcProvider).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, + stop, }); - expect(settled).toBe(false); - expect(stop).not.toHaveBeenCalled(); - - abort.abort(); - await task; - - expect(stop).toHaveBeenCalledOnce(); }); }); diff --git a/extensions/irc/src/channel.ts b/extensions/irc/src/channel.ts index c598a9a0ef3..62d64fb0866 100644 --- a/extensions/irc/src/channel.ts +++ b/extensions/irc/src/channel.ts @@ -14,10 +14,10 @@ import { deleteAccountFromConfigSection, getChatChannelMeta, PAIRING_APPROVED_MESSAGE, - runPassiveAccountLifecycle, 
setAccountEnabledInConfigSection, type ChannelPlugin, } from "openclaw/plugin-sdk/irc"; +import { runStoppablePassiveMonitor } from "../../shared/passive-monitor.js"; import { listIrcAccountIds, resolveDefaultIrcAccountId, @@ -367,7 +367,7 @@ export const ircPlugin: ChannelPlugin = { ctx.log?.info( `[${account.accountId}] starting IRC provider (${account.host}:${account.port}${account.tls ? " tls" : ""})`, ); - await runPassiveAccountLifecycle({ + await runStoppablePassiveMonitor({ abortSignal: ctx.abortSignal, start: async () => await monitorIrcProvider({ @@ -377,9 +377,6 @@ export const ircPlugin: ChannelPlugin = { abortSignal: ctx.abortSignal, statusSink, }), - stop: async (monitor) => { - monitor.stop(); - }, }); }, }, diff --git a/extensions/irc/src/config-schema.ts b/extensions/irc/src/config-schema.ts index aa37b596cd1..8b9625b5bc4 100644 --- a/extensions/irc/src/config-schema.ts +++ b/extensions/irc/src/config-schema.ts @@ -9,6 +9,7 @@ import { requireOpenAllowFrom, } from "openclaw/plugin-sdk/irc"; import { z } from "zod"; +import { requireChannelOpenAllowFrom } from "../../shared/config-schema-helpers.js"; const IrcGroupSchema = z .object({ @@ -69,12 +70,12 @@ export const IrcAccountSchemaBase = z .strict(); export const IrcAccountSchema = IrcAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "irc", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: 'channels.irc.dmPolicy="open" requires channels.irc.allowFrom to include "*"', + requireOpenAllowFrom, }); }); @@ -82,11 +83,11 @@ export const IrcConfigSchema = IrcAccountSchemaBase.extend({ accounts: z.record(z.string(), IrcAccountSchema.optional()).optional(), defaultAccount: z.string().optional(), }).superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "irc", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: 
'channels.irc.dmPolicy="open" requires channels.irc.allowFrom to include "*"', + requireOpenAllowFrom, }); }); diff --git a/extensions/irc/src/monitor.ts b/extensions/irc/src/monitor.ts index e416d95f8eb..2eec74a73d4 100644 --- a/extensions/irc/src/monitor.ts +++ b/extensions/irc/src/monitor.ts @@ -1,4 +1,5 @@ -import { createLoggerBackedRuntime, type RuntimeEnv } from "openclaw/plugin-sdk/irc"; +import type { RuntimeEnv } from "openclaw/plugin-sdk/irc"; +import { resolveLoggerBackedRuntime } from "../../shared/runtime.js"; import { resolveIrcAccount } from "./accounts.js"; import { connectIrcClient, type IrcClient } from "./client.js"; import { buildIrcConnectOptions } from "./connect-options.js"; @@ -39,12 +40,10 @@ export async function monitorIrcProvider(opts: IrcMonitorOptions): Promise<{ sto accountId: opts.accountId, }); - const runtime: RuntimeEnv = - opts.runtime ?? - createLoggerBackedRuntime({ - logger: core.logging.getChildLogger(), - exitError: () => new Error("Runtime exit not available"), - }); + const runtime: RuntimeEnv = resolveLoggerBackedRuntime( + opts.runtime, + core.logging.getChildLogger(), + ); if (!account.configured) { throw new Error( diff --git a/extensions/irc/src/onboarding.test.ts b/extensions/irc/src/onboarding.test.ts index 21f3e978c1a..613503700f3 100644 --- a/extensions/irc/src/onboarding.test.ts +++ b/extensions/irc/src/onboarding.test.ts @@ -1,5 +1,6 @@ import type { RuntimeEnv, WizardPrompter } from "openclaw/plugin-sdk/irc"; import { describe, expect, it, vi } from "vitest"; +import { createRuntimeEnv } from "../../test-utils/runtime-env.js"; import { ircOnboardingAdapter } from "./onboarding.js"; import type { CoreConfig } from "./types.js"; @@ -63,13 +64,7 @@ describe("irc onboarding", () => { }), }); - const runtime: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn((code: number): never => { - throw new Error(`exit ${code}`); - }), - }; + const runtime: RuntimeEnv = createRuntimeEnv(); const result = await 
ircOnboardingAdapter.configure({ cfg: {} as CoreConfig, diff --git a/extensions/irc/src/send.test.ts b/extensions/irc/src/send.test.ts index df7b5e60ddd..8fbe58e7f22 100644 --- a/extensions/irc/src/send.test.ts +++ b/extensions/irc/src/send.test.ts @@ -1,4 +1,9 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + createSendCfgThreadingRuntime, + expectProvidedCfgSkipsRuntimeLoad, + expectRuntimeCfgFallback, +} from "../../test-utils/send-config.js"; import type { IrcClient } from "./client.js"; import type { CoreConfig } from "./types.js"; @@ -27,20 +32,7 @@ const hoisted = vi.hoisted(() => { }); vi.mock("./runtime.js", () => ({ - getIrcRuntime: () => ({ - config: { - loadConfig: hoisted.loadConfig, - }, - channel: { - text: { - resolveMarkdownTableMode: hoisted.resolveMarkdownTableMode, - convertMarkdownTables: hoisted.convertMarkdownTables, - }, - activity: { - record: hoisted.record, - }, - }, - }), + getIrcRuntime: () => createSendCfgThreadingRuntime(hoisted), })); vi.mock("./accounts.js", () => ({ @@ -87,8 +79,9 @@ describe("sendMessageIrc cfg threading", () => { accountId: "work", }); - expect(hoisted.loadConfig).not.toHaveBeenCalled(); - expect(hoisted.resolveIrcAccount).toHaveBeenCalledWith({ + expectProvidedCfgSkipsRuntimeLoad({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveIrcAccount, cfg: providedCfg, accountId: "work", }); @@ -106,8 +99,9 @@ describe("sendMessageIrc cfg threading", () => { await sendMessageIrc("#ops", "ping", { client }); - expect(hoisted.loadConfig).toHaveBeenCalledTimes(1); - expect(hoisted.resolveIrcAccount).toHaveBeenCalledWith({ + expectRuntimeCfgFallback({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveIrcAccount, cfg: runtimeCfg, accountId: undefined, }); diff --git a/extensions/line/src/channel.ts b/extensions/line/src/channel.ts index ddc612b8fa7..982d7670082 100644 --- a/extensions/line/src/channel.ts +++ b/extensions/line/src/channel.ts @@ -347,6 +347,16 @@ 
export const linePlugin: ChannelPlugin = { : []; const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []); const shouldSendQuickRepliesInline = chunks.length === 0 && hasQuickReplies; + const sendMediaMessages = async () => { + for (const url of mediaUrls) { + lastResult = await runtime.channel.line.sendMessageLine(to, "", { + verbose: false, + mediaUrl: url, + cfg, + accountId: accountId ?? undefined, + }); + } + }; if (!shouldSendQuickRepliesInline) { if (lineData.flexMessage) { @@ -391,14 +401,7 @@ export const linePlugin: ChannelPlugin = { const sendMediaAfterText = !(hasQuickReplies && chunks.length > 0); if (mediaUrls.length > 0 && !shouldSendQuickRepliesInline && !sendMediaAfterText) { - for (const url of mediaUrls) { - lastResult = await runtime.channel.line.sendMessageLine(to, "", { - verbose: false, - mediaUrl: url, - cfg, - accountId: accountId ?? undefined, - }); - } + await sendMediaMessages(); } if (chunks.length > 0) { @@ -471,14 +474,7 @@ export const linePlugin: ChannelPlugin = { } if (mediaUrls.length > 0 && !shouldSendQuickRepliesInline && sendMediaAfterText) { - for (const url of mediaUrls) { - lastResult = await runtime.channel.line.sendMessageLine(to, "", { - verbose: false, - mediaUrl: url, - cfg, - accountId: accountId ?? 
undefined, - }); - } + await sendMediaMessages(); } if (lastResult) { diff --git a/extensions/llm-task/src/llm-task-tool.test.ts b/extensions/llm-task/src/llm-task-tool.test.ts index fc9f0e07215..2bf0cb655aa 100644 --- a/extensions/llm-task/src/llm-task-tool.test.ts +++ b/extensions/llm-task/src/llm-task-tool.test.ts @@ -29,6 +29,21 @@ function fakeApi(overrides: any = {}) { }; } +function mockEmbeddedRunJson(payload: unknown) { + // oxlint-disable-next-line typescript/no-explicit-any + (runEmbeddedPiAgent as any).mockResolvedValueOnce({ + meta: {}, + payloads: [{ text: JSON.stringify(payload) }], + }); +} + +async function executeEmbeddedRun(input: Record) { + const tool = createLlmTaskTool(fakeApi()); + await tool.execute("id", input); + // oxlint-disable-next-line typescript/no-explicit-any + return (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; +} + describe("llm-task tool (json-only)", () => { beforeEach(() => vi.clearAllMocks()); @@ -96,42 +111,25 @@ describe("llm-task tool (json-only)", () => { }); it("passes provider/model overrides to embedded runner", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ + prompt: "x", + provider: "anthropic", + model: "claude-4-sonnet", }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x", provider: "anthropic", model: "claude-4-sonnet" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; expect(call.provider).toBe("anthropic"); expect(call.model).toBe("claude-4-sonnet"); }); it("passes thinking override to embedded runner", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: 
true }) }], - }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x", thinking: "high" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x", thinking: "high" }); expect(call.thinkLevel).toBe("high"); }); it("normalizes thinking aliases", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], - }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x", thinking: "on" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x", thinking: "on" }); expect(call.thinkLevel).toBe("low"); }); @@ -150,24 +148,13 @@ describe("llm-task tool (json-only)", () => { }); it("does not pass thinkLevel when thinking is omitted", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], - }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x" }); expect(call.thinkLevel).toBeUndefined(); }); it("enforces allowedModels", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], - }); + mockEmbeddedRunJson({ ok: true }); const tool = createLlmTaskTool( fakeApi({ 
pluginConfig: { allowedModels: ["openai-codex/gpt-5.2"] } }), ); @@ -177,15 +164,8 @@ describe("llm-task tool (json-only)", () => { }); it("disables tools for embedded run", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], - }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x" }); expect(call.disableTools).toBe(true); }); }); diff --git a/extensions/lobster/src/windows-spawn.test.ts b/extensions/lobster/src/windows-spawn.test.ts index e3d791e36e4..48e6ddc9a54 100644 --- a/extensions/lobster/src/windows-spawn.test.ts +++ b/extensions/lobster/src/windows-spawn.test.ts @@ -14,6 +14,19 @@ describe("resolveWindowsLobsterSpawn", () => { let tempDir = ""; const originalProcessState = snapshotPlatformPathEnv(); + async function expectUnwrappedShim(params: { + scriptPath: string; + shimPath: string; + shimLine: string; + }) { + await createWindowsCmdShimFixture(params); + + const target = resolveWindowsLobsterSpawn(params.shimPath, ["run", "noop"], process.env); + expect(target.command).toBe(process.execPath); + expect(target.argv).toEqual([params.scriptPath, "run", "noop"]); + expect(target.windowsHide).toBe(true); + } + beforeEach(async () => { tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-lobster-win-spawn-")); setProcessPlatform("win32"); @@ -30,31 +43,21 @@ describe("resolveWindowsLobsterSpawn", () => { it("unwraps cmd shim with %dp0% token", async () => { const scriptPath = path.join(tempDir, "shim-dist", "lobster-cli.cjs"); const shimPath = path.join(tempDir, "shim", "lobster.cmd"); - await createWindowsCmdShimFixture({ + await expectUnwrappedShim({ shimPath, 
scriptPath, shimLine: `"%dp0%\\..\\shim-dist\\lobster-cli.cjs" %*`, }); - - const target = resolveWindowsLobsterSpawn(shimPath, ["run", "noop"], process.env); - expect(target.command).toBe(process.execPath); - expect(target.argv).toEqual([scriptPath, "run", "noop"]); - expect(target.windowsHide).toBe(true); }); it("unwraps cmd shim with %~dp0% token", async () => { const scriptPath = path.join(tempDir, "shim-dist", "lobster-cli.cjs"); const shimPath = path.join(tempDir, "shim", "lobster.cmd"); - await createWindowsCmdShimFixture({ + await expectUnwrappedShim({ shimPath, scriptPath, shimLine: `"%~dp0%\\..\\shim-dist\\lobster-cli.cjs" %*`, }); - - const target = resolveWindowsLobsterSpawn(shimPath, ["run", "noop"], process.env); - expect(target.command).toBe(process.execPath); - expect(target.argv).toEqual([scriptPath, "run", "noop"]); - expect(target.windowsHide).toBe(true); }); it("ignores node.exe shim entries and picks lobster script", async () => { diff --git a/extensions/matrix/package.json b/extensions/matrix/package.json index 764e1795e1a..6fd32f7d951 100644 --- a/extensions/matrix/package.json +++ b/extensions/matrix/package.json @@ -4,7 +4,7 @@ "description": "OpenClaw Matrix channel plugin", "type": "module", "dependencies": { - "@mariozechner/pi-agent-core": "0.57.1", + "@mariozechner/pi-agent-core": "0.58.0", "@matrix-org/matrix-sdk-crypto-nodejs": "^0.4.0", "@vector-im/matrix-bot-sdk": "0.8.0-element.3", "markdown-it": "14.1.1", diff --git a/extensions/matrix/src/channel.directory.test.ts b/extensions/matrix/src/channel.directory.test.ts index 51c781c0b75..2c5bc9533f3 100644 --- a/extensions/matrix/src/channel.directory.test.ts +++ b/extensions/matrix/src/channel.directory.test.ts @@ -1,36 +1,17 @@ import type { PluginRuntime, RuntimeEnv } from "openclaw/plugin-sdk/matrix"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import { createRuntimeEnv } from "../../test-utils/runtime-env.js"; import { matrixPlugin } from "./channel.js"; 
import { setMatrixRuntime } from "./runtime.js"; +import { createMatrixBotSdkMock } from "./test-mocks.js"; import type { CoreConfig } from "./types.js"; -vi.mock("@vector-im/matrix-bot-sdk", () => ({ - ConsoleLogger: class { - trace = vi.fn(); - debug = vi.fn(); - info = vi.fn(); - warn = vi.fn(); - error = vi.fn(); - }, - MatrixClient: class {}, - LogService: { - setLogger: vi.fn(), - warn: vi.fn(), - info: vi.fn(), - debug: vi.fn(), - }, - SimpleFsStorageProvider: class {}, - RustSdkCryptoStorageProvider: class {}, -})); +vi.mock("@vector-im/matrix-bot-sdk", () => + createMatrixBotSdkMock({ includeVerboseLogService: true }), +); describe("matrix directory", () => { - const runtimeEnv: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn((code: number): never => { - throw new Error(`exit ${code}`); - }), - }; + const runtimeEnv: RuntimeEnv = createRuntimeEnv(); beforeEach(() => { setMatrixRuntime({ diff --git a/extensions/matrix/src/channel.ts b/extensions/matrix/src/channel.ts index a024b3f3e8a..bad3322f8d0 100644 --- a/extensions/matrix/src/channel.ts +++ b/extensions/matrix/src/channel.ts @@ -15,6 +15,7 @@ import { PAIRING_APPROVED_MESSAGE, type ChannelPlugin, } from "openclaw/plugin-sdk/matrix"; +import { buildTrafficStatusSummary } from "../../shared/channel-status-summary.js"; import { matrixMessageActions } from "./actions.js"; import { MatrixConfigSchema } from "./config-schema.js"; import { listMatrixDirectoryGroupsLive, listMatrixDirectoryPeersLive } from "./directory-live.js"; @@ -410,8 +411,7 @@ export const matrixPlugin: ChannelPlugin = { lastError: runtime?.lastError ?? null, probe, lastProbeAt: runtime?.lastProbeAt ?? null, - lastInboundAt: runtime?.lastInboundAt ?? null, - lastOutboundAt: runtime?.lastOutboundAt ?? 
null, + ...buildTrafficStatusSummary(runtime), }), }, gateway: { diff --git a/extensions/matrix/src/matrix/monitor/allowlist.ts b/extensions/matrix/src/matrix/monitor/allowlist.ts index 326360cade5..a48fe63bdb0 100644 --- a/extensions/matrix/src/matrix/monitor/allowlist.ts +++ b/extensions/matrix/src/matrix/monitor/allowlist.ts @@ -1,7 +1,7 @@ import { compileAllowlist, normalizeStringEntries, - resolveAllowlistCandidates, + resolveCompiledAllowlistMatch, type AllowlistMatch, } from "openclaw/plugin-sdk/matrix"; @@ -77,19 +77,13 @@ export function resolveMatrixAllowListMatch(params: { userId?: string; }): MatrixAllowListMatch { const compiledAllowList = compileAllowlist(params.allowList); - if (compiledAllowList.set.size === 0) { - return { allowed: false }; - } - if (compiledAllowList.wildcard) { - return { allowed: true, matchKey: "*", matchSource: "wildcard" }; - } const userId = normalizeMatrixUser(params.userId); const candidates: Array<{ value?: string; source: MatrixAllowListSource }> = [ { value: userId, source: "id" }, { value: userId ? `matrix:${userId}` : "", source: "prefixed-id" }, { value: userId ? 
`user:${userId}` : "", source: "prefixed-user" }, ]; - return resolveAllowlistCandidates({ + return resolveCompiledAllowlistMatch({ compiledAllowlist: compiledAllowList, candidates, }); diff --git a/extensions/matrix/src/matrix/monitor/direct.test.ts b/extensions/matrix/src/matrix/monitor/direct.test.ts index 298b3996837..6688f76e649 100644 --- a/extensions/matrix/src/matrix/monitor/direct.test.ts +++ b/extensions/matrix/src/matrix/monitor/direct.test.ts @@ -7,6 +7,8 @@ import { createDirectRoomTracker } from "./direct.js"; type StateEvent = Record; type DmMap = Record; +const brokenDmRoomId = "!broken-dm:example.org"; +const defaultBrokenDmMembers = ["@alice:example.org", "@bot:example.org"]; function createMockClient(opts: { dmRooms?: DmMap; @@ -50,6 +52,21 @@ function createMockClient(opts: { }; } +function createBrokenDmClient(roomNameEvent?: StateEvent) { + return createMockClient({ + dmRooms: {}, + membersByRoom: { + [brokenDmRoomId]: defaultBrokenDmMembers, + }, + stateEvents: { + // is_direct not set on either member (e.g. Continuwuity bug) + [`${brokenDmRoomId}|m.room.member|@alice:example.org`]: {}, + [`${brokenDmRoomId}|m.room.member|@bot:example.org`]: {}, + ...(roomNameEvent ? { [`${brokenDmRoomId}|m.room.name|`]: roomNameEvent } : {}), + }, + }); +} + // --------------------------------------------------------------------------- // Tests -- isDirectMessage // --------------------------------------------------------------------------- @@ -131,22 +148,11 @@ describe("createDirectRoomTracker", () => { describe("conservative fallback (memberCount + room name)", () => { it("returns true for 2-member room WITHOUT a room name (broken flags)", async () => { - const client = createMockClient({ - dmRooms: {}, - membersByRoom: { - "!broken-dm:example.org": ["@alice:example.org", "@bot:example.org"], - }, - stateEvents: { - // is_direct not set on either member (e.g. 
Continuwuity bug) - "!broken-dm:example.org|m.room.member|@alice:example.org": {}, - "!broken-dm:example.org|m.room.member|@bot:example.org": {}, - // No m.room.name -> getRoomStateEvent will throw (event not found) - }, - }); + const client = createBrokenDmClient(); const tracker = createDirectRoomTracker(client as never); const result = await tracker.isDirectMessage({ - roomId: "!broken-dm:example.org", + roomId: brokenDmRoomId, senderId: "@alice:example.org", }); @@ -154,21 +160,11 @@ describe("createDirectRoomTracker", () => { }); it("returns true for 2-member room with empty room name", async () => { - const client = createMockClient({ - dmRooms: {}, - membersByRoom: { - "!broken-dm:example.org": ["@alice:example.org", "@bot:example.org"], - }, - stateEvents: { - "!broken-dm:example.org|m.room.member|@alice:example.org": {}, - "!broken-dm:example.org|m.room.member|@bot:example.org": {}, - "!broken-dm:example.org|m.room.name|": { name: "" }, - }, - }); + const client = createBrokenDmClient({ name: "" }); const tracker = createDirectRoomTracker(client as never); const result = await tracker.isDirectMessage({ - roomId: "!broken-dm:example.org", + roomId: brokenDmRoomId, senderId: "@alice:example.org", }); diff --git a/extensions/matrix/src/matrix/monitor/events.test.ts b/extensions/matrix/src/matrix/monitor/events.test.ts index 9179cf69ee3..6dac0db59fc 100644 --- a/extensions/matrix/src/matrix/monitor/events.test.ts +++ b/extensions/matrix/src/matrix/monitor/events.test.ts @@ -12,6 +12,19 @@ vi.mock("../send.js", () => ({ })); describe("registerMatrixMonitorEvents", () => { + const roomId = "!room:example.org"; + + function makeEvent(overrides: Partial): MatrixRawEvent { + return { + event_id: "$event", + sender: "@alice:example.org", + type: "m.room.message", + origin_server_ts: 0, + content: {}, + ...overrides, + }; + } + beforeEach(() => { sendReadReceiptMatrixMock.mockClear(); }); @@ -53,12 +66,22 @@ describe("registerMatrixMonitorEvents", () => { return { 
client, getUserId, onRoomMessage, roomMessageHandler, logVerboseMessage }; } + async function expectForwardedWithoutReadReceipt(event: MatrixRawEvent) { + const { onRoomMessage, roomMessageHandler } = createHarness(); + + roomMessageHandler(roomId, event); + await vi.waitFor(() => { + expect(onRoomMessage).toHaveBeenCalledWith(roomId, event); + }); + expect(sendReadReceiptMatrixMock).not.toHaveBeenCalled(); + } + it("sends read receipt immediately for non-self messages", async () => { const { client, onRoomMessage, roomMessageHandler } = createHarness(); - const event = { + const event = makeEvent({ event_id: "$e1", sender: "@alice:example.org", - } as MatrixRawEvent; + }); roomMessageHandler("!room:example.org", event); @@ -69,36 +92,27 @@ describe("registerMatrixMonitorEvents", () => { }); it("does not send read receipts for self messages", async () => { - const { onRoomMessage, roomMessageHandler } = createHarness(); - const event = { - event_id: "$e2", - sender: "@bot:example.org", - } as MatrixRawEvent; - - roomMessageHandler("!room:example.org", event); - await vi.waitFor(() => { - expect(onRoomMessage).toHaveBeenCalledWith("!room:example.org", event); - }); - expect(sendReadReceiptMatrixMock).not.toHaveBeenCalled(); + await expectForwardedWithoutReadReceipt( + makeEvent({ + event_id: "$e2", + sender: "@bot:example.org", + }), + ); }); it("skips receipt when message lacks sender or event id", async () => { - const { onRoomMessage, roomMessageHandler } = createHarness(); - const event = { - sender: "@alice:example.org", - } as MatrixRawEvent; - - roomMessageHandler("!room:example.org", event); - await vi.waitFor(() => { - expect(onRoomMessage).toHaveBeenCalledWith("!room:example.org", event); - }); - expect(sendReadReceiptMatrixMock).not.toHaveBeenCalled(); + await expectForwardedWithoutReadReceipt( + makeEvent({ + sender: "@alice:example.org", + event_id: "", + }), + ); }); it("caches self user id across messages", async () => { const { getUserId, 
roomMessageHandler } = createHarness(); - const first = { event_id: "$e3", sender: "@alice:example.org" } as MatrixRawEvent; - const second = { event_id: "$e4", sender: "@bob:example.org" } as MatrixRawEvent; + const first = makeEvent({ event_id: "$e3", sender: "@alice:example.org" }); + const second = makeEvent({ event_id: "$e4", sender: "@bob:example.org" }); roomMessageHandler("!room:example.org", first); roomMessageHandler("!room:example.org", second); @@ -112,7 +126,7 @@ describe("registerMatrixMonitorEvents", () => { it("logs and continues when sending read receipt fails", async () => { sendReadReceiptMatrixMock.mockRejectedValueOnce(new Error("network boom")); const { roomMessageHandler, onRoomMessage, logVerboseMessage } = createHarness(); - const event = { event_id: "$e5", sender: "@alice:example.org" } as MatrixRawEvent; + const event = makeEvent({ event_id: "$e5", sender: "@alice:example.org" }); roomMessageHandler("!room:example.org", event); @@ -128,7 +142,7 @@ describe("registerMatrixMonitorEvents", () => { const { roomMessageHandler, onRoomMessage, getUserId } = createHarness({ getUserId: vi.fn().mockRejectedValue(new Error("cannot resolve self")), }); - const event = { event_id: "$e6", sender: "@alice:example.org" } as MatrixRawEvent; + const event = makeEvent({ event_id: "$e6", sender: "@alice:example.org" }); roomMessageHandler("!room:example.org", event); diff --git a/extensions/matrix/src/matrix/monitor/handler.ts b/extensions/matrix/src/matrix/monitor/handler.ts index 0adc9fa2886..22ee16275cf 100644 --- a/extensions/matrix/src/matrix/monitor/handler.ts +++ b/extensions/matrix/src/matrix/monitor/handler.ts @@ -686,6 +686,7 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam channel: "matrix", accountId: route.accountId, }); + const humanDelay = core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId); const typingCallbacks = createTypingCallbacks({ start: () => sendTypingMatrix(roomId, true, undefined, 
client), stop: () => sendTypingMatrix(roomId, false, undefined, client), @@ -711,7 +712,7 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam const { dispatcher, replyOptions, markDispatchIdle } = core.channel.reply.createReplyDispatcherWithTyping({ ...prefixOptions, - humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId), + humanDelay, typingCallbacks, deliver: async (payload) => { await deliverMatrixReplies({ diff --git a/extensions/matrix/src/matrix/send-queue.test.ts b/extensions/matrix/src/matrix/send-queue.test.ts index aa4765eaab3..240dd8ee71d 100644 --- a/extensions/matrix/src/matrix/send-queue.test.ts +++ b/extensions/matrix/src/matrix/send-queue.test.ts @@ -1,16 +1,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { createDeferred } from "../../../shared/deferred.js"; import { DEFAULT_SEND_GAP_MS, enqueueSend } from "./send-queue.js"; -function deferred() { - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - return { promise, resolve, reject }; -} - describe("enqueueSend", () => { beforeEach(() => { vi.useFakeTimers(); @@ -21,7 +12,7 @@ describe("enqueueSend", () => { }); it("serializes sends per room", async () => { - const gate = deferred(); + const gate = createDeferred(); const events: string[] = []; const first = enqueueSend("!room:example.org", async () => { @@ -91,7 +82,7 @@ describe("enqueueSend", () => { }); it("continues queued work when the head task fails", async () => { - const gate = deferred(); + const gate = createDeferred(); const events: string[] = []; const first = enqueueSend("!room:example.org", async () => { diff --git a/extensions/matrix/src/matrix/send.test.ts b/extensions/matrix/src/matrix/send.test.ts index dabe915b388..2bf21023909 100644 --- a/extensions/matrix/src/matrix/send.test.ts +++ 
b/extensions/matrix/src/matrix/send.test.ts @@ -1,6 +1,7 @@ import type { PluginRuntime } from "openclaw/plugin-sdk/matrix"; import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; import { setMatrixRuntime } from "../runtime.js"; +import { createMatrixBotSdkMock } from "../test-mocks.js"; vi.mock("music-metadata", () => ({ // `resolveMediaDurationMs` lazily imports `music-metadata`; in tests we don't @@ -8,21 +9,13 @@ vi.mock("music-metadata", () => ({ parseBuffer: vi.fn().mockResolvedValue({ format: {} }), })); -vi.mock("@vector-im/matrix-bot-sdk", () => ({ - ConsoleLogger: class { - trace = vi.fn(); - debug = vi.fn(); - info = vi.fn(); - warn = vi.fn(); - error = vi.fn(); - }, - LogService: { - setLogger: vi.fn(), - }, - MatrixClient: vi.fn(), - SimpleFsStorageProvider: vi.fn(), - RustSdkCryptoStorageProvider: vi.fn(), -})); +vi.mock("@vector-im/matrix-bot-sdk", () => + createMatrixBotSdkMock({ + matrixClient: vi.fn(), + simpleFsStorageProvider: vi.fn(), + rustSdkCryptoStorageProvider: vi.fn(), + }), +); vi.mock("./send-queue.js", () => ({ enqueueSend: async (_roomId: string, fn: () => Promise) => await fn(), diff --git a/extensions/matrix/src/resolve-targets.test.ts b/extensions/matrix/src/resolve-targets.test.ts index 10dff313a2e..02a5088e8ae 100644 --- a/extensions/matrix/src/resolve-targets.test.ts +++ b/extensions/matrix/src/resolve-targets.test.ts @@ -8,6 +8,15 @@ vi.mock("./directory-live.js", () => ({ listMatrixDirectoryGroupsLive: vi.fn(), })); +async function resolveUserTarget(input = "Alice") { + const [result] = await resolveMatrixTargets({ + cfg: {}, + inputs: [input], + kind: "user", + }); + return result; +} + describe("resolveMatrixTargets (users)", () => { beforeEach(() => { vi.mocked(listMatrixDirectoryPeersLive).mockReset(); @@ -20,11 +29,7 @@ describe("resolveMatrixTargets (users)", () => { ]; vi.mocked(listMatrixDirectoryPeersLive).mockResolvedValue(matches); - const [result] = await resolveMatrixTargets({ - cfg: {}, - 
inputs: ["Alice"], - kind: "user", - }); + const result = await resolveUserTarget(); expect(result?.resolved).toBe(true); expect(result?.id).toBe("@alice:example.org"); @@ -37,11 +42,7 @@ describe("resolveMatrixTargets (users)", () => { ]; vi.mocked(listMatrixDirectoryPeersLive).mockResolvedValue(matches); - const [result] = await resolveMatrixTargets({ - cfg: {}, - inputs: ["Alice"], - kind: "user", - }); + const result = await resolveUserTarget(); expect(result?.resolved).toBe(false); expect(result?.note).toMatch(/use full Matrix ID/i); diff --git a/extensions/matrix/src/test-mocks.ts b/extensions/matrix/src/test-mocks.ts new file mode 100644 index 00000000000..687b94459ea --- /dev/null +++ b/extensions/matrix/src/test-mocks.ts @@ -0,0 +1,53 @@ +import type { Mock } from "vitest"; +import { vi } from "vitest"; + +type MatrixBotSdkMockParams = { + matrixClient?: unknown; + simpleFsStorageProvider?: unknown; + rustSdkCryptoStorageProvider?: unknown; + includeVerboseLogService?: boolean; +}; + +type MatrixBotSdkMock = { + ConsoleLogger: new () => { + trace: Mock<() => void>; + debug: Mock<() => void>; + info: Mock<() => void>; + warn: Mock<() => void>; + error: Mock<() => void>; + }; + MatrixClient: unknown; + LogService: { + setLogger: Mock<() => void>; + warn?: Mock<() => void>; + info?: Mock<() => void>; + debug?: Mock<() => void>; + }; + SimpleFsStorageProvider: unknown; + RustSdkCryptoStorageProvider: unknown; +}; + +export function createMatrixBotSdkMock(params: MatrixBotSdkMockParams = {}): MatrixBotSdkMock { + return { + ConsoleLogger: class { + trace = vi.fn(); + debug = vi.fn(); + info = vi.fn(); + warn = vi.fn(); + error = vi.fn(); + }, + MatrixClient: params.matrixClient ?? class {}, + LogService: { + setLogger: vi.fn(), + ...(params.includeVerboseLogService + ? { + warn: vi.fn(), + info: vi.fn(), + debug: vi.fn(), + } + : {}), + }, + SimpleFsStorageProvider: params.simpleFsStorageProvider ?? 
class {}, + RustSdkCryptoStorageProvider: params.rustSdkCryptoStorageProvider ?? class {}, + }; +} diff --git a/extensions/mattermost/src/channel.ts b/extensions/mattermost/src/channel.ts index f8116e127b3..c872b8d5085 100644 --- a/extensions/mattermost/src/channel.ts +++ b/extensions/mattermost/src/channel.ts @@ -21,6 +21,7 @@ import { type ChannelMessageActionName, type ChannelPlugin, } from "openclaw/plugin-sdk/mattermost"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { MattermostConfigSchema } from "./config-schema.js"; import { resolveMattermostGroupRequireMention } from "./group-mentions.js"; import { @@ -419,18 +420,12 @@ export const mattermostPlugin: ChannelPlugin = { lastStopAt: null, lastError: null, }, - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - botTokenSource: snapshot.botTokenSource ?? "none", - running: snapshot.running ?? false, - connected: snapshot.connected ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - baseUrl: snapshot.baseUrl ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + botTokenSource: snapshot.botTokenSource ?? "none", + connected: snapshot.connected ?? false, + baseUrl: snapshot.baseUrl ?? 
null, + }), probeAccount: async ({ account, timeoutMs }) => { const token = account.botToken?.trim(); const baseUrl = account.baseUrl?.trim(); diff --git a/extensions/mattermost/src/config-schema.ts b/extensions/mattermost/src/config-schema.ts index 43dd7ede8d2..16ee615454c 100644 --- a/extensions/mattermost/src/config-schema.ts +++ b/extensions/mattermost/src/config-schema.ts @@ -6,6 +6,7 @@ import { requireOpenAllowFrom, } from "openclaw/plugin-sdk/mattermost"; import { z } from "zod"; +import { requireChannelOpenAllowFrom } from "../../shared/config-schema-helpers.js"; import { buildSecretInputSchema } from "./secret-input.js"; const MattermostSlashCommandsSchema = z @@ -61,13 +62,12 @@ const MattermostAccountSchemaBase = z .strict(); const MattermostAccountSchema = MattermostAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "mattermost", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.mattermost.dmPolicy="open" requires channels.mattermost.allowFrom to include "*"', + requireOpenAllowFrom, }); }); @@ -75,12 +75,11 @@ export const MattermostConfigSchema = MattermostAccountSchemaBase.extend({ accounts: z.record(z.string(), MattermostAccountSchema.optional()).optional(), defaultAccount: z.string().optional(), }).superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "mattermost", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.mattermost.dmPolicy="open" requires channels.mattermost.allowFrom to include "*"', + requireOpenAllowFrom, }); }); diff --git a/extensions/mattermost/src/mattermost/client.test.ts b/extensions/mattermost/src/mattermost/client.test.ts index 3d325dda527..7d49ad3c573 100644 --- a/extensions/mattermost/src/mattermost/client.test.ts +++ b/extensions/mattermost/src/mattermost/client.test.ts @@ -27,6 +27,28 @@ function 
createMockFetch(response?: { status?: number; body?: unknown; contentTy return { mockFetch: mockFetch as unknown as typeof fetch, calls }; } +function createTestClient(response?: { status?: number; body?: unknown; contentType?: string }) { + const { mockFetch, calls } = createMockFetch(response); + const client = createMattermostClient({ + baseUrl: "http://localhost:8065", + botToken: "tok", + fetchImpl: mockFetch, + }); + return { client, calls }; +} + +async function updatePostAndCapture( + update: Parameters[2], + response?: { status?: number; body?: unknown; contentType?: string }, +) { + const { client, calls } = createTestClient(response ?? { body: { id: "post1" } }); + await updateMattermostPost(client, "post1", update); + return { + calls, + body: JSON.parse(calls[0].init?.body as string) as Record, + }; +} + // ── normalizeMattermostBaseUrl ──────────────────────────────────────── describe("normalizeMattermostBaseUrl", () => { @@ -229,68 +251,38 @@ describe("createMattermostPost", () => { describe("updateMattermostPost", () => { it("sends PUT to /posts/{id}", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { message: "Updated" }); + const { calls } = await updatePostAndCapture({ message: "Updated" }); expect(calls[0].url).toContain("/posts/post1"); expect(calls[0].init?.method).toBe("PUT"); }); it("includes post id in the body", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { message: "Updated" }); - - const body = JSON.parse(calls[0].init?.body as string); + const { body } = await updatePostAndCapture({ message: "Updated" }); 
expect(body.id).toBe("post1"); expect(body.message).toBe("Updated"); }); it("includes props for button completion updates", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { + const { body } = await updatePostAndCapture({ message: "Original message", props: { attachments: [{ text: "✓ **do_now** selected by @tony" }], }, }); - - const body = JSON.parse(calls[0].init?.body as string); expect(body.message).toBe("Original message"); - expect(body.props.attachments[0].text).toContain("✓"); - expect(body.props.attachments[0].text).toContain("do_now"); + expect(body.props).toMatchObject({ + attachments: [{ text: expect.stringContaining("✓") }], + }); + expect(body.props).toMatchObject({ + attachments: [{ text: expect.stringContaining("do_now") }], + }); }); it("omits message when not provided", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { + const { body } = await updatePostAndCapture({ props: { attachments: [] }, }); - - const body = JSON.parse(calls[0].init?.body as string); expect(body.id).toBe("post1"); expect(body.message).toBeUndefined(); expect(body.props).toEqual({ attachments: [] }); diff --git a/extensions/mattermost/src/mattermost/interactions.test.ts b/extensions/mattermost/src/mattermost/interactions.test.ts index 3f52982cc52..62c7bdb757f 100644 --- a/extensions/mattermost/src/mattermost/interactions.test.ts +++ b/extensions/mattermost/src/mattermost/interactions.test.ts @@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => { return res as unknown as ServerResponse & { headers: Record; body: string }; } + 
function createActionContext(actionId = "approve", channelId = "chan-1") { + const context = { action_id: actionId, __openclaw_channel_id: channelId }; + return { context, token: generateInteractionToken(context, "acct") }; + } + + function createInteractionBody(params: { + context: Record; + token: string; + channelId?: string; + postId?: string; + userId?: string; + userName?: string; + }) { + return { + user_id: params.userId ?? "user-1", + ...(params.userName ? { user_name: params.userName } : {}), + channel_id: params.channelId ?? "chan-1", + post_id: params.postId ?? "post-1", + context: { ...params.context, _token: params.token }, + }; + } + + async function runHandler( + handler: ReturnType, + params: { + body: unknown; + remoteAddress?: string; + headers?: Record; + }, + ) { + const req = createReq({ + remoteAddress: params.remoteAddress, + headers: params.headers, + body: params.body, + }); + const res = createRes(); + await handler(req, res); + return res; + } + + function expectForbiddenResponse( + res: ServerResponse & { body: string }, + expectedMessage: string, + ) { + expect(res.statusCode).toBe(403); + expect(res.body).toContain(expectedMessage); + } + + function expectSuccessfulApprovalUpdate( + res: ServerResponse & { body: string }, + requestLog?: Array<{ path: string; method?: string }>, + ) { + expect(res.statusCode).toBe(200); + expect(res.body).toBe("{}"); + if (requestLog) { + expect(requestLog).toEqual([ + { path: "/posts/post-1", method: undefined }, + { path: "/posts/post-1", method: "PUT" }, + ]); + } + } + + function createActionPost(params?: { + actionId?: string; + actionName?: string; + channelId?: string; + rootId?: string; + }): MattermostPost { + return { + id: "post-1", + channel_id: params?.channelId ?? "chan-1", + ...(params?.rootId ? { root_id: params.rootId } : {}), + message: "Choose", + props: { + attachments: [ + { + actions: [ + { + id: params?.actionId ?? "approve", + name: params?.actionName ?? 
"Approve", + }, + ], + }, + ], + }, + }; + } + + function createUnusedInteractionHandler() { + return createMattermostInteractionHandler({ + client: { + request: async () => ({ message: "unused" }), + } as unknown as MattermostClient, + botUserId: "bot", + accountId: "acct", + }); + } + async function runApproveInteraction(params?: { actionName?: string; allowedSourceIps?: string[]; @@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress?: string; headers?: Record; }) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const requestLog: Array<{ path: string; method?: string }> = []; const handler = createMattermostInteractionHandler({ client: { @@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => { if (init?.method === "PUT") { return { id: "post-1" }; } - return { - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [ - { actions: [{ id: "approve", name: params?.actionName ?? 
"Approve" }] }, - ], - }, - }; + return createActionPost({ actionName: params?.actionName }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => { trustedProxies: params?.trustedProxies, }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: params?.remoteAddress, headers: params?.headers, - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - await handler(req, res); return { res, requestLog }; } async function runInvalidActionRequest(actionId: string) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: actionId, name: actionId }] }], - }, - }), + request: async () => createActionPost({ actionId, actionName: actionId }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + return await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - await handler(req, res); - return res; } it("accepts callback requests from an allowlisted source IP", async () => { @@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress: "198.51.100.8", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", 
method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => { @@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => { }); it("rejects callback requests from non-allowlisted source IPs", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { request: async () => { @@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => { allowedSourceIps: ["127.0.0.1"], }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: "198.51.100.8", - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Forbidden origin"); + expectForbiddenResponse(res, "Forbidden origin"); }); it("rejects requests with an invalid interaction token", async () => { - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", - }); + const handler = createUnusedInteractionHandler(); - const req = createReq({ + const res = await runHandler(handler, { body: { user_id: "user-1", channel_id: "chan-1", @@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => { context: { action_id: "approve", _token: "deadbeef" }, }, }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Invalid token"); + expectForbiddenResponse(res, "Invalid token"); }); it("rejects requests when the signed channel 
does not match the callback payload", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", + const { context, token } = createActionContext(); + const handler = createUnusedInteractionHandler(); + + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, channelId: "chan-2" }), }); - - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-2", - post_id: "post-1", - context: { ...context, _token: token }, - }, - }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Channel mismatch"); + expectForbiddenResponse(res, "Channel mismatch"); }); it("rejects requests when the fetched post does not belong to the callback channel", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }), + request: async () => createActionPost({ channelId: "chan-9" }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Post/channel 
mismatch"); + expectForbiddenResponse(res, "Post/channel mismatch"); }); it("rejects requests when the action is not present on the fetched post", async () => { @@ -730,12 +735,7 @@ describe("createMattermostInteractionHandler", () => { actionName: "approve", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("forwards fetched post threading metadata to session and button callbacks", async () => { @@ -745,19 +745,10 @@ describe("createMattermostInteractionHandler", () => { enqueueSystemEvent, }, } as unknown as Parameters[0]); - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9"); const dispatchButtonClick = vi.fn(); - const fetchedPost: MattermostPost = { - id: "post-1", - channel_id: "chan-1", - root_id: "root-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }; + const fetchedPost = createActionPost({ rootId: "root-9" }); const handler = createMattermostInteractionHandler({ client: { request: async (_path: string, init?: { method?: string }) => @@ -769,19 +760,9 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - - await handler(req, res); - expect(res.statusCode).toBe(200); expect(resolveSessionKey).toHaveBeenCalledWith({ channelId: "chan-1", @@ 
-803,8 +784,7 @@ describe("createMattermostInteractionHandler", () => { }); it("lets a custom interaction handler short-circuit generic completion updates", async () => { - const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext("mdlprov"); const requestLog: Array<{ path: string; method?: string }> = []; const handleInteraction = vi.fn().mockResolvedValue({ ephemeral_text: "Only the original requester can use this picker.", @@ -814,14 +794,10 @@ describe("createMattermostInteractionHandler", () => { client: { request: async (path: string, init?: { method?: string }) => { requestLog.push({ path, method: init?.method }); - return { - id: "post-1", - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }], - }, - }; + return createActionPost({ + actionId: "mdlprov", + actionName: "Browse providers", + }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -830,18 +806,14 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-2", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ + context, + token, + userId: "user-2", + userName: "alice", + }), }); - const res = createRes(); - - await handler(req, res); expect(res.statusCode).toBe(200); expect(res.body).toBe( diff --git a/extensions/mattermost/src/mattermost/model-picker.test.ts b/extensions/mattermost/src/mattermost/model-picker.test.ts index b448339523e..cebafc4a1bc 100644 --- a/extensions/mattermost/src/mattermost/model-picker.test.ts +++ b/extensions/mattermost/src/mattermost/model-picker.test.ts @@ -60,6 +60,15 @@ describe("Mattermost model picker", () => { 
expect(view.buttons[0]?.[0]?.text).toBe("Browse providers"); }); + it("trims accidental model spacing in Mattermost current-model text", () => { + const view = renderMattermostModelSummaryView({ + ownerUserId: "user-1", + currentModel: " OpenAI/ gpt-5 ", + }); + + expect(view.text).toContain("Current: openai/gpt-5"); + }); + it("renders providers and models with Telegram-style navigation", () => { const providersView = renderMattermostProviderPickerView({ ownerUserId: "user-1", diff --git a/extensions/mattermost/src/mattermost/model-picker.ts b/extensions/mattermost/src/mattermost/model-picker.ts index 42462180901..1547041a74a 100644 --- a/extensions/mattermost/src/mattermost/model-picker.ts +++ b/extensions/mattermost/src/mattermost/model-picker.ts @@ -36,15 +36,13 @@ export type MattermostModelPickerRenderedView = { function splitModelRef(modelRef?: string | null): { provider: string; model: string } | null { const trimmed = modelRef?.trim(); - if (!trimmed) { + const match = trimmed?.match(/^([^/]+)\/(.+)$/u); + if (!match) { return null; } - const slashIndex = trimmed.indexOf("/"); - if (slashIndex <= 0 || slashIndex >= trimmed.length - 1) { - return null; - } - const provider = normalizeProviderId(trimmed.slice(0, slashIndex)); - const model = trimmed.slice(slashIndex + 1).trim(); + const provider = normalizeProviderId(match[1]); + // Mattermost copy should normalize accidental whitespace around the model. 
+ const model = match[2].trim(); if (!provider || !model) { return null; } diff --git a/extensions/mattermost/src/mattermost/monitor-helpers.ts b/extensions/mattermost/src/mattermost/monitor-helpers.ts index de264e6cf2c..219c0562638 100644 --- a/extensions/mattermost/src/mattermost/monitor-helpers.ts +++ b/extensions/mattermost/src/mattermost/monitor-helpers.ts @@ -41,12 +41,12 @@ function normalizeAgentId(value: string | undefined | null): string { type AgentEntry = NonNullable["list"]>[number]; +function isAgentEntry(entry: unknown): entry is AgentEntry { + return Boolean(entry && typeof entry === "object"); +} + function listAgents(cfg: OpenClawConfig): AgentEntry[] { - const list = cfg.agents?.list; - if (!Array.isArray(list)) { - return []; - } - return list.filter((entry): entry is AgentEntry => Boolean(entry && typeof entry === "object")); + return Array.isArray(cfg.agents?.list) ? cfg.agents.list.filter(isAgentEntry) : []; } function resolveAgentEntry(cfg: OpenClawConfig, agentId: string): AgentEntry | undefined { diff --git a/extensions/mattermost/src/mattermost/monitor.authz.test.ts b/extensions/mattermost/src/mattermost/monitor.authz.test.ts index 92fd0a3c3f4..68919da7908 100644 --- a/extensions/mattermost/src/mattermost/monitor.authz.test.ts +++ b/extensions/mattermost/src/mattermost/monitor.authz.test.ts @@ -16,6 +16,35 @@ const accountFixture: ResolvedMattermostAccount = { config: {}, }; +function authorizeGroupCommand(senderId: string) { + return authorizeMattermostCommandInvocation({ + account: { + ...accountFixture, + config: { + groupPolicy: "allowlist", + allowFrom: ["trusted-user"], + }, + }, + cfg: { + commands: { + useAccessGroups: true, + }, + }, + senderId, + senderName: senderId, + channelId: "chan-1", + channelInfo: { + id: "chan-1", + type: "O", + name: "general", + display_name: "General", + }, + storeAllowFrom: [], + allowTextCommands: true, + hasControlCommand: true, + }); +} + describe("mattermost monitor authz", () => { it("keeps DM 
allowlist merged with pairing-store entries", () => { const resolved = resolveMattermostEffectiveAllowFromLists({ @@ -72,32 +101,7 @@ describe("mattermost monitor authz", () => { }); it("denies group control commands when the sender is outside the allowlist", () => { - const decision = authorizeMattermostCommandInvocation({ - account: { - ...accountFixture, - config: { - groupPolicy: "allowlist", - allowFrom: ["trusted-user"], - }, - }, - cfg: { - commands: { - useAccessGroups: true, - }, - }, - senderId: "attacker", - senderName: "attacker", - channelId: "chan-1", - channelInfo: { - id: "chan-1", - type: "O", - name: "general", - display_name: "General", - }, - storeAllowFrom: [], - allowTextCommands: true, - hasControlCommand: true, - }); + const decision = authorizeGroupCommand("attacker"); expect(decision).toMatchObject({ ok: false, @@ -107,32 +111,7 @@ describe("mattermost monitor authz", () => { }); it("authorizes group control commands for allowlisted senders", () => { - const decision = authorizeMattermostCommandInvocation({ - account: { - ...accountFixture, - config: { - groupPolicy: "allowlist", - allowFrom: ["trusted-user"], - }, - }, - cfg: { - commands: { - useAccessGroups: true, - }, - }, - senderId: "trusted-user", - senderName: "trusted-user", - channelId: "chan-1", - channelInfo: { - id: "chan-1", - type: "O", - name: "general", - display_name: "General", - }, - storeAllowFrom: [], - allowTextCommands: true, - hasControlCommand: true, - }); + const decision = authorizeGroupCommand("trusted-user"); expect(decision).toMatchObject({ ok: true, diff --git a/extensions/mattermost/src/mattermost/reactions.test.ts b/extensions/mattermost/src/mattermost/reactions.test.ts index 0b07c1b497b..2659f2e1a99 100644 --- a/extensions/mattermost/src/mattermost/reactions.test.ts +++ b/extensions/mattermost/src/mattermost/reactions.test.ts @@ -14,6 +14,28 @@ describe("mattermost reactions", () => { resetMattermostReactionBotUserCacheForTests(); }); + async function 
addReactionWithFetch( + fetchMock: ReturnType, + ) { + return addMattermostReaction({ + cfg: createMattermostTestConfig(), + postId: "POST1", + emojiName: "thumbsup", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + } + + async function removeReactionWithFetch( + fetchMock: ReturnType, + ) { + return removeMattermostReaction({ + cfg: createMattermostTestConfig(), + postId: "POST1", + emojiName: "thumbsup", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + } + it("adds reactions by calling /users/me then POST /reactions", async () => { const fetchMock = createMattermostReactionFetchMock({ mode: "add", @@ -21,12 +43,7 @@ describe("mattermost reactions", () => { emojiName: "thumbsup", }); - const result = await addMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await addReactionWithFetch(fetchMock); expect(result).toEqual({ ok: true }); expect(fetchMock).toHaveBeenCalled(); @@ -41,12 +58,7 @@ describe("mattermost reactions", () => { body: { id: "err", message: "boom" }, }); - const result = await addMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await addReactionWithFetch(fetchMock); expect(result.ok).toBe(false); if (!result.ok) { @@ -61,12 +73,7 @@ describe("mattermost reactions", () => { emojiName: "thumbsup", }); - const result = await removeMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await removeReactionWithFetch(fetchMock); expect(result).toEqual({ ok: true }); expect(fetchMock).toHaveBeenCalled(); diff --git a/extensions/mattermost/src/mattermost/send.test.ts b/extensions/mattermost/src/mattermost/send.test.ts index cebb82ef7e3..774f40f99fa 100644 --- 
a/extensions/mattermost/src/mattermost/send.test.ts +++ b/extensions/mattermost/src/mattermost/send.test.ts @@ -1,4 +1,8 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + expectProvidedCfgSkipsRuntimeLoad, + expectRuntimeCfgFallback, +} from "../../../test-utils/send-config.js"; import { parseMattermostTarget, sendMessageMattermost } from "./send.js"; import { resetMattermostOpaqueTargetCacheForTests } from "./target-resolution.js"; @@ -107,8 +111,9 @@ describe("sendMessageMattermost", () => { accountId: "work", }); - expect(mockState.loadConfig).not.toHaveBeenCalled(); - expect(mockState.resolveMattermostAccount).toHaveBeenCalledWith({ + expectProvidedCfgSkipsRuntimeLoad({ + loadConfig: mockState.loadConfig, + resolveAccount: mockState.resolveMattermostAccount, cfg: providedCfg, accountId: "work", }); @@ -126,8 +131,9 @@ describe("sendMessageMattermost", () => { await sendMessageMattermost("channel:town-square", "hello"); - expect(mockState.loadConfig).toHaveBeenCalledTimes(1); - expect(mockState.resolveMattermostAccount).toHaveBeenCalledWith({ + expectRuntimeCfgFallback({ + loadConfig: mockState.loadConfig, + resolveAccount: mockState.resolveMattermostAccount, cfg: runtimeCfg, accountId: undefined, }); diff --git a/extensions/mattermost/src/mattermost/slash-commands.test.ts b/extensions/mattermost/src/mattermost/slash-commands.test.ts index 4beaea98ca5..d53c8f99203 100644 --- a/extensions/mattermost/src/mattermost/slash-commands.test.ts +++ b/extensions/mattermost/src/mattermost/slash-commands.test.ts @@ -10,6 +10,25 @@ import { } from "./slash-commands.js"; describe("slash-commands", () => { + async function registerSingleStatusCommand( + request: (path: string, init?: { method?: string }) => Promise, + ) { + const client = { request } as unknown as MattermostClient; + return registerSlashCommands({ + client, + teamId: "team-1", + creatorUserId: "bot-user", + callbackUrl: "http://gateway/callback", + commands: [ + { + trigger: 
"oc_status", + description: "status", + autoComplete: true, + }, + ], + }); + } + it("parses application/x-www-form-urlencoded payloads", () => { const payload = parseSlashCommandPayload( "token=t1&team_id=team&channel_id=ch1&user_id=u1&command=%2Foc_status&text=now", @@ -101,21 +120,7 @@ describe("slash-commands", () => { } throw new Error(`unexpected request path: ${path}`); }); - const client = { request } as unknown as MattermostClient; - - const result = await registerSlashCommands({ - client, - teamId: "team-1", - creatorUserId: "bot-user", - callbackUrl: "http://gateway/callback", - commands: [ - { - trigger: "oc_status", - description: "status", - autoComplete: true, - }, - ], - }); + const result = await registerSingleStatusCommand(request); expect(result).toHaveLength(1); expect(result[0]?.managed).toBe(false); @@ -144,21 +149,7 @@ describe("slash-commands", () => { } throw new Error(`unexpected request path: ${path}`); }); - const client = { request } as unknown as MattermostClient; - - const result = await registerSlashCommands({ - client, - teamId: "team-1", - creatorUserId: "bot-user", - callbackUrl: "http://gateway/callback", - commands: [ - { - trigger: "oc_status", - description: "status", - autoComplete: true, - }, - ], - }); + const result = await registerSingleStatusCommand(request); expect(result).toHaveLength(0); expect(request).toHaveBeenCalledTimes(1); diff --git a/extensions/mattermost/src/mattermost/slash-http.test.ts b/extensions/mattermost/src/mattermost/slash-http.test.ts index 92a6babe35c..a89bfc4e33a 100644 --- a/extensions/mattermost/src/mattermost/slash-http.test.ts +++ b/extensions/mattermost/src/mattermost/slash-http.test.ts @@ -58,6 +58,23 @@ const accountFixture: ResolvedMattermostAccount = { config: {}, }; +async function runSlashRequest(params: { + commandTokens: Set; + body: string; + method?: string; +}) { + const handler = createSlashCommandHttpHandler({ + account: accountFixture, + cfg: {} as OpenClawConfig, + runtime: {} 
as RuntimeEnv, + commandTokens: params.commandTokens, + }); + const req = createRequest({ method: params.method, body: params.body }); + const response = createResponse(); + await handler(req, response.res); + return response; +} + describe("slash-http", () => { it("rejects non-POST methods", async () => { const handler = createSlashCommandHttpHandler({ @@ -93,36 +110,20 @@ describe("slash-http", () => { }); it("fails closed when no command tokens are registered", async () => { - const handler = createSlashCommandHttpHandler({ - account: accountFixture, - cfg: {} as OpenClawConfig, - runtime: {} as RuntimeEnv, + const response = await runSlashRequest({ commandTokens: new Set(), - }); - const req = createRequest({ body: "token=tok1&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=", }); - const response = createResponse(); - - await handler(req, response.res); expect(response.res.statusCode).toBe(401); expect(response.getBody()).toContain("Unauthorized: invalid command token."); }); it("rejects unknown command tokens", async () => { - const handler = createSlashCommandHttpHandler({ - account: accountFixture, - cfg: {} as OpenClawConfig, - runtime: {} as RuntimeEnv, + const response = await runSlashRequest({ commandTokens: new Set(["known-token"]), - }); - const req = createRequest({ body: "token=unknown&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=", }); - const response = createResponse(); - - await handler(req, response.res); expect(response.res.statusCode).toBe(401); expect(response.getBody()).toContain("Unauthorized: invalid command token."); diff --git a/extensions/mattermost/src/mattermost/slash-http.ts b/extensions/mattermost/src/mattermost/slash-http.ts index 36a5643e3fd..468f5c3584c 100644 --- a/extensions/mattermost/src/mattermost/slash-http.ts +++ b/extensions/mattermost/src/mattermost/slash-http.ts @@ -475,6 +475,7 @@ async function handleSlashCommandAsync(params: { channel: "mattermost", accountId: account.accountId, }); + 
const humanDelay = core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId); const typingCallbacks = createTypingCallbacks({ start: () => sendMattermostTyping(client, { channelId }), @@ -491,7 +492,7 @@ async function handleSlashCommandAsync(params: { const { dispatcher, replyOptions, markDispatchIdle } = core.channel.reply.createReplyDispatcherWithTyping({ ...prefixOptions, - humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId), + humanDelay, deliver: async (payload: ReplyPayload) => { await deliverMattermostReplyPayload({ core, diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index 9f0bc40571d..969bff3e07c 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -4,6 +4,9 @@ "private": true, "description": "OpenClaw core memory search plugin", "type": "module", + "devDependencies": { + "openclaw": "workspace:*" + }, "peerDependencies": { "openclaw": ">=2026.3.11" }, diff --git a/extensions/memory-lancedb/index.test.ts b/extensions/memory-lancedb/index.test.ts index 2d9a6db1063..a733c3dffb8 100644 --- a/extensions/memory-lancedb/index.test.ts +++ b/extensions/memory-lancedb/index.test.ts @@ -18,12 +18,12 @@ const HAS_OPENAI_KEY = Boolean(process.env.OPENAI_API_KEY); const liveEnabled = HAS_OPENAI_KEY && process.env.OPENCLAW_LIVE_TEST === "1"; const describeLive = liveEnabled ? 
describe : describe.skip; -describe("memory plugin e2e", () => { - let tmpDir: string; - let dbPath: string; +function installTmpDirHarness(params: { prefix: string }) { + let tmpDir = ""; + let dbPath = ""; beforeEach(async () => { - tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-test-")); + tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), params.prefix)); dbPath = path.join(tmpDir, "lancedb"); }); @@ -33,6 +33,27 @@ describe("memory plugin e2e", () => { } }); + return { + getTmpDir: () => tmpDir, + getDbPath: () => dbPath, + }; +} + +describe("memory plugin e2e", () => { + const { getDbPath } = installTmpDirHarness({ prefix: "openclaw-memory-test-" }); + + async function parseConfig(overrides: Record = {}) { + const { default: memoryPlugin } = await import("./index.js"); + return memoryPlugin.configSchema?.parse?.({ + embedding: { + apiKey: OPENAI_API_KEY, + model: "text-embedding-3-small", + }, + dbPath: getDbPath(), + ...overrides, + }); + } + test("memory plugin registers and initializes correctly", async () => { // Dynamic import to avoid loading LanceDB when not testing const { default: memoryPlugin } = await import("./index.js"); @@ -46,21 +67,14 @@ describe("memory plugin e2e", () => { }); test("config schema parses valid config", async () => { - const { default: memoryPlugin } = await import("./index.js"); - - const config = memoryPlugin.configSchema?.parse?.({ - embedding: { - apiKey: OPENAI_API_KEY, - model: "text-embedding-3-small", - }, - dbPath, + const config = await parseConfig({ autoCapture: true, autoRecall: true, }); expect(config).toBeDefined(); expect(config?.embedding?.apiKey).toBe(OPENAI_API_KEY); - expect(config?.dbPath).toBe(dbPath); + expect(config?.dbPath).toBe(getDbPath()); expect(config?.captureMaxChars).toBe(500); }); @@ -74,7 +88,7 @@ describe("memory plugin e2e", () => { embedding: { apiKey: "${TEST_MEMORY_API_KEY}", }, - dbPath, + dbPath: getDbPath(), }); expect(config?.embedding?.apiKey).toBe("test-key-123"); @@ 
-88,7 +102,7 @@ describe("memory plugin e2e", () => { expect(() => { memoryPlugin.configSchema?.parse?.({ embedding: {}, - dbPath, + dbPath: getDbPath(), }); }).toThrow("embedding.apiKey is required"); }); @@ -99,21 +113,14 @@ describe("memory plugin e2e", () => { expect(() => { memoryPlugin.configSchema?.parse?.({ embedding: { apiKey: OPENAI_API_KEY }, - dbPath, + dbPath: getDbPath(), captureMaxChars: 99, }); }).toThrow("captureMaxChars must be between 100 and 10000"); }); test("config schema accepts captureMaxChars override", async () => { - const { default: memoryPlugin } = await import("./index.js"); - - const config = memoryPlugin.configSchema?.parse?.({ - embedding: { - apiKey: OPENAI_API_KEY, - model: "text-embedding-3-small", - }, - dbPath, + const config = await parseConfig({ captureMaxChars: 1800, }); @@ -121,15 +128,7 @@ describe("memory plugin e2e", () => { }); test("config schema keeps autoCapture disabled by default", async () => { - const { default: memoryPlugin } = await import("./index.js"); - - const config = memoryPlugin.configSchema?.parse?.({ - embedding: { - apiKey: OPENAI_API_KEY, - model: "text-embedding-3-small", - }, - dbPath, - }); + const config = await parseConfig(); expect(config?.autoCapture).toBe(false); expect(config?.autoRecall).toBe(true); @@ -176,7 +175,7 @@ describe("memory plugin e2e", () => { model: "text-embedding-3-small", dimensions: 1024, }, - dbPath, + dbPath: getDbPath(), autoCapture: false, autoRecall: false, }, @@ -279,19 +278,7 @@ describe("memory plugin e2e", () => { // Live tests that require OpenAI API key and actually use LanceDB describeLive("memory plugin live tests", () => { - let tmpDir: string; - let dbPath: string; - - beforeEach(async () => { - tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-live-")); - dbPath = path.join(tmpDir, "lancedb"); - }); - - afterEach(async () => { - if (tmpDir) { - await fs.rm(tmpDir, { recursive: true, force: true }); - } - }); + const { getDbPath } = 
installTmpDirHarness({ prefix: "openclaw-memory-live-" }); test("memory tools work end-to-end", async () => { const { default: memoryPlugin } = await import("./index.js"); @@ -318,7 +305,7 @@ describeLive("memory plugin live tests", () => { apiKey: liveApiKey, model: "text-embedding-3-small", }, - dbPath, + dbPath: getDbPath(), autoCapture: false, autoRecall: false, }, diff --git a/extensions/memory-lancedb/package.json b/extensions/memory-lancedb/package.json index 89d3e4385d0..9e1af0d7df2 100644 --- a/extensions/memory-lancedb/package.json +++ b/extensions/memory-lancedb/package.json @@ -7,7 +7,7 @@ "dependencies": { "@lancedb/lancedb": "^0.26.2", "@sinclair/typebox": "0.34.48", - "openai": "^6.27.0" + "openai": "^6.29.0" }, "openclaw": { "extensions": [ diff --git a/extensions/msteams/src/attachments.test.ts b/extensions/msteams/src/attachments.test.ts index 6887fad7fcb..790dc8bd33f 100644 --- a/extensions/msteams/src/attachments.test.ts +++ b/extensions/msteams/src/attachments.test.ts @@ -88,14 +88,17 @@ function isUrlAllowedBySsrfPolicy(url: string, policy?: SsrFPolicy): boolean { ); } -const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { +async function fetchRemoteMediaWithRedirects( + params: RemoteMediaFetchParams, + requestInit?: RequestInit, +) { const fetchFn = params.fetchImpl ?? 
fetch; let currentUrl = params.url; for (let i = 0; i <= MAX_REDIRECT_HOPS; i += 1) { if (!isUrlAllowedBySsrfPolicy(currentUrl, params.ssrfPolicy)) { throw new Error(`Blocked hostname (not in allowlist): ${currentUrl}`); } - const res = await fetchFn(currentUrl, { redirect: "manual" }); + const res = await fetchFn(currentUrl, { redirect: "manual", ...requestInit }); if (REDIRECT_STATUS_CODES.includes(res.status)) { const location = res.headers.get("location"); if (!location) { @@ -107,6 +110,10 @@ const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { return readRemoteMediaResponse(res, params); } throw new Error("too many redirects"); +} + +const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { + return await fetchRemoteMediaWithRedirects(params); }); const runtimeStub: PluginRuntime = createPluginRuntimeMock({ @@ -720,24 +727,9 @@ describe("msteams attachments", () => { }); fetchRemoteMediaMock.mockImplementationOnce(async (params) => { - const fetchFn = params.fetchImpl ?? 
fetch; - let currentUrl = params.url; - for (let i = 0; i < MAX_REDIRECT_HOPS; i += 1) { - const res = await fetchFn(currentUrl, { - redirect: "manual", - dispatcher: {}, - } as RequestInit); - if (REDIRECT_STATUS_CODES.includes(res.status)) { - const location = res.headers.get("location"); - if (!location) { - throw new Error("redirect missing location"); - } - currentUrl = new URL(location, currentUrl).toString(); - continue; - } - return readRemoteMediaResponse(res, params); - } - throw new Error("too many redirects"); + return await fetchRemoteMediaWithRedirects(params, { + dispatcher: {}, + } as RequestInit); }); const media = await downloadAttachmentsWithFetch( diff --git a/extensions/msteams/src/attachments/shared.test.ts b/extensions/msteams/src/attachments/shared.test.ts index 186a70f71aa..3e29e65aac4 100644 --- a/extensions/msteams/src/attachments/shared.test.ts +++ b/extensions/msteams/src/attachments/shared.test.ts @@ -31,6 +31,23 @@ function mockFetchWithRedirect(redirectMap: Record, finalBody = }); } +async function expectSafeFetchStatus(params: { + fetchMock: ReturnType; + url: string; + allowHosts: string[]; + expectedStatus: number; + resolveFn?: typeof publicResolve; +}) { + const res = await safeFetch({ + url: params.url, + allowHosts: params.allowHosts, + fetchFn: params.fetchMock as unknown as typeof fetch, + resolveFn: params.resolveFn ?? 
publicResolve, + }); + expect(res.status).toBe(params.expectedStatus); + return res; +} + describe("msteams attachment allowlists", () => { it("normalizes wildcard host lists", () => { expect(resolveAllowedHosts(["*", "graph.microsoft.com"])).toEqual(["*"]); @@ -121,13 +138,12 @@ describe("safeFetch", () => { const fetchMock = vi.fn(async (_url: string, _init?: RequestInit) => { return new Response("ok", { status: 200 }); }); - const res = await safeFetch({ + await expectSafeFetchStatus({ + fetchMock, url: "https://teams.sharepoint.com/file.pdf", allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, + expectedStatus: 200, }); - expect(res.status).toBe(200); expect(fetchMock).toHaveBeenCalledOnce(); // Should have used redirect: "manual" expect(fetchMock.mock.calls[0][1]).toHaveProperty("redirect", "manual"); @@ -137,13 +153,12 @@ describe("safeFetch", () => { const fetchMock = mockFetchWithRedirect({ "https://teams.sharepoint.com/file.pdf": "https://cdn.sharepoint.com/storage/file.pdf", }); - const res = await safeFetch({ + await expectSafeFetchStatus({ + fetchMock, url: "https://teams.sharepoint.com/file.pdf", allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, + expectedStatus: 200, }); - expect(res.status).toBe(200); expect(fetchMock).toHaveBeenCalledTimes(2); }); diff --git a/extensions/msteams/src/channel.directory.test.ts b/extensions/msteams/src/channel.directory.test.ts index 0746f78aabb..be95e6103ea 100644 --- a/extensions/msteams/src/channel.directory.test.ts +++ b/extensions/msteams/src/channel.directory.test.ts @@ -1,15 +1,10 @@ import type { OpenClawConfig, RuntimeEnv } from "openclaw/plugin-sdk/msteams"; import { describe, expect, it } from "vitest"; +import { createDirectoryTestRuntime, expectDirectorySurface } from "../../test-utils/directory.js"; import { msteamsPlugin } from "./channel.js"; describe("msteams directory", () => { - const 
runtimeEnv: RuntimeEnv = { - log: () => {}, - error: () => {}, - exit: (code: number): never => { - throw new Error(`exit ${code}`); - }, - }; + const runtimeEnv = createDirectoryTestRuntime() as RuntimeEnv; it("lists peers and groups from config", async () => { const cfg = { @@ -29,12 +24,10 @@ describe("msteams directory", () => { }, } as unknown as OpenClawConfig; - expect(msteamsPlugin.directory).toBeTruthy(); - expect(msteamsPlugin.directory?.listPeers).toBeTruthy(); - expect(msteamsPlugin.directory?.listGroups).toBeTruthy(); + const directory = expectDirectorySurface(msteamsPlugin.directory); await expect( - msteamsPlugin.directory!.listPeers!({ + directory.listPeers({ cfg, query: undefined, limit: undefined, @@ -50,7 +43,7 @@ describe("msteams directory", () => { ); await expect( - msteamsPlugin.directory!.listGroups!({ + directory.listGroups({ cfg, query: undefined, limit: undefined, diff --git a/extensions/msteams/src/messenger.test.ts b/extensions/msteams/src/messenger.test.ts index aa0a92b5159..cc4cf2fb6f0 100644 --- a/extensions/msteams/src/messenger.test.ts +++ b/extensions/msteams/src/messenger.test.ts @@ -139,6 +139,22 @@ describe("msteams messenger", () => { }); describe("sendMSTeamsMessages", () => { + function createRevokedThreadContext(params?: { failAfterAttempt?: number; sent?: string[] }) { + let attempt = 0; + return { + sendActivity: async (activity: unknown) => { + const { text } = activity as { text?: string }; + const content = text ?? 
""; + attempt += 1; + if (params?.failAfterAttempt && attempt < params.failAfterAttempt) { + params.sent?.push(content); + return { id: `id:${content}` }; + } + throw new TypeError(REVOCATION_ERROR); + }, + }; + } + const baseRef: StoredConversationReference = { activityId: "activity123", user: { id: "user123", name: "User" }, @@ -305,13 +321,7 @@ describe("msteams messenger", () => { it("falls back to proactive messaging when thread context is revoked", async () => { const proactiveSent: string[] = []; - - const ctx = { - sendActivity: async () => { - throw new TypeError(REVOCATION_ERROR); - }, - }; - + const ctx = createRevokedThreadContext(); const adapter = createFallbackAdapter(proactiveSent); const ids = await sendMSTeamsMessages({ @@ -331,21 +341,7 @@ describe("msteams messenger", () => { it("falls back only for remaining thread messages after context revocation", async () => { const threadSent: string[] = []; const proactiveSent: string[] = []; - let attempt = 0; - - const ctx = { - sendActivity: async (activity: unknown) => { - const { text } = activity as { text?: string }; - const content = text ?? 
""; - attempt += 1; - if (attempt === 1) { - threadSent.push(content); - return { id: `id:${content}` }; - } - throw new TypeError(REVOCATION_ERROR); - }, - }; - + const ctx = createRevokedThreadContext({ failAfterAttempt: 2, sent: threadSent }); const adapter = createFallbackAdapter(proactiveSent); const ids = await sendMSTeamsMessages({ diff --git a/extensions/msteams/src/monitor-handler.file-consent.test.ts b/extensions/msteams/src/monitor-handler.file-consent.test.ts index 88a6a67a838..5e72f7a9dd1 100644 --- a/extensions/msteams/src/monitor-handler.file-consent.test.ts +++ b/extensions/msteams/src/monitor-handler.file-consent.test.ts @@ -123,6 +123,26 @@ function createInvokeContext(params: { }; } +function createConsentInvokeHarness(params: { + pendingConversationId?: string; + invokeConversationId: string; + action: "accept" | "decline"; +}) { + const uploadId = storePendingUpload({ + buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), + filename: "secret.txt", + contentType: "text/plain", + conversationId: params.pendingConversationId ?? 
"19:victim@thread.v2", + }); + const handler = registerMSTeamsHandlers(createActivityHandler(), createDeps()); + const { context, sendActivity } = createInvokeContext({ + conversationId: params.invokeConversationId, + uploadId, + action: params.action, + }); + return { uploadId, handler, context, sendActivity }; +} + describe("msteams file consent invoke authz", () => { beforeEach(() => { setMSTeamsRuntime(runtimeStub); @@ -132,17 +152,8 @@ describe("msteams file consent invoke authz", () => { }); it("uploads when invoke conversation matches pending upload conversation", async () => { - const uploadId = storePendingUpload({ - buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), - filename: "secret.txt", - contentType: "text/plain", - conversationId: "19:victim@thread.v2", - }); - const deps = createDeps(); - const handler = registerMSTeamsHandlers(createActivityHandler(), deps); - const { context, sendActivity } = createInvokeContext({ - conversationId: "19:victim@thread.v2;messageid=abc123", - uploadId, + const { uploadId, handler, context, sendActivity } = createConsentInvokeHarness({ + invokeConversationId: "19:victim@thread.v2;messageid=abc123", action: "accept", }); @@ -166,17 +177,8 @@ describe("msteams file consent invoke authz", () => { }); it("rejects cross-conversation accept invoke and keeps pending upload", async () => { - const uploadId = storePendingUpload({ - buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), - filename: "secret.txt", - contentType: "text/plain", - conversationId: "19:victim@thread.v2", - }); - const deps = createDeps(); - const handler = registerMSTeamsHandlers(createActivityHandler(), deps); - const { context, sendActivity } = createInvokeContext({ - conversationId: "19:attacker@thread.v2", - uploadId, + const { uploadId, handler, context, sendActivity } = createConsentInvokeHarness({ + invokeConversationId: "19:attacker@thread.v2", action: "accept", }); @@ -198,17 +200,8 @@ describe("msteams file consent invoke authz", () => { }); 
it("ignores cross-conversation decline invoke and keeps pending upload", async () => { - const uploadId = storePendingUpload({ - buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), - filename: "secret.txt", - contentType: "text/plain", - conversationId: "19:victim@thread.v2", - }); - const deps = createDeps(); - const handler = registerMSTeamsHandlers(createActivityHandler(), deps); - const { context, sendActivity } = createInvokeContext({ - conversationId: "19:attacker@thread.v2", - uploadId, + const { uploadId, handler, context, sendActivity } = createConsentInvokeHarness({ + invokeConversationId: "19:attacker@thread.v2", action: "decline", }); diff --git a/extensions/msteams/src/monitor-handler/message-handler.ts b/extensions/msteams/src/monitor-handler/message-handler.ts index fff243fb70c..60a88c56664 100644 --- a/extensions/msteams/src/monitor-handler/message-handler.ts +++ b/extensions/msteams/src/monitor-handler/message-handler.ts @@ -9,7 +9,7 @@ import { evaluateSenderGroupAccessForPolicy, resolveSenderScopedGroupPolicy, recordPendingHistoryEntryIfEnabled, - resolveControlCommandGate, + resolveDualTextControlCommandGate, resolveDefaultGroupPolicy, isDangerousNameMatchingEnabled, readStoreAllowFromForDmPolicy, @@ -297,18 +297,15 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { senderName, allowNameMatching: isDangerousNameMatchingEnabled(msteamsCfg), }); - const hasControlCommandInMessage = core.channel.text.hasControlCommand(text, cfg); - const commandGate = resolveControlCommandGate({ + const { commandAuthorized, shouldBlock } = resolveDualTextControlCommandGate({ useAccessGroups, - authorizers: [ - { configured: commandDmAllowFrom.length > 0, allowed: ownerAllowedForCommands }, - { configured: effectiveGroupAllowFrom.length > 0, allowed: groupAllowedForCommands }, - ], - allowTextCommands: true, - hasControlCommand: hasControlCommandInMessage, + primaryConfigured: commandDmAllowFrom.length > 0, + primaryAllowed: 
ownerAllowedForCommands, + secondaryConfigured: effectiveGroupAllowFrom.length > 0, + secondaryAllowed: groupAllowedForCommands, + hasControlCommand: core.channel.text.hasControlCommand(text, cfg), }); - const commandAuthorized = commandGate.commandAuthorized; - if (commandGate.shouldBlock) { + if (shouldBlock) { logInboundDrop({ log: logVerboseMessage, channel: "msteams", diff --git a/extensions/msteams/src/policy.test.ts b/extensions/msteams/src/policy.test.ts index 091e22d1fd8..ac324f3d785 100644 --- a/extensions/msteams/src/policy.test.ts +++ b/extensions/msteams/src/policy.test.ts @@ -6,6 +6,27 @@ import { resolveMSTeamsRouteConfig, } from "./policy.js"; +function resolveNamedTeamRouteConfig(allowNameMatching = false) { + const cfg: MSTeamsConfig = { + teams: { + "My Team": { + requireMention: true, + channels: { + "General Chat": { requireMention: false }, + }, + }, + }, + }; + + return resolveMSTeamsRouteConfig({ + cfg, + teamName: "My Team", + channelName: "General Chat", + conversationId: "ignored", + allowNameMatching, + }); +} + describe("msteams policy", () => { describe("resolveMSTeamsRouteConfig", () => { it("returns team and channel config when present", () => { @@ -51,23 +72,7 @@ describe("msteams policy", () => { }); it("blocks team and channel name matches by default", () => { - const cfg: MSTeamsConfig = { - teams: { - "My Team": { - requireMention: true, - channels: { - "General Chat": { requireMention: false }, - }, - }, - }, - }; - - const res = resolveMSTeamsRouteConfig({ - cfg, - teamName: "My Team", - channelName: "General Chat", - conversationId: "ignored", - }); + const res = resolveNamedTeamRouteConfig(); expect(res.teamConfig).toBeUndefined(); expect(res.channelConfig).toBeUndefined(); @@ -75,24 +80,7 @@ describe("msteams policy", () => { }); it("matches team and channel by name when dangerous name matching is enabled", () => { - const cfg: MSTeamsConfig = { - teams: { - "My Team": { - requireMention: true, - channels: { - "General 
Chat": { requireMention: false }, - }, - }, - }, - }; - - const res = resolveMSTeamsRouteConfig({ - cfg, - teamName: "My Team", - channelName: "General Chat", - conversationId: "ignored", - allowNameMatching: true, - }); + const res = resolveNamedTeamRouteConfig(true); expect(res.teamConfig?.requireMention).toBe(true); expect(res.channelConfig?.requireMention).toBe(false); diff --git a/extensions/nextcloud-talk/src/channel.startup.test.ts b/extensions/nextcloud-talk/src/channel.startup.test.ts index 79b3cd77cd5..5fd0607e753 100644 --- a/extensions/nextcloud-talk/src/channel.startup.test.ts +++ b/extensions/nextcloud-talk/src/channel.startup.test.ts @@ -1,5 +1,9 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + expectStopPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedNextcloudTalkAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -40,28 +44,20 @@ describe("nextcloudTalkPlugin gateway.startAccount", () => { it("keeps startAccount pending until abort, then stops the monitor", async () => { const stop = vi.fn(); hoisted.monitorNextcloudTalkProvider.mockResolvedValue({ stop }); - const abort = new AbortController(); - - const task = nextcloudTalkPlugin.gateway!.startAccount!( - createStartAccountContext({ - account: buildAccount(), - abortSignal: abort.signal, - }), - ); - let settled = false; - void task.then(() => { - settled = true; + const { abort, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: nextcloudTalkPlugin.gateway!.startAccount!, + account: buildAccount(), }); - await vi.waitFor(() => { - expect(hoisted.monitorNextcloudTalkProvider).toHaveBeenCalledOnce(); + await expectStopPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.monitorNextcloudTalkProvider).toHaveBeenCalledOnce(); + }), + isSettled, 
+ abort, + task, + stop, }); - expect(settled).toBe(false); - expect(stop).not.toHaveBeenCalled(); - - abort.abort(); - await task; - - expect(stop).toHaveBeenCalledOnce(); }); it("stops immediately when startAccount receives an already-aborted signal", async () => { diff --git a/extensions/nextcloud-talk/src/channel.ts b/extensions/nextcloud-talk/src/channel.ts index 8a908b7e0ac..473299b74e0 100644 --- a/extensions/nextcloud-talk/src/channel.ts +++ b/extensions/nextcloud-talk/src/channel.ts @@ -5,7 +5,6 @@ import { createAccountStatusSink, formatAllowFromLowercase, mapAllowFromEntries, - runPassiveAccountLifecycle, } from "openclaw/plugin-sdk/compat"; import { applyAccountNameToChannelSection, @@ -21,6 +20,7 @@ import { type OpenClawConfig, type ChannelSetupInput, } from "openclaw/plugin-sdk/nextcloud-talk"; +import { runStoppablePassiveMonitor } from "../../shared/passive-monitor.js"; import { listNextcloudTalkAccountIds, resolveDefaultNextcloudTalkAccountId, @@ -344,7 +344,7 @@ export const nextcloudTalkPlugin: ChannelPlugin = setStatus: ctx.setStatus, }); - await runPassiveAccountLifecycle({ + await runStoppablePassiveMonitor({ abortSignal: ctx.abortSignal, start: async () => await monitorNextcloudTalkProvider({ @@ -354,9 +354,6 @@ export const nextcloudTalkPlugin: ChannelPlugin = abortSignal: ctx.abortSignal, statusSink, }), - stop: async (monitor) => { - monitor.stop(); - }, }); }, logoutAccount: async ({ accountId, cfg }) => { diff --git a/extensions/nextcloud-talk/src/config-schema.ts b/extensions/nextcloud-talk/src/config-schema.ts index 5ab3e632d22..85cb14ff213 100644 --- a/extensions/nextcloud-talk/src/config-schema.ts +++ b/extensions/nextcloud-talk/src/config-schema.ts @@ -9,6 +9,7 @@ import { requireOpenAllowFrom, } from "openclaw/plugin-sdk/nextcloud-talk"; import { z } from "zod"; +import { requireChannelOpenAllowFrom } from "../../shared/config-schema-helpers.js"; import { buildSecretInputSchema } from "./secret-input.js"; export const 
NextcloudTalkRoomSchema = z @@ -48,13 +49,12 @@ export const NextcloudTalkAccountSchemaBase = z export const NextcloudTalkAccountSchema = NextcloudTalkAccountSchemaBase.superRefine( (value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "nextcloud-talk", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.nextcloud-talk.dmPolicy="open" requires channels.nextcloud-talk.allowFrom to include "*"', + requireOpenAllowFrom, }); }, ); @@ -63,12 +63,11 @@ export const NextcloudTalkConfigSchema = NextcloudTalkAccountSchemaBase.extend({ accounts: z.record(z.string(), NextcloudTalkAccountSchema.optional()).optional(), defaultAccount: z.string().optional(), }).superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "nextcloud-talk", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.nextcloud-talk.dmPolicy="open" requires channels.nextcloud-talk.allowFrom to include "*"', + requireOpenAllowFrom, }); }); diff --git a/extensions/nextcloud-talk/src/monitor.ts b/extensions/nextcloud-talk/src/monitor.ts index f940195a28b..93c66ade4b5 100644 --- a/extensions/nextcloud-talk/src/monitor.ts +++ b/extensions/nextcloud-talk/src/monitor.ts @@ -1,12 +1,12 @@ import { createServer, type IncomingMessage, type Server, type ServerResponse } from "node:http"; import os from "node:os"; import { - createLoggerBackedRuntime, type RuntimeEnv, isRequestBodyLimitError, readRequestBodyWithLimit, requestBodyErrorToText, } from "openclaw/plugin-sdk/nextcloud-talk"; +import { resolveLoggerBackedRuntime } from "../../shared/runtime.js"; import { resolveNextcloudTalkAccount } from "./accounts.js"; import { handleNextcloudTalkInbound } from "./inbound.js"; import { createNextcloudTalkReplayGuard } from "./replay-guard.js"; @@ -318,12 +318,10 @@ export async function monitorNextcloudTalkProvider( cfg, accountId: opts.accountId, }); - 
const runtime: RuntimeEnv = - opts.runtime ?? - createLoggerBackedRuntime({ - logger: core.logging.getChildLogger(), - exitError: () => new Error("Runtime exit not available"), - }); + const runtime: RuntimeEnv = resolveLoggerBackedRuntime( + opts.runtime, + core.logging.getChildLogger(), + ); if (!account.secret) { throw new Error(`Nextcloud Talk bot secret not configured for account "${account.accountId}"`); diff --git a/extensions/nextcloud-talk/src/send.test.ts b/extensions/nextcloud-talk/src/send.test.ts index 88133f9cbed..3ee178b815d 100644 --- a/extensions/nextcloud-talk/src/send.test.ts +++ b/extensions/nextcloud-talk/src/send.test.ts @@ -1,4 +1,9 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + createSendCfgThreadingRuntime, + expectProvidedCfgSkipsRuntimeLoad, + expectRuntimeCfgFallback, +} from "../../test-utils/send-config.js"; const hoisted = vi.hoisted(() => ({ loadConfig: vi.fn(), @@ -17,20 +22,7 @@ const hoisted = vi.hoisted(() => ({ })); vi.mock("./runtime.js", () => ({ - getNextcloudTalkRuntime: () => ({ - config: { - loadConfig: hoisted.loadConfig, - }, - channel: { - text: { - resolveMarkdownTableMode: hoisted.resolveMarkdownTableMode, - convertMarkdownTables: hoisted.convertMarkdownTables, - }, - activity: { - record: hoisted.record, - }, - }, - }), + getNextcloudTalkRuntime: () => createSendCfgThreadingRuntime(hoisted), })); vi.mock("./accounts.js", () => ({ @@ -72,8 +64,9 @@ describe("nextcloud-talk send cfg threading", () => { accountId: "work", }); - expect(hoisted.loadConfig).not.toHaveBeenCalled(); - expect(hoisted.resolveNextcloudTalkAccount).toHaveBeenCalledWith({ + expectProvidedCfgSkipsRuntimeLoad({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveNextcloudTalkAccount, cfg, accountId: "work", }); @@ -95,8 +88,9 @@ describe("nextcloud-talk send cfg threading", () => { }); expect(result).toEqual({ ok: true }); - expect(hoisted.loadConfig).toHaveBeenCalledTimes(1); - 
expect(hoisted.resolveNextcloudTalkAccount).toHaveBeenCalledWith({ + expectRuntimeCfgFallback({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveNextcloudTalkAccount, cfg: runtimeCfg, accountId: "default", }); diff --git a/extensions/nostr/src/channel.ts b/extensions/nostr/src/channel.ts index 20de320a3d1..937c698bd47 100644 --- a/extensions/nostr/src/channel.ts +++ b/extensions/nostr/src/channel.ts @@ -7,6 +7,10 @@ import { mapAllowFromEntries, type ChannelPlugin, } from "openclaw/plugin-sdk/nostr"; +import { + buildPassiveChannelStatusSummary, + buildTrafficStatusSummary, +} from "../../shared/channel-status-summary.js"; import type { NostrProfile } from "./config-schema.js"; import { NostrConfigSchema } from "./config-schema.js"; import type { MetricEvent, MetricsSnapshot } from "./metrics.js"; @@ -160,14 +164,10 @@ export const nostrPlugin: ChannelPlugin = { status: { defaultRuntime: createDefaultChannelRuntimeState(DEFAULT_ACCOUNT_ID), collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("nostr", accounts), - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - publicKey: snapshot.publicKey ?? null, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveChannelStatusSummary(snapshot, { + publicKey: snapshot.publicKey ?? null, + }), buildAccountSnapshot: ({ account, runtime }) => ({ accountId: account.accountId, name: account.name, @@ -179,8 +179,7 @@ export const nostrPlugin: ChannelPlugin = { lastStartAt: runtime?.lastStartAt ?? null, lastStopAt: runtime?.lastStopAt ?? null, lastError: runtime?.lastError ?? null, - lastInboundAt: runtime?.lastInboundAt ?? null, - lastOutboundAt: runtime?.lastOutboundAt ?? 
null, + ...buildTrafficStatusSummary(runtime), }), }, diff --git a/extensions/nostr/src/nostr-profile-http.test.ts b/extensions/nostr/src/nostr-profile-http.test.ts index 8fb17c443f4..3caa739c6c1 100644 --- a/extensions/nostr/src/nostr-profile-http.test.ts +++ b/extensions/nostr/src/nostr-profile-http.test.ts @@ -115,6 +115,13 @@ function createMockContext(overrides?: Partial): NostrP }; } +function expectOkResponse(res: ReturnType) { + expect(res._getStatusCode()).toBe(200); + const data = JSON.parse(res._getData()); + expect(data.ok).toBe(true); + return data; +} + function mockSuccessfulProfileImport() { vi.mocked(importProfileFromRelays).mockResolvedValue({ ok: true, @@ -208,6 +215,22 @@ describe("nostr-profile-http", () => { }); describe("PUT /api/channels/nostr/:accountId/profile", () => { + function mockPublishSuccess() { + vi.mocked(publishNostrProfile).mockResolvedValue({ + eventId: "event123", + createdAt: 1234567890, + successes: ["wss://relay.damus.io"], + failures: [], + }); + } + + function expectBadRequestResponse(res: ReturnType) { + expect(res._getStatusCode()).toBe(400); + const data = JSON.parse(res._getData()); + expect(data.ok).toBe(false); + return data; + } + async function expectPrivatePictureRejected(pictureUrl: string) { const ctx = createMockContext(); const handler = createNostrProfileHttpHandler(ctx); @@ -219,9 +242,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(400); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(false); + const data = expectBadRequestResponse(res); expect(data.error).toContain("private"); } @@ -235,18 +256,11 @@ describe("nostr-profile-http", () => { }); const res = createMockResponse(); - vi.mocked(publishNostrProfile).mockResolvedValue({ - eventId: "event123", - createdAt: 1234567890, - successes: ["wss://relay.damus.io"], - failures: [], - }); + mockPublishSuccess(); await handler(req, res); - expect(res._getStatusCode()).toBe(200); - 
const data = JSON.parse(res._getData()); - expect(data.ok).toBe(true); + const data = expectOkResponse(res); expect(data.eventId).toBe("event123"); expect(data.successes).toContain("wss://relay.damus.io"); expect(data.persisted).toBe(true); @@ -332,9 +346,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(400); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(false); + const data = expectBadRequestResponse(res); // The schema validation catches non-https URLs before SSRF check expect(data.error).toBe("Validation failed"); expect(data.details).toBeDefined(); @@ -368,12 +380,7 @@ describe("nostr-profile-http", () => { const ctx = createMockContext(); const handler = createNostrProfileHttpHandler(ctx); - vi.mocked(publishNostrProfile).mockResolvedValue({ - eventId: "event123", - createdAt: 1234567890, - successes: ["wss://relay.damus.io"], - failures: [], - }); + mockPublishSuccess(); // Make 6 requests (limit is 5/min) for (let i = 0; i < 6; i++) { @@ -384,7 +391,7 @@ describe("nostr-profile-http", () => { await handler(req, res); if (i < 5) { - expect(res._getStatusCode()).toBe(200); + expectOkResponse(res); } else { expect(res._getStatusCode()).toBe(429); const data = JSON.parse(res._getData()); @@ -414,6 +421,12 @@ describe("nostr-profile-http", () => { }); describe("POST /api/channels/nostr/:accountId/profile/import", () => { + function expectImportSuccessResponse(res: ReturnType) { + const data = expectOkResponse(res); + expect(data.imported.name).toBe("imported"); + return data; + } + it("imports profile from relays", async () => { const ctx = createMockContext(); const handler = createNostrProfileHttpHandler(ctx); @@ -424,10 +437,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(200); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(true); - expect(data.imported.name).toBe("imported"); + const data = 
expectImportSuccessResponse(res); expect(data.saved).toBe(false); // autoMerge not requested }); @@ -490,8 +500,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(200); - const data = JSON.parse(res._getData()); + const data = expectImportSuccessResponse(res); expect(data.saved).toBe(true); expect(ctx.updateConfigProfile).toHaveBeenCalled(); }); diff --git a/extensions/open-prose/skills/prose/alts/arabian-nights.md b/extensions/open-prose/skills/prose/alts/arabian-nights.md index cc0d146664e..c637c883bb6 100644 --- a/extensions/open-prose/skills/prose/alts/arabian-nights.md +++ b/extensions/open-prose/skills/prose/alts/arabian-nights.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night | `prompt` | `command` | What is commanded of the djinn | | `model` | `spirit` | Which spirit answers | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Arabian Nights rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Nights -conjure "@alice/research" as research -wish topic: "What to investigate" - -djinn helper: - spirit: sonnet - -name findings = tale: helper - command: "Research {topic}" - -gift summary = tale "Summarize" - scroll: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Nights -bazaar: - security = tale "Check security" - perf = tale "Check performance" - style = tale "Check style" - -tale "Synthesize review" - scroll: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Nights -telling until **the code is bug-free** (max: 5): - tale "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Nights -venture: - tale "Risky operation" -should misfortune strike as err: - tale "Handle error" - scroll: err -and so it was: - tale "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Nights -crossroads **the severity level**: - path "Critical": - tale "Escalate immediately" - path "Minor": - tale "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `tale` +- `parallel` sample -> `bazaar` +- `loop` sample -> `telling` +- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was` +- `choice` sample -> `crossroads` / `path` ```prose # Nights diff --git a/extensions/open-prose/skills/prose/alts/homer.md b/extensions/open-prose/skills/prose/alts/homer.md index bc27905cf78..716f2052e34 100644 --- a/extensions/open-prose/skills/prose/alts/homer.md +++ b/extensions/open-prose/skills/prose/alts/homer.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il | `prompt` | `charge` | The quest given | | `model` | `muse` | Which muse inspires | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Homeric rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Homeric -invoke "@alice/research" as research -omen topic: "What to investigate" - -hero helper: - muse: sonnet - -decree findings = trial: helper - charge: "Research {topic}" - -glory summary = trial "Summarize" - tidings: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Homeric -host: - security = trial "Check security" - perf = trial "Check performance" - style = trial "Check style" - -trial "Synthesize review" - tidings: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Homeric -ordeal until **the code is bug-free** (max: 5): - trial "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Homeric -venture: - trial "Risky operation" -should ruin come as err: - trial "Handle error" - tidings: err -in the end: - trial "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Homeric -crossroads **the severity level**: - path "Critical": - trial "Escalate immediately" - path "Minor": - trial "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `trial` +- `parallel` sample -> `host` +- `loop` sample -> `ordeal` +- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end` +- `choice` sample -> `crossroads` / `path` ```prose # Homeric diff --git a/extensions/open-prose/skills/prose/alts/shared-appendix.md b/extensions/open-prose/skills/prose/alts/shared-appendix.md new file mode 100644 index 00000000000..32a4fcbcd17 --- /dev/null +++ b/extensions/open-prose/skills/prose/alts/shared-appendix.md @@ -0,0 +1,35 @@ +--- +role: reference +summary: Shared appendix for experimental OpenProse alternate registers. +status: draft +requires: prose.md +--- + +# OpenProse Alternate Register Appendix + +Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`. + +## Unchanged keywords + +These keywords already work or are too functional to replace sensibly: + +- `**...**` discretion markers +- `until`, `while` +- `map`, `filter`, `reduce`, `pmap` +- `max` +- `as` +- model names such as `sonnet`, `opus`, and `haiku` + +## Comparison pattern + +Use the translation map in each register file to rewrite the same functional sample programs: + +- simple program +- parallel execution +- loop with condition +- error handling +- choice block +- conditionals + +The goal is consistency, not one canonical wording. +Keep the functional version intact and rewrite only the register-specific aliases. 
diff --git a/extensions/open-prose/skills/prose/state/sqlite.md b/extensions/open-prose/skills/prose/state/sqlite.md index cfec757567c..352a8705cd5 100644 --- a/extensions/open-prose/skills/prose/state/sqlite.md +++ b/extensions/open-prose/skills/prose/state/sqlite.md @@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro ## Responsibility Separation -This section defines **who does what**. This is the contract between the VM and subagents. +The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation). -### VM Responsibilities +SQLite-specific differences: -The VM (the orchestrating agent running the .prose program) is responsible for: +- the VM creates `state.db` instead of an `openprose` schema +- subagent confirmation messages point at a local database path, for example `.prose/runs//state.db` +- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects -| Responsibility | Description | -| ------------------------- | -------------------------------------------------------------------------------------------------------- | -| **Database creation** | Create `state.db` and initialize core tables at run start | -| **Program registration** | Store the program source and metadata | -| **Execution tracking** | Update position, status, and timing as statements execute | -| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions | -| **Parallel coordination** | Track branch status, implement join strategies | -| **Loop management** | Track iteration counts, evaluate conditions | -| **Error aggregation** | Record failures, manage retry state | -| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed | -| **Completion detection** | Mark the run as complete when finished | +Example return values: -**Critical:** The VM must preserve enough context in its own 
conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory. - -### Subagent Responsibilities - -Subagents (sessions spawned by the VM) are responsible for: - -| Responsibility | Description | -| ----------------------- | ----------------------------------------------------------------- | -| **Writing own outputs** | Insert/update their binding in the `bindings` table | -| **Memory management** | For persistent agents: read and update their memory record | -| **Segment recording** | For persistent agents: append segment history | -| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB | -| **Atomic writes** | Use transactions when updating multiple related records | - -**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates. - -**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes. - -**What subagents return to the VM:** A confirmation message with the binding location—not the full content: - -**Root scope:** - -``` +```text Binding written: research Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL) -Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations. ``` -**Inside block invocation:** - -``` +```text Binding written: result Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43) Execution ID: 43 -Summary: Processed chunk into 3 sub-parts for recursive processing. ``` -The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values. 
- -### Shared Concerns - -| Concern | Who Handles | -| ---------------- | ------------------------------------------------------------------ | -| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) | -| Custom tables | Either (prefix with `x_` for extensions) | -| Indexing | Either (add indexes for frequently-queried columns) | -| Cleanup | VM (at run end, optionally vacuum) | +The VM still tracks locations, not full values. --- diff --git a/extensions/phone-control/index.test.ts b/extensions/phone-control/index.test.ts index 9259092b153..2c3462c82a9 100644 --- a/extensions/phone-control/index.test.ts +++ b/extensions/phone-control/index.test.ts @@ -7,6 +7,7 @@ import type { PluginCommandContext, } from "openclaw/plugin-sdk/phone-control"; import { describe, expect, it, vi } from "vitest"; +import { createTestPluginApi } from "../test-utils/plugin-api.js"; import registerPhoneControl from "./index.js"; function createApi(params: { @@ -15,7 +16,7 @@ function createApi(params: { writeConfig: (next: Record) => Promise; registerCommand: (command: OpenClawPluginCommandDefinition) => void; }): OpenClawPluginApi { - return { + return createTestPluginApi({ id: "phone-control", name: "phone-control", source: "test", @@ -30,22 +31,8 @@ function createApi(params: { writeConfigFile: (next: Record) => params.writeConfig(next), }, } as OpenClawPluginApi["runtime"], - logger: { info() {}, warn() {}, error() {} }, - registerTool() {}, - registerHook() {}, - registerHttpRoute() {}, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerContextEngine() {}, registerCommand: params.registerCommand, - resolvePath(input: string) { - return input; - }, - on() {}, - }; + }) as OpenClawPluginApi; } function createCommandContext(args: string): PluginCommandContext { diff --git a/extensions/sglang/index.ts b/extensions/sglang/index.ts index 4c9102caebc..64143026592 100644 --- 
a/extensions/sglang/index.ts +++ b/extensions/sglang/index.ts @@ -1,13 +1,11 @@ import { buildSglangProvider, configureOpenAICompatibleSelfHostedProviderNonInteractive, + discoverOpenAICompatibleSelfHostedProvider, emptyPluginConfigSchema, - promptAndConfigureOpenAICompatibleSelfHostedProvider, + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth, type OpenClawPluginApi, - type ProviderAuthContext, type ProviderAuthMethodNonInteractiveContext, - type ProviderAuthResult, - type ProviderDiscoveryContext, } from "openclaw/plugin-sdk/core"; const PROVIDER_ID = "sglang"; @@ -30,8 +28,8 @@ const sglangPlugin = { label: "SGLang", hint: "Fast self-hosted OpenAI-compatible server", kind: "custom", - run: async (ctx: ProviderAuthContext): Promise => { - const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider({ + run: async (ctx) => + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth({ cfg: ctx.config, prompter: ctx.prompter, providerId: PROVIDER_ID, @@ -39,18 +37,7 @@ const sglangPlugin = { defaultBaseUrl: DEFAULT_BASE_URL, defaultApiKeyEnvVar: "SGLANG_API_KEY", modelPlaceholder: "Qwen/Qwen3-8B", - }); - return { - profiles: [ - { - profileId: result.profileId, - credential: result.credential, - }, - ], - configPatch: result.config, - defaultModel: result.modelRef, - }; - }, + }), runNonInteractive: async (ctx: ProviderAuthMethodNonInteractiveContext) => configureOpenAICompatibleSelfHostedProviderNonInteractive({ ctx, @@ -64,21 +51,12 @@ const sglangPlugin = { ], discovery: { order: "late", - run: async (ctx: ProviderDiscoveryContext) => { - if (ctx.config.models?.providers?.sglang) { - return null; - } - const { apiKey, discoveryApiKey } = ctx.resolveProviderApiKey(PROVIDER_ID); - if (!apiKey) { - return null; - } - return { - provider: { - ...(await buildSglangProvider({ apiKey: discoveryApiKey })), - apiKey, - }, - }; - }, + run: async (ctx) => + discoverOpenAICompatibleSelfHostedProvider({ + ctx, + providerId: PROVIDER_ID, + buildProvider: 
buildSglangProvider, + }), }, wizard: { onboarding: { diff --git a/extensions/shared/channel-status-summary.ts b/extensions/shared/channel-status-summary.ts new file mode 100644 index 00000000000..5ebdb067596 --- /dev/null +++ b/extensions/shared/channel-status-summary.ts @@ -0,0 +1,48 @@ +type PassiveChannelStatusSnapshot = { + configured?: boolean; + running?: boolean; + lastStartAt?: number | null; + lastStopAt?: number | null; + lastError?: string | null; + probe?: unknown; + lastProbeAt?: number | null; +}; + +type TrafficStatusSnapshot = { + lastInboundAt?: number | null; + lastOutboundAt?: number | null; +}; + +export function buildPassiveChannelStatusSummary( + snapshot: PassiveChannelStatusSnapshot, + extra?: TExtra, +) { + return { + configured: snapshot.configured ?? false, + ...(extra ?? ({} as TExtra)), + running: snapshot.running ?? false, + lastStartAt: snapshot.lastStartAt ?? null, + lastStopAt: snapshot.lastStopAt ?? null, + lastError: snapshot.lastError ?? null, + }; +} + +export function buildPassiveProbedChannelStatusSummary( + snapshot: PassiveChannelStatusSnapshot, + extra?: TExtra, +) { + return { + ...buildPassiveChannelStatusSummary(snapshot, extra), + probe: snapshot.probe, + lastProbeAt: snapshot.lastProbeAt ?? null, + }; +} + +export function buildTrafficStatusSummary( + snapshot?: TSnapshot | null, +) { + return { + lastInboundAt: snapshot?.lastInboundAt ?? null, + lastOutboundAt: snapshot?.lastOutboundAt ?? 
null, + }; +} diff --git a/extensions/shared/config-schema-helpers.ts b/extensions/shared/config-schema-helpers.ts new file mode 100644 index 00000000000..495793b54b6 --- /dev/null +++ b/extensions/shared/config-schema-helpers.ts @@ -0,0 +1,25 @@ +import type { z } from "zod"; + +type RequireOpenAllowFromFn = (params: { + policy?: string; + allowFrom?: Array; + ctx: z.RefinementCtx; + path: Array; + message: string; +}) => void; + +export function requireChannelOpenAllowFrom(params: { + channel: string; + policy?: string; + allowFrom?: Array; + ctx: z.RefinementCtx; + requireOpenAllowFrom: RequireOpenAllowFromFn; +}) { + params.requireOpenAllowFrom({ + policy: params.policy, + allowFrom: params.allowFrom, + ctx: params.ctx, + path: ["allowFrom"], + message: `channels.${params.channel}.dmPolicy="open" requires channels.${params.channel}.allowFrom to include "*"`, + }); +} diff --git a/extensions/shared/deferred.ts b/extensions/shared/deferred.ts new file mode 100644 index 00000000000..1a874100916 --- /dev/null +++ b/extensions/shared/deferred.ts @@ -0,0 +1,9 @@ +export function createDeferred() { + let resolve!: (value: T | PromiseLike) => void; + let reject!: (reason?: unknown) => void; + const promise = new Promise((res, rej) => { + resolve = res; + reject = rej; + }); + return { promise, resolve, reject }; +} diff --git a/extensions/shared/passive-monitor.ts b/extensions/shared/passive-monitor.ts new file mode 100644 index 00000000000..e5ffb3f03ff --- /dev/null +++ b/extensions/shared/passive-monitor.ts @@ -0,0 +1,18 @@ +import { runPassiveAccountLifecycle } from "openclaw/plugin-sdk"; + +type StoppableMonitor = { + stop: () => void; +}; + +export async function runStoppablePassiveMonitor(params: { + abortSignal: AbortSignal; + start: () => Promise; +}): Promise { + await runPassiveAccountLifecycle({ + abortSignal: params.abortSignal, + start: params.start, + stop: async (monitor) => { + monitor.stop(); + }, + }); +} diff --git a/extensions/shared/runtime.ts 
b/extensions/shared/runtime.ts new file mode 100644 index 00000000000..a1950ba6be0 --- /dev/null +++ b/extensions/shared/runtime.ts @@ -0,0 +1,14 @@ +import { createLoggerBackedRuntime } from "openclaw/plugin-sdk"; + +export function resolveLoggerBackedRuntime( + runtime: TRuntime | undefined, + logger: Parameters[0]["logger"], +): TRuntime { + return ( + runtime ?? + (createLoggerBackedRuntime({ + logger, + exitError: () => new Error("Runtime exit not available"), + }) as TRuntime) + ); +} diff --git a/extensions/shared/status-issues.ts b/extensions/shared/status-issues.ts new file mode 100644 index 00000000000..1eb39e2b686 --- /dev/null +++ b/extensions/shared/status-issues.ts @@ -0,0 +1,18 @@ +export function readStatusIssueFields( + value: unknown, + fields: readonly TField[], +): Record | null { + if (!value || typeof value !== "object") { + return null; + } + const record = value as Record; + const result = {} as Record; + for (const field of fields) { + result[field] = record[field]; + } + return result; +} + +export function coerceStatusIssueAccountId(value: unknown): string | undefined { + return typeof value === "string" ? value : typeof value === "number" ? 
String(value) : undefined; +} diff --git a/extensions/slack/src/channel.test.ts b/extensions/slack/src/channel.test.ts index ad6860d6f8d..98fbddca77d 100644 --- a/extensions/slack/src/channel.test.ts +++ b/extensions/slack/src/channel.test.ts @@ -15,6 +15,18 @@ vi.mock("./runtime.js", () => ({ import { slackPlugin } from "./channel.js"; +async function getSlackConfiguredState(cfg: OpenClawConfig) { + const account = slackPlugin.config.resolveAccount(cfg, "default"); + return { + configured: slackPlugin.config.isConfigured?.(account, cfg), + snapshot: await slackPlugin.status?.buildAccountSnapshot?.({ + account, + cfg, + runtime: undefined, + }), + }; +} + describe("slackPlugin actions", () => { it("prefers session lookup for announce target routing", () => { expect(slackPlugin.meta.preferSessionLookupForAnnounceTarget).toBe(true); @@ -137,6 +149,46 @@ describe("slackPlugin outbound", () => { }); }); +describe("slackPlugin agentPrompt", () => { + it("tells agents interactive replies are disabled by default", () => { + const hints = slackPlugin.agentPrompt?.messageToolHints?.({ + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, + }, + }); + + expect(hints).toEqual([ + "- Slack interactive replies are disabled. 
If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts..capabilities`).", + ]); + }); + + it("shows Slack interactive reply directives when enabled", () => { + const hints = slackPlugin.agentPrompt?.messageToolHints?.({ + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + capabilities: { interactiveReplies: true }, + }, + }, + }, + }); + + expect(hints).toContain( + "- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.", + ); + expect(hints).toContain( + "- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.", + ); + }); +}); + describe("slackPlugin config", () => { it("treats HTTP mode accounts with bot token + signing secret as configured", async () => { const cfg: OpenClawConfig = { @@ -149,13 +201,7 @@ describe("slackPlugin config", () => { }, }; - const account = slackPlugin.config.resolveAccount(cfg, "default"); - const configured = slackPlugin.config.isConfigured?.(account, cfg); - const snapshot = await slackPlugin.status?.buildAccountSnapshot?.({ - account, - cfg, - runtime: undefined, - }); + const { configured, snapshot } = await getSlackConfiguredState(cfg); expect(configured).toBe(true); expect(snapshot?.configured).toBe(true); @@ -171,13 +217,7 @@ describe("slackPlugin config", () => { }, }; - const account = slackPlugin.config.resolveAccount(cfg, "default"); - const configured = slackPlugin.config.isConfigured?.(account, cfg); - const snapshot = await slackPlugin.status?.buildAccountSnapshot?.({ - account, - cfg, - runtime: undefined, - }); + const { configured, snapshot } = await getSlackConfiguredState(cfg); expect(configured).toBe(false); expect(snapshot?.configured).toBe(false); diff --git a/extensions/slack/src/channel.ts 
b/extensions/slack/src/channel.ts index 570ef20ffa1..17209b6e4d1 100644 --- a/extensions/slack/src/channel.ts +++ b/extensions/slack/src/channel.ts @@ -29,6 +29,7 @@ import { resolveDefaultSlackAccountId, resolveSlackAccount, resolveSlackReplyToMode, + isSlackInteractiveRepliesEnabled, resolveSlackGroupRequireMention, resolveSlackGroupToolPolicy, buildSlackThreadingToolContext, @@ -37,6 +38,7 @@ import { type ChannelPlugin, type ResolvedSlackAccount, } from "openclaw/plugin-sdk/slack"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { getSlackRuntime } from "./runtime.js"; const meta = getChatChannelMeta("slack"); @@ -146,6 +148,17 @@ export const slackPlugin: ChannelPlugin = { media: true, nativeCommands: true, }, + agentPrompt: { + messageToolHints: ({ cfg, accountId }) => + isSlackInteractiveRepliesEnabled({ cfg, accountId }) + ? [ + "- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.", + "- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.", + ] + : [ + "- Slack interactive replies are disabled. 
If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts..capabilities`).", + ], + }, streaming: { blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 }, }, @@ -232,6 +245,18 @@ export const slackPlugin: ChannelPlugin = { }, resolver: { resolveTargets: async ({ cfg, accountId, inputs, kind }) => { + const toResolvedTarget = < + T extends { input: string; resolved: boolean; id?: string; name?: string }, + >( + entry: T, + note?: string, + ) => ({ + input: entry.input, + resolved: entry.resolved, + id: entry.id, + name: entry.name, + note, + }); const account = resolveSlackAccount({ cfg, accountId }); const token = account.config.userToken?.trim() || account.botToken?.trim(); if (!token) { @@ -246,25 +271,15 @@ export const slackPlugin: ChannelPlugin = { token, entries: inputs, }); - return resolved.map((entry) => ({ - input: entry.input, - resolved: entry.resolved, - id: entry.id, - name: entry.name, - note: entry.archived ? "archived" : undefined, - })); + return resolved.map((entry) => + toResolvedTarget(entry, entry.archived ? "archived" : undefined), + ); } const resolved = await getSlackRuntime().channel.slack.resolveUserAllowlist({ token, entries: inputs, }); - return resolved.map((entry) => ({ - input: entry.input, - resolved: entry.resolved, - id: entry.id, - name: entry.name, - note: entry.note, - })); + return resolved.map((entry) => toResolvedTarget(entry, entry.note)); }, }, actions: { @@ -407,17 +422,11 @@ export const slackPlugin: ChannelPlugin = { lastStopAt: null, lastError: null, }, - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - botTokenSource: snapshot.botTokenSource ?? "none", - appTokenSource: snapshot.appTokenSource ?? "none", - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? 
null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + botTokenSource: snapshot.botTokenSource ?? "none", + appTokenSource: snapshot.appTokenSource ?? "none", + }), probeAccount: async ({ account, timeoutMs }) => { const token = account.botToken?.trim(); if (!token) { diff --git a/extensions/synology-chat/src/channel.integration.test.ts b/extensions/synology-chat/src/channel.integration.test.ts index b9cb5484621..e5d1e7f24c9 100644 --- a/extensions/synology-chat/src/channel.integration.test.ts +++ b/extensions/synology-chat/src/channel.integration.test.ts @@ -1,5 +1,9 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + dispatchReplyWithBufferedBlockDispatcher, + registerPluginHttpRouteMock, +} from "./channel.test-mocks.js"; import { makeFormBody, makeReq, makeRes } from "./test-http-utils.js"; type RegisteredRoute = { @@ -8,41 +12,6 @@ type RegisteredRoute = { handler: (req: IncomingMessage, res: ServerResponse) => Promise; }; -const registerPluginHttpRouteMock = vi.fn<(params: RegisteredRoute) => () => void>(() => vi.fn()); -const dispatchReplyWithBufferedBlockDispatcher = vi.fn().mockResolvedValue({ counts: {} }); - -vi.mock("openclaw/plugin-sdk/synology-chat", async (importOriginal) => { - const actual = await importOriginal(); - return { - ...actual, - DEFAULT_ACCOUNT_ID: "default", - setAccountEnabledInConfigSection: vi.fn((_opts: any) => ({})), - registerPluginHttpRoute: registerPluginHttpRouteMock, - buildChannelConfigSchema: vi.fn((schema: any) => ({ schema })), - createFixedWindowRateLimiter: vi.fn(() => ({ - isRateLimited: vi.fn(() => false), - size: vi.fn(() => 0), - clear: vi.fn(), - })), - }; -}); - -vi.mock("./runtime.js", () => ({ - getSynologyRuntime: vi.fn(() => ({ - config: { loadConfig: vi.fn().mockResolvedValue({}) }, - channel: { - 
reply: { - dispatchReplyWithBufferedBlockDispatcher, - }, - }, - })), -})); - -vi.mock("./client.js", () => ({ - sendMessage: vi.fn().mockResolvedValue(true), - sendFileUrl: vi.fn().mockResolvedValue(true), -})); - const { createSynologyChatPlugin } = await import("./channel.js"); describe("Synology channel wiring integration", () => { beforeEach(() => { diff --git a/extensions/synology-chat/src/channel.test-mocks.ts b/extensions/synology-chat/src/channel.test-mocks.ts new file mode 100644 index 00000000000..10ccca5f9d0 --- /dev/null +++ b/extensions/synology-chat/src/channel.test-mocks.ts @@ -0,0 +1,76 @@ +import type { IncomingMessage, ServerResponse } from "node:http"; +import type { Mock } from "vitest"; +import { vi } from "vitest"; + +export type RegisteredRoute = { + path: string; + accountId: string; + handler: (req: IncomingMessage, res: ServerResponse) => Promise; +}; + +export const registerPluginHttpRouteMock: Mock<(params: RegisteredRoute) => () => void> = vi.fn( + () => vi.fn(), +); + +export const dispatchReplyWithBufferedBlockDispatcher: Mock< + () => Promise<{ counts: Record }> +> = vi.fn().mockResolvedValue({ counts: {} }); + +async function readRequestBodyWithLimitForTest(req: IncomingMessage): Promise { + return await new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + req.on("data", (chunk) => { + chunks.push(Buffer.isBuffer(chunk) ? 
chunk : Buffer.from(chunk)); + }); + req.on("end", () => resolve(Buffer.concat(chunks).toString("utf8"))); + req.on("error", reject); + }); +} + +vi.mock("openclaw/plugin-sdk/synology-chat", () => ({ + DEFAULT_ACCOUNT_ID: "default", + setAccountEnabledInConfigSection: vi.fn((_opts: unknown) => ({})), + registerPluginHttpRoute: registerPluginHttpRouteMock, + buildChannelConfigSchema: vi.fn((schema: unknown) => ({ schema })), + readRequestBodyWithLimit: vi.fn(readRequestBodyWithLimitForTest), + isRequestBodyLimitError: vi.fn(() => false), + requestBodyErrorToText: vi.fn(() => "Request body too large"), + createFixedWindowRateLimiter: vi.fn(() => ({ + isRateLimited: vi.fn(() => false), + size: vi.fn(() => 0), + clear: vi.fn(), + })), +})); + +vi.mock("./client.js", () => ({ + sendMessage: vi.fn().mockResolvedValue(true), + sendFileUrl: vi.fn().mockResolvedValue(true), +})); + +vi.mock("./runtime.js", () => ({ + getSynologyRuntime: vi.fn(() => ({ + config: { loadConfig: vi.fn().mockResolvedValue({}) }, + channel: { + reply: { + dispatchReplyWithBufferedBlockDispatcher, + }, + }, + })), +})); + +export function makeSecurityAccount(overrides: Record = {}) { + return { + accountId: "default", + enabled: true, + token: "t", + incomingUrl: "https://nas/incoming", + nasHost: "h", + webhookPath: "/w", + dmPolicy: "allowlist" as const, + allowedUserIds: [], + rateLimitPerMinute: 30, + botName: "Bot", + allowInsecureSsl: false, + ...overrides, + }; +} diff --git a/extensions/synology-chat/src/channel.test.ts b/extensions/synology-chat/src/channel.test.ts index 4e3be192f39..bdce5f37d79 100644 --- a/extensions/synology-chat/src/channel.test.ts +++ b/extensions/synology-chat/src/channel.test.ts @@ -1,40 +1,10 @@ -import { describe, it, expect, vi, beforeEach } from "vitest"; - -// Mock external dependencies -vi.mock("openclaw/plugin-sdk/synology-chat", () => ({ - DEFAULT_ACCOUNT_ID: "default", - setAccountEnabledInConfigSection: vi.fn((_opts: any) => ({})), - 
registerPluginHttpRoute: vi.fn(() => vi.fn()), - buildChannelConfigSchema: vi.fn((schema: any) => ({ schema })), - createFixedWindowRateLimiter: vi.fn(() => ({ - isRateLimited: vi.fn(() => false), - size: vi.fn(() => 0), - clear: vi.fn(), - })), -})); - -vi.mock("./client.js", () => ({ - sendMessage: vi.fn().mockResolvedValue(true), - sendFileUrl: vi.fn().mockResolvedValue(true), -})); +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { makeSecurityAccount, registerPluginHttpRouteMock } from "./channel.test-mocks.js"; vi.mock("./webhook-handler.js", () => ({ createWebhookHandler: vi.fn(() => vi.fn()), })); -vi.mock("./runtime.js", () => ({ - getSynologyRuntime: vi.fn(() => ({ - config: { loadConfig: vi.fn().mockResolvedValue({}) }, - channel: { - reply: { - dispatchReplyWithBufferedBlockDispatcher: vi.fn().mockResolvedValue({ - counts: {}, - }), - }, - }, - })), -})); - vi.mock("zod", () => ({ z: { object: vi.fn(() => ({ @@ -44,7 +14,6 @@ vi.mock("zod", () => ({ })); const { createSynologyChatPlugin } = await import("./channel.js"); -const { registerPluginHttpRoute } = await import("openclaw/plugin-sdk/synology-chat"); describe("createSynologyChatPlugin", () => { it("returns a plugin object with all required sections", () => { @@ -133,95 +102,35 @@ describe("createSynologyChatPlugin", () => { describe("security.collectWarnings", () => { it("warns when token is missing", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount({ token: "" }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("token"))).toBe(true); }); it("warns when allowInsecureSsl is true", () => { const 
plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: true, - }; + const account = makeSecurityAccount({ allowInsecureSsl: true }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("SSL"))).toBe(true); }); it("warns when dmPolicy is open", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "open" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount({ dmPolicy: "open" }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("open"))).toBe(true); }); it("warns when dmPolicy is allowlist and allowedUserIds is empty", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount(); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("empty allowedUserIds"))).toBe(true); }); it("returns no warnings for fully configured account", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: ["user1"], - 
rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount({ allowedUserIds: ["user1"] }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings).toHaveLength(0); }); @@ -317,6 +226,23 @@ describe("createSynologyChatPlugin", () => { }); describe("gateway", () => { + function makeStartAccountCtx( + accountConfig: Record, + abortController = new AbortController(), + ) { + return { + abortController, + ctx: { + cfg: { + channels: { "synology-chat": accountConfig }, + }, + accountId: "default", + log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, + abortSignal: abortController.signal, + }, + }; + } + async function expectPendingStartAccountPromise( result: Promise, abortController: AbortController, @@ -333,15 +259,7 @@ describe("createSynologyChatPlugin", () => { async function expectPendingStartAccount(accountConfig: Record) { const plugin = createSynologyChatPlugin(); - const abortController = new AbortController(); - const ctx = { - cfg: { - channels: { "synology-chat": accountConfig }, - }, - accountId: "default", - log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, - abortSignal: abortController.signal, - }; + const { ctx, abortController } = makeStartAccountCtx(accountConfig); const result = plugin.gateway.startAccount(ctx); await expectPendingStartAccountPromise(result, abortController); } @@ -355,27 +273,16 @@ describe("createSynologyChatPlugin", () => { }); it("startAccount refuses allowlist accounts with empty allowedUserIds", async () => { - const registerMock = vi.mocked(registerPluginHttpRoute); + const registerMock = registerPluginHttpRouteMock; registerMock.mockClear(); - const abortController = new AbortController(); - const plugin = createSynologyChatPlugin(); - const ctx = { - cfg: { - channels: { - "synology-chat": { - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - dmPolicy: "allowlist", - allowedUserIds: [], - }, - }, - }, - accountId: 
"default", - log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, - abortSignal: abortController.signal, - }; + const { ctx, abortController } = makeStartAccountCtx({ + enabled: true, + token: "t", + incomingUrl: "https://nas/incoming", + dmPolicy: "allowlist", + allowedUserIds: [], + }); const result = plugin.gateway.startAccount(ctx); await expectPendingStartAccountPromise(result, abortController); @@ -386,7 +293,7 @@ describe("createSynologyChatPlugin", () => { it("deregisters stale route before re-registering same account/path", async () => { const unregisterFirst = vi.fn(); const unregisterSecond = vi.fn(); - const registerMock = vi.mocked(registerPluginHttpRoute); + const registerMock = registerPluginHttpRouteMock; registerMock.mockReturnValueOnce(unregisterFirst).mockReturnValueOnce(unregisterSecond); const plugin = createSynologyChatPlugin(); diff --git a/extensions/synology-chat/src/client.test.ts b/extensions/synology-chat/src/client.test.ts index 416412f0408..2ae24f42904 100644 --- a/extensions/synology-chat/src/client.test.ts +++ b/extensions/synology-chat/src/client.test.ts @@ -51,7 +51,7 @@ function mockFailureResponse(statusCode = 500) { mockResponse(statusCode, "error"); } -describe("sendMessage", () => { +function installFakeTimerHarness() { beforeEach(() => { vi.clearAllMocks(); vi.useFakeTimers(); @@ -62,6 +62,10 @@ describe("sendMessage", () => { afterEach(() => { vi.useRealTimers(); }); +} + +describe("sendMessage", () => { + installFakeTimerHarness(); it("returns true on successful send", async () => { mockSuccessResponse(); @@ -86,16 +90,7 @@ describe("sendMessage", () => { }); describe("sendFileUrl", () => { - beforeEach(() => { - vi.clearAllMocks(); - vi.useFakeTimers(); - fakeNowMs += 10_000; - vi.setSystemTime(fakeNowMs); - }); - - afterEach(() => { - vi.useRealTimers(); - }); + installFakeTimerHarness(); it("returns true on success", async () => { mockSuccessResponse(); diff --git a/extensions/synology-chat/src/client.ts 
b/extensions/synology-chat/src/client.ts index 95240e556f5..d66f1b720f4 100644 --- a/extensions/synology-chat/src/client.ts +++ b/extensions/synology-chat/src/client.ts @@ -27,6 +27,12 @@ type ChatUserCacheEntry = { cachedAt: number; }; +type ChatWebhookPayload = { + text?: string; + file_url?: string; + user_ids?: number[]; +}; + // Cache user lists per bot endpoint to avoid cross-account bleed. const chatUserCache = new Map(); const CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes @@ -47,16 +53,7 @@ export async function sendMessage( ): Promise { // Synology Chat API requires user_ids (numeric) to specify the recipient // The @mention is optional but user_ids is mandatory - const payloadObj: Record = { text }; - if (userId) { - // userId can be numeric ID or username - if numeric, add to user_ids - const numericId = typeof userId === "number" ? userId : parseInt(userId, 10); - if (!isNaN(numericId)) { - payloadObj.user_ids = [numericId]; - } - } - const payload = JSON.stringify(payloadObj); - const body = `payload=${encodeURIComponent(payload)}`; + const body = buildWebhookBody({ text }, userId); // Internal rate limit: min 500ms between sends const now = Date.now(); @@ -95,15 +92,7 @@ export async function sendFileUrl( userId?: string | number, allowInsecureSsl = true, ): Promise { - const payloadObj: Record = { file_url: fileUrl }; - if (userId) { - const numericId = typeof userId === "number" ? 
userId : parseInt(userId, 10); - if (!isNaN(numericId)) { - payloadObj.user_ids = [numericId]; - } - } - const payload = JSON.stringify(payloadObj); - const body = `payload=${encodeURIComponent(payload)}`; + const body = buildWebhookBody({ file_url: fileUrl }, userId); try { const ok = await doPost(incomingUrl, body, allowInsecureSsl); @@ -215,6 +204,22 @@ export async function resolveChatUserId( return undefined; } +function buildWebhookBody(payload: ChatWebhookPayload, userId?: string | number): string { + const numericId = parseNumericUserId(userId); + if (numericId !== undefined) { + payload.user_ids = [numericId]; + } + return `payload=${encodeURIComponent(JSON.stringify(payload))}`; +} + +function parseNumericUserId(userId?: string | number): number | undefined { + if (userId === undefined) { + return undefined; + } + const numericId = typeof userId === "number" ? userId : parseInt(userId, 10); + return Number.isNaN(numericId) ? undefined : numericId; +} + function doPost(url: string, body: string, allowInsecureSsl = true): Promise { return new Promise((resolve, reject) => { let parsedUrl: URL; diff --git a/extensions/synology-chat/src/types.ts b/extensions/synology-chat/src/types.ts index 7ba222531c6..842c2ee97bb 100644 --- a/extensions/synology-chat/src/types.ts +++ b/extensions/synology-chat/src/types.ts @@ -2,8 +2,7 @@ * Type definitions for the Synology Chat channel plugin. 
*/ -/** Raw channel config from openclaw.json channels.synology-chat */ -export interface SynologyChatChannelConfig { +type SynologyChatConfigFields = { enabled?: boolean; token?: string; incomingUrl?: string; @@ -14,22 +13,15 @@ export interface SynologyChatChannelConfig { rateLimitPerMinute?: number; botName?: string; allowInsecureSsl?: boolean; +}; + +/** Raw channel config from openclaw.json channels.synology-chat */ +export interface SynologyChatChannelConfig extends SynologyChatConfigFields { accounts?: Record; } /** Raw per-account config (overrides base config) */ -export interface SynologyChatAccountRaw { - enabled?: boolean; - token?: string; - incomingUrl?: string; - nasHost?: string; - webhookPath?: string; - dmPolicy?: "open" | "allowlist" | "disabled"; - allowedUserIds?: string | string[]; - rateLimitPerMinute?: number; - botName?: string; - allowInsecureSsl?: boolean; -} +export interface SynologyChatAccountRaw extends SynologyChatConfigFields {} /** Fully resolved account config with defaults applied */ export interface ResolvedSynologyChatAccount { diff --git a/extensions/synology-chat/src/webhook-handler.test.ts b/extensions/synology-chat/src/webhook-handler.test.ts index 37ee566e6a6..ae5bd061b85 100644 --- a/extensions/synology-chat/src/webhook-handler.test.ts +++ b/extensions/synology-chat/src/webhook-handler.test.ts @@ -2,6 +2,7 @@ import { EventEmitter } from "node:events"; import type { IncomingMessage, ServerResponse } from "node:http"; import { describe, it, expect, vi, beforeEach } from "vitest"; import type { ResolvedSynologyChatAccount } from "./types.js"; +import type { WebhookHandlerDeps } from "./webhook-handler.js"; import { clearSynologyWebhookRateLimiterStateForTest, createWebhookHandler, @@ -37,21 +38,7 @@ function makeReq( body: string, opts: { headers?: Record; url?: string } = {}, ): IncomingMessage { - const req = new EventEmitter() as IncomingMessage & { - destroyed: boolean; - }; - req.method = method; - req.headers = 
opts.headers ?? {}; - req.url = opts.url ?? "/webhook/synology"; - req.socket = { remoteAddress: "127.0.0.1" } as any; - req.destroyed = false; - req.destroy = ((_: Error | undefined) => { - if (req.destroyed) { - return req; - } - req.destroyed = true; - return req; - }) as IncomingMessage["destroy"]; + const req = makeBaseReq(method, opts); // Simulate body delivery process.nextTick(() => { @@ -65,11 +52,19 @@ function makeReq( return req; } function makeStalledReq(method: string): IncomingMessage { + return makeBaseReq(method); +} + +function makeBaseReq( + method: string, + opts: { headers?: Record; url?: string } = {}, +): IncomingMessage & { destroyed: boolean } { const req = new EventEmitter() as IncomingMessage & { destroyed: boolean; }; req.method = method; - req.headers = {}; + req.headers = opts.headers ?? {}; + req.url = opts.url ?? "/webhook/synology"; req.socket = { remoteAddress: "127.0.0.1" } as any; req.destroyed = false; req.destroy = ((_: Error | undefined) => { @@ -124,10 +119,12 @@ describe("createWebhookHandler", () => { async function expectForbiddenByPolicy(params: { account: Partial; bodyContains: string; + deliver?: WebhookHandlerDeps["deliver"]; }) { + const deliver = params.deliver ?? 
vi.fn(); const handler = createWebhookHandler({ account: makeAccount(params.account), - deliver: vi.fn(), + deliver, log, }); @@ -137,6 +134,7 @@ describe("createWebhookHandler", () => { expect(res._status).toBe(403); expect(res._body).toContain(params.bodyContains); + expect(deliver).not.toHaveBeenCalled(); } it("rejects non-POST methods with 405", async () => { @@ -302,22 +300,14 @@ describe("createWebhookHandler", () => { it("returns 403 when allowlist policy is set with empty allowedUserIds", async () => { const deliver = vi.fn(); - const handler = createWebhookHandler({ - account: makeAccount({ + await expectForbiddenByPolicy({ + account: { dmPolicy: "allowlist", allowedUserIds: [], - }), + }, + bodyContains: "Allowlist is empty", deliver, - log, }); - - const req = makeReq("POST", validBody); - const res = makeRes(); - await handler(req, res); - - expect(res._status).toBe(403); - expect(res._body).toContain("Allowlist is empty"); - expect(deliver).not.toHaveBeenCalled(); }); it("returns 403 when DMs are disabled", async () => { diff --git a/extensions/telegram/src/channel.test.ts b/extensions/telegram/src/channel.test.ts index f0736069015..a957a3e5b1c 100644 --- a/extensions/telegram/src/channel.test.ts +++ b/extensions/telegram/src/channel.test.ts @@ -91,6 +91,30 @@ function installGatewayRuntime(params?: { probeOk?: boolean; botUsername?: strin }; } +function configureOpsProxyNetwork(cfg: OpenClawConfig) { + cfg.channels!.telegram!.accounts!.ops = { + ...cfg.channels!.telegram!.accounts!.ops, + proxy: "http://127.0.0.1:8888", + network: { + autoSelectFamily: false, + dnsResultOrder: "ipv4first", + }, + }; +} + +function installSendMessageRuntime( + sendMessageTelegram: ReturnType, +): ReturnType { + setTelegramRuntime({ + channel: { + telegram: { + sendMessageTelegram, + }, + }, + } as unknown as PluginRuntime); + return sendMessageTelegram; +} + describe("telegramPlugin duplicate token guard", () => { it("marks secondary account as not configured when 
token is shared", async () => { const cfg = createCfg(); @@ -176,14 +200,7 @@ describe("telegramPlugin duplicate token guard", () => { }); const cfg = createCfg(); - cfg.channels!.telegram!.accounts!.ops = { - ...cfg.channels!.telegram!.accounts!.ops, - proxy: "http://127.0.0.1:8888", - network: { - autoSelectFamily: false, - dnsResultOrder: "ipv4first", - }, - }; + configureOpsProxyNetwork(cfg); const account = telegramPlugin.config.resolveAccount(cfg, "ops"); await telegramPlugin.status!.probeAccount!({ @@ -215,13 +232,9 @@ describe("telegramPlugin duplicate token guard", () => { }); const cfg = createCfg(); + configureOpsProxyNetwork(cfg); cfg.channels!.telegram!.accounts!.ops = { ...cfg.channels!.telegram!.accounts!.ops, - proxy: "http://127.0.0.1:8888", - network: { - autoSelectFamily: false, - dnsResultOrder: "ipv4first", - }, groups: { "-100123": { requireMention: false }, }, @@ -249,14 +262,9 @@ describe("telegramPlugin duplicate token guard", () => { }); it("forwards mediaLocalRoots to sendMessageTelegram for outbound media sends", async () => { - const sendMessageTelegram = vi.fn(async () => ({ messageId: "tg-1" })); - setTelegramRuntime({ - channel: { - telegram: { - sendMessageTelegram, - }, - }, - } as unknown as PluginRuntime); + const sendMessageTelegram = installSendMessageRuntime( + vi.fn(async () => ({ messageId: "tg-1" })), + ); const result = await telegramPlugin.outbound!.sendMedia!({ cfg: createCfg(), @@ -279,14 +287,9 @@ describe("telegramPlugin duplicate token guard", () => { }); it("preserves buttons for outbound text payload sends", async () => { - const sendMessageTelegram = vi.fn(async () => ({ messageId: "tg-2" })); - setTelegramRuntime({ - channel: { - telegram: { - sendMessageTelegram, - }, - }, - } as unknown as PluginRuntime); + const sendMessageTelegram = installSendMessageRuntime( + vi.fn(async () => ({ messageId: "tg-2" })), + ); const result = await telegramPlugin.outbound!.sendPayload!({ cfg: createCfg(), @@ -314,17 +317,12 @@ 
describe("telegramPlugin duplicate token guard", () => { }); it("sends outbound payload media lists and keeps buttons on the first message only", async () => { - const sendMessageTelegram = vi - .fn() - .mockResolvedValueOnce({ messageId: "tg-3", chatId: "12345" }) - .mockResolvedValueOnce({ messageId: "tg-4", chatId: "12345" }); - setTelegramRuntime({ - channel: { - telegram: { - sendMessageTelegram, - }, - }, - } as unknown as PluginRuntime); + const sendMessageTelegram = installSendMessageRuntime( + vi + .fn() + .mockResolvedValueOnce({ messageId: "tg-3", chatId: "12345" }) + .mockResolvedValueOnce({ messageId: "tg-4", chatId: "12345" }), + ); const result = await telegramPlugin.outbound!.sendPayload!({ cfg: createCfg(), diff --git a/extensions/telegram/src/channel.ts b/extensions/telegram/src/channel.ts index 52ae2b15ea8..20d012c9dda 100644 --- a/extensions/telegram/src/channel.ts +++ b/extensions/telegram/src/channel.ts @@ -78,6 +78,61 @@ function formatDuplicateTelegramTokenReason(params: { ); } +type TelegramSendFn = ReturnType< + typeof getTelegramRuntime +>["channel"]["telegram"]["sendMessageTelegram"]; +type TelegramSendOptions = NonNullable[2]>; + +function buildTelegramSendOptions(params: { + cfg: OpenClawConfig; + mediaUrl?: string | null; + mediaLocalRoots?: readonly string[] | null; + accountId?: string | null; + replyToId?: string | null; + threadId?: string | number | null; + silent?: boolean | null; +}): TelegramSendOptions { + return { + verbose: false, + cfg: params.cfg, + ...(params.mediaUrl ? { mediaUrl: params.mediaUrl } : {}), + ...(params.mediaLocalRoots?.length ? { mediaLocalRoots: params.mediaLocalRoots } : {}), + messageThreadId: parseTelegramThreadId(params.threadId), + replyToMessageId: parseTelegramReplyToMessageId(params.replyToId), + accountId: params.accountId ?? undefined, + silent: params.silent ?? 
undefined, + }; +} + +async function sendTelegramOutbound(params: { + cfg: OpenClawConfig; + to: string; + text: string; + mediaUrl?: string | null; + mediaLocalRoots?: readonly string[] | null; + accountId?: string | null; + deps?: { sendTelegram?: TelegramSendFn }; + replyToId?: string | null; + threadId?: string | number | null; + silent?: boolean | null; +}) { + const send = + params.deps?.sendTelegram ?? getTelegramRuntime().channel.telegram.sendMessageTelegram; + return await send( + params.to, + params.text, + buildTelegramSendOptions({ + cfg: params.cfg, + mediaUrl: params.mediaUrl, + mediaLocalRoots: params.mediaLocalRoots, + accountId: params.accountId, + replyToId: params.replyToId, + threadId: params.threadId, + silent: params.silent, + }), + ); +} + const telegramMessageActions: ChannelMessageActionAdapter = { listActions: (ctx) => getTelegramRuntime().channel.telegram.messageActions?.listActions?.(ctx) ?? [], @@ -327,35 +382,31 @@ export const telegramPlugin: ChannelPlugin { const send = deps?.sendTelegram ?? getTelegramRuntime().channel.telegram.sendMessageTelegram; - const replyToMessageId = parseTelegramReplyToMessageId(replyToId); - const messageThreadId = parseTelegramThreadId(threadId); const result = await sendTelegramPayloadMessages({ send, to, payload, - baseOpts: { - verbose: false, + baseOpts: buildTelegramSendOptions({ cfg, mediaLocalRoots, - messageThreadId, - replyToMessageId, - accountId: accountId ?? undefined, - silent: silent ?? undefined, - }, + accountId, + replyToId, + threadId, + silent, + }), }); return { channel: "telegram", ...result }; }, sendText: async ({ cfg, to, text, accountId, deps, replyToId, threadId, silent }) => { - const send = deps?.sendTelegram ?? 
getTelegramRuntime().channel.telegram.sendMessageTelegram; - const replyToMessageId = parseTelegramReplyToMessageId(replyToId); - const messageThreadId = parseTelegramThreadId(threadId); - const result = await send(to, text, { - verbose: false, + const result = await sendTelegramOutbound({ cfg, - messageThreadId, - replyToMessageId, - accountId: accountId ?? undefined, - silent: silent ?? undefined, + to, + text, + accountId, + deps, + replyToId, + threadId, + silent, }); return { channel: "telegram", ...result }; }, @@ -371,18 +422,17 @@ export const telegramPlugin: ChannelPlugin { - const send = deps?.sendTelegram ?? getTelegramRuntime().channel.telegram.sendMessageTelegram; - const replyToMessageId = parseTelegramReplyToMessageId(replyToId); - const messageThreadId = parseTelegramThreadId(threadId); - const result = await send(to, text, { - verbose: false, + const result = await sendTelegramOutbound({ cfg, + to, + text, mediaUrl, mediaLocalRoots, - messageThreadId, - replyToMessageId, - accountId: accountId ?? undefined, - silent: silent ?? 
undefined, + accountId, + deps, + replyToId, + threadId, + silent, }); return { channel: "telegram", ...result }; }, diff --git a/extensions/test-utils/directory.ts b/extensions/test-utils/directory.ts new file mode 100644 index 00000000000..90d2ed445d3 --- /dev/null +++ b/extensions/test-utils/directory.ts @@ -0,0 +1,27 @@ +import type { ChannelDirectoryAdapter } from "../../src/channels/plugins/types.js"; + +export function createDirectoryTestRuntime() { + return { + log: () => {}, + error: () => {}, + exit: (code: number): never => { + throw new Error(`exit ${code}`); + }, + }; +} + +export function expectDirectorySurface(directory: ChannelDirectoryAdapter | null | undefined) { + if (!directory) { + throw new Error("expected directory"); + } + if (!directory.listPeers) { + throw new Error("expected listPeers"); + } + if (!directory.listGroups) { + throw new Error("expected listGroups"); + } + return directory as { + listPeers: NonNullable; + listGroups: NonNullable; + }; +} diff --git a/extensions/test-utils/plugin-api.ts b/extensions/test-utils/plugin-api.ts new file mode 100644 index 00000000000..5c9693c1a80 --- /dev/null +++ b/extensions/test-utils/plugin-api.ts @@ -0,0 +1,25 @@ +import type { OpenClawPluginApi } from "../../src/plugins/types.js"; + +type TestPluginApiInput = Partial & + Pick; + +export function createTestPluginApi(api: TestPluginApiInput): OpenClawPluginApi { + return { + logger: { info() {}, warn() {}, error() {}, debug() {} }, + registerTool() {}, + registerHook() {}, + registerHttpRoute() {}, + registerChannel() {}, + registerGatewayMethod() {}, + registerCli() {}, + registerService() {}, + registerProvider() {}, + registerCommand() {}, + registerContextEngine() {}, + resolvePath(input: string) { + return input; + }, + on() {}, + ...api, + }; +} diff --git a/extensions/test-utils/send-config.ts b/extensions/test-utils/send-config.ts new file mode 100644 index 00000000000..61c7e126b12 --- /dev/null +++ 
b/extensions/test-utils/send-config.ts @@ -0,0 +1,65 @@ +import { expect } from "vitest"; + +type MockFn = (...args: never[]) => unknown; + +type CfgThreadingAssertion = { + loadConfig: MockFn; + resolveAccount: MockFn; + cfg: TCfg; + accountId?: string; +}; + +type SendRuntimeState = { + loadConfig: MockFn; + resolveMarkdownTableMode: MockFn; + convertMarkdownTables: MockFn; + record: MockFn; +}; + +export function expectProvidedCfgSkipsRuntimeLoad({ + loadConfig, + resolveAccount, + cfg, + accountId, +}: CfgThreadingAssertion): void { + expect(loadConfig).not.toHaveBeenCalled(); + expect(resolveAccount).toHaveBeenCalledWith({ + cfg, + accountId, + }); +} + +export function expectRuntimeCfgFallback({ + loadConfig, + resolveAccount, + cfg, + accountId, +}: CfgThreadingAssertion): void { + expect(loadConfig).toHaveBeenCalledTimes(1); + expect(resolveAccount).toHaveBeenCalledWith({ + cfg, + accountId, + }); +} + +export function createSendCfgThreadingRuntime({ + loadConfig, + resolveMarkdownTableMode, + convertMarkdownTables, + record, +}: SendRuntimeState) { + return { + config: { + loadConfig, + }, + channel: { + text: { + resolveMarkdownTableMode, + convertMarkdownTables, + }, + activity: { + record, + }, + }, + }; +} diff --git a/extensions/test-utils/start-account-lifecycle.ts b/extensions/test-utils/start-account-lifecycle.ts new file mode 100644 index 00000000000..6ce1c734736 --- /dev/null +++ b/extensions/test-utils/start-account-lifecycle.ts @@ -0,0 +1,72 @@ +import type { ChannelAccountSnapshot, ChannelGatewayContext } from "openclaw/plugin-sdk/test-utils"; +import { expect, vi } from "vitest"; +import { createStartAccountContext } from "./start-account-context.js"; + +export function startAccountAndTrackLifecycle(params: { + startAccount: (ctx: ChannelGatewayContext) => Promise; + account: TAccount; +}) { + const patches: ChannelAccountSnapshot[] = []; + const abort = new AbortController(); + const task = params.startAccount( + createStartAccountContext({ 
+ account: params.account, + abortSignal: abort.signal, + statusPatchSink: (next) => patches.push({ ...next }), + }), + ); + let settled = false; + void task.then(() => { + settled = true; + }); + return { + abort, + patches, + task, + isSettled: () => settled, + }; +} + +export async function abortStartedAccount(params: { + abort: AbortController; + task: Promise; +}) { + params.abort.abort(); + await params.task; +} + +export async function expectPendingUntilAbort(params: { + waitForStarted: () => Promise; + isSettled: () => boolean; + abort: AbortController; + task: Promise; + assertBeforeAbort?: () => void; + assertAfterAbort?: () => void; +}) { + await params.waitForStarted(); + expect(params.isSettled()).toBe(false); + params.assertBeforeAbort?.(); + await abortStartedAccount({ abort: params.abort, task: params.task }); + params.assertAfterAbort?.(); +} + +export async function expectStopPendingUntilAbort(params: { + waitForStarted: () => Promise; + isSettled: () => boolean; + abort: AbortController; + task: Promise; + stop: ReturnType; +}) { + await expectPendingUntilAbort({ + waitForStarted: params.waitForStarted, + isSettled: params.isSettled, + abort: params.abort, + task: params.task, + assertBeforeAbort: () => { + expect(params.stop).not.toHaveBeenCalled(); + }, + assertAfterAbort: () => { + expect(params.stop).toHaveBeenCalledOnce(); + }, + }); +} diff --git a/extensions/test-utils/status-issues.ts b/extensions/test-utils/status-issues.ts new file mode 100644 index 00000000000..7de3c6bcd55 --- /dev/null +++ b/extensions/test-utils/status-issues.ts @@ -0,0 +1,10 @@ +import { expect } from "vitest"; + +export function expectOpenDmPolicyConfigIssue(params: { + collectIssues: (accounts: TAccount[]) => Array<{ kind?: string }>; + account: TAccount; +}) { + const issues = params.collectIssues([params.account]); + expect(issues).toHaveLength(1); + expect(issues[0]?.kind).toBe("config"); +} diff --git a/extensions/thread-ownership/index.test.ts 
b/extensions/thread-ownership/index.test.ts index 825b4ca5bb5..3d98d8f9735 100644 --- a/extensions/thread-ownership/index.test.ts +++ b/extensions/thread-ownership/index.test.ts @@ -51,6 +51,13 @@ describe("thread-ownership plugin", () => { register(api as any); }); + async function sendSlackThreadMessage() { + return await hooks.message_sending( + { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, + { channelId: "slack", conversationId: "C123" }, + ); + } + it("allows non-slack channels", async () => { const result = await hooks.message_sending( { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, @@ -76,10 +83,7 @@ describe("thread-ownership plugin", () => { new Response(JSON.stringify({ owner: "test-agent" }), { status: 200 }), ); - const result = await hooks.message_sending( - { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, - { channelId: "slack", conversationId: "C123" }, - ); + const result = await sendSlackThreadMessage(); expect(result).toBeUndefined(); expect(globalThis.fetch).toHaveBeenCalledWith( @@ -96,10 +100,7 @@ describe("thread-ownership plugin", () => { new Response(JSON.stringify({ owner: "other-agent" }), { status: 409 }), ); - const result = await hooks.message_sending( - { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, - { channelId: "slack", conversationId: "C123" }, - ); + const result = await sendSlackThreadMessage(); expect(result).toEqual({ cancel: true }); expect(api.logger.info).toHaveBeenCalledWith(expect.stringContaining("cancelled send")); @@ -108,10 +109,7 @@ describe("thread-ownership plugin", () => { it("fails open on network error", async () => { vi.mocked(globalThis.fetch).mockRejectedValue(new Error("ECONNREFUSED")); - const result = await hooks.message_sending( - { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, - { channelId: 
"slack", conversationId: "C123" }, - ); + const result = await sendSlackThreadMessage(); expect(result).toBeUndefined(); expect(api.logger.warn).toHaveBeenCalledWith( diff --git a/extensions/tlon/src/channel.ts b/extensions/tlon/src/channel.ts index 3c5bedbf841..eb37c8d7f74 100644 --- a/extensions/tlon/src/channel.ts +++ b/extensions/tlon/src/channel.ts @@ -153,6 +153,57 @@ function applyTlonSetupConfig(params: { }; } +type ResolvedTlonAccount = ReturnType; +type ConfiguredTlonAccount = ResolvedTlonAccount & { + ship: string; + url: string; + code: string; +}; + +function resolveOutboundContext(params: { + cfg: OpenClawConfig; + accountId?: string | null; + to: string; +}) { + const account = resolveTlonAccount(params.cfg, params.accountId ?? undefined); + if (!account.configured || !account.ship || !account.url || !account.code) { + throw new Error("Tlon account not configured"); + } + + const parsed = parseTlonTarget(params.to); + if (!parsed) { + throw new Error(`Invalid Tlon target. Use ${formatTargetHint()}`); + } + + return { account: account as ConfiguredTlonAccount, parsed }; +} + +function resolveReplyId(replyToId?: string | null, threadId?: string | number | null) { + return (replyToId ?? threadId) ? String(replyToId ?? threadId) : undefined; +} + +async function withHttpPokeAccountApi( + account: ConfiguredTlonAccount, + run: (api: Awaited>) => Promise, +) { + const api = await createHttpPokeApi({ + url: account.url, + ship: account.ship, + code: account.code, + allowPrivateNetwork: account.allowPrivateNetwork ?? 
undefined, + }); + + try { + return await run(api); + } finally { + try { + await api.delete(); + } catch { + // ignore cleanup errors + } + } +} + const tlonOutbound: ChannelOutboundAdapter = { deliveryMode: "direct", textChunkLimit: 10000, @@ -170,25 +221,8 @@ const tlonOutbound: ChannelOutboundAdapter = { return { ok: true, to: parsed.nest }; }, sendText: async ({ cfg, to, text, accountId, replyToId, threadId }) => { - const account = resolveTlonAccount(cfg, accountId ?? undefined); - if (!account.configured || !account.ship || !account.url || !account.code) { - throw new Error("Tlon account not configured"); - } - - const parsed = parseTlonTarget(to); - if (!parsed) { - throw new Error(`Invalid Tlon target. Use ${formatTargetHint()}`); - } - - // Use HTTP-only poke (no EventSource) to avoid conflicts with monitor's SSE connection - const api = await createHttpPokeApi({ - url: account.url, - ship: account.ship, - code: account.code, - allowPrivateNetwork: account.allowPrivateNetwork ?? undefined, - }); - - try { + const { account, parsed } = resolveOutboundContext({ cfg, accountId, to }); + return withHttpPokeAccountApi(account, async (api) => { const fromShip = normalizeShip(account.ship); if (parsed.kind === "dm") { return await sendDm({ @@ -198,52 +232,29 @@ const tlonOutbound: ChannelOutboundAdapter = { text, }); } - const replyId = (replyToId ?? threadId) ? String(replyToId ?? threadId) : undefined; return await sendGroupMessage({ api, fromShip, hostShip: parsed.hostShip, channelName: parsed.channelName, text, - replyToId: replyId, + replyToId: resolveReplyId(replyToId, threadId), }); - } finally { - try { - await api.delete(); - } catch { - // ignore cleanup errors - } - } + }); }, sendMedia: async ({ cfg, to, text, mediaUrl, accountId, replyToId, threadId }) => { - const account = resolveTlonAccount(cfg, accountId ?? 
undefined); - if (!account.configured || !account.ship || !account.url || !account.code) { - throw new Error("Tlon account not configured"); - } - - const parsed = parseTlonTarget(to); - if (!parsed) { - throw new Error(`Invalid Tlon target. Use ${formatTargetHint()}`); - } + const { account, parsed } = resolveOutboundContext({ cfg, accountId, to }); // Configure the API client for uploads configureClient({ shipUrl: account.url, shipName: account.ship.replace(/^~/, ""), verbose: false, - getCode: async () => account.code!, + getCode: async () => account.code, }); const uploadedUrl = mediaUrl ? await uploadImageFromUrl(mediaUrl) : undefined; - - const api = await createHttpPokeApi({ - url: account.url, - ship: account.ship, - code: account.code, - allowPrivateNetwork: account.allowPrivateNetwork ?? undefined, - }); - - try { + return withHttpPokeAccountApi(account, async (api) => { const fromShip = normalizeShip(account.ship); const story = buildMediaStory(text, uploadedUrl); @@ -255,22 +266,15 @@ const tlonOutbound: ChannelOutboundAdapter = { story, }); } - const replyId = (replyToId ?? threadId) ? String(replyToId ?? 
threadId) : undefined; return await sendGroupMessageWithStory({ api, fromShip, hostShip: parsed.hostShip, channelName: parsed.channelName, story, - replyToId: replyId, + replyToId: resolveReplyId(replyToId, threadId), }); - } finally { - try { - await api.delete(); - } catch { - // ignore cleanup errors - } - } + }); }, }; diff --git a/extensions/tlon/src/urbit/sse-client.ts b/extensions/tlon/src/urbit/sse-client.ts index ab12977d0e8..afa87502320 100644 --- a/extensions/tlon/src/urbit/sse-client.ts +++ b/extensions/tlon/src/urbit/sse-client.ts @@ -115,20 +115,7 @@ export class UrbitSSEClient { app: string; path: string; }) { - const { response, release } = await urbitFetch({ - baseUrl: this.url, - path: `/~/channel/${this.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: this.cookie, - }, - body: JSON.stringify([subscription]), - }, - ssrfPolicy: this.ssrfPolicy, - lookupFn: this.lookupFn, - fetchImpl: this.fetchImpl, + const { response, release } = await this.putChannelPayload([subscription], { timeoutMs: 30_000, auditContext: "tlon-urbit-subscribe", }); @@ -359,20 +346,7 @@ export class UrbitSSEClient { "event-id": eventId, }; - const { response, release } = await urbitFetch({ - baseUrl: this.url, - path: `/~/channel/${this.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: this.cookie, - }, - body: JSON.stringify([ackData]), - }, - ssrfPolicy: this.ssrfPolicy, - lookupFn: this.lookupFn, - fetchImpl: this.fetchImpl, + const { response, release } = await this.putChannelPayload([ackData], { timeoutMs: 10_000, auditContext: "tlon-urbit-ack", }); @@ -445,20 +419,7 @@ export class UrbitSSEClient { })); { - const { response, release } = await urbitFetch({ - baseUrl: this.url, - path: `/~/channel/${this.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: this.cookie, - }, - body: JSON.stringify(unsubscribes), - }, - 
ssrfPolicy: this.ssrfPolicy, - lookupFn: this.lookupFn, - fetchImpl: this.fetchImpl, + const { response, release } = await this.putChannelPayload(unsubscribes, { timeoutMs: 30_000, auditContext: "tlon-urbit-unsubscribe", }); @@ -501,4 +462,27 @@ export class UrbitSSEClient { await release(); } } + + private async putChannelPayload( + payload: unknown, + params: { timeoutMs: number; auditContext: string }, + ) { + return await urbitFetch({ + baseUrl: this.url, + path: `/~/channel/${this.channelId}`, + init: { + method: "PUT", + headers: { + "Content-Type": "application/json", + Cookie: this.cookie, + }, + body: JSON.stringify(payload), + }, + ssrfPolicy: this.ssrfPolicy, + lookupFn: this.lookupFn, + fetchImpl: this.fetchImpl, + timeoutMs: params.timeoutMs, + auditContext: params.auditContext, + }); + } } diff --git a/extensions/tlon/src/urbit/upload.test.ts b/extensions/tlon/src/urbit/upload.test.ts index 1a573a6b359..34dd6186d20 100644 --- a/extensions/tlon/src/urbit/upload.test.ts +++ b/extensions/tlon/src/urbit/upload.test.ts @@ -45,6 +45,27 @@ describe("uploadImageFromUrl", () => { }); } + async function setupSuccessfulUpload(params?: { + sourceUrl?: string; + contentType?: string; + uploadedUrl?: string; + }) { + const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); + const sourceUrl = params?.sourceUrl ?? "https://example.com/image.png"; + const contentType = params?.contentType ?? 
"image/png"; + const mockBlob = new Blob(["fake-image"], { type: contentType }); + mockSuccessfulFetch({ + mockFetch, + blob: mockBlob, + finalUrl: sourceUrl, + contentType, + }); + if (params?.uploadedUrl) { + mockUploadFile.mockResolvedValue({ url: params.uploadedUrl }); + } + return { mockBlob, mockUploadFile, uploadImageFromUrl }; + } + beforeEach(() => { vi.clearAllMocks(); }); @@ -54,16 +75,9 @@ describe("uploadImageFromUrl", () => { }); it("fetches image and calls uploadFile, returns uploaded URL", async () => { - const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); - - const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockSuccessfulFetch({ - mockFetch, - blob: mockBlob, - finalUrl: "https://example.com/image.png", - contentType: "image/png", + const { mockBlob, mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload({ + uploadedUrl: "https://memex.tlon.network/uploaded.png", }); - mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" }); const result = await uploadImageFromUrl("https://example.com/image.png"); @@ -95,15 +109,7 @@ describe("uploadImageFromUrl", () => { }); it("returns original URL if upload fails", async () => { - const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); - - const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockSuccessfulFetch({ - mockFetch, - blob: mockBlob, - finalUrl: "https://example.com/image.png", - contentType: "image/png", - }); + const { mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload(); mockUploadFile.mockRejectedValue(new Error("Upload failed")); const result = await uploadImageFromUrl("https://example.com/image.png"); diff --git a/extensions/twitch/src/access-control.test.ts b/extensions/twitch/src/access-control.test.ts index 874326c9697..3d522246700 100644 --- a/extensions/twitch/src/access-control.test.ts +++ b/extensions/twitch/src/access-control.test.ts @@ -49,6 
+49,41 @@ describe("checkTwitchAccessControl", () => { return result; } + function expectAllowedAccessCheck(params: { + account?: Partial; + message?: Partial; + }) { + const result = runAccessCheck({ + account: params.account, + message: { + message: "@testbot hello", + ...params.message, + }, + }); + expect(result.allowed).toBe(true); + return result; + } + + function expectAllowFromBlocked(params: { + allowFrom: string[]; + allowedRoles?: NonNullable; + message?: Partial; + reason: string; + }) { + const result = runAccessCheck({ + account: { + allowFrom: params.allowFrom, + allowedRoles: params.allowedRoles, + }, + message: { + message: "@testbot hello", + ...params.message, + }, + }); + expect(result.allowed).toBe(false); + expect(result.reason).toContain(params.reason); + } + describe("when no restrictions are configured", () => { it("allows messages that mention the bot (default requireMention)", () => { const result = runAccessCheck({ @@ -109,62 +144,28 @@ describe("checkTwitchAccessControl", () => { describe("allowFrom allowlist", () => { it("allows users in the allowlist", () => { - const account: TwitchAccountConfig = { - ...mockAccount, - allowFrom: ["123456", "789012"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + const result = expectAllowedAccessCheck({ + account: { + allowFrom: ["123456", "789012"], + }, }); - expect(result.allowed).toBe(true); expect(result.matchKey).toBe("123456"); expect(result.matchSource).toBe("allowlist"); }); it("blocks users not in allowlist when allowFrom is set", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["789012"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + reason: 
"allowFrom", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("allowFrom"); }); it("blocks messages without userId", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["123456"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - userId: undefined, - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + message: { userId: undefined }, + reason: "user ID not available", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("user ID not available"); }); it("bypasses role checks when user is in allowlist", () => { @@ -188,47 +189,21 @@ describe("checkTwitchAccessControl", () => { }); it("blocks user with role when not in allowlist", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["789012"], allowedRoles: ["moderator"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - userId: "123456", - isMod: true, - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + message: { userId: "123456", isMod: true }, + reason: "allowFrom", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("allowFrom"); }); it("blocks user not in allowlist even when roles configured", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["789012"], allowedRoles: ["moderator"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - userId: "123456", - isMod: false, - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + message: { userId: "123456", isMod: false }, + reason: "allowFrom", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("allowFrom"); }); }); @@ -283,21 +258,11 @@ 
describe("checkTwitchAccessControl", () => { }); it("allows all users when role is 'all'", () => { - const account: TwitchAccountConfig = { - ...mockAccount, - allowedRoles: ["all"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + const result = expectAllowedAccessCheck({ + account: { + allowedRoles: ["all"], + }, }); - expect(result.allowed).toBe(true); expect(result.matchKey).toBe("all"); }); diff --git a/extensions/twitch/src/outbound.test.ts b/extensions/twitch/src/outbound.test.ts index 7b480df32dd..f58e2d1ad48 100644 --- a/extensions/twitch/src/outbound.test.ts +++ b/extensions/twitch/src/outbound.test.ts @@ -46,6 +46,20 @@ function assertResolvedTarget( return result.to; } +function expectTargetError( + resolveTarget: NonNullable, + params: Parameters>[0], + expectedMessage: string, +) { + const result = resolveTarget(params); + + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("expected resolveTarget to fail"); + } + expect(result.error.message).toContain(expectedMessage); +} + describe("outbound", () => { const mockAccount = { ...BASE_TWITCH_TEST_ACCOUNT, @@ -106,17 +120,15 @@ describe("outbound", () => { }); it("should error when target not in allowlist (implicit mode)", () => { - const result = resolveTarget({ - to: "#notallowed", - mode: "implicit", - allowFrom: ["#primary", "#secondary"], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Twitch"); + expectTargetError( + resolveTarget, + { + to: "#notallowed", + mode: "implicit", + allowFrom: ["#primary", "#secondary"], + }, + "Twitch", + ); }); it("should accept any target when allowlist is empty", () => { @@ -131,59 +143,51 @@ describe("outbound", () => { }); it("should error when no target provided with allowlist", () => { - const 
result = resolveTarget({ - to: undefined, - mode: "implicit", - allowFrom: ["#fallback", "#other"], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Twitch"); + expectTargetError( + resolveTarget, + { + to: undefined, + mode: "implicit", + allowFrom: ["#fallback", "#other"], + }, + "Twitch", + ); }); it("should return error when no target and no allowlist", () => { - const result = resolveTarget({ - to: undefined, - mode: "explicit", - allowFrom: [], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Missing target"); + expectTargetError( + resolveTarget, + { + to: undefined, + mode: "explicit", + allowFrom: [], + }, + "Missing target", + ); }); it("should handle whitespace-only target", () => { - const result = resolveTarget({ - to: " ", - mode: "explicit", - allowFrom: [], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Missing target"); + expectTargetError( + resolveTarget, + { + to: " ", + mode: "explicit", + allowFrom: [], + }, + "Missing target", + ); }); it("should error when target normalizes to empty string", () => { - const result = resolveTarget({ - to: "#", - mode: "explicit", - allowFrom: [], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Twitch"); + expectTargetError( + resolveTarget, + { + to: "#", + mode: "explicit", + allowFrom: [], + }, + "Twitch", + ); }); it("should filter wildcard from allowlist when checking membership", () => { diff --git a/extensions/twitch/src/plugin.ts b/extensions/twitch/src/plugin.ts index f6cf576b6a0..11cf90b8893 100644 --- a/extensions/twitch/src/plugin.ts +++ 
b/extensions/twitch/src/plugin.ts @@ -7,6 +7,7 @@ import type { OpenClawConfig } from "openclaw/plugin-sdk/twitch"; import { buildChannelConfigSchema } from "openclaw/plugin-sdk/twitch"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { twitchMessageActions } from "./actions.js"; import { removeClientManager } from "./client-manager-registry.js"; import { TwitchConfigSchema } from "./config-schema.js"; @@ -169,15 +170,8 @@ export const twitchPlugin: ChannelPlugin = { }, /** Build channel summary from snapshot */ - buildChannelSummary: ({ snapshot }: { snapshot: ChannelAccountSnapshot }) => ({ - configured: snapshot.configured ?? false, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }: { snapshot: ChannelAccountSnapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot), /** Probe account connection */ probeAccount: async ({ diff --git a/extensions/twitch/src/send.test.ts b/extensions/twitch/src/send.test.ts index e7185b3f5fb..b45321229a4 100644 --- a/extensions/twitch/src/send.test.ts +++ b/extensions/twitch/src/send.test.ts @@ -55,7 +55,10 @@ describe("send", () => { installTwitchTestHooks(); describe("sendMessageTwitchInternal", () => { - it("should send a message successfully", async () => { + async function mockSuccessfulSend(params: { + messageId: string; + stripMarkdown?: (text: string) => string; + }) { const { getAccountConfig } = await import("./config.js"); const { getClientManager } = await import("./client-manager-registry.js"); const { stripMarkdownForTwitch } = await import("./utils/markdown.js"); @@ -64,10 +67,18 @@ describe("send", () => { vi.mocked(getClientManager).mockReturnValue({ sendMessage: vi.fn().mockResolvedValue({ ok: true, - messageId: 
"twitch-msg-123", + messageId: params.messageId, }), } as unknown as ReturnType); - vi.mocked(stripMarkdownForTwitch).mockImplementation((text) => text); + vi.mocked(stripMarkdownForTwitch).mockImplementation( + params.stripMarkdown ?? ((text) => text), + ); + + return { stripMarkdownForTwitch }; + } + + it("should send a message successfully", async () => { + await mockSuccessfulSend({ messageId: "twitch-msg-123" }); const result = await sendMessageTwitchInternal( "#testchannel", @@ -83,18 +94,10 @@ describe("send", () => { }); it("should strip markdown when enabled", async () => { - const { getAccountConfig } = await import("./config.js"); - const { getClientManager } = await import("./client-manager-registry.js"); - const { stripMarkdownForTwitch } = await import("./utils/markdown.js"); - - vi.mocked(getAccountConfig).mockReturnValue(mockAccount); - vi.mocked(getClientManager).mockReturnValue({ - sendMessage: vi.fn().mockResolvedValue({ - ok: true, - messageId: "twitch-msg-456", - }), - } as unknown as ReturnType); - vi.mocked(stripMarkdownForTwitch).mockImplementation((text) => text.replace(/\*\*/g, "")); + const { stripMarkdownForTwitch } = await mockSuccessfulSend({ + messageId: "twitch-msg-456", + stripMarkdown: (text) => text.replace(/\*\*/g, ""), + }); await sendMessageTwitchInternal( "#testchannel", diff --git a/extensions/vllm/index.ts b/extensions/vllm/index.ts index fd0a5e18914..cb865de4dfd 100644 --- a/extensions/vllm/index.ts +++ b/extensions/vllm/index.ts @@ -1,13 +1,11 @@ import { buildVllmProvider, configureOpenAICompatibleSelfHostedProviderNonInteractive, + discoverOpenAICompatibleSelfHostedProvider, emptyPluginConfigSchema, - promptAndConfigureOpenAICompatibleSelfHostedProvider, + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth, type OpenClawPluginApi, - type ProviderAuthContext, type ProviderAuthMethodNonInteractiveContext, - type ProviderAuthResult, - type ProviderDiscoveryContext, } from "openclaw/plugin-sdk/core"; const PROVIDER_ID 
= "vllm"; @@ -30,8 +28,8 @@ const vllmPlugin = { label: "vLLM", hint: "Local/self-hosted OpenAI-compatible server", kind: "custom", - run: async (ctx: ProviderAuthContext): Promise => { - const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider({ + run: async (ctx) => + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth({ cfg: ctx.config, prompter: ctx.prompter, providerId: PROVIDER_ID, @@ -39,18 +37,7 @@ const vllmPlugin = { defaultBaseUrl: DEFAULT_BASE_URL, defaultApiKeyEnvVar: "VLLM_API_KEY", modelPlaceholder: "meta-llama/Meta-Llama-3-8B-Instruct", - }); - return { - profiles: [ - { - profileId: result.profileId, - credential: result.credential, - }, - ], - configPatch: result.config, - defaultModel: result.modelRef, - }; - }, + }), runNonInteractive: async (ctx: ProviderAuthMethodNonInteractiveContext) => configureOpenAICompatibleSelfHostedProviderNonInteractive({ ctx, @@ -64,21 +51,12 @@ const vllmPlugin = { ], discovery: { order: "late", - run: async (ctx: ProviderDiscoveryContext) => { - if (ctx.config.models?.providers?.vllm) { - return null; - } - const { apiKey, discoveryApiKey } = ctx.resolveProviderApiKey(PROVIDER_ID); - if (!apiKey) { - return null; - } - return { - provider: { - ...(await buildVllmProvider({ apiKey: discoveryApiKey })), - apiKey, - }, - }; - }, + run: async (ctx) => + discoverOpenAICompatibleSelfHostedProvider({ + ctx, + providerId: PROVIDER_ID, + buildProvider: buildVllmProvider, + }), }, wizard: { onboarding: { diff --git a/extensions/voice-call/README.md b/extensions/voice-call/README.md index 9acc9aec987..fe228537ee8 100644 --- a/extensions/voice-call/README.md +++ b/extensions/voice-call/README.md @@ -89,56 +89,18 @@ Notes: - Twilio/Telnyx/Plivo require a **publicly reachable** webhook URL. - `mock` is a local dev provider (no network calls). - Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true. 
-- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only. - -Streaming security defaults: - -- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame. -- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets. -- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP. -- `streaming.maxConnections` caps total open media stream sockets (pending + active). +- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call` ## Stale call reaper -Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook -(for example, notify-mode calls that never complete). The default is `0` -(disabled). - -Recommended ranges: - -- **Production:** `120`–`300` seconds for notify-style flows. -- Keep this value **higher than `maxDurationSeconds`** so normal calls can - finish. A good starting point is `maxDurationSeconds + 30–60` seconds. - -Example: - -```json5 -{ - staleCallReaperSeconds: 360, -} -``` +See the plugin docs for recommended ranges and production examples: +`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper` ## TTS for calls Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for -streaming speech on calls. You can override it under the plugin config with the -same shape — overrides deep-merge with `messages.tts`. - -```json5 -{ - tts: { - provider: "openai", - openai: { - voice: "alloy", - }, - }, -} -``` - -Notes: - -- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable). -- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices. +streaming speech on calls. 
Override examples and provider caveats live here: +`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls` ## CLI diff --git a/extensions/voice-call/index.ts b/extensions/voice-call/index.ts index 8e2fba9898f..7393fb03c9b 100644 --- a/extensions/voice-call/index.ts +++ b/extensions/voice-call/index.ts @@ -227,6 +227,37 @@ const voiceCallPlugin = { params.respond(true, { callId: result.callId, initiated: true }); }; + const respondToCallMessageAction = async (params: { + requestParams: GatewayRequestHandlerOptions["params"]; + respond: GatewayRequestHandlerOptions["respond"]; + action: ( + request: Exclude>, { error: string }>, + ) => Promise<{ + success: boolean; + error?: string; + transcript?: string; + }>; + failure: string; + includeTranscript?: boolean; + }) => { + const request = await resolveCallMessageRequest(params.requestParams); + if ("error" in request) { + params.respond(false, { error: request.error }); + return; + } + const result = await params.action(request); + if (!result.success) { + params.respond(false, { error: result.error || params.failure }); + return; + } + params.respond( + true, + params.includeTranscript + ? 
{ success: true, transcript: result.transcript } + : { success: true }, + ); + }; + api.registerGatewayMethod( "voicecall.initiate", async ({ params, respond }: GatewayRequestHandlerOptions) => { @@ -264,17 +295,13 @@ const voiceCallPlugin = { "voicecall.continue", async ({ params, respond }: GatewayRequestHandlerOptions) => { try { - const request = await resolveCallMessageRequest(params); - if ("error" in request) { - respond(false, { error: request.error }); - return; - } - const result = await request.rt.manager.continueCall(request.callId, request.message); - if (!result.success) { - respond(false, { error: result.error || "continue failed" }); - return; - } - respond(true, { success: true, transcript: result.transcript }); + await respondToCallMessageAction({ + requestParams: params, + respond, + action: (request) => request.rt.manager.continueCall(request.callId, request.message), + failure: "continue failed", + includeTranscript: true, + }); } catch (err) { sendError(respond, err); } @@ -285,17 +312,12 @@ const voiceCallPlugin = { "voicecall.speak", async ({ params, respond }: GatewayRequestHandlerOptions) => { try { - const request = await resolveCallMessageRequest(params); - if ("error" in request) { - respond(false, { error: request.error }); - return; - } - const result = await request.rt.manager.speak(request.callId, request.message); - if (!result.success) { - respond(false, { error: result.error || "speak failed" }); - return; - } - respond(true, { success: true }); + await respondToCallMessageAction({ + requestParams: params, + respond, + action: (request) => request.rt.manager.speak(request.callId, request.message), + failure: "speak failed", + }); } catch (err) { sendError(respond, err); } diff --git a/extensions/voice-call/src/manager.restore.test.ts b/extensions/voice-call/src/manager.restore.test.ts index f7f142a16ff..8f76169546f 100644 --- a/extensions/voice-call/src/manager.restore.test.ts +++ 
b/extensions/voice-call/src/manager.restore.test.ts @@ -9,121 +9,87 @@ import { } from "./manager.test-harness.js"; describe("CallManager verification on restore", () => { - it("skips stale calls reported terminal by provider", async () => { + async function initializeManager(params?: { + callOverrides?: Parameters[0]; + providerResult?: FakeProvider["getCallStatusResult"]; + configureProvider?: (provider: FakeProvider) => void; + configOverrides?: Partial<{ maxDurationSeconds: number }>; + }) { const storePath = createTestStorePath(); - const call = makePersistedCall(); + const call = makePersistedCall(params?.callOverrides); writeCallsToStore(storePath, [call]); const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "completed", isTerminal: true }; + if (params?.providerResult) { + provider.getCallStatusResult = params.providerResult; + } + params?.configureProvider?.(provider); const config = VoiceCallConfigSchema.parse({ enabled: true, provider: "plivo", fromNumber: "+15550000000", + ...params?.configOverrides, }); const manager = new CallManager(config, storePath); await manager.initialize(provider, "https://example.com/voice/webhook"); + return { call, manager }; + } + + it("skips stale calls reported terminal by provider", async () => { + const { manager } = await initializeManager({ + providerResult: { status: "completed", isTerminal: true }, + }); + expect(manager.getActiveCalls()).toHaveLength(0); }); it("keeps calls reported active by provider", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "in-progress", isTerminal: false }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { call, manager } = await initializeManager({ + providerResult: { status: "in-progress", isTerminal: false }, }); - 
const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); expect(manager.getActiveCalls()[0]?.callId).toBe(call.callId); }); it("keeps calls when provider returns unknown (transient error)", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "error", isTerminal: false, isUnknown: true }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + providerResult: { status: "error", isTerminal: false, isUnknown: true }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); }); it("skips calls older than maxDurationSeconds", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall({ - startedAt: Date.now() - 600_000, - answeredAt: Date.now() - 590_000, + const { manager } = await initializeManager({ + callOverrides: { + startedAt: Date.now() - 600_000, + answeredAt: Date.now() - 590_000, + }, + configOverrides: { maxDurationSeconds: 300 }, }); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", - maxDurationSeconds: 300, - }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(0); }); it("skips calls without providerCallId", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall({ providerCallId: undefined, state: "initiated" 
}); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + callOverrides: { providerCallId: undefined, state: "initiated" }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(0); }); it("keeps call when getCallStatus throws (verification failure)", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatus = async () => { - throw new Error("network failure"); - }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + configureProvider: (provider) => { + provider.getCallStatus = async () => { + throw new Error("network failure"); + }; + }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); }); diff --git a/extensions/voice-call/src/providers/telnyx.test.ts b/extensions/voice-call/src/providers/telnyx.test.ts index c083070229f..15a4cc8f17f 100644 --- a/extensions/voice-call/src/providers/telnyx.test.ts +++ b/extensions/voice-call/src/providers/telnyx.test.ts @@ -22,6 +22,34 @@ function decodeBase64Url(input: string): Buffer { return Buffer.from(padded, "base64"); } +function createSignedTelnyxCtx(params: { + privateKey: crypto.KeyObject; + rawBody: string; +}): WebhookContext { + const timestamp = String(Math.floor(Date.now() / 1000)); + const signedPayload = `${timestamp}|${params.rawBody}`; + const signature = crypto + .sign(null, Buffer.from(signedPayload), 
params.privateKey) + .toString("base64"); + + return createCtx({ + rawBody: params.rawBody, + headers: { + "telnyx-signature-ed25519": signature, + "telnyx-timestamp": timestamp, + }, + }); +} + +function expectReplayVerification( + results: Array<{ ok: boolean; isReplay?: boolean; verifiedRequestKey?: string }>, +) { + expect(results.map((result) => result.ok)).toEqual([true, true]); + expect(results.map((result) => Boolean(result.isReplay))).toEqual([false, true]); + expect(results[0]?.verifiedRequestKey).toEqual(expect.any(String)); + expect(results[1]?.verifiedRequestKey).toBe(results[0]?.verifiedRequestKey); +} + function expectWebhookVerificationSucceeds(params: { publicKey: string; privateKey: crypto.KeyObject; @@ -35,20 +63,8 @@ function expectWebhookVerificationSucceeds(params: { event_type: "call.initiated", payload: { call_control_id: "x" }, }); - const timestamp = String(Math.floor(Date.now() / 1000)); - const signedPayload = `${timestamp}|${rawBody}`; - const signature = crypto - .sign(null, Buffer.from(signedPayload), params.privateKey) - .toString("base64"); - const result = provider.verifyWebhook( - createCtx({ - rawBody, - headers: { - "telnyx-signature-ed25519": signature, - "telnyx-timestamp": timestamp, - }, - }), + createSignedTelnyxCtx({ privateKey: params.privateKey, rawBody }), ); expect(result.ok).toBe(true); } @@ -117,26 +133,12 @@ describe("TelnyxProvider.verifyWebhook", () => { payload: { call_control_id: "call-replay-test" }, nonce: crypto.randomUUID(), }); - const timestamp = String(Math.floor(Date.now() / 1000)); - const signedPayload = `${timestamp}|${rawBody}`; - const signature = crypto.sign(null, Buffer.from(signedPayload), privateKey).toString("base64"); - const ctx = createCtx({ - rawBody, - headers: { - "telnyx-signature-ed25519": signature, - "telnyx-timestamp": timestamp, - }, - }); + const ctx = createSignedTelnyxCtx({ privateKey, rawBody }); const first = provider.verifyWebhook(ctx); const second = 
provider.verifyWebhook(ctx); - expect(first.ok).toBe(true); - expect(first.isReplay).toBeFalsy(); - expect(first.verifiedRequestKey).toBeTruthy(); - expect(second.ok).toBe(true); - expect(second.isReplay).toBe(true); - expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expectReplayVerification([first, second]); }); }); diff --git a/extensions/voice-call/src/providers/twilio.test.ts b/extensions/voice-call/src/providers/twilio.test.ts index 0a88bdeae07..4e23783b93a 100644 --- a/extensions/voice-call/src/providers/twilio.test.ts +++ b/extensions/voice-call/src/providers/twilio.test.ts @@ -21,6 +21,12 @@ function createContext(rawBody: string, query?: WebhookContext["query"]): Webhoo }; } +function expectStreamingTwiml(body: string) { + expect(body).toContain(STREAM_URL); + expect(body).toContain('"); +} + describe("TwilioProvider", () => { it("returns streaming TwiML for outbound conversation calls before in-progress", () => { const provider = createProvider(); @@ -30,9 +36,8 @@ describe("TwilioProvider", () => { const result = provider.parseWebhookEvent(ctx); - expect(result.providerResponseBody).toContain(STREAM_URL); - expect(result.providerResponseBody).toContain('"); + expect(result.providerResponseBody).toBeDefined(); + expectStreamingTwiml(result.providerResponseBody ?? ""); }); it("returns empty TwiML for status callbacks", () => { @@ -55,9 +60,8 @@ describe("TwilioProvider", () => { const result = provider.parseWebhookEvent(ctx); - expect(result.providerResponseBody).toContain(STREAM_URL); - expect(result.providerResponseBody).toContain('"); + expect(result.providerResponseBody).toBeDefined(); + expectStreamingTwiml(result.providerResponseBody ?? 
""); }); it("returns queue TwiML for second inbound call when first call is active", () => { diff --git a/extensions/voice-call/src/webhook-security.test.ts b/extensions/voice-call/src/webhook-security.test.ts index 3134f18b729..3fe3cd473a1 100644 --- a/extensions/voice-call/src/webhook-security.test.ts +++ b/extensions/voice-call/src/webhook-security.test.ts @@ -98,6 +98,51 @@ function expectReplayResultPair( expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); } +function expectAcceptedWebhookVersion( + result: { ok: boolean; version?: string }, + version: "v2" | "v3", +) { + expect(result).toMatchObject({ ok: true, version }); +} + +function verifyTwilioNgrokLoopback(signature: string) { + return verifyTwilioWebhook( + { + headers: { + host: "127.0.0.1:3334", + "x-forwarded-proto": "https", + "x-forwarded-host": "local.ngrok-free.app", + "x-twilio-signature": signature, + }, + rawBody: "CallSid=CS123&CallStatus=completed&From=%2B15550000000", + url: "http://127.0.0.1:3334/voice/webhook", + method: "POST", + remoteAddress: "127.0.0.1", + }, + "test-auth-token", + { allowNgrokFreeTierLoopbackBypass: true }, + ); +} + +function verifyTwilioSignedRequest(params: { + headers: Record; + rawBody: string; + authToken: string; + publicUrl: string; +}) { + return verifyTwilioWebhook( + { + headers: params.headers, + rawBody: params.rawBody, + url: "http://local/voice/webhook?callId=abc", + method: "POST", + query: { callId: "abc" }, + }, + params.authToken, + { publicUrl: params.publicUrl }, + ); +} + describe("verifyPlivoWebhook", () => { it("accepts valid V2 signature", () => { const authToken = "test-auth-token"; @@ -127,8 +172,7 @@ describe("verifyPlivoWebhook", () => { authToken, ); - expect(result.ok).toBe(true); - expect(result.version).toBe("v2"); + expectAcceptedWebhookVersion(result, "v2"); }); it("accepts valid V3 signature (including multi-signature header)", () => { @@ -161,8 +205,7 @@ describe("verifyPlivoWebhook", () => { authToken, ); - 
expect(result.ok).toBe(true); - expect(result.version).toBe("v3"); + expectAcceptedWebhookVersion(result, "v3"); }); it("rejects missing signatures", () => { @@ -317,35 +360,10 @@ describe("verifyTwilioWebhook", () => { "i-twilio-idempotency-token": "idem-replay-1", }; - const first = verifyTwilioWebhook( - { - headers, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, - }, - authToken, - { publicUrl }, - ); - const second = verifyTwilioWebhook( - { - headers, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, - }, - authToken, - { publicUrl }, - ); + const first = verifyTwilioSignedRequest({ headers, rawBody: postBody, authToken, publicUrl }); + const second = verifyTwilioSignedRequest({ headers, rawBody: postBody, authToken, publicUrl }); - expect(first.ok).toBe(true); - expect(first.isReplay).toBeFalsy(); - expect(first.verifiedRequestKey).toBeTruthy(); - expect(second.ok).toBe(true); - expect(second.isReplay).toBe(true); - expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expectReplayResultPair(first, second); }); it("treats changed idempotency header as replay for identical signed requests", () => { @@ -355,45 +373,30 @@ describe("verifyTwilioWebhook", () => { const postBody = "CallSid=CS778&CallStatus=completed&From=%2B15550000000"; const signature = twilioSignature({ authToken, url: urlWithQuery, postBody }); - const first = verifyTwilioWebhook( - { - headers: { - host: "example.com", - "x-forwarded-proto": "https", - "x-twilio-signature": signature, - "i-twilio-idempotency-token": "idem-replay-a", - }, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, + const first = verifyTwilioSignedRequest({ + headers: { + host: "example.com", + "x-forwarded-proto": "https", + "x-twilio-signature": signature, + "i-twilio-idempotency-token": "idem-replay-a", }, + 
rawBody: postBody, authToken, - { publicUrl }, - ); - const second = verifyTwilioWebhook( - { - headers: { - host: "example.com", - "x-forwarded-proto": "https", - "x-twilio-signature": signature, - "i-twilio-idempotency-token": "idem-replay-b", - }, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, + publicUrl, + }); + const second = verifyTwilioSignedRequest({ + headers: { + host: "example.com", + "x-forwarded-proto": "https", + "x-twilio-signature": signature, + "i-twilio-idempotency-token": "idem-replay-b", }, + rawBody: postBody, authToken, - { publicUrl }, - ); + publicUrl, + }); - expect(first.ok).toBe(true); - expect(first.isReplay).toBe(false); - expect(first.verifiedRequestKey).toBeTruthy(); - expect(second.ok).toBe(true); - expect(second.isReplay).toBe(true); - expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expectReplayResultPair(first, second); }); it("rejects invalid signatures even when attacker injects forwarded host", () => { @@ -422,57 +425,22 @@ describe("verifyTwilioWebhook", () => { }); it("accepts valid signatures for ngrok free tier on loopback when compatibility mode is enabled", () => { - const authToken = "test-auth-token"; - const postBody = "CallSid=CS123&CallStatus=completed&From=%2B15550000000"; const webhookUrl = "https://local.ngrok-free.app/voice/webhook"; const signature = twilioSignature({ - authToken, + authToken: "test-auth-token", url: webhookUrl, - postBody, + postBody: "CallSid=CS123&CallStatus=completed&From=%2B15550000000", }); - const result = verifyTwilioWebhook( - { - headers: { - host: "127.0.0.1:3334", - "x-forwarded-proto": "https", - "x-forwarded-host": "local.ngrok-free.app", - "x-twilio-signature": signature, - }, - rawBody: postBody, - url: "http://127.0.0.1:3334/voice/webhook", - method: "POST", - remoteAddress: "127.0.0.1", - }, - authToken, - { allowNgrokFreeTierLoopbackBypass: true }, - ); + const result = 
verifyTwilioNgrokLoopback(signature); expect(result.ok).toBe(true); expect(result.verificationUrl).toBe(webhookUrl); }); it("does not allow invalid signatures for ngrok free tier on loopback", () => { - const authToken = "test-auth-token"; - const postBody = "CallSid=CS123&CallStatus=completed&From=%2B15550000000"; - - const result = verifyTwilioWebhook( - { - headers: { - host: "127.0.0.1:3334", - "x-forwarded-proto": "https", - "x-forwarded-host": "local.ngrok-free.app", - "x-twilio-signature": "invalid", - }, - rawBody: postBody, - url: "http://127.0.0.1:3334/voice/webhook", - method: "POST", - remoteAddress: "127.0.0.1", - }, - authToken, - { allowNgrokFreeTierLoopbackBypass: true }, - ); + const result = verifyTwilioNgrokLoopback("invalid"); expect(result.ok).toBe(false); expect(result.reason).toMatch(/Invalid signature/); diff --git a/extensions/voice-call/src/webhook.test.ts b/extensions/voice-call/src/webhook.test.ts index f5a827a3ef3..6297a69f14a 100644 --- a/extensions/voice-call/src/webhook.test.ts +++ b/extensions/voice-call/src/webhook.test.ts @@ -56,6 +56,28 @@ const createManager = (calls: CallRecord[]) => { return { manager, endCall, processEvent }; }; +async function runStaleCallReaperCase(params: { + callAgeMs: number; + staleCallReaperSeconds: number; + advanceMs: number; +}) { + const now = new Date("2026-02-16T00:00:00Z"); + vi.setSystemTime(now); + + const call = createCall(now.getTime() - params.callAgeMs); + const { manager, endCall } = createManager([call]); + const config = createConfig({ staleCallReaperSeconds: params.staleCallReaperSeconds }); + const server = new VoiceCallWebhookServer(config, manager, provider); + + try { + await server.start(); + await vi.advanceTimersByTimeAsync(params.advanceMs); + return { call, endCall }; + } finally { + await server.stop(); + } +} + async function postWebhookForm(server: VoiceCallWebhookServer, baseUrl: string, body: string) { const address = ( server as unknown as { server?: { address?: () => 
unknown } } @@ -81,39 +103,21 @@ describe("VoiceCallWebhookServer stale call reaper", () => { }); it("ends calls older than staleCallReaperSeconds", async () => { - const now = new Date("2026-02-16T00:00:00Z"); - vi.setSystemTime(now); - - const call = createCall(now.getTime() - 120_000); - const { manager, endCall } = createManager([call]); - const config = createConfig({ staleCallReaperSeconds: 60 }); - const server = new VoiceCallWebhookServer(config, manager, provider); - - try { - await server.start(); - await vi.advanceTimersByTimeAsync(30_000); - expect(endCall).toHaveBeenCalledWith(call.callId); - } finally { - await server.stop(); - } + const { call, endCall } = await runStaleCallReaperCase({ + callAgeMs: 120_000, + staleCallReaperSeconds: 60, + advanceMs: 30_000, + }); + expect(endCall).toHaveBeenCalledWith(call.callId); }); it("skips calls that are younger than the threshold", async () => { - const now = new Date("2026-02-16T00:00:00Z"); - vi.setSystemTime(now); - - const call = createCall(now.getTime() - 10_000); - const { manager, endCall } = createManager([call]); - const config = createConfig({ staleCallReaperSeconds: 60 }); - const server = new VoiceCallWebhookServer(config, manager, provider); - - try { - await server.start(); - await vi.advanceTimersByTimeAsync(30_000); - expect(endCall).not.toHaveBeenCalled(); - } finally { - await server.stop(); - } + const { endCall } = await runStaleCallReaperCase({ + callAgeMs: 10_000, + staleCallReaperSeconds: 60, + advanceMs: 30_000, + }); + expect(endCall).not.toHaveBeenCalled(); }); it("does not run when staleCallReaperSeconds is disabled", async () => { diff --git a/extensions/whatsapp/src/channel.outbound.test.ts b/extensions/whatsapp/src/channel.outbound.test.ts index 758274619e0..70220dcac3b 100644 --- a/extensions/whatsapp/src/channel.outbound.test.ts +++ b/extensions/whatsapp/src/channel.outbound.test.ts @@ -1,5 +1,8 @@ -import type { OpenClawConfig } from "openclaw/plugin-sdk/whatsapp"; import { 
describe, expect, it, vi } from "vitest"; +import { + createWhatsAppPollFixture, + expectWhatsAppPollSent, +} from "../../../src/test-helpers/whatsapp-outbound.js"; const hoisted = vi.hoisted(() => ({ sendPollWhatsApp: vi.fn(async () => ({ messageId: "wa-poll-1", toJid: "1555@s.whatsapp.net" })), @@ -22,25 +25,16 @@ import { whatsappPlugin } from "./channel.js"; describe("whatsappPlugin outbound sendPoll", () => { it("threads cfg into runtime sendPollWhatsApp call", async () => { - const cfg = { marker: "resolved-cfg" } as OpenClawConfig; - const poll = { - question: "Lunch?", - options: ["Pizza", "Sushi"], - maxSelections: 1, - }; + const { cfg, poll, to, accountId } = createWhatsAppPollFixture(); const result = await whatsappPlugin.outbound!.sendPoll!({ cfg, - to: "+1555", + to, poll, - accountId: "work", + accountId, }); - expect(hoisted.sendPollWhatsApp).toHaveBeenCalledWith("+1555", poll, { - verbose: false, - accountId: "work", - cfg, - }); + expectWhatsAppPollSent(hoisted.sendPollWhatsApp, { cfg, poll, to, accountId }); expect(result).toEqual({ messageId: "wa-poll-1", toJid: "1555@s.whatsapp.net" }); }); }); diff --git a/extensions/whatsapp/src/channel.ts b/extensions/whatsapp/src/channel.ts index 274b5e07883..5be1ba412b0 100644 --- a/extensions/whatsapp/src/channel.ts +++ b/extensions/whatsapp/src/channel.ts @@ -8,6 +8,7 @@ import { buildChannelConfigSchema, collectWhatsAppStatusIssues, createActionGate, + createWhatsAppOutboundBase, DEFAULT_ACCOUNT_ID, getChatChannelMeta, listWhatsAppAccountIds, @@ -283,52 +284,16 @@ export const whatsappPlugin: ChannelPlugin = { ); }, }, - outbound: { - deliveryMode: "gateway", + outbound: createWhatsAppOutboundBase({ chunker: (text, limit) => getWhatsAppRuntime().channel.text.chunkText(text, limit), - chunkerMode: "text", - textChunkLimit: 4000, - pollMaxOptions: 12, + sendMessageWhatsApp: async (...args) => + await getWhatsAppRuntime().channel.whatsapp.sendMessageWhatsApp(...args), + sendPollWhatsApp: async (...args) => 
+ await getWhatsAppRuntime().channel.whatsapp.sendPollWhatsApp(...args), + shouldLogVerbose: () => getWhatsAppRuntime().logging.shouldLogVerbose(), resolveTarget: ({ to, allowFrom, mode }) => resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), - sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { - const send = deps?.sendWhatsApp ?? getWhatsAppRuntime().channel.whatsapp.sendMessageWhatsApp; - const result = await send(to, text, { - verbose: false, - cfg, - accountId: accountId ?? undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendMedia: async ({ - cfg, - to, - text, - mediaUrl, - mediaLocalRoots, - accountId, - deps, - gifPlayback, - }) => { - const send = deps?.sendWhatsApp ?? getWhatsAppRuntime().channel.whatsapp.sendMessageWhatsApp; - const result = await send(to, text, { - verbose: false, - cfg, - mediaUrl, - mediaLocalRoots, - accountId: accountId ?? undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendPoll: async ({ cfg, to, poll, accountId }) => - await getWhatsAppRuntime().channel.whatsapp.sendPollWhatsApp(to, poll, { - verbose: getWhatsAppRuntime().logging.shouldLogVerbose(), - accountId: accountId ?? 
undefined, - cfg, - }), - }, + }), auth: { login: async ({ cfg, accountId, runtime, verbose }) => { const resolvedAccountId = accountId?.trim() || resolveDefaultWhatsAppAccountId(cfg); diff --git a/extensions/zalo/package.json b/extensions/zalo/package.json index 3a9f118a4f6..3880b66abf8 100644 --- a/extensions/zalo/package.json +++ b/extensions/zalo/package.json @@ -4,7 +4,7 @@ "description": "OpenClaw Zalo channel plugin", "type": "module", "dependencies": { - "undici": "7.24.0", + "undici": "7.24.1", "zod": "^4.3.6" }, "openclaw": { diff --git a/extensions/zalo/src/api.test.ts b/extensions/zalo/src/api.test.ts index 00198f5072e..ffdeab84ae4 100644 --- a/extensions/zalo/src/api.test.ts +++ b/extensions/zalo/src/api.test.ts @@ -1,31 +1,26 @@ import { describe, expect, it, vi } from "vitest"; import { deleteWebhook, getWebhookInfo, sendChatAction, type ZaloFetch } from "./api.js"; +function createOkFetcher() { + return vi.fn(async () => new Response(JSON.stringify({ ok: true, result: {} }))); +} + +async function expectPostJsonRequest(run: (token: string, fetcher: ZaloFetch) => Promise) { + const fetcher = createOkFetcher(); + await run("test-token", fetcher); + expect(fetcher).toHaveBeenCalledTimes(1); + const [, init] = fetcher.mock.calls[0] ?? []; + expect(init?.method).toBe("POST"); + expect(init?.headers).toEqual({ "Content-Type": "application/json" }); +} + describe("Zalo API request methods", () => { it("uses POST for getWebhookInfo", async () => { - const fetcher = vi.fn( - async () => new Response(JSON.stringify({ ok: true, result: {} })), - ); - - await getWebhookInfo("test-token", fetcher); - - expect(fetcher).toHaveBeenCalledTimes(1); - const [, init] = fetcher.mock.calls[0] ?? 
[]; - expect(init?.method).toBe("POST"); - expect(init?.headers).toEqual({ "Content-Type": "application/json" }); + await expectPostJsonRequest(getWebhookInfo); }); it("keeps POST for deleteWebhook", async () => { - const fetcher = vi.fn( - async () => new Response(JSON.stringify({ ok: true, result: {} })), - ); - - await deleteWebhook("test-token", fetcher); - - expect(fetcher).toHaveBeenCalledTimes(1); - const [, init] = fetcher.mock.calls[0] ?? []; - expect(init?.method).toBe("POST"); - expect(init?.headers).toEqual({ "Content-Type": "application/json" }); + await expectPostJsonRequest(deleteWebhook); }); it("aborts sendChatAction when the typing timeout elapses", async () => { diff --git a/extensions/zalo/src/channel.directory.test.ts b/extensions/zalo/src/channel.directory.test.ts index 99821c85017..8a303e72a97 100644 --- a/extensions/zalo/src/channel.directory.test.ts +++ b/extensions/zalo/src/channel.directory.test.ts @@ -1,15 +1,10 @@ import type { OpenClawConfig, RuntimeEnv } from "openclaw/plugin-sdk/zalo"; import { describe, expect, it } from "vitest"; +import { createDirectoryTestRuntime, expectDirectorySurface } from "../../test-utils/directory.js"; import { zaloPlugin } from "./channel.js"; describe("zalo directory", () => { - const runtimeEnv: RuntimeEnv = { - log: () => {}, - error: () => {}, - exit: (code: number): never => { - throw new Error(`exit ${code}`); - }, - }; + const runtimeEnv = createDirectoryTestRuntime() as RuntimeEnv; it("lists peers from allowFrom", async () => { const cfg = { @@ -20,12 +15,10 @@ describe("zalo directory", () => { }, } as unknown as OpenClawConfig; - expect(zaloPlugin.directory).toBeTruthy(); - expect(zaloPlugin.directory?.listPeers).toBeTruthy(); - expect(zaloPlugin.directory?.listGroups).toBeTruthy(); + const directory = expectDirectorySurface(zaloPlugin.directory); await expect( - zaloPlugin.directory!.listPeers!({ + directory.listPeers({ cfg, accountId: undefined, query: undefined, @@ -41,7 +34,7 @@ 
describe("zalo directory", () => { ); await expect( - zaloPlugin.directory!.listGroups!({ + directory.listGroups({ cfg, accountId: undefined, query: undefined, diff --git a/extensions/zalo/src/channel.startup.test.ts b/extensions/zalo/src/channel.startup.test.ts index 65e413f0f4f..ea0718d29a2 100644 --- a/extensions/zalo/src/channel.startup.test.ts +++ b/extensions/zalo/src/channel.startup.test.ts @@ -1,6 +1,9 @@ import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/zalo"; import { afterEach, describe, expect, it, vi } from "vitest"; -import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + expectPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedZaloAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -57,37 +60,28 @@ describe("zaloPlugin gateway.startAccount", () => { }), ); - const patches: ChannelAccountSnapshot[] = []; - const abort = new AbortController(); - const task = zaloPlugin.gateway!.startAccount!( - createStartAccountContext({ - account: buildAccount(), - abortSignal: abort.signal, - statusPatchSink: (next) => patches.push({ ...next }), - }), - ); - - let settled = false; - void task.then(() => { - settled = true; + const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: zaloPlugin.gateway!.startAccount!, + account: buildAccount(), }); - await vi.waitFor(() => { - expect(hoisted.probeZalo).toHaveBeenCalledOnce(); - expect(hoisted.monitorZaloProvider).toHaveBeenCalledOnce(); + await expectPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.probeZalo).toHaveBeenCalledOnce(); + expect(hoisted.monitorZaloProvider).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, }); - expect(settled).toBe(false); expect(patches).toContainEqual( expect.objectContaining({ accountId: "default", }), ); - - abort.abort(); - await task; - - 
expect(settled).toBe(true); + expect(isSettled()).toBe(true); expect(hoisted.monitorZaloProvider).toHaveBeenCalledWith( expect.objectContaining({ token: "test-token", diff --git a/extensions/zalo/src/monitor.lifecycle.test.ts b/extensions/zalo/src/monitor.lifecycle.test.ts index 6cce789da56..e5fa65e1063 100644 --- a/extensions/zalo/src/monitor.lifecycle.test.ts +++ b/extensions/zalo/src/monitor.lifecycle.test.ts @@ -32,6 +32,41 @@ async function waitForPollingLoopStart(): Promise { await vi.waitFor(() => expect(getUpdatesMock).toHaveBeenCalledTimes(1)); } +const TEST_ACCOUNT = { + accountId: "default", + config: {}, +} as unknown as ResolvedZaloAccount; + +const TEST_CONFIG = {} as OpenClawConfig; + +function createLifecycleRuntime() { + return { + log: vi.fn<(message: string) => void>(), + error: vi.fn<(message: string) => void>(), + }; +} + +async function startLifecycleMonitor( + options: { + useWebhook?: boolean; + webhookSecret?: string; + webhookUrl?: string; + } = {}, +) { + const { monitorZaloProvider } = await import("./monitor.js"); + const abort = new AbortController(); + const runtime = createLifecycleRuntime(); + const run = monitorZaloProvider({ + token: "test-token", + account: TEST_ACCOUNT, + config: TEST_CONFIG, + runtime, + abortSignal: abort.signal, + ...options, + }); + return { abort, runtime, run }; +} + describe("monitorZaloProvider lifecycle", () => { afterEach(() => { vi.clearAllMocks(); @@ -39,26 +74,9 @@ describe("monitorZaloProvider lifecycle", () => { }); it("stays alive in polling mode until abort", async () => { - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - let settled = false; - const run = monitorZaloProvider({ - token: 
"test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }).then(() => { + const { abort, runtime, run } = await startLifecycleMonitor(); + const monitoredRun = run.then(() => { settled = true; }); @@ -70,7 +88,7 @@ describe("monitorZaloProvider lifecycle", () => { expect(settled).toBe(false); abort.abort(); - await run; + await monitoredRun; expect(settled).toBe(true); expect(runtime.log).toHaveBeenCalledWith( @@ -84,25 +102,7 @@ describe("monitorZaloProvider lifecycle", () => { result: { url: "https://example.com/hooks/zalo" }, }); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }); + const { abort, runtime, run } = await startLifecycleMonitor(); await waitForPollingLoopStart(); @@ -120,25 +120,7 @@ describe("monitorZaloProvider lifecycle", () => { const { ZaloApiError } = await import("./api.js"); getWebhookInfoMock.mockRejectedValueOnce(new ZaloApiError("Not Found", 404, "Not Found")); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }); + const { abort, runtime, run } = await startLifecycleMonitor(); await waitForPollingLoopStart(); @@ -165,29 +147,13 @@ describe("monitorZaloProvider lifecycle", () => 
{ }), ); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - let settled = false; - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, + const { abort, runtime, run } = await startLifecycleMonitor({ useWebhook: true, webhookUrl: "https://example.com/hooks/zalo", webhookSecret: "supersecret", // pragma: allowlist secret - }).then(() => { + }); + const monitoredRun = run.then(() => { settled = true; }); @@ -202,7 +168,7 @@ describe("monitorZaloProvider lifecycle", () => { expect(registry.httpRoutes).toHaveLength(1); resolveDeleteWebhook?.(); - await run; + await monitoredRun; expect(settled).toBe(true); expect(registry.httpRoutes).toHaveLength(0); diff --git a/extensions/zalo/src/monitor.ts b/extensions/zalo/src/monitor.ts index bd1351bd147..d82c0d96ba4 100644 --- a/extensions/zalo/src/monitor.ts +++ b/extensions/zalo/src/monitor.ts @@ -75,6 +75,35 @@ const WEBHOOK_CLEANUP_TIMEOUT_MS = 5_000; const ZALO_TYPING_TIMEOUT_MS = 5_000; type ZaloCoreRuntime = ReturnType; +type ZaloStatusSink = (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; +type ZaloProcessingContext = { + token: string; + account: ResolvedZaloAccount; + config: OpenClawConfig; + runtime: ZaloRuntimeEnv; + core: ZaloCoreRuntime; + statusSink?: ZaloStatusSink; + fetcher?: ZaloFetch; +}; +type ZaloPollingLoopParams = ZaloProcessingContext & { + abortSignal: AbortSignal; + isStopped: () => boolean; + mediaMaxMb: number; +}; +type ZaloUpdateProcessingParams = ZaloProcessingContext & { + update: ZaloUpdate; + mediaMaxMb: number; +}; +type ZaloMessagePipelineParams = ZaloProcessingContext & { + message: ZaloMessage; + text?: 
string; + mediaPath?: string; + mediaType?: string; +}; +type ZaloImageMessageParams = ZaloProcessingContext & { + message: ZaloMessage; + mediaMaxMb: number; +}; function formatZaloError(error: unknown): string { if (error instanceof Error) { @@ -135,32 +164,21 @@ export async function handleZaloWebhookRequest( res: ServerResponse, ): Promise { return handleZaloWebhookRequestInternal(req, res, async ({ update, target }) => { - await processUpdate( + await processUpdate({ update, - target.token, - target.account, - target.config, - target.runtime, - target.core as ZaloCoreRuntime, - target.mediaMaxMb, - target.statusSink, - target.fetcher, - ); + token: target.token, + account: target.account, + config: target.config, + runtime: target.runtime, + core: target.core as ZaloCoreRuntime, + mediaMaxMb: target.mediaMaxMb, + statusSink: target.statusSink, + fetcher: target.fetcher, + }); }); } -function startPollingLoop(params: { - token: string; - account: ResolvedZaloAccount; - config: OpenClawConfig; - runtime: ZaloRuntimeEnv; - core: ZaloCoreRuntime; - abortSignal: AbortSignal; - isStopped: () => boolean; - mediaMaxMb: number; - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; - fetcher?: ZaloFetch; -}) { +function startPollingLoop(params: ZaloPollingLoopParams) { const { token, account, @@ -174,6 +192,16 @@ function startPollingLoop(params: { fetcher, } = params; const pollTimeout = 30; + const processingContext = { + token, + account, + config, + runtime, + core, + mediaMaxMb, + statusSink, + fetcher, + }; runtime.log?.(`[${account.accountId}] Zalo polling loop started timeout=${String(pollTimeout)}s`); @@ -186,17 +214,10 @@ function startPollingLoop(params: { const response = await getUpdates(token, { timeout: pollTimeout }, fetcher); if (response.ok && response.result) { statusSink?.({ lastInboundAt: Date.now() }); - await processUpdate( - response.result, - token, - account, - config, - runtime, - core, - mediaMaxMb, - 
statusSink, - fetcher, - ); + await processUpdate({ + update: response.result, + ...processingContext, + }); } } catch (err) { if (err instanceof ZaloApiError && err.isPollingTimeout) { @@ -215,38 +236,27 @@ function startPollingLoop(params: { void poll(); } -async function processUpdate( - update: ZaloUpdate, - token: string, - account: ResolvedZaloAccount, - config: OpenClawConfig, - runtime: ZaloRuntimeEnv, - core: ZaloCoreRuntime, - mediaMaxMb: number, - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, - fetcher?: ZaloFetch, -): Promise { +async function processUpdate(params: ZaloUpdateProcessingParams): Promise { + const { update, token, account, config, runtime, core, mediaMaxMb, statusSink, fetcher } = params; const { event_name, message } = update; + const sharedContext = { token, account, config, runtime, core, statusSink, fetcher }; if (!message) { return; } switch (event_name) { case "message.text.received": - await handleTextMessage(message, token, account, config, runtime, core, statusSink, fetcher); + await handleTextMessage({ + message, + ...sharedContext, + }); break; case "message.image.received": - await handleImageMessage( + await handleImageMessage({ message, - token, - account, - config, - runtime, - core, + ...sharedContext, mediaMaxMb, - statusSink, - fetcher, - ); + }); break; case "message.sticker.received": logVerbose(core, runtime, `[${account.accountId}] Received sticker from ${message.from.id}`); @@ -262,46 +272,24 @@ async function processUpdate( } async function handleTextMessage( - message: ZaloMessage, - token: string, - account: ResolvedZaloAccount, - config: OpenClawConfig, - runtime: ZaloRuntimeEnv, - core: ZaloCoreRuntime, - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, - fetcher?: ZaloFetch, + params: ZaloProcessingContext & { message: ZaloMessage }, ): Promise { + const { message } = params; const { text } = message; if (!text?.trim()) { return; } await 
processMessageWithPipeline({ - message, - token, - account, - config, - runtime, - core, + ...params, text, mediaPath: undefined, mediaType: undefined, - statusSink, - fetcher, }); } -async function handleImageMessage( - message: ZaloMessage, - token: string, - account: ResolvedZaloAccount, - config: OpenClawConfig, - runtime: ZaloRuntimeEnv, - core: ZaloCoreRuntime, - mediaMaxMb: number, - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, - fetcher?: ZaloFetch, -): Promise { +async function handleImageMessage(params: ZaloImageMessageParams): Promise { + const { message, mediaMaxMb, account, core, runtime } = params; const { photo, caption } = message; let mediaPath: string | undefined; @@ -325,33 +313,14 @@ async function handleImageMessage( } await processMessageWithPipeline({ - message, - token, - account, - config, - runtime, - core, + ...params, text: caption, mediaPath, mediaType, - statusSink, - fetcher, }); } -async function processMessageWithPipeline(params: { - message: ZaloMessage; - token: string; - account: ResolvedZaloAccount; - config: OpenClawConfig; - runtime: ZaloRuntimeEnv; - core: ZaloCoreRuntime; - text?: string; - mediaPath?: string; - mediaType?: string; - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; - fetcher?: ZaloFetch; -}): Promise { +async function processMessageWithPipeline(params: ZaloMessagePipelineParams): Promise { const { message, token, @@ -609,7 +578,7 @@ async function deliverZaloReply(params: { core: ZaloCoreRuntime; config: OpenClawConfig; accountId?: string; - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; + statusSink?: ZaloStatusSink; fetcher?: ZaloFetch; tableMode?: MarkdownTableMode; }): Promise { diff --git a/extensions/zalo/src/send.ts b/extensions/zalo/src/send.ts index 44f1549067a..e38427fcb14 100644 --- a/extensions/zalo/src/send.ts +++ b/extensions/zalo/src/send.ts @@ -21,6 +21,28 @@ export type ZaloSendResult 
= { error?: string; }; +function toZaloSendResult(response: { + ok?: boolean; + result?: { message_id?: string }; +}): ZaloSendResult { + if (response.ok && response.result) { + return { ok: true, messageId: response.result.message_id }; + } + return { ok: false, error: "Failed to send message" }; +} + +async function runZaloSend( + failureMessage: string, + send: () => Promise<{ ok?: boolean; result?: { message_id?: string } }>, +): Promise { + try { + const result = toZaloSendResult(await send()); + return result.ok ? result : { ok: false, error: failureMessage }; + } catch (err) { + return { ok: false, error: err instanceof Error ? err.message : String(err) }; + } +} + function resolveSendContext(options: ZaloSendOptions): { token: string; fetcher?: ZaloFetch; @@ -55,15 +77,30 @@ function resolveValidatedSendContext( return { ok: true, chatId: trimmedChatId, token, fetcher }; } +function resolveSendContextOrFailure( + chatId: string, + options: ZaloSendOptions, +): + | { context: { chatId: string; token: string; fetcher?: ZaloFetch } } + | { failure: ZaloSendResult } { + const context = resolveValidatedSendContext(chatId, options); + return context.ok + ? 
{ context } + : { + failure: { ok: false, error: context.error }, + }; +} + export async function sendMessageZalo( chatId: string, text: string, options: ZaloSendOptions = {}, ): Promise { - const context = resolveValidatedSendContext(chatId, options); - if (!context.ok) { - return { ok: false, error: context.error }; + const resolved = resolveSendContextOrFailure(chatId, options); + if ("failure" in resolved) { + return resolved.failure; } + const { context } = resolved; if (options.mediaUrl) { return sendPhotoZalo(context.chatId, options.mediaUrl, { @@ -73,24 +110,16 @@ export async function sendMessageZalo( }); } - try { - const response = await sendMessage( + return await runZaloSend("Failed to send message", () => + sendMessage( context.token, { chat_id: context.chatId, text: text.slice(0, 2000), }, context.fetcher, - ); - - if (response.ok && response.result) { - return { ok: true, messageId: response.result.message_id }; - } - - return { ok: false, error: "Failed to send message" }; - } catch (err) { - return { ok: false, error: err instanceof Error ? 
err.message : String(err) }; - } + ), + ); } export async function sendPhotoZalo( @@ -98,17 +127,18 @@ export async function sendPhotoZalo( photoUrl: string, options: ZaloSendOptions = {}, ): Promise { - const context = resolveValidatedSendContext(chatId, options); - if (!context.ok) { - return { ok: false, error: context.error }; + const resolved = resolveSendContextOrFailure(chatId, options); + if ("failure" in resolved) { + return resolved.failure; } + const { context } = resolved; if (!photoUrl?.trim()) { return { ok: false, error: "No photo URL provided" }; } - try { - const response = await sendPhoto( + return await runZaloSend("Failed to send photo", () => + sendPhoto( context.token, { chat_id: context.chatId, @@ -116,14 +146,6 @@ export async function sendPhotoZalo( caption: options.caption?.slice(0, 2000), }, context.fetcher, - ); - - if (response.ok && response.result) { - return { ok: true, messageId: response.result.message_id }; - } - - return { ok: false, error: "Failed to send photo" }; - } catch (err) { - return { ok: false, error: err instanceof Error ? 
err.message : String(err) }; - } + ), + ); } diff --git a/extensions/zalo/src/status-issues.test.ts b/extensions/zalo/src/status-issues.test.ts new file mode 100644 index 00000000000..581a0dfe916 --- /dev/null +++ b/extensions/zalo/src/status-issues.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { expectOpenDmPolicyConfigIssue } from "../../test-utils/status-issues.js"; +import { collectZaloStatusIssues } from "./status-issues.js"; + +describe("collectZaloStatusIssues", () => { + it("warns when dmPolicy is open", () => { + expectOpenDmPolicyConfigIssue({ + collectIssues: collectZaloStatusIssues, + account: { + accountId: "default", + enabled: true, + configured: true, + dmPolicy: "open", + }, + }); + }); + + it("skips unconfigured accounts", () => { + const issues = collectZaloStatusIssues([ + { + accountId: "default", + enabled: true, + configured: false, + dmPolicy: "open", + }, + ]); + expect(issues).toHaveLength(0); + }); +}); diff --git a/extensions/zalo/src/status-issues.ts b/extensions/zalo/src/status-issues.ts index cf6b3a3a384..c19992a64ee 100644 --- a/extensions/zalo/src/status-issues.ts +++ b/extensions/zalo/src/status-issues.ts @@ -1,38 +1,16 @@ import type { ChannelAccountSnapshot, ChannelStatusIssue } from "openclaw/plugin-sdk/zalo"; +import { coerceStatusIssueAccountId, readStatusIssueFields } from "../../shared/status-issues.js"; -type ZaloAccountStatus = { - accountId?: unknown; - enabled?: unknown; - configured?: unknown; - dmPolicy?: unknown; -}; - -const isRecord = (value: unknown): value is Record => - Boolean(value && typeof value === "object"); - -const asString = (value: unknown): string | undefined => - typeof value === "string" ? value : typeof value === "number" ? 
String(value) : undefined; - -function readZaloAccountStatus(value: ChannelAccountSnapshot): ZaloAccountStatus | null { - if (!isRecord(value)) { - return null; - } - return { - accountId: value.accountId, - enabled: value.enabled, - configured: value.configured, - dmPolicy: value.dmPolicy, - }; -} +const ZALO_STATUS_FIELDS = ["accountId", "enabled", "configured", "dmPolicy"] as const; export function collectZaloStatusIssues(accounts: ChannelAccountSnapshot[]): ChannelStatusIssue[] { const issues: ChannelStatusIssue[] = []; for (const entry of accounts) { - const account = readZaloAccountStatus(entry); + const account = readStatusIssueFields(entry, ZALO_STATUS_FIELDS); if (!account) { continue; } - const accountId = asString(account.accountId) ?? "default"; + const accountId = coerceStatusIssueAccountId(account.accountId) ?? "default"; const enabled = account.enabled !== false; const configured = account.configured === true; if (!enabled || !configured) { diff --git a/extensions/zalouser/src/accounts.test-mocks.ts b/extensions/zalouser/src/accounts.test-mocks.ts new file mode 100644 index 00000000000..0206095d0fc --- /dev/null +++ b/extensions/zalouser/src/accounts.test-mocks.ts @@ -0,0 +1,10 @@ +import { vi } from "vitest"; +import { createDefaultResolvedZalouserAccount } from "./test-helpers.js"; + +vi.mock("./accounts.js", async (importOriginal) => { + const actual = (await importOriginal()) as Record; + return { + ...actual, + resolveZalouserAccountSync: () => createDefaultResolvedZalouserAccount(), + }; +}); diff --git a/extensions/zalouser/src/accounts.ts b/extensions/zalouser/src/accounts.ts index 5ebec2d2c93..26a02ed47a0 100644 --- a/extensions/zalouser/src/accounts.ts +++ b/extensions/zalouser/src/accounts.ts @@ -43,17 +43,24 @@ function resolveProfile(config: ZalouserAccountConfig, accountId: string): strin return "default"; } -export async function resolveZalouserAccount(params: { - cfg: OpenClawConfig; - accountId?: string | null; -}): Promise { 
+function resolveZalouserAccountBase(params: { cfg: OpenClawConfig; accountId?: string | null }) { const accountId = normalizeAccountId(params.accountId); const baseEnabled = (params.cfg.channels?.zalouser as ZalouserConfig | undefined)?.enabled !== false; const merged = mergeZalouserAccountConfig(params.cfg, accountId); - const accountEnabled = merged.enabled !== false; - const enabled = baseEnabled && accountEnabled; - const profile = resolveProfile(merged, accountId); + return { + accountId, + enabled: baseEnabled && merged.enabled !== false, + merged, + profile: resolveProfile(merged, accountId), + }; +} + +export async function resolveZalouserAccount(params: { + cfg: OpenClawConfig; + accountId?: string | null; +}): Promise { + const { accountId, enabled, merged, profile } = resolveZalouserAccountBase(params); const authenticated = await checkZaloAuthenticated(profile); return { @@ -70,13 +77,7 @@ export function resolveZalouserAccountSync(params: { cfg: OpenClawConfig; accountId?: string | null; }): ResolvedZalouserAccount { - const accountId = normalizeAccountId(params.accountId); - const baseEnabled = - (params.cfg.channels?.zalouser as ZalouserConfig | undefined)?.enabled !== false; - const merged = mergeZalouserAccountConfig(params.cfg, accountId); - const accountEnabled = merged.enabled !== false; - const enabled = baseEnabled && accountEnabled; - const profile = resolveProfile(merged, accountId); + const { accountId, enabled, merged, profile } = resolveZalouserAccountBase(params); return { accountId, diff --git a/extensions/zalouser/src/channel.directory.test.ts b/extensions/zalouser/src/channel.directory.test.ts index f8c13b208e4..1736118bc0e 100644 --- a/extensions/zalouser/src/channel.directory.test.ts +++ b/extensions/zalouser/src/channel.directory.test.ts @@ -1,5 +1,6 @@ -import type { RuntimeEnv } from "openclaw/plugin-sdk/zalouser"; import { describe, expect, it, vi } from "vitest"; +import "./accounts.test-mocks.js"; +import { 
createZalouserRuntimeEnv } from "./test-helpers.js"; const listZaloGroupMembersMock = vi.hoisted(() => vi.fn(async () => [])); @@ -11,30 +12,9 @@ vi.mock("./zalo-js.js", async (importOriginal) => { }; }); -vi.mock("./accounts.js", async (importOriginal) => { - const actual = (await importOriginal()) as Record; - return { - ...actual, - resolveZalouserAccountSync: () => ({ - accountId: "default", - profile: "default", - name: "test", - enabled: true, - authenticated: true, - config: {}, - }), - }; -}); - import { zalouserPlugin } from "./channel.js"; -const runtimeStub: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: ((code: number): never => { - throw new Error(`exit ${code}`); - }) as RuntimeEnv["exit"], -}; +const runtimeStub = createZalouserRuntimeEnv(); describe("zalouser directory group members", () => { it("accepts prefixed group ids from directory groups list output", async () => { diff --git a/extensions/zalouser/src/channel.sendpayload.test.ts b/extensions/zalouser/src/channel.sendpayload.test.ts index d388773e2e6..27a8adf2c0d 100644 --- a/extensions/zalouser/src/channel.sendpayload.test.ts +++ b/extensions/zalouser/src/channel.sendpayload.test.ts @@ -1,5 +1,6 @@ import type { ReplyPayload } from "openclaw/plugin-sdk/zalouser"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import "./accounts.test-mocks.js"; import { installSendPayloadContractSuite, primeSendMock, @@ -12,20 +13,6 @@ vi.mock("./send.js", () => ({ sendReactionZalouser: vi.fn().mockResolvedValue({ ok: true }), })); -vi.mock("./accounts.js", async (importOriginal) => { - const actual = (await importOriginal()) as Record; - return { - ...actual, - resolveZalouserAccountSync: () => ({ - accountId: "default", - profile: "default", - name: "test", - enabled: true, - config: {}, - }), - }; -}); - function baseCtx(payload: ReplyPayload) { return { cfg: {}, diff --git a/extensions/zalouser/src/channel.test.ts b/extensions/zalouser/src/channel.test.ts index 
f54539ed809..321df502b38 100644 --- a/extensions/zalouser/src/channel.test.ts +++ b/extensions/zalouser/src/channel.test.ts @@ -15,6 +15,33 @@ vi.mock("./send.js", async (importOriginal) => { const mockSendMessage = vi.mocked(sendMessageZalouser); const mockSendReaction = vi.mocked(sendReactionZalouser); +function getResolveToolPolicy() { + const resolveToolPolicy = zalouserPlugin.groups?.resolveToolPolicy; + expect(resolveToolPolicy).toBeTypeOf("function"); + if (!resolveToolPolicy) { + throw new Error("resolveToolPolicy unavailable"); + } + return resolveToolPolicy; +} + +function resolveGroupToolPolicy( + groups: Record, + groupId: string, +) { + return getResolveToolPolicy()({ + cfg: { + channels: { + zalouser: { + groups, + }, + }, + }, + accountId: "default", + groupId, + groupChannel: groupId, + }); +} + describe("zalouser outbound", () => { beforeEach(() => { mockSendMessage.mockClear(); @@ -93,48 +120,12 @@ describe("zalouser channel policies", () => { }); it("resolves group tool policy by explicit group id", () => { - const resolveToolPolicy = zalouserPlugin.groups?.resolveToolPolicy; - expect(resolveToolPolicy).toBeTypeOf("function"); - if (!resolveToolPolicy) { - return; - } - const policy = resolveToolPolicy({ - cfg: { - channels: { - zalouser: { - groups: { - "123": { tools: { allow: ["search"] } }, - }, - }, - }, - }, - accountId: "default", - groupId: "123", - groupChannel: "123", - }); + const policy = resolveGroupToolPolicy({ "123": { tools: { allow: ["search"] } } }, "123"); expect(policy).toEqual({ allow: ["search"] }); }); it("falls back to wildcard group policy", () => { - const resolveToolPolicy = zalouserPlugin.groups?.resolveToolPolicy; - expect(resolveToolPolicy).toBeTypeOf("function"); - if (!resolveToolPolicy) { - return; - } - const policy = resolveToolPolicy({ - cfg: { - channels: { - zalouser: { - groups: { - "*": { tools: { deny: ["system.run"] } }, - }, - }, - }, - }, - accountId: "default", - groupId: "missing", - groupChannel: 
"missing", - }); + const policy = resolveGroupToolPolicy({ "*": { tools: { deny: ["system.run"] } } }, "missing"); expect(policy).toEqual({ deny: ["system.run"] }); }); diff --git a/extensions/zalouser/src/channel.ts b/extensions/zalouser/src/channel.ts index d2f7a714537..81fce5e3ab9 100644 --- a/extensions/zalouser/src/channel.ts +++ b/extensions/zalouser/src/channel.ts @@ -29,6 +29,7 @@ import { sendPayloadWithChunkedTextAndMedia, setAccountEnabledInConfigSection, } from "openclaw/plugin-sdk/zalouser"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { listZalouserAccountIds, resolveDefaultZalouserAccountId, @@ -652,15 +653,7 @@ export const zalouserPlugin: ChannelPlugin = { lastError: null, }, collectStatusIssues: collectZalouserStatusIssues, - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? 
null, - }), + buildChannelSummary: ({ snapshot }) => buildPassiveProbedChannelStatusSummary(snapshot), probeAccount: async ({ account, timeoutMs }) => probeZalouser(account.profile, timeoutMs), buildAccountSnapshot: async ({ account, runtime }) => { const configured = await checkZcaAuthenticated(account.profile); diff --git a/extensions/zalouser/src/monitor.account-scope.test.ts b/extensions/zalouser/src/monitor.account-scope.test.ts index 919bd25887c..ff8884282ac 100644 --- a/extensions/zalouser/src/monitor.account-scope.test.ts +++ b/extensions/zalouser/src/monitor.account-scope.test.ts @@ -4,6 +4,7 @@ import "./monitor.send-mocks.js"; import { __testing } from "./monitor.js"; import { sendMessageZalouserMock } from "./monitor.send-mocks.js"; import { setZalouserRuntime } from "./runtime.js"; +import { createZalouserRuntimeEnv } from "./test-helpers.js"; import type { ResolvedZalouserAccount, ZaloInboundMessage } from "./types.js"; describe("zalouser monitor pairing account scoping", () => { @@ -80,19 +81,11 @@ describe("zalouser monitor pairing account scoping", () => { raw: { source: "test" }, }; - const runtime: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: ((code: number): never => { - throw new Error(`exit ${code}`); - }) as RuntimeEnv["exit"], - }; - await __testing.processMessage({ message, account, config, - runtime, + runtime: createZalouserRuntimeEnv(), }); expect(readAllowFromStore).toHaveBeenCalledWith( diff --git a/extensions/zalouser/src/monitor.group-gating.test.ts b/extensions/zalouser/src/monitor.group-gating.test.ts index f6723cad3d7..ef68d6f2529 100644 --- a/extensions/zalouser/src/monitor.group-gating.test.ts +++ b/extensions/zalouser/src/monitor.group-gating.test.ts @@ -9,6 +9,7 @@ import { sendTypingZalouserMock, } from "./monitor.send-mocks.js"; import { setZalouserRuntime } from "./runtime.js"; +import { createZalouserRuntimeEnv } from "./test-helpers.js"; import type { ResolvedZalouserAccount, ZaloInboundMessage } from 
"./types.js"; function createAccount(): ResolvedZalouserAccount { @@ -39,15 +40,7 @@ function createConfig(): OpenClawConfig { }; } -function createRuntimeEnv(): RuntimeEnv { - return { - log: vi.fn(), - error: vi.fn(), - exit: ((code: number): never => { - throw new Error(`exit ${code}`); - }) as RuntimeEnv["exit"], - }; -} +const createRuntimeEnv = () => createZalouserRuntimeEnv(); function installRuntime(params: { commandAuthorized?: boolean; @@ -187,6 +180,31 @@ function installRuntime(params: { }; } +function installGroupCommandAuthRuntime() { + return installRuntime({ + resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => + useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), + }); +} + +async function processGroupControlCommand(params: { + account: ResolvedZalouserAccount; + content?: string; + commandContent?: string; +}) { + await __testing.processMessage({ + message: createGroupMessage({ + content: params.content ?? "/new", + commandContent: params.commandContent ?? "/new", + hasAnyMention: true, + wasExplicitlyMentioned: true, + }), + account: params.account, + config: createConfig(), + runtime: createRuntimeEnv(), + }); +} + function createGroupMessage(overrides: Partial = {}): ZaloInboundMessage { return { threadId: "g-1", @@ -229,57 +247,152 @@ describe("zalouser monitor group mention gating", () => { sendSeenZalouserMock.mockClear(); }); - it("skips unmentioned group messages when requireMention=true", async () => { + async function processMessageWithDefaults(params: { + message: ZaloInboundMessage; + account?: ResolvedZalouserAccount; + historyState?: { + historyLimit: number; + groupHistories: Map< + string, + Array<{ sender: string; body: string; timestamp?: number; messageId?: string }> + >; + }; + }) { + await __testing.processMessage({ + message: params.message, + account: params.account ?? 
createAccount(), + config: createConfig(), + runtime: createZalouserRuntimeEnv(), + historyState: params.historyState, + }); + } + + async function expectSkippedGroupMessage(message?: Partial) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ - message: createGroupMessage(), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + await processMessageWithDefaults({ + message: createGroupMessage(message), }); - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); + } - it("fails closed when requireMention=true but mention detection is unavailable", async () => { + async function expectGroupCommandAuthorizers(params: { + accountConfig: ResolvedZalouserAccount["config"]; + expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>; + }) { + const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = + installGroupCommandAuthRuntime(); + await processGroupControlCommand({ + account: { + ...createAccount(), + config: params.accountConfig, + }, + }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; + expect(authCall?.authorizers).toEqual(params.expectedAuthorizers); + } + + async function processOpenDmMessage(params?: { + message?: Partial; + readSessionUpdatedAt?: (input?: { + storePath: string; + sessionKey: string; + }) => number | undefined; + }) { + const runtime = installRuntime({ + commandAuthorized: false, + }); + if (params?.readSessionUpdatedAt) { + runtime.readSessionUpdatedAt.mockImplementation(params.readSessionUpdatedAt); + } + const account = createAccount(); + await processMessageWithDefaults({ + message: createDmMessage(params?.message), + account: { + ...account, + config: { + ...account.config, + dmPolicy: 
"open", + }, + }, + }); + return runtime; + } + + async function expectDangerousNameMatching(params: { + dangerouslyAllowNameMatching?: boolean; + expectedDispatches: number; + }) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ - message: createGroupMessage({ - canResolveExplicitMention: false, - hasAnyMention: false, - wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); - expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); - - it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); - await __testing.processMessage({ + await processMessageWithDefaults({ message: createGroupMessage({ + threadId: "g-attacker-001", + groupName: "Trusted Team", + senderId: "666", hasAnyMention: true, wasExplicitlyMentioned: true, content: "ping @bot", }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + account: { + ...createAccount(), + config: { + ...createAccount().config, + ...(params.dangerouslyAllowNameMatching ? 
{ dangerouslyAllowNameMatching: true } : {}), + groupPolicy: "allowlist", + groupAllowFrom: ["*"], + groups: { + "group:g-trusted-001": { allow: true }, + "Trusted Team": { allow: true }, + }, + }, + }, }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes( + params.expectedDispatches, + ); + return dispatchReplyWithBufferedBlockDispatcher; + } + async function dispatchGroupMessage(params: { + commandAuthorized: boolean; + message: Partial; + }) { + const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + commandAuthorized: params.commandAuthorized, + }); + await processMessageWithDefaults({ + message: createGroupMessage(params.message), + }); expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + } + + it("skips unmentioned group messages when requireMention=true", async () => { + await expectSkippedGroupMessage(); + }); + + it("fails closed when requireMention=true but mention detection is unavailable", async () => { + await expectSkippedGroupMessage({ + canResolveExplicitMention: false, + hasAnyMention: false, + wasExplicitlyMentioned: false, + }); + }); + + it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { + const callArg = await dispatchGroupMessage({ + commandAuthorized: false, + message: { + hasAnyMention: true, + wasExplicitlyMentioned: true, + content: "ping @bot", + }, + }); expect(callArg?.ctx?.WasMentioned).toBe(true); expect(callArg?.ctx?.To).toBe("zalouser:group:g-1"); expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1"); @@ -290,22 +403,14 @@ describe("zalouser monitor group mention gating", () => { }); it("allows authorized control commands to bypass mention gating", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await 
dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "/status", hasAnyMention: false, wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.WasMentioned).toBe(true); }); @@ -346,57 +451,30 @@ describe("zalouser monitor group mention gating", () => { }); it("uses commandContent for mention-prefixed control commands", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "@Bot /new", commandContent: "/new", hasAnyMention: true, wasExplicitlyMentioned: true, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.CommandBody).toBe("/new"); expect(callArg?.ctx?.BodyForCommands).toBe("/new"); }); it("allows group control commands when only allowFrom is configured", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: 
["123"], - }, + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["123"], }, - config: createConfig(), - runtime: createRuntimeEnv(), + expectedAuthorizers: [ + { configured: true, allowed: true }, + { configured: true, allowed: true }, + ], }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, allowed: true }, - { configured: true, allowed: true }, - ]); }); it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => { @@ -425,123 +503,35 @@ describe("zalouser monitor group mention gating", () => { }); it("does not accept a different group id by matching only the mutable group name by default", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); - await __testing.processMessage({ - message: createGroupMessage({ - threadId: "g-attacker-001", - groupName: "Trusted Team", - senderId: "666", - hasAnyMention: true, - wasExplicitlyMentioned: true, - content: "ping @bot", - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - groupPolicy: "allowlist", - groupAllowFrom: ["*"], - groups: { - "group:g-trusted-001": { allow: true }, - "Trusted Team": { allow: true }, - }, - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); + await expectDangerousNameMatching({ expectedDispatches: 0 }); }); it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, + const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({ + dangerouslyAllowNameMatching: true, + expectedDispatches: 1, 
}); - await __testing.processMessage({ - message: createGroupMessage({ - threadId: "g-attacker-001", - groupName: "Trusted Team", - senderId: "666", - hasAnyMention: true, - wasExplicitlyMentioned: true, - content: "ping @bot", - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - dangerouslyAllowNameMatching: true, - groupPolicy: "allowlist", - groupAllowFrom: ["*"], - groups: { - "group:g-trusted-001": { allow: true }, - "Trusted Team": { allow: true }, - }, - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001"); }); it("allows group control commands when sender is in groupAllowFrom", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: ["999"], - groupAllowFrom: ["123"], - }, + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["999"], + groupAllowFrom: ["123"], }, - config: createConfig(), - runtime: createRuntimeEnv(), + expectedAuthorizers: [ + { configured: true, allowed: false }, + { configured: true, allowed: true }, + ], }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, 
allowed: false }, - { configured: true, allowed: true }, - ]); }); it("routes DM messages with direct peer kind", async () => { const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } = - installRuntime({ - commandAuthorized: false, - }); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); + await processOpenDmMessage(); expect(resolveAgentRoute).toHaveBeenCalledWith( expect.objectContaining({ @@ -559,24 +549,9 @@ describe("zalouser monitor group mention gating", () => { }); it("reuses the legacy DM session key when only the old group-shaped session exists", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({ - commandAuthorized: false, - }); - readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) => - input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined, - ); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), + const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({ + readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) => + input?.sessionKey === "agent:main:zalouser:group:321" ? 
123 : undefined, }); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; diff --git a/extensions/zalouser/src/monitor.ts b/extensions/zalouser/src/monitor.ts index 3ba7e80d2b9..2bfa1be8aa4 100644 --- a/extensions/zalouser/src/monitor.ts +++ b/extensions/zalouser/src/monitor.ts @@ -31,6 +31,7 @@ import { summarizeMapping, warnMissingProviderGroupPolicyFallbackOnce, } from "openclaw/plugin-sdk/zalouser"; +import { createDeferred } from "../../shared/deferred.js"; import { buildZalouserGroupCandidates, findZalouserGroupEntry, @@ -129,16 +130,6 @@ function resolveInboundQueueKey(message: ZaloInboundMessage): string { return `direct:${senderId || threadId}`; } -function createDeferred() { - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - return { promise, resolve, reject }; -} - function resolveZalouserDmSessionScope(config: OpenClawConfig) { const configured = config.session?.dmScope; return configured === "main" || !configured ? 
"per-channel-peer" : configured; diff --git a/extensions/zalouser/src/status-issues.test.ts b/extensions/zalouser/src/status-issues.test.ts index 73f7277b2b9..c1e142c88e8 100644 --- a/extensions/zalouser/src/status-issues.test.ts +++ b/extensions/zalouser/src/status-issues.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it } from "vitest"; +import { expectOpenDmPolicyConfigIssue } from "../../test-utils/status-issues.js"; import { collectZalouserStatusIssues } from "./status-issues.js"; describe("collectZalouserStatusIssues", () => { @@ -17,16 +18,15 @@ describe("collectZalouserStatusIssues", () => { }); it("warns when dmPolicy is open", () => { - const issues = collectZalouserStatusIssues([ - { + expectOpenDmPolicyConfigIssue({ + collectIssues: collectZalouserStatusIssues, + account: { accountId: "default", enabled: true, configured: true, dmPolicy: "open", }, - ]); - expect(issues).toHaveLength(1); - expect(issues[0]?.kind).toBe("config"); + }); }); it("skips disabled accounts", () => { diff --git a/extensions/zalouser/src/status-issues.ts b/extensions/zalouser/src/status-issues.ts index fca889a5115..b42c915e00a 100644 --- a/extensions/zalouser/src/status-issues.ts +++ b/extensions/zalouser/src/status-issues.ts @@ -1,42 +1,24 @@ import type { ChannelAccountSnapshot, ChannelStatusIssue } from "openclaw/plugin-sdk/zalouser"; +import { coerceStatusIssueAccountId, readStatusIssueFields } from "../../shared/status-issues.js"; -type ZalouserAccountStatus = { - accountId?: unknown; - enabled?: unknown; - configured?: unknown; - dmPolicy?: unknown; - lastError?: unknown; -}; - -const isRecord = (value: unknown): value is Record => - Boolean(value && typeof value === "object"); - -const asString = (value: unknown): string | undefined => - typeof value === "string" ? value : typeof value === "number" ? 
String(value) : undefined; - -function readZalouserAccountStatus(value: ChannelAccountSnapshot): ZalouserAccountStatus | null { - if (!isRecord(value)) { - return null; - } - return { - accountId: value.accountId, - enabled: value.enabled, - configured: value.configured, - dmPolicy: value.dmPolicy, - lastError: value.lastError, - }; -} +const ZALOUSER_STATUS_FIELDS = [ + "accountId", + "enabled", + "configured", + "dmPolicy", + "lastError", +] as const; export function collectZalouserStatusIssues( accounts: ChannelAccountSnapshot[], ): ChannelStatusIssue[] { const issues: ChannelStatusIssue[] = []; for (const entry of accounts) { - const account = readZalouserAccountStatus(entry); + const account = readStatusIssueFields(entry, ZALOUSER_STATUS_FIELDS); if (!account) { continue; } - const accountId = asString(account.accountId) ?? "default"; + const accountId = coerceStatusIssueAccountId(account.accountId) ?? "default"; const enabled = account.enabled !== false; if (!enabled) { continue; diff --git a/extensions/zalouser/src/test-helpers.ts b/extensions/zalouser/src/test-helpers.ts new file mode 100644 index 00000000000..8b43e182c54 --- /dev/null +++ b/extensions/zalouser/src/test-helpers.ts @@ -0,0 +1,26 @@ +import type { RuntimeEnv } from "openclaw/plugin-sdk/zalouser"; +import type { ResolvedZalouserAccount } from "./types.js"; + +export function createZalouserRuntimeEnv(): RuntimeEnv { + return { + log: () => {}, + error: () => {}, + exit: ((code: number): never => { + throw new Error(`exit ${code}`); + }) as RuntimeEnv["exit"], + }; +} + +export function createDefaultResolvedZalouserAccount( + overrides: Partial = {}, +): ResolvedZalouserAccount { + return { + accountId: "default", + profile: "default", + name: "test", + enabled: true, + authenticated: true, + config: {}, + ...overrides, + }; +} diff --git a/package.json b/package.json index c63e72f66fa..9f8b6709063 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openclaw", - "version": 
"2026.3.13", + "version": "2026.3.13-beta.1", "description": "Multi-channel AI gateway with extensible messaging integrations", "keywords": [], "homepage": "https://github.com/openclaw/openclaw#readme", @@ -216,6 +216,7 @@ }, "scripts": { "android:assemble": "cd apps/android && ./gradlew :app:assembleDebug", + "android:bundle:release": "bun apps/android/scripts/build-release-aab.ts", "android:format": "cd apps/android && ./gradlew :app:ktlintFormat :benchmark:ktlintFormat", "android:install": "cd apps/android && ./gradlew :app:installDebug", "android:lint": "cd apps/android && ./gradlew :app:ktlintCheck :benchmark:ktlintCheck", @@ -325,6 +326,9 @@ "test:install:smoke": "bash scripts/test-install-sh-docker.sh", "test:live": "OPENCLAW_LIVE_TEST=1 CLAWDBOT_LIVE_TEST=1 vitest run --config vitest.live.config.ts", "test:macmini": "OPENCLAW_TEST_VM_FORKS=0 OPENCLAW_TEST_PROFILE=serial node scripts/test-parallel.mjs", + "test:parallels:linux": "bash scripts/e2e/parallels-linux-smoke.sh", + "test:parallels:macos": "bash scripts/e2e/parallels-macos-smoke.sh", + "test:parallels:windows": "bash scripts/e2e/parallels-windows-smoke.sh", "test:perf:budget": "node scripts/test-perf-budget.mjs", "test:perf:hotspots": "node scripts/test-hotspots.mjs", "test:sectriage": "pnpm exec vitest run --config vitest.gateway.config.ts && vitest run --config vitest.unit.config.ts --exclude src/daemon/launchd.integration.test.ts --exclude src/process/exec.test.ts", @@ -339,7 +343,7 @@ }, "dependencies": { "@agentclientprotocol/sdk": "0.16.1", - "@aws-sdk/client-bedrock": "^3.1008.0", + "@aws-sdk/client-bedrock": "^3.1009.0", "@buape/carbon": "0.0.0-beta-20260216184201", "@clack/prompts": "^1.1.0", "@discordjs/voice": "^0.19.1", @@ -349,14 +353,15 @@ "@larksuiteoapi/node-sdk": "^1.59.0", "@line/bot-sdk": "^10.6.0", "@lydell/node-pty": "1.2.0-beta.3", - "@mariozechner/pi-agent-core": "0.57.1", - "@mariozechner/pi-ai": "0.57.1", - "@mariozechner/pi-coding-agent": "0.57.1", - "@mariozechner/pi-tui": 
"0.57.1", + "@mariozechner/pi-agent-core": "0.58.0", + "@mariozechner/pi-ai": "0.58.0", + "@mariozechner/pi-coding-agent": "0.58.0", + "@mariozechner/pi-tui": "0.58.0", + "@modelcontextprotocol/sdk": "1.27.1", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", - "@slack/web-api": "^7.14.1", + "@slack/web-api": "^7.15.0", "@whiskeysockets/baileys": "7.0.0-rc.9", "ajv": "^8.18.0", "chalk": "^5.6.2", @@ -367,7 +372,7 @@ "discord-api-types": "^0.38.42", "dotenv": "^17.3.1", "express": "^5.2.1", - "file-type": "^21.3.1", + "file-type": "^21.3.2", "grammy": "^1.41.1", "hono": "4.12.7", "https-proxy-agent": "^8.0.0", @@ -388,7 +393,7 @@ "sqlite-vec": "0.1.7-alpha.2", "tar": "7.5.11", "tslog": "^4.10.2", - "undici": "^7.24.0", + "undici": "^7.24.1", "ws": "^8.19.0", "yaml": "^2.8.2", "zod": "^4.3.6" @@ -402,7 +407,7 @@ "@types/node": "^25.5.0", "@types/qrcode-terminal": "^0.12.2", "@types/ws": "^8.18.1", - "@typescript/native-preview": "7.0.0-dev.20260312.1", + "@typescript/native-preview": "7.0.0-dev.20260313.1", "@vitest/coverage-v8": "^4.1.0", "jscpd": "4.0.8", "jsdom": "^28.1.0", @@ -437,7 +442,7 @@ "fast-xml-parser": "5.3.8", "request": "npm:@cypress/request@3.0.10", "request-promise": "npm:@cypress/request-promise@5.0.0", - "file-type": "21.3.1", + "file-type": "21.3.2", "form-data": "2.5.4", "minimatch": "10.2.4", "qs": "6.14.2", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ac32d145c57..bc3ec60b125 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -10,7 +10,7 @@ overrides: fast-xml-parser: 5.3.8 request: npm:@cypress/request@3.0.10 request-promise: npm:@cypress/request-promise@5.0.0 - file-type: 21.3.1 + file-type: 21.3.2 form-data: 2.5.4 minimatch: 10.2.4 qs: 6.14.2 @@ -29,8 +29,8 @@ importers: specifier: 0.16.1 version: 0.16.1(zod@4.3.6) '@aws-sdk/client-bedrock': - specifier: ^3.1008.0 - version: 3.1008.0 + specifier: ^3.1009.0 + version: 3.1009.0 '@buape/carbon': specifier: 0.0.0-beta-20260216184201 version: 
0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) @@ -59,17 +59,20 @@ importers: specifier: 1.2.0-beta.3 version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': - specifier: 0.57.1 - version: 0.57.1 + specifier: 0.58.0 + version: 0.58.0 + '@modelcontextprotocol/sdk': + specifier: 1.27.1 + version: 1.27.1(zod@4.3.6) '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 @@ -83,8 +86,8 @@ importers: specifier: ^4.6.0 version: 4.6.0(@types/express@5.0.6) '@slack/web-api': - specifier: ^7.14.1 - version: 7.14.1 + specifier: ^7.15.0 + version: 7.15.0 '@whiskeysockets/baileys': specifier: 7.0.0-rc.9 version: 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) @@ -116,8 +119,8 @@ importers: specifier: ^5.2.1 version: 5.2.1 file-type: - specifier: 21.3.1 - version: 21.3.1 + specifier: 21.3.2 + version: 21.3.2 grammy: specifier: ^1.41.1 version: 1.41.1 @@ -182,8 +185,8 @@ importers: specifier: ^4.10.2 version: 4.10.2 undici: - specifier: ^7.24.0 - version: 7.24.0 + specifier: ^7.24.1 + version: 7.24.1 ws: specifier: ^8.19.0 version: 8.19.0 @@ -219,8 +222,8 @@ importers: specifier: ^8.18.1 version: 8.18.1 '@typescript/native-preview': - specifier: 7.0.0-dev.20260312.1 - version: 7.0.0-dev.20260312.1 + specifier: 7.0.0-dev.20260313.1 + version: 7.0.0-dev.20260313.1 '@vitest/coverage-v8': specifier: ^4.1.0 version: 
4.1.0(@vitest/browser@4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0))(vitest@4.1.0) @@ -247,7 +250,7 @@ importers: version: 0.21.1(signal-polyfill@0.2.2) tsdown: specifier: 0.21.2 - version: 0.21.2(@typescript/native-preview@7.0.0-dev.20260312.1)(typescript@5.9.3) + version: 0.21.2(@typescript/native-preview@7.0.0-dev.20260313.1)(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -311,8 +314,8 @@ importers: extensions/diffs: dependencies: '@pierre/diffs': - specifier: 1.0.11 - version: 1.0.11(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + specifier: 1.1.0 + version: 1.1.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@sinclair/typebox': specifier: 0.34.48 version: 0.34.48 @@ -344,9 +347,10 @@ importers: google-auth-library: specifier: ^10.6.1 version: 10.6.1 + devDependencies: openclaw: - specifier: '>=2026.3.11' - version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) + specifier: workspace:* + version: link:../.. extensions/imessage: {} @@ -376,8 +380,8 @@ importers: extensions/matrix: dependencies: '@mariozechner/pi-agent-core': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@matrix-org/matrix-sdk-crypto-nodejs': specifier: ^0.4.0 version: 0.4.0 @@ -404,10 +408,10 @@ importers: version: 4.3.6 extensions/memory-core: - dependencies: + devDependencies: openclaw: - specifier: '>=2026.3.11' - version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) + specifier: workspace:* + version: link:../.. 
extensions/memory-lancedb: dependencies: @@ -418,8 +422,8 @@ importers: specifier: 0.34.48 version: 0.34.48 openai: - specifier: ^6.27.0 - version: 6.27.0(ws@8.19.0)(zod@4.3.6) + specifier: ^6.29.0 + version: 6.29.0(ws@8.19.0)(zod@4.3.6) extensions/minimax-portal-auth: {} @@ -517,8 +521,8 @@ importers: extensions/zalo: dependencies: undici: - specifier: 7.24.0 - version: 7.24.0 + specifier: 7.24.1 + version: 7.24.1 zod: specifier: ^4.3.6 version: 4.3.6 @@ -651,12 +655,8 @@ packages: resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.1007.0': - resolution: {integrity: sha512-49hH8o6ALKkCiBUgg20HkwxNamP1yYA/n8Si73Z438EqhZGpCfScP3FfxVhrfD5o+4bV4Whi9BTzPKCa/PfUww==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/client-bedrock@3.1008.0': - resolution: {integrity: sha512-mzxO/DplpZZT7AIZUCG7Q78OlaeHeDybYz+ZlWZPaXFjGDJwUv1E3SKskmaaQvTsMeieie0WX7gzueYrCx4YfQ==} + '@aws-sdk/client-bedrock@3.1009.0': + resolution: {integrity: sha512-KzLNqSg1T59sSlQvEA4EL3oDIAMidM54AB1b+UGouPFuUrrwGp2uUlZUYzIIlCvqpf7wEDh8wypqXISRItkgdg==} engines: {node: '>=20.0.0'} '@aws-sdk/client-s3@3.1000.0': @@ -667,12 +667,8 @@ packages: resolution: {integrity: sha512-AlC0oQ1/mdJ8vCIqu524j5RB7M8i8E24bbkZmya1CuiQxkY7SdIZAyw7NDNMGaNINQFq/8oGRMX0HeOfCVsl/A==} engines: {node: '>=20.0.0'} - '@aws-sdk/core@3.973.18': - resolution: {integrity: sha512-GUIlegfcK2LO1J2Y98sCJy63rQSiLiDOgVw7HiHPRqfI2vb3XozTVqemwO0VSGXp54ngCnAQz0Lf0YPCBINNxA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/core@3.973.19': - resolution: {integrity: sha512-56KePyOcZnKTWCd89oJS1G6j3HZ9Kc+bh/8+EbvtaCCXdP6T7O7NzCiPuHRhFLWnzXIaXX3CxAz0nI5My9spHQ==} + '@aws-sdk/core@3.973.20': + resolution: {integrity: sha512-i3GuX+lowD892F3IuJf8o6AbyDupMTdyTxQrCJGcn71ni5hTZ82L4nQhcdumxZ7XPJRJJVHS/CR3uYOIIs0PVA==} engines: {node: '>=20.0.0'} '@aws-sdk/crc64-nvme@3.972.3': @@ -683,116 +679,64 @@ packages: resolution: {integrity: 
sha512-6ljXKIQ22WFKyIs1jbORIkGanySBHaPPTOI4OxACP5WXgbcR0nDYfqNJfXEGwCK7IzHdNbCSFsNKKs0qCexR8Q==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-env@3.972.16': - resolution: {integrity: sha512-HrdtnadvTGAQUr18sPzGlE5El3ICphnH6SU7UQOMOWFgRKbTRNN8msTxM4emzguUso9CzaHU2xy5ctSrmK5YNA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-env@3.972.17': - resolution: {integrity: sha512-MBAMW6YELzE1SdkOniqr51mrjapQUv8JXSGxtwRjQV0mwVDutVsn22OPAUt4RcLRvdiHQmNBDEFP9iTeSVCOlA==} + '@aws-sdk/credential-provider-env@3.972.18': + resolution: {integrity: sha512-X0B8AlQY507i5DwjLByeU2Af4ARsl9Vr84koDcXCbAkplmU+1xBFWxEPrWRAoh56waBne/yJqEloSwvRf4x6XA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-http@3.972.15': resolution: {integrity: sha512-dJuSTreu/T8f24SHDNTjd7eQ4rabr0TzPh2UTCwYexQtzG3nTDKm1e5eIdhiroTMDkPEJeY+WPkA6F9wod/20A==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-http@3.972.18': - resolution: {integrity: sha512-NyB6smuZAixND5jZumkpkunQ0voc4Mwgkd+SZ6cvAzIB7gK8HV8Zd4rS8Kn5MmoGgusyNfVGG+RLoYc4yFiw+A==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-http@3.972.19': - resolution: {integrity: sha512-9EJROO8LXll5a7eUFqu48k6BChrtokbmgeMWmsH7lBb6lVbtjslUYz/ShLi+SHkYzTomiGBhmzTW7y+H4BxsnA==} + '@aws-sdk/credential-provider-http@3.972.20': + resolution: {integrity: sha512-ey9Lelj001+oOfrbKmS6R2CJAiXX7QKY4Vj9VJv6L2eE6/VjD8DocHIoYqztTm70xDLR4E1jYPTKfIui+eRNDA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-ini@3.972.13': resolution: {integrity: sha512-JKSoGb7XeabZLBJptpqoZIFbROUIS65NuQnEHGOpuT9GuuZwag2qciKANiDLFiYk4u8nSrJC9JIOnWKVvPVjeA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.17': - resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-ini@3.972.18': - resolution: {integrity: 
sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-ini@3.972.19': - resolution: {integrity: sha512-pVJVjWqVrPqjpFq7o0mCmeZu1Y0c94OCHSYgivdCD2wfmYVtBbwQErakruhgOD8pcMcx9SCqRw1pzHKR7OGBcA==} + '@aws-sdk/credential-provider-ini@3.972.20': + resolution: {integrity: sha512-5flXSnKHMloObNF+9N0cupKegnH1Z37cdVlpETVgx8/rAhCe+VNlkcZH3HDg2SDn9bI765S+rhNPXGDJJPfbtA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-login@3.972.13': resolution: {integrity: sha512-RtYcrxdnJHKY8MFQGLltCURcjuMjnaQpAxPE6+/QEdDHHItMKZgabRe/KScX737F9vJMQsmJy9EmMOkCnoC1JQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.17': - resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-login@3.972.18': - resolution: {integrity: sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-login@3.972.19': - resolution: {integrity: sha512-jOXdZ1o+CywQKr6gyxgxuUmnGwTTnY2Kxs1PM7fI6AYtDWDnmW/yKXayNqkF8KjP1unflqMWKVbVt5VgmE3L0g==} + '@aws-sdk/credential-provider-login@3.972.20': + resolution: {integrity: sha512-gEWo54nfqp2jABMu6HNsjVC4hDLpg9HC8IKSJnp0kqWtxIJYHTmiLSsIfI4ScQjxEwpB+jOOH8dOLax1+hy/Hw==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-node@3.972.14': resolution: {integrity: sha512-WqoC2aliIjQM/L3oFf6j+op/enT2i9Cc4UTxxMEKrJNECkq4/PlKE5BOjSYFcq6G9mz65EFbXJh7zOU4CvjSKQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-node@3.972.18': - resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-node@3.972.19': - resolution: {integrity: 
sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-node@3.972.20': - resolution: {integrity: sha512-0xHca2BnPY0kzjDYPH7vk8YbfdBPpWVS67rtqQMalYDQUCBYS37cZ55K6TuFxCoIyNZgSCFrVKr9PXC5BVvQQw==} + '@aws-sdk/credential-provider-node@3.972.21': + resolution: {integrity: sha512-hah8if3/B/Q+LBYN5FukyQ1Mym6PLPDsBOBsIgNEYD6wLyZg0UmUF/OKIVC3nX9XH8TfTPuITK+7N/jenVACWA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-process@3.972.13': resolution: {integrity: sha512-rsRG0LQA4VR+jnDyuqtXi2CePYSmfm5GNL9KxiW8DSe25YwJSr06W8TdUfONAC+rjsTI+aIH2rBGG5FjMeANrw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-process@3.972.16': - resolution: {integrity: sha512-n89ibATwnLEg0ZdZmUds5bq8AfBAdoYEDpqP3uzPLaRuGelsKlIvCYSNNvfgGLi8NaHPNNhs1HjJZYbqkW9b+g==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-process@3.972.17': - resolution: {integrity: sha512-c8G8wT1axpJDgaP3xzcy+q8Y1fTi9A2eIQJvyhQ9xuXrUZhlCfXbC0vM9bM1CUXiZppFQ1p7g0tuUMvil/gCPg==} + '@aws-sdk/credential-provider-process@3.972.18': + resolution: {integrity: sha512-Tpl7SRaPoOLT32jbTWchPsn52hYYgJ0kpiFgnwk8pxTANQdUymVSZkzFvv1+oOgZm1CrbQUP9MBeoMZ9IzLZjA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-sso@3.972.13': resolution: {integrity: sha512-fr0UU1wx8kNHDhTQBXioc/YviSW8iXuAxHvnH7eQUtn8F8o/FU3uu6EUMvAQgyvn7Ne5QFnC0Cj0BFlwCk+RFw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-sso@3.972.17': - resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-sso@3.972.18': - resolution: {integrity: sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-sso@3.972.19': - resolution: {integrity: 
sha512-kVjQsEU3b///q7EZGrUzol9wzwJFKbEzqJKSq82A9ShrUTEO7FNylTtby3sPV19ndADZh1H3FB3+5ZrvKtEEeg==} + '@aws-sdk/credential-provider-sso@3.972.20': + resolution: {integrity: sha512-p+R+PYR5Z7Gjqf/6pvbCnzEHcqPCpLzR7Yf127HjJ6EAb4hUcD+qsNRnuww1sB/RmSeCLxyay8FMyqREw4p1RA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-web-identity@3.972.13': resolution: {integrity: sha512-a6iFMh1pgUH0TdcouBppLJUfPM7Yd3R9S1xFodPtCRoLqCz2RQFA3qjA8x4112PVYXEd4/pHX2eihapq39w0rA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-web-identity@3.972.17': - resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-web-identity@3.972.18': - resolution: {integrity: sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/credential-provider-web-identity@3.972.19': - resolution: {integrity: sha512-BV1BlTFdG4w4tAihxN7iXDBoNcNewXD4q8uZlNQiUrnqxwGWUhKHODIQVSPlQGxXClEj+63m+cqZskw+ESmeZg==} + '@aws-sdk/credential-provider-web-identity@3.972.20': + resolution: {integrity: sha512-rWCmh8o7QY4CsUj63qopzMzkDq/yPpkrpb+CnjBEFSOg/02T/we7sSTVg4QsDiVS9uwZ8VyONhq98qt+pIh3KA==} engines: {node: '>=20.0.0'} '@aws-sdk/eventstream-handler-node@3.972.10': @@ -819,8 +763,8 @@ packages: resolution: {integrity: sha512-5XHwjPH1lHB+1q4bfC7T8Z5zZrZXfaLcjSMwTd1HPSPrCmPFMbg3UQ5vgNWcVj0xoX4HWqTGkSf2byrjlnRg5w==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-host-header@3.972.7': - resolution: {integrity: sha512-aHQZgztBFEpDU1BB00VWCIIm85JjGjQW1OG9+98BdmaOpguJvzmXBGbnAiYcciCd+IS4e9BEq664lhzGnWJHgQ==} + '@aws-sdk/middleware-host-header@3.972.8': + resolution: {integrity: sha512-wAr2REfKsqoKQ+OkNqvOShnBoh+nkPurDKW7uAeVSu6kUECnWlSJiPvnoqxGlfousEY/v9LfS9sNc46hjSYDIQ==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-location-constraint@3.972.6': @@ -831,16 +775,16 @@ packages: resolution: 
{integrity: sha512-iFnaMFMQdljAPrvsCVKYltPt2j40LQqukAbXvW7v0aL5I+1GO7bZ/W8m12WxW3gwyK5p5u1WlHg8TSAizC5cZw==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-logger@3.972.7': - resolution: {integrity: sha512-LXhiWlWb26txCU1vcI9PneESSeRp/RYY/McuM4SpdrimQR5NgwaPb4VJCadVeuGWgh6QmqZ6rAKSoL1ob16W6w==} + '@aws-sdk/middleware-logger@3.972.8': + resolution: {integrity: sha512-CWl5UCM57WUFaFi5kB7IBY1UmOeLvNZAZ2/OZ5l20ldiJ3TiIz1pC65gYj8X0BCPWkeR1E32mpsCk1L1I4n+lA==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-recursion-detection@3.972.6': resolution: {integrity: sha512-dY4v3of5EEMvik6+UDwQ96KfUFDk8m1oZDdkSc5lwi4o7rFrjnv0A+yTV+gu230iybQZnKgDLg/rt2P3H+Vscw==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-recursion-detection@3.972.7': - resolution: {integrity: sha512-l2VQdcBcYLzIzykCHtXlbpiVCZ94/xniLIkAj0jpnpjY4xlgZx7f56Ypn+uV1y3gG0tNVytJqo3K9bfMFee7SQ==} + '@aws-sdk/middleware-recursion-detection@3.972.8': + resolution: {integrity: sha512-BnnvYs2ZEpdlmZ2PNlV2ZyQ8j8AEkMTjN79y/YA475ER1ByFYrkVR85qmhni8oeTaJcDqbx364wDpitDAA/wCA==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-sdk-s3@3.972.15': @@ -855,40 +799,28 @@ packages: resolution: {integrity: sha512-ABlFVcIMmuRAwBT+8q5abAxOr7WmaINirDJBnqGY5b5jSDo00UMlg/G4a0xoAgwm6oAECeJcwkvDlxDwKf58fQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-user-agent@3.972.19': - resolution: {integrity: sha512-Km90fcXt3W/iqujHzuM6IaDkYCj73gsYufcuWXApWdzoTy6KGk8fnchAjePMARU0xegIR3K4N3yIo1vy7OVe8A==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/middleware-user-agent@3.972.20': - resolution: {integrity: sha512-3kNTLtpUdeahxtnJRnj/oIdLAUdzTfr9N40KtxNhtdrq+Q1RPMdCJINRXq37m4t5+r3H70wgC3opW46OzFcZYA==} + '@aws-sdk/middleware-user-agent@3.972.21': + resolution: {integrity: sha512-62XRl1GDYPpkt7cx1AX1SPy9wgNE9Iw/NPuurJu4lmhCWS7sGKO+kS53TQ8eRmIxy3skmvNInnk0ZbWrU5Dpyg==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-websocket@3.972.12': resolution: {integrity: 
sha512-iyPP6FVDKe/5wy5ojC0akpDFG1vX3FeCUU47JuwN8xfvT66xlEI8qUJZPtN55TJVFzzWZJpWL78eqUE31md08Q==} engines: {node: '>= 14.0.0'} + '@aws-sdk/nested-clients@3.996.10': + resolution: {integrity: sha512-SlDol5Z+C7Ivnc2rKGqiqfSUmUZzY1qHfVs9myt/nxVwswgfpjdKahyTzLTx802Zfq0NFRs7AejwKzzzl5Co2w==} + engines: {node: '>=20.0.0'} + '@aws-sdk/nested-clients@3.996.3': resolution: {integrity: sha512-AU5TY1V29xqwg/MxmA2odwysTez+ccFAhmfRJk+QZT5HNv90UTA9qKd1J9THlsQkvmH7HWTEV1lDNxkQO5PzNw==} engines: {node: '>=20.0.0'} - '@aws-sdk/nested-clients@3.996.7': - resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/nested-clients@3.996.8': - resolution: {integrity: sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/nested-clients@3.996.9': - resolution: {integrity: sha512-+RpVtpmQbbtzFOKhMlsRcXM/3f1Z49qTOHaA8gEpHOYruERmog6f2AUtf/oTRLCWjR9H2b3roqryV/hI7QMW8w==} - engines: {node: '>=20.0.0'} - '@aws-sdk/region-config-resolver@3.972.6': resolution: {integrity: sha512-Aa5PusHLXAqLTX1UKDvI3pHQJtIsF7Q+3turCHqfz/1F61/zDMWfbTC8evjhrrYVAtz9Vsv3SJ/waSUeu7B6gw==} engines: {node: '>=20.0.0'} - '@aws-sdk/region-config-resolver@3.972.7': - resolution: {integrity: sha512-/Ev/6AI8bvt4HAAptzSjThGUMjcWaX3GX8oERkB0F0F9x2dLSBdgFDiyrRz3i0u0ZFZFQ1b28is4QhyqXTUsVA==} + '@aws-sdk/region-config-resolver@3.972.8': + resolution: {integrity: sha512-1eD4uhTDeambO/PNIDVG19A6+v4NdD7xzwLHDutHsUqz0B+i661MwQB2eYO4/crcCvCiQG4SRm1k81k54FEIvw==} engines: {node: '>=20.0.0'} '@aws-sdk/s3-request-presigner@3.1000.0': @@ -903,16 +835,8 @@ packages: resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==} engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.1005.0': - resolution: {integrity: 
sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.1007.0': - resolution: {integrity: sha512-kKvVyr53vvVc5k6RbvI6jhafxufxO2SkEw8QeEzJqwOXH/IMY7Cm0IyhnBGdqj80iiIIiIM2jGe7Fn3TIdwdrw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.1008.0': - resolution: {integrity: sha512-TulwlHQBWcJs668kNUDMZHN51DeLrDsYT59Ux4a/nbvr025gM6HjKJJ3LvnZccam7OS/ZKUVkWomCneRQKJbBg==} + '@aws-sdk/token-providers@3.1009.0': + resolution: {integrity: sha512-KCPLuTqN9u0Rr38Arln78fRG9KXpzsPWmof+PZzfAHMMQq2QED6YjQrkrfiH7PDefLWEposY1o4/eGwrmKA4JA==} engines: {node: '>=20.0.0'} '@aws-sdk/token-providers@3.999.0': @@ -927,6 +851,10 @@ packages: resolution: {integrity: sha512-hl7BGwDCWsjH8NkZfx+HgS7H2LyM2lTMAI7ba9c8O0KqdBLTdNJivsHpqjg9rNlAlPyREb6DeDRXUl0s8uFdmQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/types@3.973.6': + resolution: {integrity: sha512-Atfcy4E++beKtwJHiDln2Nby8W/mam64opFPTiHEqgsthqeydFS1pY+OUlN1ouNOmf8ArPU/6cDS65anOP3KQw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-arn-parser@3.972.2': resolution: {integrity: sha512-VkykWbqMjlSgBFDyrY3nOSqupMc6ivXuGmvci6Q3NnLq5kC+mKQe2QBZ4nrWRE/jqOxeFP2uYzLtwncYYcvQDg==} engines: {node: '>=20.0.0'} @@ -935,8 +863,8 @@ packages: resolution: {integrity: sha512-yWIQSNiCjykLL+ezN5A+DfBb1gfXTytBxm57e64lYmwxDHNmInYHRJYYRAGWG1o77vKEiWaw4ui28e3yb1k5aQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/util-endpoints@3.996.4': - resolution: {integrity: sha512-Hek90FBmd4joCFj+Vc98KLJh73Zqj3s2W56gjAcTkrNLMDI5nIFkG9YpfcJiVI1YlE2Ne1uOQNe+IgQ/Vz2XRA==} + '@aws-sdk/util-endpoints@3.996.5': + resolution: {integrity: sha512-Uh93L5sXFNbyR5sEPMzUU8tJ++Ku97EY4udmC01nB8Zu+xfBPwpIwJ6F7snqQeq8h2pf+8SGN5/NoytfKgYPIw==} engines: {node: '>=20.0.0'} '@aws-sdk/util-format-url@3.972.6': @@ -958,8 +886,8 @@ packages: '@aws-sdk/util-user-agent-browser@3.972.6': resolution: {integrity: 
sha512-Fwr/llD6GOrFgQnKaI2glhohdGuBDfHfora6iG9qsBBBR8xv1SdCSwbtf5CWlUdCw5X7g76G/9Hf0Inh0EmoxA==} - '@aws-sdk/util-user-agent-browser@3.972.7': - resolution: {integrity: sha512-7SJVuvhKhMF/BkNS1n0QAJYgvEwYbK2QLKBrzDiwQGiTRU6Yf1f3nehTzm/l21xdAOtWSfp2uWSddPnP2ZtsVw==} + '@aws-sdk/util-user-agent-browser@3.972.8': + resolution: {integrity: sha512-B3KGXJviV2u6Cdw2SDY2aDhoJkVfY/Q/Trwk2CMSkikE1Oi6gRzxhvhIfiRpHfmIsAhV4EA54TVEX8K6CbHbkA==} '@aws-sdk/util-user-agent-node@3.973.0': resolution: {integrity: sha512-A9J2G4Nf236e9GpaC1JnA8wRn6u6GjnOXiTwBLA6NUJhlBTIGfrTy+K1IazmF8y+4OFdW3O5TZlhyspJMqiqjA==} @@ -970,8 +898,8 @@ packages: aws-crt: optional: true - '@aws-sdk/util-user-agent-node@3.973.4': - resolution: {integrity: sha512-uqKeLqZ9D3nQjH7HGIERNXK9qnSpUK08l4MlJ5/NZqSSdeJsVANYp437EM9sEzwU28c2xfj2V6qlkqzsgtKs6Q==} + '@aws-sdk/util-user-agent-node@3.973.7': + resolution: {integrity: sha512-Hz6EZMUAEzqUd7e+vZ9LE7mn+5gMbxltXy18v+YSFY+9LBJz15wkNZvw5JqfX3z0FS9n3bgUtz3L5rAsfh4YlA==} engines: {node: '>=20.0.0'} peerDependencies: aws-crt: '>=1.0.0' @@ -979,26 +907,8 @@ packages: aws-crt: optional: true - '@aws-sdk/util-user-agent-node@3.973.5': - resolution: {integrity: sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==} - engines: {node: '>=20.0.0'} - peerDependencies: - aws-crt: '>=1.0.0' - peerDependenciesMeta: - aws-crt: - optional: true - - '@aws-sdk/util-user-agent-node@3.973.6': - resolution: {integrity: sha512-iF7G0prk7AvmOK64FcLvc/fW+Ty1H+vttajL7PvJFReU8urMxfYmynTTuFKDTA76Wgpq3FzTPKwabMQIXQHiXQ==} - engines: {node: '>=20.0.0'} - peerDependencies: - aws-crt: '>=1.0.0' - peerDependenciesMeta: - aws-crt: - optional: true - - '@aws-sdk/xml-builder@3.972.10': - resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} + '@aws-sdk/xml-builder@3.972.11': + resolution: {integrity: 
sha512-iitV/gZKQMvY9d7ovmyFnFuTHbBAtrmLnvaSb/3X8vOKyevwtpmEtyc8AdhVWZe0pI/1GsHxlEvQeOePFzy7KQ==} engines: {node: '>=20.0.0'} '@aws-sdk/xml-builder@3.972.8': @@ -1795,22 +1705,22 @@ packages: resolution: {integrity: sha512-faGUlTcXka5l7rv0lP3K3vGW/ejRuOS24RR2aSFWREUQqzjgdsuWNo/IiPqL3kWRGt6Ahl2+qcDAwtdeWeuGUw==} hasBin: true - '@mariozechner/pi-agent-core@0.57.1': - resolution: {integrity: sha512-WXsBbkNWOObFGHkhixaT8GXJpHDd3+fn8QntYF+4R8Sa9WB90ENXWidO6b7vcKX+JX0jjO5dIsQxmzosARJKlg==} + '@mariozechner/pi-agent-core@0.58.0': + resolution: {integrity: sha512-zhkwx3Wdo27snVfnJWi7l+wyU4XlazkeunTtz4e500GC+ufGOp4C3aIf0XiO5ZOtTE/0lvUiG2bWULR/i4lgUQ==} engines: {node: '>=20.0.0'} - '@mariozechner/pi-ai@0.57.1': - resolution: {integrity: sha512-Bd/J4a3YpdzJVyHLih0vDSdB0QPL4ti0XsAwtHOK/8eVhB0fHM1CpcgIrcBFJ23TMcKXMi0qamz18ERfp8tmgg==} + '@mariozechner/pi-ai@0.58.0': + resolution: {integrity: sha512-3TrkJ9QcBYFPo4NxYluhd+JQ4M+98RaEkNPMrLFU4wK4GMFVtsL3kp1YJ/oj7X0eqKuuDKbHj6MdoMZeT2TCvA==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-coding-agent@0.57.1': - resolution: {integrity: sha512-u5MQEduj68rwVIsRsqrWkJYiJCyPph/a6bMoJAQKo1sb+Pc17Y/ojwa+wGssnUMjEB38AQKofWTVe8NFEpSWNw==} + '@mariozechner/pi-coding-agent@0.58.0': + resolution: {integrity: sha512-aCoqIMfcFWwuZrLC4MC1EnHwUrqo+ppamXlNYk5+nANH8U+51AP8OUqOUqT9NSHO9ZdItheU9wCqt7wPf5Ah8A==} engines: {node: '>=20.6.0'} hasBin: true - '@mariozechner/pi-tui@0.57.1': - resolution: {integrity: sha512-cjoRghLbeAHV0tTJeHgZXaryUi5zzBZofeZ7uJun1gztnckLLRjoVeaPTujNlc5BIfyKvFqhh1QWCZng/MXlpg==} + '@mariozechner/pi-tui@0.58.0': + resolution: {integrity: sha512-luRbQlk0ZCbYGCtCrKTqQX0ECKNYPj7OSlxKMXEY0B3bA6s4f/Xj0aLPiKlhsIynC2dPQmijA44ZDfrWFniWwA==} engines: {node: '>=20.0.0'} '@matrix-org/matrix-sdk-crypto-nodejs@0.4.0': @@ -1828,6 +1738,16 @@ packages: '@mistralai/mistralai@1.14.1': resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==} + 
'@modelcontextprotocol/sdk@1.27.1': + resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==} + engines: {node: '>=18'} + peerDependencies: + '@cfworker/json-schema': ^4.1.1 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + '@cfworker/json-schema': + optional: true + '@mozilla/readability@0.6.0': resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==} engines: {node: '>=14.0.0'} @@ -2554,12 +2474,16 @@ packages: cpu: [x64] os: [win32] - '@pierre/diffs@1.0.11': - resolution: {integrity: sha512-j6zIEoyImQy1HfcJqbrDwP0O5I7V2VNXAaw53FqQ+SykRfaNwABeZHs9uibXO4supaXPmTx6LEH9Lffr03e1Tw==} + '@pierre/diffs@1.1.0': + resolution: {integrity: sha512-wbxrzcmanJuHZb81iir09j42uU9AnKxXDtAuEQJbAnti5f2UfYdCQYejawuHZStFrlsMacCZLh/dDHmqvAaQCw==} peerDependencies: react: ^18.3.1 || ^19.0.0 react-dom: ^18.3.1 || ^19.0.0 + '@pierre/theme@0.0.22': + resolution: {integrity: sha512-ePUIdQRNGjrveELTU7fY89Xa7YGHHEy5Po5jQy/18lm32eRn96+tnYJEtFooGdffrx55KBUtOXfvVy/7LDFFhA==} + engines: {vscode: ^1.0.0} + '@pinojs/redact@0.4.0': resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==} @@ -2799,6 +2723,10 @@ packages: resolution: {integrity: sha512-Wz7QYfPAlG/DR+DfABddUZeNgoeY7d1J39OCR2jR+v7VBsB8ezulDK5szTnDDPDwLH5IWhLvXIHlCFZV7MSKgA==} engines: {node: '>= 18', npm: '>= 8.6.0'} + '@slack/logger@4.0.1': + resolution: {integrity: sha512-6cmdPrV/RYfd2U0mDGiMK8S7OJqpCTm7enMLRR3edccsPX8j7zXTLnaEF4fhxxJJTAIOil6+qZrnUPTuaLvwrQ==} + engines: {node: '>= 18', npm: '>= 8.6.0'} + '@slack/oauth@3.0.4': resolution: {integrity: sha512-+8H0g7mbrHndEUbYCP7uYyBCbwqmm3E6Mo3nfsDvZZW74zKk1ochfH/fWSvGInYNCVvaBUbg3RZBbTp0j8yJCg==} engines: {node: '>=18', npm: '>=8.6.0'} @@ -2811,18 +2739,18 @@ packages: resolution: {integrity: sha512-PVF6P6nxzDMrzPC8fSCsnwaI+kF8YfEpxf3MqXmdyjyWTYsZQURpkK7WWUWvP5QpH55pB7zyYL9Qem/xSgc5VA==} engines: 
{node: '>= 12.13.0', npm: '>= 6.12.0'} - '@slack/web-api@7.14.1': - resolution: {integrity: sha512-RoygyteJeFswxDPJjUMESn9dldWVMD2xUcHHd9DenVavSfVC6FeVnSdDerOO7m8LLvw4Q132nQM4hX8JiF7dng==} + '@slack/types@2.20.1': + resolution: {integrity: sha512-eWX2mdt1ktpn8+40iiMc404uGrih+2fxiky3zBcPjtXKj6HLRdYlmhrPkJi7JTJm8dpXR6BWVWEDBXtaWMKD6A==} + engines: {node: '>= 12.13.0', npm: '>= 6.12.0'} + + '@slack/web-api@7.15.0': + resolution: {integrity: sha512-va7zYIt3QHG1x9M/jqXXRPFMoOVlVSSRHC5YH+DzKYsrz5xUKOA3lR4THsu/Zxha9N1jOndbKFKLtr0WOPW1Vw==} engines: {node: '>= 18', npm: '>= 8.6.0'} '@smithy/abort-controller@4.2.10': resolution: {integrity: sha512-qocxM/X4XGATqQtUkbE9SPUB6wekBi+FyJOMbPj0AhvyvFGYEmOlz6VB22iMePCQsFmMIvFSeViDvA7mZJG47g==} engines: {node: '>=18.0.0'} - '@smithy/abort-controller@4.2.11': - resolution: {integrity: sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==} - engines: {node: '>=18.0.0'} - '@smithy/abort-controller@4.2.12': resolution: {integrity: sha512-xolrFw6b+2iYGl6EcOL7IJY71vvyZ0DJ3mcKtpykqPe2uscwtzDZJa1uVQXyP7w9Dd+kGwYnPbMsJrGISKiY/Q==} engines: {node: '>=18.0.0'} @@ -2835,10 +2763,6 @@ packages: resolution: {integrity: sha512-y5d4xRiD6TzeP5BWlb+Ig/VFqF+t9oANNhGeMqyzU7obw7FYgTgVi50i5JqBTeKp+TABeDIeeXFZdz65RipNtA==} engines: {node: '>=18.0.0'} - '@smithy/config-resolver@4.4.10': - resolution: {integrity: sha512-IRTkd6ps0ru+lTWnfnsbXzW80A8Od8p3pYiZnW98K2Hb20rqfsX7VTlfUwhrcOeSSy68Gn9WBofwPuw3e5CCsg==} - engines: {node: '>=18.0.0'} - '@smithy/config-resolver@4.4.11': resolution: {integrity: sha512-YxFiiG4YDAtX7WMN7RuhHZLeTmRRAOyCbr+zB8e3AQzHPnUhS8zXjB1+cniPVQI3xbWsQPM0X2aaIkO/ME0ymw==} engines: {node: '>=18.0.0'} @@ -2855,18 +2779,10 @@ packages: resolution: {integrity: sha512-4xE+0L2NrsFKpEVFlFELkIHQddBvMbQ41LRIP74dGCXnY1zQ9DgksrBcRBDJT+iOzGy4VEJIeU3hkUK5mn06kg==} engines: {node: '>=18.0.0'} - '@smithy/core@3.23.9': - resolution: {integrity: 
sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==} - engines: {node: '>=18.0.0'} - '@smithy/credential-provider-imds@4.2.10': resolution: {integrity: sha512-3bsMLJJLTZGZqVGGeBVFfLzuRulVsGTj12BzRKODTHqUABpIr0jMN1vN3+u6r2OfyhAQ2pXaMZWX/swBK5I6PQ==} engines: {node: '>=18.0.0'} - '@smithy/credential-provider-imds@4.2.11': - resolution: {integrity: sha512-lBXrS6ku0kTj3xLmsJW0WwqWbGQ6ueooYyp/1L9lkyT0M02C+DWwYwc5aTyXFbRaK38ojALxNixg+LxKSHZc0g==} - engines: {node: '>=18.0.0'} - '@smithy/credential-provider-imds@4.2.12': resolution: {integrity: sha512-cr2lR792vNZcYMriSIj+Um3x9KWrjcu98kn234xA6reOAFMmbRpQMOv8KPgEmLLtx3eldU6c5wALKFqNOhugmg==} engines: {node: '>=18.0.0'} @@ -2915,10 +2831,6 @@ packages: resolution: {integrity: sha512-wbTRjOxdFuyEg0CpumjZO0hkUl+fetJFqxNROepuLIoijQh51aMBmzFLfoQdwRjxsuuS2jizzIUTjPWgd8pd7g==} engines: {node: '>=18.0.0'} - '@smithy/fetch-http-handler@5.3.13': - resolution: {integrity: sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==} - engines: {node: '>=18.0.0'} - '@smithy/fetch-http-handler@5.3.15': resolution: {integrity: sha512-T4jFU5N/yiIfrtrsb9uOQn7RdELdM/7HbyLNr6uO/mpkj1ctiVs7CihVr51w4LyQlXWDpXFn4BElf1WmQvZu/A==} engines: {node: '>=18.0.0'} @@ -2931,10 +2843,6 @@ packages: resolution: {integrity: sha512-1VzIOI5CcsvMDvP3iv1vG/RfLJVVVc67dCRyLSB2Hn9SWCZrDO3zvcIzj3BfEtqRW5kcMg5KAeVf1K3dR6nD3w==} engines: {node: '>=18.0.0'} - '@smithy/hash-node@4.2.11': - resolution: {integrity: sha512-T+p1pNynRkydpdL015ruIoyPSRw9e/SQOWmSAMmmprfswMrd5Ow5igOWNVlvyVFZlxXqGmyH3NQwfwy8r5Jx0A==} - engines: {node: '>=18.0.0'} - '@smithy/hash-node@4.2.12': resolution: {integrity: sha512-QhBYbGrbxTkZ43QoTPrK72DoYviDeg6YKDrHTMJbbC+A0sml3kSjzFtXP7BtbyJnXojLfTQldGdUR0RGD8dA3w==} engines: {node: '>=18.0.0'} @@ -2947,10 +2855,6 @@ packages: resolution: {integrity: sha512-vy9KPNSFUU0ajFYk0sDZIYiUlAWGEAhRfehIr5ZkdFrRFTAuXEPUd41USuqHU6vvLX4r6Q9X7MKBco5+Il0Org==} engines: {node: 
'>=18.0.0'} - '@smithy/invalid-dependency@4.2.11': - resolution: {integrity: sha512-cGNMrgykRmddrNhYy1yBdrp5GwIgEkniS7k9O1VLB38yxQtlvrxpZtUVvo6T4cKpeZsriukBuuxfJcdZQc/f/g==} - engines: {node: '>=18.0.0'} - '@smithy/invalid-dependency@4.2.12': resolution: {integrity: sha512-/4F1zb7Z8LOu1PalTdESFHR0RbPwHd3FcaG1sI3UEIriQTWakysgJr65lc1jj6QY5ye7aFsisajotH6UhWfm/g==} engines: {node: '>=18.0.0'} @@ -2975,10 +2879,6 @@ packages: resolution: {integrity: sha512-TQZ9kX5c6XbjhaEBpvhSvMEZ0klBs1CFtOdPFwATZSbC9UeQfKHPLPN9Y+I6wZGMOavlYTOlHEPDrt42PMSH9w==} engines: {node: '>=18.0.0'} - '@smithy/middleware-content-length@4.2.11': - resolution: {integrity: sha512-UvIfKYAKhCzr4p6jFevPlKhQwyQwlJ6IeKLDhmV1PlYfcW3RL4ROjNEDtSik4NYMi9kDkH7eSwyTP3vNJ/u/Dw==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-content-length@4.2.12': resolution: {integrity: sha512-YE58Yz+cvFInWI/wOTrB+DbvUVz/pLn5mC5MvOV4fdRUc6qGwygyngcucRQjAhiCEbmfLOXX0gntSIcgMvAjmA==} engines: {node: '>=18.0.0'} @@ -2987,10 +2887,6 @@ packages: resolution: {integrity: sha512-9W6Np4ceBP3XCYAGLoMCmn8t2RRVzuD1ndWPLBbv7H9CrwM9Bprf6Up6BM9ZA/3alodg0b7Kf6ftBK9R1N04vw==} engines: {node: '>=18.0.0'} - '@smithy/middleware-endpoint@4.4.23': - resolution: {integrity: sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-endpoint@4.4.25': resolution: {integrity: sha512-dqjLwZs2eBxIUG6Qtw8/YZ4DvzHGIf0DA18wrgtfP6a50UIO7e2nY0FPdcbv5tVJKqWCCU5BmGMOUwT7Puan+A==} engines: {node: '>=18.0.0'} @@ -2999,10 +2895,6 @@ packages: resolution: {integrity: sha512-/1psZZllBBSQ7+qo5+hhLz7AEPGLx3Z0+e3ramMBEuPK2PfvLK4SrncDB9VegX5mBn+oP/UTDrM6IHrFjvX1ZA==} engines: {node: '>=18.0.0'} - '@smithy/middleware-retry@4.4.40': - resolution: {integrity: sha512-YhEMakG1Ae57FajERdHNZ4ShOPIY7DsgV+ZoAxo/5BT0KIe+f6DDU2rtIymNNFIj22NJfeeI6LWIifrwM0f+rA==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-retry@4.4.42': resolution: {integrity: 
sha512-vbwyqHRIpIZutNXZpLAozakzamcINaRCpEy1MYmK6xBeW3xN+TyPRA123GjXnuxZIjc9848MRRCugVMTXxC4Eg==} engines: {node: '>=18.0.0'} @@ -3011,10 +2903,6 @@ packages: resolution: {integrity: sha512-STQdONGPwbbC7cusL60s7vOa6He6A9w2jWhoapL0mgVjmR19pr26slV+yoSP76SIssMTX/95e5nOZ6UQv6jolg==} engines: {node: '>=18.0.0'} - '@smithy/middleware-serde@4.2.12': - resolution: {integrity: sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-serde@4.2.14': resolution: {integrity: sha512-+CcaLoLa5apzSRtloOyG7lQvkUw2ZDml3hRh4QiG9WyEPfW5Ke/3tPOPiPjUneuT59Tpn8+c3RVaUvvkkwqZwg==} engines: {node: '>=18.0.0'} @@ -3023,10 +2911,6 @@ packages: resolution: {integrity: sha512-pmts/WovNcE/tlyHa8z/groPeOtqtEpp61q3W0nW1nDJuMq/x+hWa/OVQBtgU0tBqupeXq0VBOLA4UZwE8I0YA==} engines: {node: '>=18.0.0'} - '@smithy/middleware-stack@4.2.11': - resolution: {integrity: sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==} - engines: {node: '>=18.0.0'} - '@smithy/middleware-stack@4.2.12': resolution: {integrity: sha512-kruC5gRHwsCOuyCd4ouQxYjgRAym2uDlCvQ5acuMtRrcdfg7mFBg6blaxcJ09STpt3ziEkis6bhg1uwrWU7txw==} engines: {node: '>=18.0.0'} @@ -3035,10 +2919,6 @@ packages: resolution: {integrity: sha512-UALRbJtVX34AdP2VECKVlnNgidLHA2A7YgcJzwSBg1hzmnO/bZBHl/LDQQyYifzUwp1UOODnl9JJ3KNawpUJ9w==} engines: {node: '>=18.0.0'} - '@smithy/node-config-provider@4.3.11': - resolution: {integrity: sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==} - engines: {node: '>=18.0.0'} - '@smithy/node-config-provider@4.3.12': resolution: {integrity: sha512-tr2oKX2xMcO+rBOjobSwVAkV05SIfUKz8iI53rzxEmgW3GOOPOv0UioSDk+J8OpRQnpnhsO3Af6IEBabQBVmiw==} engines: {node: '>=18.0.0'} @@ -3047,10 +2927,6 @@ packages: resolution: {integrity: sha512-zo1+WKJkR9x7ZtMeMDAAsq2PufwiLDmkhcjpWPRRkmeIuOm6nq1qjFICSZbnjBvD09ei8KMo26BWxsu2BUU+5w==} engines: {node: '>=18.0.0'} - 
'@smithy/node-http-handler@4.4.14': - resolution: {integrity: sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==} - engines: {node: '>=18.0.0'} - '@smithy/node-http-handler@4.4.16': resolution: {integrity: sha512-ULC8UCS/HivdCB3jhi+kLFYe4B5gxH2gi9vHBfEIiRrT2jfKiZNiETJSlzRtE6B26XbBHjPtc8iZKSNqMol9bw==} engines: {node: '>=18.0.0'} @@ -3059,10 +2935,6 @@ packages: resolution: {integrity: sha512-5jm60P0CU7tom0eNrZ7YrkgBaoLFXzmqB0wVS+4uK8PPGmosSrLNf6rRd50UBvukztawZ7zyA8TxlrKpF5z9jw==} engines: {node: '>=18.0.0'} - '@smithy/property-provider@4.2.11': - resolution: {integrity: sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==} - engines: {node: '>=18.0.0'} - '@smithy/property-provider@4.2.12': resolution: {integrity: sha512-jqve46eYU1v7pZ5BM+fmkbq3DerkSluPr5EhvOcHxygxzD05ByDRppRwRPPpFrsFo5yDtCYLKu+kreHKVrvc7A==} engines: {node: '>=18.0.0'} @@ -3071,10 +2943,6 @@ packages: resolution: {integrity: sha512-2NzVWpYY0tRdfeCJLsgrR89KE3NTWT2wGulhNUxYlRmtRmPwLQwKzhrfVaiNlA9ZpJvbW7cjTVChYKgnkqXj1A==} engines: {node: '>=18.0.0'} - '@smithy/protocol-http@5.3.11': - resolution: {integrity: sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==} - engines: {node: '>=18.0.0'} - '@smithy/protocol-http@5.3.12': resolution: {integrity: sha512-fit0GZK9I1xoRlR4jXmbLhoN0OdEpa96ul8M65XdmXnxXkuMxM0Y8HDT0Fh0Xb4I85MBvBClOzgSrV1X2s1Hxw==} engines: {node: '>=18.0.0'} @@ -3083,10 +2951,6 @@ packages: resolution: {integrity: sha512-HeN7kEvuzO2DmAzLukE9UryiUvejD3tMp9a1D1NJETerIfKobBUCLfviP6QEk500166eD2IATaXM59qgUI+YDA==} engines: {node: '>=18.0.0'} - '@smithy/querystring-builder@4.2.11': - resolution: {integrity: sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==} - engines: {node: '>=18.0.0'} - '@smithy/querystring-builder@4.2.12': resolution: {integrity: 
sha512-6wTZjGABQufekycfDGMEB84BgtdOE/rCVTov+EDXQ8NHKTUNIp/j27IliwP7tjIU9LR+sSzyGBOXjeEtVgzCHg==} engines: {node: '>=18.0.0'} @@ -3095,10 +2959,6 @@ packages: resolution: {integrity: sha512-4Mh18J26+ao1oX5wXJfWlTT+Q1OpDR8ssiC9PDOuEgVBGloqg18Fw7h5Ct8DyT9NBYwJgtJ2nLjKKFU6RP1G1Q==} engines: {node: '>=18.0.0'} - '@smithy/querystring-parser@4.2.11': - resolution: {integrity: sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==} - engines: {node: '>=18.0.0'} - '@smithy/querystring-parser@4.2.12': resolution: {integrity: sha512-P2OdvrgiAKpkPNKlKUtWbNZKB1XjPxM086NeVhK+W+wI46pIKdWBe5QyXvhUm3MEcyS/rkLvY8rZzyUdmyDZBw==} engines: {node: '>=18.0.0'} @@ -3107,10 +2967,6 @@ packages: resolution: {integrity: sha512-0R/+/Il5y8nB/By90o8hy/bWVYptbIfvoTYad0igYQO5RefhNCDmNzqxaMx7K1t/QWo0d6UynqpqN5cCQt1MCg==} engines: {node: '>=18.0.0'} - '@smithy/service-error-classification@4.2.11': - resolution: {integrity: sha512-HkMFJZJUhzU3HvND1+Yw/kYWXp4RPDLBWLcK1n+Vqw8xn4y2YiBhdww8IxhkQjP/QlZun5bwm3vcHc8AqIU3zw==} - engines: {node: '>=18.0.0'} - '@smithy/service-error-classification@4.2.12': resolution: {integrity: sha512-LlP29oSQN0Tw0b6D0Xo6BIikBswuIiGYbRACy5ujw/JgWSzTdYj46U83ssf6Ux0GyNJVivs2uReU8pt7Eu9okQ==} engines: {node: '>=18.0.0'} @@ -3119,10 +2975,6 @@ packages: resolution: {integrity: sha512-pHgASxl50rrtOztgQCPmOXFjRW+mCd7ALr/3uXNzRrRoGV5G2+78GOsQ3HlQuBVHCh9o6xqMNvlIKZjWn4Euug==} engines: {node: '>=18.0.0'} - '@smithy/shared-ini-file-loader@4.4.6': - resolution: {integrity: sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==} - engines: {node: '>=18.0.0'} - '@smithy/shared-ini-file-loader@4.4.7': resolution: {integrity: sha512-HrOKWsUb+otTeo1HxVWeEb99t5ER1XrBi/xka2Wv6NVmTbuCUC1dvlrksdvxFtODLBjsC+PHK+fuy2x/7Ynyiw==} engines: {node: '>=18.0.0'} @@ -3131,10 +2983,6 @@ packages: resolution: {integrity: sha512-Wab3wW8468WqTKIxI+aZe3JYO52/RYT/8sDOdzkUhjnLakLe9qoQqIcfih/qxcF4qWEFoWBszY0mj5uxffaVXA==} engines: 
{node: '>=18.0.0'} - '@smithy/signature-v4@5.3.11': - resolution: {integrity: sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==} - engines: {node: '>=18.0.0'} - '@smithy/signature-v4@5.3.12': resolution: {integrity: sha512-B/FBwO3MVOL00DaRSXfXfa/TRXRheagt/q5A2NM13u7q+sHS59EOVGQNfG7DkmVtdQm5m3vOosoKAXSqn/OEgw==} engines: {node: '>=18.0.0'} @@ -3143,10 +2991,6 @@ packages: resolution: {integrity: sha512-R8bQ9K3lCcXyZmBnQqUZJF4ChZmtWT5NLi6x5kgWx5D+/j0KorXcA0YcFg/X5TOgnTCy1tbKc6z2g2y4amFupQ==} engines: {node: '>=18.0.0'} - '@smithy/smithy-client@4.12.3': - resolution: {integrity: sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==} - engines: {node: '>=18.0.0'} - '@smithy/smithy-client@4.12.5': resolution: {integrity: sha512-UqwYawyqSr/aog8mnLnfbPurS0gi4G7IYDcD28cUIBhsvWs1+rQcL2IwkUQ+QZ7dibaoRzhNF99fAQ9AUcO00w==} engines: {node: '>=18.0.0'} @@ -3163,10 +3007,6 @@ packages: resolution: {integrity: sha512-uypjF7fCDsRk26u3qHmFI/ePL7bxxB9vKkE+2WKEciHhz+4QtbzWiHRVNRJwU3cKhrYDYQE3b0MRFtqfLYdA4A==} engines: {node: '>=18.0.0'} - '@smithy/url-parser@4.2.11': - resolution: {integrity: sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==} - engines: {node: '>=18.0.0'} - '@smithy/url-parser@4.2.12': resolution: {integrity: sha512-wOPKPEpso+doCZGIlr+e1lVI6+9VAKfL4kZWFgzVgGWY2hZxshNKod4l2LXS3PRC9otH/JRSjtEHqQ/7eLciRA==} engines: {node: '>=18.0.0'} @@ -3219,10 +3059,6 @@ packages: resolution: {integrity: sha512-R0smq7EHQXRVMxkAxtH5akJ/FvgAmNF6bUy/GwY/N20T4GrwjT633NFm0VuRpC+8Bbv8R9A0DoJ9OiZL/M3xew==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-browser@4.3.39': - resolution: {integrity: sha512-ui7/Ho/+VHqS7Km2wBw4/Ab4RktoiSshgcgpJzC4keFPs6tLJS4IQwbeahxQS3E/w98uq6E1mirCH/id9xIXeQ==} - engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-browser@4.3.41': resolution: {integrity: 
sha512-M1w1Ux0rSVvBOxIIiqbxvZvhnjQ+VUjJrugtORE90BbadSTH+jsQL279KRL3Hv0w69rE7EuYkV/4Lepz/NBW9g==} engines: {node: '>=18.0.0'} @@ -3231,10 +3067,6 @@ packages: resolution: {integrity: sha512-otWuoDm35btJV1L8MyHrPl462B07QCdMTktKc7/yM+Psv6KbED/ziXiHnmr7yPHUjfIwE9S8Max0LO24Mo3ZVg==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-node@4.2.42': - resolution: {integrity: sha512-QDA84CWNe8Akpj15ofLO+1N3Rfg8qa2K5uX0y6HnOp4AnRYRgWrKx/xzbYNbVF9ZsyJUYOfcoaN3y93wA/QJ2A==} - engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-node@4.2.44': resolution: {integrity: sha512-YPze3/lD1KmWuZsl9JlfhcgGLX7AXhSoaCDtiPntUjNW5/YY0lOHjkcgxyE9x/h5vvS1fzDifMGjzqnNlNiqOQ==} engines: {node: '>=18.0.0'} @@ -3243,10 +3075,6 @@ packages: resolution: {integrity: sha512-xyctc4klmjmieQiF9I1wssBWleRV0RhJ2DpO8+8yzi2LO1Z+4IWOZNGZGNj4+hq9kdo+nyfrRLmQTzc16Op2Vg==} engines: {node: '>=18.0.0'} - '@smithy/util-endpoints@3.3.2': - resolution: {integrity: sha512-+4HFLpE5u29AbFlTdlKIT7jfOzZ8PDYZKTb3e+AgLz986OYwqTourQ5H+jg79/66DB69Un1+qKecLnkZdAsYcA==} - engines: {node: '>=18.0.0'} - '@smithy/util-endpoints@3.3.3': resolution: {integrity: sha512-VACQVe50j0HZPjpwWcjyT51KUQ4AnsvEaQ2lKHOSL4mNLD0G9BjEniQ+yCt1qqfKfiAHRAts26ud7hBjamrwig==} engines: {node: '>=18.0.0'} @@ -3263,10 +3091,6 @@ packages: resolution: {integrity: sha512-LxaQIWLp4y0r72eA8mwPNQ9va4h5KeLM0I3M/HV9klmFaY2kN766wf5vsTzmaOpNNb7GgXAd9a25P3h8T49PSA==} engines: {node: '>=18.0.0'} - '@smithy/util-middleware@4.2.11': - resolution: {integrity: sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==} - engines: {node: '>=18.0.0'} - '@smithy/util-middleware@4.2.12': resolution: {integrity: sha512-Er805uFUOvgc0l8nv0e0su0VFISoxhJ/AwOn3gL2NWNY2LUEldP5WtVcRYSQBcjg0y9NfG8JYrCJaYDpupBHJQ==} engines: {node: '>=18.0.0'} @@ -3275,10 +3099,6 @@ packages: resolution: {integrity: sha512-HrBzistfpyE5uqTwiyLsFHscgnwB0kgv8vySp7q5kZ0Eltn/tjosaSGGDj/jJ9ys7pWzIP/icE2d+7vMKXLv7A==} engines: {node: '>=18.0.0'} - 
'@smithy/util-retry@4.2.11': - resolution: {integrity: sha512-XSZULmL5x6aCTTii59wJqKsY1l3eMIAomRAccW7Tzh9r8s7T/7rdo03oektuH5jeYRlJMPcNP92EuRDvk9aXbw==} - engines: {node: '>=18.0.0'} - '@smithy/util-retry@4.2.12': resolution: {integrity: sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ==} engines: {node: '>=18.0.0'} @@ -3287,10 +3107,6 @@ packages: resolution: {integrity: sha512-OlOKnaqnkU9X+6wEkd7mN+WB7orPbCVDauXOj22Q7VtiTkvy7ZdSsOg4QiNAZMgI4OkvNf+/VLUC3VXkxuWJZw==} engines: {node: '>=18.0.0'} - '@smithy/util-stream@4.5.17': - resolution: {integrity: sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==} - engines: {node: '>=18.0.0'} - '@smithy/util-stream@4.5.19': resolution: {integrity: sha512-v4sa+3xTweL1CLO2UP0p7tvIMH/Rq1X4KKOxd568mpe6LSLMQCnDHs4uv7m3ukpl3HvcN2JH6jiCS0SNRXKP/w==} engines: {node: '>=18.0.0'} @@ -3631,43 +3447,43 @@ packages: '@types/yauzl@2.10.3': resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260312.1': - resolution: {integrity: sha512-AhPdPuVe4osxWoeImS21jVhc0VJ2QnzLUZtEFMakY0Rf70C0b6il/m7hwRf9wkr9xXZLVOVJ1kYrpvQRuHFE0Q==} + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-/fU2IvlRQWOy63xSzkejW7tTQpsL5dQ/ATIsJFlK75vS941CnNJY8dAx3iQYLkHMhS45hhCIR+bbJPRaacq/fw==} cpu: [arm64] os: [darwin] - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260312.1': - resolution: {integrity: sha512-9I0P1/c/mQ6UVcQq7SYY/FJD23IN5T2y4GbSFOKQvzNVASV0tMnX4YV8YNf6b5jcwCzrVcrGNKKgWCj8xEFf8Q==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-oy7Ew1J3+YtO9QsqVGkncQ8bCwVPxNk8nSO2q1sHLccyYq0f4eDaZTlJ+u9Ynry548NwNucLh9wE+DWfWhzU3Q==} cpu: [x64] os: [darwin] - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260312.1': - resolution: {integrity: 
sha512-xwoMywagcvx9F2ocM+ybeg7eH9PHDpx1FBGOrloL1/xkGC4BCrn/RcaAe0AhzXzoJfHHmg7Sz9VzYmTR4N1Kqw==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-KkbAweTnBpmQ8wCGHjrLzPX+FuwhSrVERNqyGPaq/267Sxt0UwbIO3rZduXlq5UUln1+/z7uT/BNJiuoFW3iLw==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260312.1': - resolution: {integrity: sha512-/nAOhSLTxMJfHY+2cKdUxi2wYadf3g1GtC3VzgPfZMNxA28dJ8x75T26aSLaFYluh7cCSAwuGesCImijQDS2Lw==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-IAx0ajfEiL1tJg1N6+/nHXJKebNe72yanY2N5bicwIB3t2BmydnrEPG+/OFVqc+prfJngxSx/61mvkXScZePzg==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260312.1': - resolution: {integrity: sha512-vZs0LLpZw50Ac0TCmF9ND7KphJBhOfp9fxLhC+hFWaUU1iCQRjv1MtvroitF5OJKb21qFPJxkU+kfhlCRxLfqg==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-9LCNgXVNoArHlMuL6yFKJxSdshiiadTfW/pU4tz4Vbg+Dg9La1VE9mLlBdijy5ZIg4nsOFpR8JTDURcA1RoHXw==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260312.1': - resolution: {integrity: sha512-4LY/gd9cj1xDY2nEthB7WDW4j/fIYJ9wp9H71nOLd0wNNtkfqRXWSkQEeb+RByhV+dIb/n6kWbQQMeNfk7q4VQ==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-cP2y5hb2xhfEDIgxdhxhPXa/D5Lq3yj6zxVuhh9ZkUariF+ZAmF4pySlIA+7NdprgTQqvNY5Mp70cPUiYD3yUg==} cpu: [arm64] os: [win32] - '@typescript/native-preview-win32-x64@7.0.0-dev.20260312.1': - resolution: {integrity: sha512-EP2JPo9s9EPUwXSX83qTImlDHhgkLeBbJ2MMdj+XrfBltHAvHKktzeSS73UhP77s/TnTkJR6BTWHENKKvLRbGQ==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-8KDfi7U1enFo4z6F0qe4Rd5QzBhk+4cwpZtOGAT9lgyR4pF/mo8zQd0t+Hlkj6d87W057RP8lgCGTGfclGWxUg==} cpu: [x64] os: [win32] - '@typescript/native-preview@7.0.0-dev.20260312.1': - resolution: {integrity: 
sha512-FwhlXG/yG0d7b2UmooBYyszLMpICRYdYGE6v65ZlMnH7cWKQyyFpMFgH9suRf3Np4QCbN+7qisj+F23kQOidVw==} + '@typescript/native-preview@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-x+ZrFAEq+c7bF4Ml8+abYZ9vW6mzu22fmcPbDcBmUl/4uGFCYXXww0FS3+me9MfdSOCAPtqcZtwApx1RQO2X/w==} hasBin: true '@typespec/ts-http-runtime@0.3.3': @@ -3950,6 +3766,9 @@ packages: axios@1.13.5: resolution: {integrity: sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==} + axios@1.13.6: + resolution: {integrity: sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==} + b4a@1.8.0: resolution: {integrity: sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg==} peerDependencies: @@ -4271,6 +4090,10 @@ packages: core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + cors@2.8.6: + resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} + engines: {node: '>= 0.10'} + croner@10.0.1: resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==} engines: {node: '>=18.0'} @@ -4550,6 +4373,14 @@ packages: events-universal@1.0.1: resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==} + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.7: + resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} + engines: {node: '>=18.0.0'} + execa@4.1.0: resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==} engines: {node: '>=10'} @@ -4561,6 +4392,12 
@@ packages: exponential-backoff@3.1.3: resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==} + express-rate-limit@8.3.1: + resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==} + engines: {node: '>= 16'} + peerDependencies: + express: '>= 4.11' + express@4.22.1: resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==} engines: {node: '>= 0.10.0'} @@ -4620,8 +4457,8 @@ packages: resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} engines: {node: ^12.20 || >= 14.13} - file-type@21.3.1: - resolution: {integrity: sha512-SrzXX46I/zsRDjTb82eucsGg0ODq2NpGDp4HcsFKApPy8P8vACjpJRDoGGMfEzhFC0ry61ajd7f72J3603anBA==} + file-type@21.3.2: + resolution: {integrity: sha512-DLkUvGwep3poOV2wpzbHCOnSKGk1LzyXTv+aHFgN2VFl96wnp8YA9YjO2qPzg5PuL8q/SW9Pdi6WTkYOIh995w==} engines: {node: '>=20'} filename-reserved-regex@3.0.0: @@ -5058,6 +4895,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + jose@6.2.1: + resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==} + js-stringify@1.0.2: resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==} @@ -5102,6 +4942,9 @@ packages: json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + json-schema-typed@8.0.2: + resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + json-schema@0.4.0: resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} @@ 
-5677,8 +5520,8 @@ packages: zod: optional: true - openai@6.27.0: - resolution: {integrity: sha512-osTKySlrdYrLYTt0zjhY8yp0JUBmWDCN+Q+QxsV4xMQnnoVFpylgKGgxwN8sSdTNw0G4y+WUXs4eCMWpyDNWZQ==} + openai@6.29.0: + resolution: {integrity: sha512-YxoArl2BItucdO89/sN6edksV0x47WUTgkgVfCgX7EuEMhbirENsgYe5oO4LTjBL9PtdKtk2WqND1gSLcTd2yw==} hasBin: true peerDependencies: ws: ^8.18.0 @@ -5689,14 +5532,6 @@ packages: zod: optional: true - openclaw@2026.3.11: - resolution: {integrity: sha512-bxwiBmHPakwfpY5tqC9lrV5TCu5PKf0c1bHNc3nhrb+pqKcPEWV4zOjDVFLQUHr98ihgWA+3pacy4b3LQ8wduQ==} - engines: {node: '>=22.12.0'} - hasBin: true - peerDependencies: - '@napi-rs/canvas': ^0.1.89 - node-llama-cpp: 3.16.2 - opus-decoder@0.7.11: resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==} @@ -5870,6 +5705,10 @@ packages: resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==} hasBin: true + pkce-challenge@5.0.1: + resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} + engines: {node: '>=16.20.0'} + playwright-core@1.58.2: resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} engines: {node: '>=18'} @@ -6667,12 +6506,8 @@ packages: undici-types@7.18.2: resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==} - undici@7.22.0: - resolution: {integrity: sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==} - engines: {node: '>=20.18.1'} - - undici@7.24.0: - resolution: {integrity: sha512-jxytwMHhsbdpBXxLAcuu0fzlQeXCNnWdDyRHpvWsUl8vd98UwYdl9YTyn8/HcpcJPC3pwUveefsa3zTxyD/ERg==} + undici@7.24.1: + resolution: {integrity: sha512-5xoBibbmnjlcR3jdqtY2Lnx7WbrD/tHlT01TmvqZUFVc9Q1w4+j5hbnapTqbcXITMH1ovjq/W7BkqBilHiVAaA==} engines: {node: 
'>=20.18.1'} unist-util-is@6.0.1: @@ -7047,7 +6882,7 @@ snapshots: '@aws-crypto/sha256-js': 5.2.0 '@aws-crypto/supports-web-crypto': 5.2.0 '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@aws-sdk/util-locate-window': 3.965.5 '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 @@ -7055,7 +6890,7 @@ snapshots: '@aws-crypto/sha256-js@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 tslib: 2.8.1 '@aws-crypto/supports-web-crypto@5.2.0': @@ -7072,115 +6907,70 @@ snapshots: dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-node': 3.972.18 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-node': 3.972.21 '@aws-sdk/eventstream-handler-node': 3.972.10 '@aws-sdk/middleware-eventstream': 3.972.7 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 '@aws-sdk/middleware-websocket': 3.972.12 - '@aws-sdk/region-config-resolver': 3.972.7 + '@aws-sdk/region-config-resolver': 3.972.8 '@aws-sdk/token-providers': 3.1004.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 '@smithy/eventstream-serde-browser': 4.2.11 '@smithy/eventstream-serde-config-resolver': 4.3.11 '@smithy/eventstream-serde-node': 4.2.11 - 
'@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 '@smithy/util-base64': 4.3.2 '@smithy/util-body-length-browser': 4.2.2 '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-stream': 4.5.17 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/util-stream': 4.5.19 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.1007.0': + '@aws-sdk/client-bedrock@3.1009.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.19 - '@aws-sdk/credential-provider-node': 3.972.19 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - 
'@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/token-providers': 3.1007.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.5 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/client-bedrock@3.1008.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.19 - '@aws-sdk/credential-provider-node': 3.972.20 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/token-providers': 3.1008.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.6 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-node': 3.972.21 + 
'@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/region-config-resolver': 3.972.8 + '@aws-sdk/token-providers': 3.1009.0 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 '@smithy/config-resolver': 4.4.11 '@smithy/core': 3.23.11 '@smithy/fetch-http-handler': 5.3.15 @@ -7286,26 +7076,10 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@aws-sdk/core@3.973.18': + '@aws-sdk/core@3.973.20': dependencies: - '@aws-sdk/types': 3.973.5 - '@aws-sdk/xml-builder': 3.972.10 - '@smithy/core': 3.23.9 - '@smithy/node-config-provider': 4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/signature-v4': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - - '@aws-sdk/core@3.973.19': - dependencies: - '@aws-sdk/types': 3.973.5 - '@aws-sdk/xml-builder': 3.972.10 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/xml-builder': 3.972.11 '@smithy/core': 3.23.11 '@smithy/node-config-provider': 4.3.12 '@smithy/property-provider': 4.2.12 @@ -7331,18 +7105,10 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-env@3.972.16': + '@aws-sdk/credential-provider-env@3.972.18': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/credential-provider-env@3.972.17': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 '@smithy/property-provider': 4.2.12 '@smithy/types': 4.13.1 tslib: 2.8.1 @@ -7360,23 +7126,10 @@ snapshots: '@smithy/util-stream': 4.5.15 tslib: 2.8.1 - 
'@aws-sdk/credential-provider-http@3.972.18': + '@aws-sdk/credential-provider-http@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/node-http-handler': 4.4.14 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-stream': 4.5.17 - tslib: 2.8.1 - - '@aws-sdk/credential-provider-http@3.972.19': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 '@smithy/fetch-http-handler': 5.3.15 '@smithy/node-http-handler': 4.4.16 '@smithy/property-provider': 4.2.12 @@ -7405,55 +7158,17 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-ini@3.972.17': + '@aws-sdk/credential-provider-ini@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-env': 3.972.16 - '@aws-sdk/credential-provider-http': 3.972.18 - '@aws-sdk/credential-provider-login': 3.972.17 - '@aws-sdk/credential-provider-process': 3.972.16 - '@aws-sdk/credential-provider-sso': 3.972.17 - '@aws-sdk/credential-provider-web-identity': 3.972.17 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-ini@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/credential-provider-env': 3.972.17 - '@aws-sdk/credential-provider-http': 3.972.19 - '@aws-sdk/credential-provider-login': 3.972.18 - '@aws-sdk/credential-provider-process': 3.972.17 - '@aws-sdk/credential-provider-sso': 3.972.18 - '@aws-sdk/credential-provider-web-identity': 3.972.18 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - 
'@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-ini@3.972.19': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/credential-provider-env': 3.972.17 - '@aws-sdk/credential-provider-http': 3.972.19 - '@aws-sdk/credential-provider-login': 3.972.19 - '@aws-sdk/credential-provider-process': 3.972.17 - '@aws-sdk/credential-provider-sso': 3.972.19 - '@aws-sdk/credential-provider-web-identity': 3.972.19 - '@aws-sdk/nested-clients': 3.996.9 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-env': 3.972.18 + '@aws-sdk/credential-provider-http': 3.972.20 + '@aws-sdk/credential-provider-login': 3.972.20 + '@aws-sdk/credential-provider-process': 3.972.18 + '@aws-sdk/credential-provider-sso': 3.972.20 + '@aws-sdk/credential-provider-web-identity': 3.972.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 '@smithy/credential-provider-imds': 4.2.12 '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 @@ -7475,37 +7190,11 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-login@3.972.17': + '@aws-sdk/credential-provider-login@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-login@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - 
'@aws-sdk/credential-provider-login@3.972.19': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.9 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 '@smithy/property-provider': 4.2.12 '@smithy/protocol-http': 5.3.12 '@smithy/shared-ini-file-loader': 4.4.7 @@ -7531,49 +7220,15 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-node@3.972.18': + '@aws-sdk/credential-provider-node@3.972.21': dependencies: - '@aws-sdk/credential-provider-env': 3.972.16 - '@aws-sdk/credential-provider-http': 3.972.18 - '@aws-sdk/credential-provider-ini': 3.972.17 - '@aws-sdk/credential-provider-process': 3.972.16 - '@aws-sdk/credential-provider-sso': 3.972.17 - '@aws-sdk/credential-provider-web-identity': 3.972.17 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-node@3.972.19': - dependencies: - '@aws-sdk/credential-provider-env': 3.972.17 - '@aws-sdk/credential-provider-http': 3.972.19 - '@aws-sdk/credential-provider-ini': 3.972.18 - '@aws-sdk/credential-provider-process': 3.972.17 - '@aws-sdk/credential-provider-sso': 3.972.18 - '@aws-sdk/credential-provider-web-identity': 3.972.18 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-node@3.972.20': - dependencies: - '@aws-sdk/credential-provider-env': 3.972.17 - '@aws-sdk/credential-provider-http': 3.972.19 - '@aws-sdk/credential-provider-ini': 3.972.19 - '@aws-sdk/credential-provider-process': 3.972.17 - '@aws-sdk/credential-provider-sso': 3.972.19 - 
'@aws-sdk/credential-provider-web-identity': 3.972.19 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/credential-provider-env': 3.972.18 + '@aws-sdk/credential-provider-http': 3.972.20 + '@aws-sdk/credential-provider-ini': 3.972.20 + '@aws-sdk/credential-provider-process': 3.972.18 + '@aws-sdk/credential-provider-sso': 3.972.20 + '@aws-sdk/credential-provider-web-identity': 3.972.20 + '@aws-sdk/types': 3.973.6 '@smithy/credential-provider-imds': 4.2.12 '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 @@ -7591,19 +7246,10 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-process@3.972.16': + '@aws-sdk/credential-provider-process@3.972.18': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/credential-provider-process@3.972.17': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 '@smithy/types': 4.13.1 @@ -7622,38 +7268,12 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-sso@3.972.17': + '@aws-sdk/credential-provider-sso@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/token-providers': 3.1004.0 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-sso@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/token-providers': 3.1005.0 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - 
aws-crt - - '@aws-sdk/credential-provider-sso@3.972.19': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.9 - '@aws-sdk/token-providers': 3.1008.0 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/token-providers': 3.1009.0 + '@aws-sdk/types': 3.973.6 '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 '@smithy/types': 4.13.1 @@ -7673,35 +7293,11 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-web-identity@3.972.17': + '@aws-sdk/credential-provider-web-identity@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-web-identity@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/credential-provider-web-identity@3.972.19': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.9 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 '@smithy/types': 4.13.1 @@ -7711,9 +7307,9 @@ snapshots: '@aws-sdk/eventstream-handler-node@3.972.10': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@smithy/eventstream-codec': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-bucket-endpoint@3.972.6': @@ -7728,9 +7324,9 @@ snapshots: '@aws-sdk/middleware-eventstream@3.972.7': dependencies: - '@aws-sdk/types': 3.973.5 
- '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-expect-continue@3.972.6': @@ -7764,9 +7360,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-host-header@3.972.7': + '@aws-sdk/middleware-host-header@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 tslib: 2.8.1 @@ -7783,9 +7379,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-logger@3.972.7': + '@aws-sdk/middleware-logger@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@smithy/types': 4.13.1 tslib: 2.8.1 @@ -7797,9 +7393,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-recursion-detection@3.972.7': + '@aws-sdk/middleware-recursion-detection@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@aws/lambda-invoke-store': 0.2.4 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 @@ -7838,22 +7434,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-user-agent@3.972.19': + '@aws-sdk/middleware-user-agent@3.972.21': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@smithy/core': 3.23.9 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-retry': 4.2.11 - tslib: 2.8.1 - - '@aws-sdk/middleware-user-agent@3.972.20': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 '@smithy/core': 3.23.11 '@smithy/protocol-http': 5.3.12 '@smithy/types': 4.13.1 @@ -7862,19 +7447,62 @@ snapshots: '@aws-sdk/middleware-websocket@3.972.12': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 
'@aws-sdk/util-format-url': 3.972.7 '@smithy/eventstream-codec': 4.2.11 '@smithy/eventstream-serde-browser': 4.2.11 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/protocol-http': 5.3.11 - '@smithy/signature-v4': 5.3.11 - '@smithy/types': 4.13.0 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/protocol-http': 5.3.12 + '@smithy/signature-v4': 5.3.12 + '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 '@smithy/util-hex-encoding': 4.2.2 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 + '@aws-sdk/nested-clients@3.996.10': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/region-config-resolver': 3.972.8 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + 
'@aws-sdk/nested-clients@3.996.3': dependencies: '@aws-crypto/sha256-browser': 5.2.0 @@ -7918,135 +7546,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/nested-clients@3.996.7': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.19 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/nested-clients@3.996.8': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.19 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.20 - 
'@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.5 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/nested-clients@3.996.9': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.19 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.6 - '@smithy/config-resolver': 4.4.11 - '@smithy/core': 3.23.11 - '@smithy/fetch-http-handler': 5.3.15 - '@smithy/hash-node': 4.2.12 - '@smithy/invalid-dependency': 4.2.12 - '@smithy/middleware-content-length': 4.2.12 - '@smithy/middleware-endpoint': 4.4.25 - '@smithy/middleware-retry': 4.4.42 - 
'@smithy/middleware-serde': 4.2.14 - '@smithy/middleware-stack': 4.2.12 - '@smithy/node-config-provider': 4.3.12 - '@smithy/node-http-handler': 4.4.16 - '@smithy/protocol-http': 5.3.12 - '@smithy/smithy-client': 4.12.5 - '@smithy/types': 4.13.1 - '@smithy/url-parser': 4.2.12 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.41 - '@smithy/util-defaults-mode-node': 4.2.44 - '@smithy/util-endpoints': 3.3.3 - '@smithy/util-middleware': 4.2.12 - '@smithy/util-retry': 4.2.12 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/region-config-resolver@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -8055,12 +7554,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/region-config-resolver@3.972.7': + '@aws-sdk/region-config-resolver@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/config-resolver': 4.4.10 - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/config-resolver': 4.4.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/s3-request-presigner@3.1000.0': @@ -8085,45 +7584,21 @@ snapshots: '@aws-sdk/token-providers@3.1004.0': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt - '@aws-sdk/token-providers@3.1005.0': + '@aws-sdk/token-providers@3.1009.0': dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 
4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/token-providers@3.1007.0': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/token-providers@3.1008.0': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.9 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 '@smithy/property-provider': 4.2.12 '@smithy/shared-ini-file-loader': 4.4.7 '@smithy/types': 4.13.1 @@ -8153,6 +7628,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/types@3.973.6': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + '@aws-sdk/util-arn-parser@3.972.2': dependencies: tslib: 2.8.1 @@ -8165,9 +7645,9 @@ snapshots: '@smithy/util-endpoints': 3.3.1 tslib: 2.8.1 - '@aws-sdk/util-endpoints@3.996.4': + '@aws-sdk/util-endpoints@3.996.5': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@smithy/types': 4.13.1 '@smithy/url-parser': 4.2.12 '@smithy/util-endpoints': 3.3.3 @@ -8182,9 +7662,9 @@ snapshots: '@aws-sdk/util-format-url@3.972.7': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/util-locate-window@3.965.4': @@ -8202,9 +7682,9 @@ snapshots: bowser: 2.14.1 tslib: 2.8.1 - '@aws-sdk/util-user-agent-browser@3.972.7': + '@aws-sdk/util-user-agent-browser@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@smithy/types': 4.13.1 bowser: 2.14.1 tslib: 2.8.1 @@ -8217,34 +7697,18 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - 
'@aws-sdk/util-user-agent-node@3.973.4': + '@aws-sdk/util-user-agent-node@3.973.7': dependencies: - '@aws-sdk/middleware-user-agent': 3.972.19 - '@aws-sdk/types': 3.973.5 - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/util-user-agent-node@3.973.5': - dependencies: - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/types': 3.973.5 - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/util-user-agent-node@3.973.6': - dependencies: - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/types': 3.973.6 '@smithy/node-config-provider': 4.3.12 '@smithy/types': 4.13.1 '@smithy/util-config-provider': 4.2.2 tslib: 2.8.1 - '@aws-sdk/xml-builder@3.972.10': + '@aws-sdk/xml-builder@3.972.11': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 fast-xml-parser: 5.3.8 tslib: 2.8.1 @@ -8645,12 +8109,14 @@ snapshots: optionalDependencies: '@noble/hashes': 2.0.1 - '@google/genai@1.44.0': + '@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))': dependencies: google-auth-library: 10.6.1 p-retry: 4.6.2 protobufjs: 7.5.4 ws: 8.19.0 + optionalDependencies: + '@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6) transitivePeerDependencies: - bufferutil - supports-color @@ -8698,7 +8164,6 @@ snapshots: '@hono/node-server@1.19.10(hono@4.12.7)': dependencies: hono: 4.12.7 - optional: true '@huggingface/jinja@0.5.5': {} @@ -9025,9 +8490,9 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - '@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-agent-core@0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -9037,11 +8502,11 
@@ snapshots: - ws - zod - '@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) '@aws-sdk/client-bedrock-runtime': 3.1004.0 - '@google/genai': 1.44.0 + '@google/genai': 1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) '@mistralai/mistralai': 1.14.1 '@sinclair/typebox': 0.34.48 ajv: 8.18.0 @@ -9050,7 +8515,7 @@ snapshots: openai: 6.26.0(ws@8.19.0)(zod@4.3.6) partial-json: 0.1.7 proxy-agent: 6.5.0 - undici: 7.24.0 + undici: 7.24.1 zod-to-json-schema: 3.25.1(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' @@ -9061,18 +8526,18 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-coding-agent@0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.57.1 + '@mariozechner/pi-agent-core': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.58.0 '@silvia-odwyer/photon-node': 0.3.4 chalk: 5.6.2 cli-highlight: 2.1.11 diff: 8.0.3 extract-zip: 2.0.1 - file-type: 21.3.1 + file-type: 21.3.2 glob: 13.0.6 hosted-git-info: 9.0.2 ignore: 7.0.5 @@ -9080,7 +8545,7 @@ snapshots: minimatch: 10.2.4 proper-lockfile: 4.1.2 strip-ansi: 7.2.0 - undici: 7.24.0 + undici: 7.24.1 yaml: 2.8.2 optionalDependencies: '@mariozechner/clipboard': 0.3.2 @@ -9093,7 +8558,7 @@ snapshots: - ws - zod - '@mariozechner/pi-tui@0.57.1': + '@mariozechner/pi-tui@0.58.0': dependencies: '@types/mime-types': 2.1.4 chalk: 5.6.2 @@ -9141,6 +8606,28 @@ snapshots: - bufferutil - utf-8-validate + '@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)': + 
dependencies: + '@hono/node-server': 1.19.10(hono@4.12.7) + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + content-type: 1.0.5 + cors: 2.8.6 + cross-spawn: 7.0.6 + eventsource: 3.0.7 + eventsource-parser: 3.0.6 + express: 5.2.1 + express-rate-limit: 8.3.1(express@5.2.1) + hono: 4.12.7 + jose: 6.2.1 + json-schema-typed: 8.0.2 + pkce-challenge: 5.0.1 + raw-body: 3.0.2 + zod: 4.3.6 + zod-to-json-schema: 3.25.1(zod@4.3.6) + transitivePeerDependencies: + - supports-color + '@mozilla/readability@0.6.0': {} '@napi-rs/canvas-android-arm64@0.1.95': @@ -9781,10 +9268,9 @@ snapshots: '@oxlint/binding-win32-x64-msvc@1.55.0': optional: true - '@pierre/diffs@1.0.11(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@pierre/diffs@1.1.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@shikijs/core': 3.23.0 - '@shikijs/engine-javascript': 3.23.0 + '@pierre/theme': 0.0.22 '@shikijs/transformers': 3.23.0 diff: 8.0.3 hast-util-to-html: 9.0.5 @@ -9793,6 +9279,8 @@ snapshots: react-dom: 19.2.4(react@19.2.4) shiki: 3.23.0 + '@pierre/theme@0.0.22': {} + '@pinojs/redact@0.4.0': {} '@pkgjs/parseargs@0.11.0': @@ -9978,7 +9466,7 @@ snapshots: '@slack/oauth': 3.0.4 '@slack/socket-mode': 2.0.5 '@slack/types': 2.20.0 - '@slack/web-api': 7.14.1 + '@slack/web-api': 7.15.0 '@types/express': 5.0.6 axios: 1.13.5 express: 5.2.1 @@ -9995,10 +9483,14 @@ snapshots: dependencies: '@types/node': 25.5.0 + '@slack/logger@4.0.1': + dependencies: + '@types/node': 25.5.0 + '@slack/oauth@3.0.4': dependencies: '@slack/logger': 4.0.0 - '@slack/web-api': 7.14.1 + '@slack/web-api': 7.15.0 '@types/jsonwebtoken': 9.0.10 '@types/node': 25.5.0 jsonwebtoken: 9.0.3 @@ -10008,7 +9500,7 @@ snapshots: '@slack/socket-mode@2.0.5': dependencies: '@slack/logger': 4.0.0 - '@slack/web-api': 7.14.1 + '@slack/web-api': 7.15.0 '@types/node': 25.5.0 '@types/ws': 8.18.1 eventemitter3: 5.0.4 @@ -10020,13 +9512,15 @@ snapshots: '@slack/types@2.20.0': {} - '@slack/web-api@7.14.1': + '@slack/types@2.20.1': {} + + 
'@slack/web-api@7.15.0': dependencies: - '@slack/logger': 4.0.0 - '@slack/types': 2.20.0 + '@slack/logger': 4.0.1 + '@slack/types': 2.20.1 '@types/node': 25.5.0 '@types/retry': 0.12.0 - axios: 1.13.5 + axios: 1.13.6 eventemitter3: 5.0.4 form-data: 2.5.4 is-electron: 2.2.2 @@ -10042,11 +9536,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/abort-controller@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/abort-controller@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10061,15 +9550,6 @@ snapshots: dependencies: tslib: 2.8.1 - '@smithy/config-resolver@4.4.10': - dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-config-provider': 4.2.2 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - tslib: 2.8.1 - '@smithy/config-resolver@4.4.11': dependencies: '@smithy/node-config-provider': 4.3.12 @@ -10114,19 +9594,6 @@ snapshots: '@smithy/uuid': 1.1.1 tslib: 2.8.1 - '@smithy/core@3.23.9': - dependencies: - '@smithy/middleware-serde': 4.2.12 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-stream': 4.5.17 - '@smithy/util-utf8': 4.2.2 - '@smithy/uuid': 1.1.2 - tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.10': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -10135,14 +9602,6 @@ snapshots: '@smithy/url-parser': 4.2.10 tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.11': - dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.12': dependencies: '@smithy/node-config-provider': 4.3.12 @@ -10161,7 +9620,7 @@ snapshots: '@smithy/eventstream-codec@4.2.11': dependencies: '@aws-crypto/crc32': 5.2.0 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 
'@smithy/util-hex-encoding': 4.2.2 tslib: 2.8.1 @@ -10174,7 +9633,7 @@ snapshots: '@smithy/eventstream-serde-browser@4.2.11': dependencies: '@smithy/eventstream-serde-universal': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/eventstream-serde-config-resolver@4.3.10': @@ -10184,7 +9643,7 @@ snapshots: '@smithy/eventstream-serde-config-resolver@4.3.11': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/eventstream-serde-node@4.2.10': @@ -10196,7 +9655,7 @@ snapshots: '@smithy/eventstream-serde-node@4.2.11': dependencies: '@smithy/eventstream-serde-universal': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/eventstream-serde-universal@4.2.10': @@ -10208,7 +9667,7 @@ snapshots: '@smithy/eventstream-serde-universal@4.2.11': dependencies: '@smithy/eventstream-codec': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/fetch-http-handler@5.3.11': @@ -10219,14 +9678,6 @@ snapshots: '@smithy/util-base64': 4.3.1 tslib: 2.8.1 - '@smithy/fetch-http-handler@5.3.13': - dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - tslib: 2.8.1 - '@smithy/fetch-http-handler@5.3.15': dependencies: '@smithy/protocol-http': 5.3.12 @@ -10249,13 +9700,6 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/hash-node@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - '@smithy/util-buffer-from': 4.2.2 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - '@smithy/hash-node@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10274,11 +9718,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/invalid-dependency@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/invalid-dependency@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10308,12 +9747,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - 
'@smithy/middleware-content-length@4.2.11': - dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/middleware-content-length@4.2.12': dependencies: '@smithy/protocol-http': 5.3.12 @@ -10331,17 +9764,6 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 - '@smithy/middleware-endpoint@4.4.23': - dependencies: - '@smithy/core': 3.23.9 - '@smithy/middleware-serde': 4.2.12 - '@smithy/node-config-provider': 4.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-middleware': 4.2.11 - tslib: 2.8.1 - '@smithy/middleware-endpoint@4.4.25': dependencies: '@smithy/core': 3.23.11 @@ -10365,18 +9787,6 @@ snapshots: '@smithy/uuid': 1.1.1 tslib: 2.8.1 - '@smithy/middleware-retry@4.4.40': - dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/service-error-classification': 4.2.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/uuid': 1.1.2 - tslib: 2.8.1 - '@smithy/middleware-retry@4.4.42': dependencies: '@smithy/node-config-provider': 4.3.12 @@ -10395,12 +9805,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-serde@4.2.12': - dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/middleware-serde@4.2.14': dependencies: '@smithy/core': 3.23.11 @@ -10413,11 +9817,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-stack@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/middleware-stack@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10430,13 +9829,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/node-config-provider@4.3.11': - dependencies: - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - 
'@smithy/node-config-provider@4.3.12': dependencies: '@smithy/property-provider': 4.2.12 @@ -10452,14 +9844,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/node-http-handler@4.4.14': - dependencies: - '@smithy/abort-controller': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/node-http-handler@4.4.16': dependencies: '@smithy/abort-controller': 4.2.12 @@ -10473,11 +9857,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/property-provider@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/property-provider@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10488,11 +9867,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/protocol-http@5.3.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/protocol-http@5.3.12': dependencies: '@smithy/types': 4.13.1 @@ -10504,12 +9878,6 @@ snapshots: '@smithy/util-uri-escape': 4.2.1 tslib: 2.8.1 - '@smithy/querystring-builder@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - '@smithy/util-uri-escape': 4.2.2 - tslib: 2.8.1 - '@smithy/querystring-builder@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10521,11 +9889,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/querystring-parser@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/querystring-parser@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10535,10 +9898,6 @@ snapshots: dependencies: '@smithy/types': 4.13.0 - '@smithy/service-error-classification@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - '@smithy/service-error-classification@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10548,11 +9907,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/shared-ini-file-loader@4.4.6': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/shared-ini-file-loader@4.4.7': dependencies: '@smithy/types': 4.13.1 @@ -10569,17 +9923,6 @@ 
snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/signature-v4@5.3.11': - dependencies: - '@smithy/is-array-buffer': 4.2.2 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-hex-encoding': 4.2.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-uri-escape': 4.2.2 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - '@smithy/signature-v4@5.3.12': dependencies: '@smithy/is-array-buffer': 4.2.2 @@ -10601,16 +9944,6 @@ snapshots: '@smithy/util-stream': 4.5.15 tslib: 2.8.1 - '@smithy/smithy-client@4.12.3': - dependencies: - '@smithy/core': 3.23.9 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-stack': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-stream': 4.5.17 - tslib: 2.8.1 - '@smithy/smithy-client@4.12.5': dependencies: '@smithy/core': 3.23.11 @@ -10635,12 +9968,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/url-parser@4.2.11': - dependencies: - '@smithy/querystring-parser': 4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/url-parser@4.2.12': dependencies: '@smithy/querystring-parser': 4.2.12 @@ -10705,13 +10032,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-defaults-mode-browser@4.3.39': - dependencies: - '@smithy/property-provider': 4.2.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/util-defaults-mode-browser@4.3.41': dependencies: '@smithy/property-provider': 4.2.12 @@ -10729,16 +10049,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-defaults-mode-node@4.2.42': - dependencies: - '@smithy/config-resolver': 4.4.10 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/util-defaults-mode-node@4.2.44': dependencies: '@smithy/config-resolver': 4.4.11 @@ -10755,12 +10065,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - 
'@smithy/util-endpoints@3.3.2': - dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/util-endpoints@3.3.3': dependencies: '@smithy/node-config-provider': 4.3.12 @@ -10780,11 +10084,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-middleware@4.2.11': - dependencies: - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/util-middleware@4.2.12': dependencies: '@smithy/types': 4.13.1 @@ -10796,12 +10095,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-retry@4.2.11': - dependencies: - '@smithy/service-error-classification': 4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@smithy/util-retry@4.2.12': dependencies: '@smithy/service-error-classification': 4.2.12 @@ -10819,17 +10112,6 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/util-stream@4.5.17': - dependencies: - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/node-http-handler': 4.4.14 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.2 - '@smithy/util-buffer-from': 4.2.2 - '@smithy/util-hex-encoding': 4.2.2 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - '@smithy/util-stream@4.5.19': dependencies: '@smithy/fetch-http-handler': 5.3.15 @@ -11219,36 +10501,36 @@ snapshots: '@types/node': 25.5.0 optional: true - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260312.1': + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260312.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260312.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260312.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-linux-x64@7.0.0-dev.20260312.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260313.1': 
optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260312.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260312.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260312.1': + '@typescript/native-preview@7.0.0-dev.20260313.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260312.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260312.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260312.1 - '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260312.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260312.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260312.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260312.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260313.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-win32-x64': 7.0.0-dev.20260313.1 '@typespec/ts-http-runtime@0.3.3': dependencies: @@ -11603,6 +10885,14 @@ snapshots: transitivePeerDependencies: - debug + axios@1.13.6: + dependencies: + follow-redirects: 1.15.11 + form-data: 2.5.4 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + b4a@1.8.0: {} babel-walk@3.0.0-canary-5: @@ -11916,6 +11206,11 @@ snapshots: core-util-is@1.0.3: {} + cors@2.8.6: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + croner@10.0.1: {} cross-spawn@7.0.6: @@ -12167,6 +11462,12 @@ snapshots: transitivePeerDependencies: - bare-abort-controller + eventsource-parser@3.0.6: {} + + eventsource@3.0.7: + dependencies: + eventsource-parser: 3.0.6 + execa@4.1.0: 
dependencies: cross-spawn: 7.0.6 @@ -12183,6 +11484,11 @@ snapshots: exponential-backoff@3.1.3: {} + express-rate-limit@8.3.1(express@5.2.1): + dependencies: + express: 5.2.1 + ip-address: 10.1.0 + express@4.22.1: dependencies: accepts: 1.3.8 @@ -12303,7 +11609,7 @@ snapshots: node-domexception: '@nolyfill/domexception@1.0.28' web-streams-polyfill: 3.3.3 - file-type@21.3.1: + file-type@21.3.2: dependencies: '@tokenizer/inflate': 0.4.1 strtok3: 10.3.4 @@ -12826,6 +12132,8 @@ snapshots: jose@4.15.9: {} + jose@6.2.1: {} + js-stringify@1.0.2: {} js-tokens@10.0.0: {} @@ -12868,7 +12176,7 @@ snapshots: saxes: 6.0.0 symbol-tree: 3.2.4 tough-cookie: 4.1.3 - undici: 7.24.0 + undici: 7.24.1 w3c-xmlserializer: 5.0.0 webidl-conversions: 8.0.1 whatwg-mimetype: 5.0.0 @@ -12893,6 +12201,8 @@ snapshots: json-schema-traverse@1.0.0: {} + json-schema-typed@8.0.2: {} + json-schema@0.4.0: {} json-stringify-safe@5.0.1: {} @@ -13279,7 +12589,7 @@ snapshots: '@tokenizer/token': 0.3.0 content-type: 1.0.5 debug: 4.4.3 - file-type: 21.3.1 + file-type: 21.3.2 media-typer: 1.1.0 strtok3: 10.3.4 token-types: 6.1.2 @@ -13492,86 +12802,11 @@ snapshots: ws: 8.19.0 zod: 4.3.6 - openai@6.27.0(ws@8.19.0)(zod@4.3.6): + openai@6.29.0(ws@8.19.0)(zod@4.3.6): optionalDependencies: ws: 8.19.0 zod: 4.3.6 - openclaw@2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)): - dependencies: - '@agentclientprotocol/sdk': 0.16.1(zod@4.3.6) - '@aws-sdk/client-bedrock': 3.1007.0 - '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) - '@clack/prompts': 1.1.0 - '@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) - '@grammyjs/runner': 2.0.3(grammy@1.41.1) - '@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1) - '@homebridge/ciao': 1.3.5 - '@larksuiteoapi/node-sdk': 1.59.0 - '@line/bot-sdk': 10.6.0 - '@lydell/node-pty': 1.2.0-beta.3 - '@mariozechner/pi-agent-core': 
0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.57.1 - '@mozilla/readability': 0.6.0 - '@napi-rs/canvas': 0.1.95 - '@sinclair/typebox': 0.34.48 - '@slack/bolt': 4.6.0(@types/express@5.0.6) - '@slack/web-api': 7.14.1 - '@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) - ajv: 8.18.0 - chalk: 5.6.2 - chokidar: 5.0.0 - cli-highlight: 2.1.11 - commander: 14.0.3 - croner: 10.0.1 - discord-api-types: 0.38.42 - dotenv: 17.3.1 - express: 5.2.1 - file-type: 21.3.1 - grammy: 1.41.1 - hono: 4.12.7 - https-proxy-agent: 8.0.0 - ipaddr.js: 2.3.0 - jiti: 2.6.1 - json5: 2.2.3 - jszip: 3.10.1 - linkedom: 0.18.12 - long: 5.3.2 - markdown-it: 14.1.1 - node-edge-tts: 1.2.10 - node-llama-cpp: 3.16.2(typescript@5.9.3) - opusscript: 0.1.1 - osc-progress: 0.3.0 - pdfjs-dist: 5.5.207 - playwright-core: 1.58.2 - qrcode-terminal: 0.12.0 - sharp: 0.34.5 - sqlite-vec: 0.1.7-alpha.2 - tar: 7.5.11 - tslog: 4.10.2 - undici: 7.22.0 - ws: 8.19.0 - yaml: 2.8.2 - zod: 4.3.6 - transitivePeerDependencies: - - '@discordjs/opus' - - '@modelcontextprotocol/sdk' - - '@types/express' - - audio-decode - - aws-crt - - bufferutil - - canvas - - debug - - encoding - - ffmpeg-static - - jimp - - link-preview-js - - node-opus - - supports-color - - utf-8-validate - opus-decoder@0.7.11: dependencies: '@wasm-audio-decoders/common': 9.0.7 @@ -13784,6 +13019,8 @@ snapshots: sonic-boom: 4.2.1 thread-stream: 3.1.0 + pkce-challenge@5.0.1: {} + playwright-core@1.58.2: {} playwright@1.58.2: @@ -14106,7 +13343,7 @@ snapshots: dependencies: glob: 10.5.0 - rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260312.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3): + rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260313.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.2 '@babel/helper-validator-identifier': 8.0.0-rc.2 
@@ -14119,7 +13356,7 @@ snapshots: obug: 2.1.1 rolldown: 1.0.0-rc.9 optionalDependencies: - '@typescript/native-preview': 7.0.0-dev.20260312.1 + '@typescript/native-preview': 7.0.0-dev.20260313.1 typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver @@ -14645,7 +13882,7 @@ snapshots: ts-algebra@2.0.0: {} - tsdown@0.21.2(@typescript/native-preview@7.0.0-dev.20260312.1)(typescript@5.9.3): + tsdown@0.21.2(@typescript/native-preview@7.0.0-dev.20260313.1)(typescript@5.9.3): dependencies: ansis: 4.2.0 cac: 7.0.0 @@ -14656,7 +13893,7 @@ snapshots: obug: 2.1.1 picomatch: 4.0.3 rolldown: 1.0.0-rc.9 - rolldown-plugin-dts: 0.22.5(@typescript/native-preview@7.0.0-dev.20260312.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3) + rolldown-plugin-dts: 0.22.5(@typescript/native-preview@7.0.0-dev.20260313.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3) semver: 7.7.4 tinyexec: 1.0.2 tinyglobby: 0.2.15 @@ -14725,9 +13962,7 @@ snapshots: undici-types@7.18.2: {} - undici@7.22.0: {} - - undici@7.24.0: {} + undici@7.24.1: {} unist-util-is@6.0.1: dependencies: diff --git a/scripts/ci-changed-scope.mjs b/scripts/ci-changed-scope.mjs index a4018b30a2c..c5ed28319b1 100644 --- a/scripts/ci-changed-scope.mjs +++ b/scripts/ci-changed-scope.mjs @@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs"; const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/; const SKILLS_PYTHON_SCOPE_RE = /^skills\//; +const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/; const MACOS_PROTOCOL_GEN_RE = /^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/; const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/; @@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) { runSkillsPython = true; } + if (CI_WORKFLOW_SCOPE_RE.test(path)) { + runMacos = true; + runAndroid = true; + runSkillsPython = true; + } + if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) { runMacos = true; } diff --git 
a/scripts/docker/cleanup-smoke/Dockerfile b/scripts/docker/cleanup-smoke/Dockerfile index 19b89f3ac62..07a2334aa41 100644 --- a/scripts/docker/cleanup-smoke/Dockerfile +++ b/scripts/docker/cleanup-smoke/Dockerfile @@ -5,6 +5,7 @@ FROM node:24-bookworm-slim@sha256:b4687aef2571c632a1953695ce4d61d6462a7eda471fe6 RUN --mount=type=cache,id=openclaw-cleanup-smoke-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-cleanup-smoke-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/docker/install-sh-e2e/Dockerfile b/scripts/docker/install-sh-e2e/Dockerfile index 539f18d295d..e8069bf1e77 100644 --- a/scripts/docker/install-sh-e2e/Dockerfile +++ b/scripts/docker/install-sh-e2e/Dockerfile @@ -5,6 +5,7 @@ FROM node:24-bookworm-slim@sha256:b4687aef2571c632a1953695ce4d61d6462a7eda471fe6 RUN --mount=type=cache,id=openclaw-install-sh-e2e-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-install-sh-e2e-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/docker/install-sh-nonroot/Dockerfile b/scripts/docker/install-sh-nonroot/Dockerfile index d0c085d9f69..8e29715dbfb 100644 --- a/scripts/docker/install-sh-nonroot/Dockerfile +++ b/scripts/docker/install-sh-nonroot/Dockerfile @@ -11,6 +11,7 @@ RUN --mount=type=cache,id=openclaw-install-sh-nonroot-apt-cache,target=/var/cach if [ "${attempt}" -eq 3 ]; then exit 1; fi; \ sleep 3; \ done; \ + DEBIAN_FRONTEND=noninteractive apt-get -o Acquire::Retries=3 upgrade -y --no-install-recommends; \ apt-get -o Acquire::Retries=3 install -y --no-install-recommends \ bash \ ca-certificates \ diff --git 
a/scripts/docker/install-sh-smoke/Dockerfile b/scripts/docker/install-sh-smoke/Dockerfile index 899af551aeb..ee37a24d6ce 100644 --- a/scripts/docker/install-sh-smoke/Dockerfile +++ b/scripts/docker/install-sh-smoke/Dockerfile @@ -11,6 +11,7 @@ RUN --mount=type=cache,id=openclaw-install-sh-smoke-apt-cache,target=/var/cache/ if [ "${attempt}" -eq 3 ]; then exit 1; fi; \ sleep 3; \ done; \ + DEBIAN_FRONTEND=noninteractive apt-get -o Acquire::Retries=3 upgrade -y --no-install-recommends; \ apt-get -o Acquire::Retries=3 install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/e2e/parallels-linux-smoke.sh b/scripts/e2e/parallels-linux-smoke.sh new file mode 100644 index 00000000000..dfed00bf89d --- /dev/null +++ b/scripts/e2e/parallels-linux-smoke.sh @@ -0,0 +1,613 @@ +#!/usr/bin/env bash +set -euo pipefail + +VM_NAME="Ubuntu 24.04.3 ARM64" +SNAPSHOT_HINT="fresh" +MODE="both" +OPENAI_API_KEY_ENV="OPENAI_API_KEY" +INSTALL_URL="https://openclaw.ai/install.sh" +HOST_PORT="18427" +HOST_PORT_EXPLICIT=0 +HOST_IP="" +LATEST_VERSION="" +JSON_OUTPUT=0 +KEEP_SERVER=0 + +MAIN_TGZ_DIR="$(mktemp -d)" +MAIN_TGZ_PATH="" +SERVER_PID="" +RUN_DIR="$(mktemp -d /tmp/openclaw-parallels-linux.XXXXXX)" +BUILD_LOCK_DIR="${TMPDIR:-/tmp}/openclaw-parallels-build.lock" + +TIMEOUT_SNAPSHOT_S=180 +TIMEOUT_BOOTSTRAP_S=600 +TIMEOUT_INSTALL_S=1200 +TIMEOUT_VERIFY_S=90 +TIMEOUT_ONBOARD_S=180 +TIMEOUT_AGENT_S=180 + +FRESH_MAIN_STATUS="skip" +FRESH_MAIN_VERSION="skip" +FRESH_GATEWAY_STATUS="skip" +FRESH_AGENT_STATUS="skip" +UPGRADE_STATUS="skip" +LATEST_INSTALLED_VERSION="skip" +UPGRADE_MAIN_VERSION="skip" +UPGRADE_GATEWAY_STATUS="skip" +UPGRADE_AGENT_STATUS="skip" +DAEMON_STATUS="systemd-user-unavailable" + +say() { + printf '==> %s\n' "$*" +} + +warn() { + printf 'warn: %s\n' "$*" >&2 +} + +die() { + printf 'error: %s\n' "$*" >&2 + exit 1 +} + +cleanup() { + if [[ -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + fi + rm -rf "$MAIN_TGZ_DIR" +} + +trap 
cleanup EXIT + +usage() { + cat <<'EOF' +Usage: bash scripts/e2e/parallels-linux-smoke.sh [options] + +Options: + --vm Parallels VM name. Default: "Ubuntu 24.04.3 ARM64" + --snapshot-hint Snapshot name substring/fuzzy match. Default: "fresh" + --mode + --openai-api-key-env Host env var name for OpenAI API key. Default: OPENAI_API_KEY + --install-url Installer URL for latest release. Default: https://openclaw.ai/install.sh + --host-port Host HTTP port for current-main tgz. Default: 18427 + --host-ip Override Parallels host IP. + --latest-version Override npm latest version lookup. + --keep-server Leave temp host HTTP server running. + --json Print machine-readable JSON summary. + -h, --help Show help. +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --vm) + VM_NAME="$2" + shift 2 + ;; + --snapshot-hint) + SNAPSHOT_HINT="$2" + shift 2 + ;; + --mode) + MODE="$2" + shift 2 + ;; + --openai-api-key-env) + OPENAI_API_KEY_ENV="$2" + shift 2 + ;; + --install-url) + INSTALL_URL="$2" + shift 2 + ;; + --host-port) + HOST_PORT="$2" + HOST_PORT_EXPLICIT=1 + shift 2 + ;; + --host-ip) + HOST_IP="$2" + shift 2 + ;; + --latest-version) + LATEST_VERSION="$2" + shift 2 + ;; + --keep-server) + KEEP_SERVER=1 + shift + ;; + --json) + JSON_OUTPUT=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown arg: $1" + ;; + esac +done + +case "$MODE" in + fresh|upgrade|both) ;; + *) + die "invalid --mode: $MODE" + ;; +esac + +OPENAI_API_KEY_VALUE="${!OPENAI_API_KEY_ENV:-}" +[[ -n "$OPENAI_API_KEY_VALUE" ]] || die "$OPENAI_API_KEY_ENV is required" + +resolve_snapshot_id() { + local json hint + json="$(prlctl snapshot-list "$VM_NAME" --json)" + hint="$SNAPSHOT_HINT" + SNAPSHOT_JSON="$json" SNAPSHOT_HINT="$hint" python3 - <<'PY' +import difflib +import json +import os +import sys + +payload = json.loads(os.environ["SNAPSHOT_JSON"]) +hint = os.environ["SNAPSHOT_HINT"].strip().lower() +best_id = None +best_score = -1.0 +for snapshot_id, meta in payload.items(): + name = 
str(meta.get("name", "")).strip() + lowered = name.lower() + score = 0.0 + if lowered == hint: + score = 10.0 + elif hint and hint in lowered: + score = 5.0 + len(hint) / max(len(lowered), 1) + else: + score = difflib.SequenceMatcher(None, hint, lowered).ratio() + if score > best_score: + best_score = score + best_id = snapshot_id +if not best_id: + sys.exit("no snapshot matched") +print(best_id) +PY +} + +resolve_host_ip() { + if [[ -n "$HOST_IP" ]]; then + printf '%s\n' "$HOST_IP" + return + fi + local detected + detected="$(ifconfig | awk '/inet 10\.211\./ { print $2; exit }')" + [[ -n "$detected" ]] || die "failed to detect Parallels host IP; pass --host-ip" + printf '%s\n' "$detected" +} + +is_host_port_free() { + local port="$1" + python3 - "$port" <<'PY' +import socket +import sys + +sock = socket.socket() +try: + sock.bind(("0.0.0.0", int(sys.argv[1]))) +except OSError: + raise SystemExit(1) +finally: + sock.close() +PY +} + +allocate_host_port() { + python3 - <<'PY' +import socket + +sock = socket.socket() +sock.bind(("0.0.0.0", 0)) +print(sock.getsockname()[1]) +sock.close() +PY +} + +resolve_host_port() { + if is_host_port_free "$HOST_PORT"; then + printf '%s\n' "$HOST_PORT" + return + fi + if [[ "$HOST_PORT_EXPLICIT" -eq 1 ]]; then + die "host port $HOST_PORT already in use" + fi + HOST_PORT="$(allocate_host_port)" + warn "host port 18427 busy; using $HOST_PORT" + printf '%s\n' "$HOST_PORT" +} + +guest_exec() { + prlctl exec "$VM_NAME" "$@" +} + +restore_snapshot() { + local snapshot_id="$1" + say "Restore snapshot $SNAPSHOT_HINT ($snapshot_id)" + prlctl snapshot-switch "$VM_NAME" --id "$snapshot_id" >/dev/null +} + +bootstrap_guest() { + guest_exec apt-get -o Acquire::Check-Date=false update + guest_exec apt-get install -y curl ca-certificates +} + +resolve_latest_version() { + if [[ -n "$LATEST_VERSION" ]]; then + printf '%s\n' "$LATEST_VERSION" + return + fi + npm view openclaw version --userconfig "$(mktemp)" +} + +current_build_commit() { + python3 
- <<'PY' +import json +import pathlib + +path = pathlib.Path("dist/build-info.json") +if not path.exists(): + print("") +else: + print(json.loads(path.read_text()).get("commit", "")) +PY +} + +acquire_build_lock() { + local owner_pid="" + while ! mkdir "$BUILD_LOCK_DIR" 2>/dev/null; do + if [[ -f "$BUILD_LOCK_DIR/pid" ]]; then + owner_pid="$(cat "$BUILD_LOCK_DIR/pid" 2>/dev/null || true)" + if [[ -n "$owner_pid" ]] && ! kill -0 "$owner_pid" >/dev/null 2>&1; then + warn "Removing stale Parallels build lock" + rm -rf "$BUILD_LOCK_DIR" + continue + fi + fi + sleep 1 + done + printf '%s\n' "$$" >"$BUILD_LOCK_DIR/pid" +} + +release_build_lock() { + if [[ -d "$BUILD_LOCK_DIR" ]]; then + rm -rf "$BUILD_LOCK_DIR" + fi +} + +ensure_current_build() { + local head build_commit + acquire_build_lock + head="$(git rev-parse HEAD)" + build_commit="$(current_build_commit)" + if [[ "$build_commit" == "$head" ]]; then + release_build_lock + return + fi + say "Build dist for current head" + pnpm build + build_commit="$(current_build_commit)" + release_build_lock + [[ "$build_commit" == "$head" ]] || die "dist/build-info.json still does not match HEAD after build" +} + +pack_main_tgz() { + say "Pack current main tgz" + ensure_current_build + local short_head pkg + short_head="$(git rev-parse --short HEAD)" + pkg="$( + npm pack --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/openclaw-main-$short_head.tgz" + cp "$MAIN_TGZ_DIR/$pkg" "$MAIN_TGZ_PATH" + say "Packed $MAIN_TGZ_PATH" + tar -xOf "$MAIN_TGZ_PATH" package/dist/build-info.json +} + +start_server() { + local host_ip="$1" + local artifact probe_url attempt + artifact="$(basename "$MAIN_TGZ_PATH")" + attempt=0 + while :; do + attempt=$((attempt + 1)) + say "Serve current main tgz on $host_ip:$HOST_PORT" + ( + cd "$MAIN_TGZ_DIR" + exec python3 -m http.server "$HOST_PORT" --bind 0.0.0.0 + ) 
>/tmp/openclaw-parallels-linux-http.log 2>&1 & + SERVER_PID=$! + sleep 1 + probe_url="http://127.0.0.1:$HOST_PORT/$artifact" + if kill -0 "$SERVER_PID" >/dev/null 2>&1 && curl -fsSI "$probe_url" >/dev/null 2>&1; then + return 0 + fi + kill "$SERVER_PID" >/dev/null 2>&1 || true + wait "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" + if [[ "$HOST_PORT_EXPLICIT" -eq 1 || $attempt -ge 3 ]]; then + die "failed to start reachable host HTTP server on port $HOST_PORT" + fi + HOST_PORT="$(allocate_host_port)" + warn "retrying host HTTP server on port $HOST_PORT" + done +} + +install_latest_release() { + guest_exec curl -fsSL "$INSTALL_URL" -o /tmp/openclaw-install.sh + guest_exec /usr/bin/env OPENCLAW_NO_ONBOARD=1 bash /tmp/openclaw-install.sh --no-onboard + guest_exec openclaw --version +} + +install_main_tgz() { + local host_ip="$1" + local temp_name="$2" + local tgz_url="http://$host_ip:$HOST_PORT/$(basename "$MAIN_TGZ_PATH")" + guest_exec curl -fsSL "$tgz_url" -o "/tmp/$temp_name" + guest_exec npm install -g "/tmp/$temp_name" --no-fund --no-audit + guest_exec openclaw --version +} + +verify_version_contains() { + local needle="$1" + local version + version="$(guest_exec openclaw --version)" + printf '%s\n' "$version" + case "$version" in + *"$needle"*) ;; + *) + echo "version mismatch: expected substring $needle" >&2 + return 1 + ;; + esac +} + +run_ref_onboard() { + guest_exec /usr/bin/env "OPENAI_API_KEY=$OPENAI_API_KEY_VALUE" openclaw onboard \ + --non-interactive \ + --mode local \ + --auth-choice openai-api-key \ + --secret-input-mode ref \ + --gateway-port 18789 \ + --gateway-bind loopback \ + --skip-skills \ + --skip-health \ + --accept-risk \ + --json +} + +verify_local_turn() { + guest_exec /usr/bin/env "OPENAI_API_KEY=$OPENAI_API_KEY_VALUE" openclaw agent \ + --local \ + --agent main \ + --message ping \ + --json +} + +phase_log_path() { + printf '%s/%s.log\n' "$RUN_DIR" "$1" +} + +extract_last_version() { + local log_path="$1" + python3 - "$log_path" 
<<'PY' +import pathlib +import re +import sys + +text = pathlib.Path(sys.argv[1]).read_text(errors="replace") +matches = re.findall(r"OpenClaw [^\r\n]+ \([0-9a-f]{7,}\)", text) +print(matches[-1] if matches else "") +PY +} + +show_log_excerpt() { + local log_path="$1" + warn "log tail: $log_path" + tail -n 80 "$log_path" >&2 || true +} + +phase_run() { + local phase_id="$1" + local timeout_s="$2" + shift 2 + + local log_path pid start rc timed_out + log_path="$(phase_log_path "$phase_id")" + say "$phase_id" + start=$SECONDS + timed_out=0 + + ( + "$@" + ) >"$log_path" 2>&1 & + pid=$! + + while kill -0 "$pid" >/dev/null 2>&1; do + if (( SECONDS - start >= timeout_s )); then + timed_out=1 + kill "$pid" >/dev/null 2>&1 || true + sleep 2 + kill -9 "$pid" >/dev/null 2>&1 || true + break + fi + sleep 1 + done + + set +e + wait "$pid" + rc=$? + set -e + + if (( timed_out )); then + warn "$phase_id timed out after ${timeout_s}s" + printf 'timeout after %ss\n' "$timeout_s" >>"$log_path" + show_log_excerpt "$log_path" + return 124 + fi + + if [[ $rc -ne 0 ]]; then + warn "$phase_id failed (rc=$rc)" + show_log_excerpt "$log_path" + return "$rc" + fi + + return 0 +} + +write_summary_json() { + local summary_path="$RUN_DIR/summary.json" + python3 - "$summary_path" <<'PY' +import json +import os +import sys + +summary = { + "vm": os.environ["SUMMARY_VM"], + "snapshotHint": os.environ["SUMMARY_SNAPSHOT_HINT"], + "snapshotId": os.environ["SUMMARY_SNAPSHOT_ID"], + "mode": os.environ["SUMMARY_MODE"], + "latestVersion": os.environ["SUMMARY_LATEST_VERSION"], + "currentHead": os.environ["SUMMARY_CURRENT_HEAD"], + "runDir": os.environ["SUMMARY_RUN_DIR"], + "daemon": os.environ["SUMMARY_DAEMON_STATUS"], + "freshMain": { + "status": os.environ["SUMMARY_FRESH_MAIN_STATUS"], + "version": os.environ["SUMMARY_FRESH_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_FRESH_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_FRESH_AGENT_STATUS"], + }, + "upgrade": { + "status": 
os.environ["SUMMARY_UPGRADE_STATUS"], + "latestVersionInstalled": os.environ["SUMMARY_LATEST_INSTALLED_VERSION"], + "mainVersion": os.environ["SUMMARY_UPGRADE_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_UPGRADE_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_UPGRADE_AGENT_STATUS"], + }, +} +with open(sys.argv[1], "w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2, sort_keys=True) +print(sys.argv[1]) +PY +} + +run_fresh_main_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "fresh.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" + phase_run "fresh.bootstrap-guest" "$TIMEOUT_BOOTSTRAP_S" bootstrap_guest + phase_run "fresh.install-latest-bootstrap" "$TIMEOUT_INSTALL_S" install_latest_release + phase_run "fresh.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-fresh.tgz" + FRESH_MAIN_VERSION="$(extract_last_version "$(phase_log_path fresh.install-main)")" + phase_run "fresh.verify-main-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$(git rev-parse --short=7 HEAD)" + phase_run "fresh.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard + FRESH_GATEWAY_STATUS="skipped-no-detached-linux-gateway" + phase_run "fresh.first-local-agent-turn" "$TIMEOUT_AGENT_S" verify_local_turn + FRESH_AGENT_STATUS="pass" +} + +run_upgrade_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "upgrade.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" + phase_run "upgrade.bootstrap-guest" "$TIMEOUT_BOOTSTRAP_S" bootstrap_guest + phase_run "upgrade.install-latest" "$TIMEOUT_INSTALL_S" install_latest_release + LATEST_INSTALLED_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-latest)")" + phase_run "upgrade.verify-latest-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$LATEST_VERSION" + phase_run "upgrade.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-upgrade.tgz" + UPGRADE_MAIN_VERSION="$(extract_last_version 
"$(phase_log_path upgrade.install-main)")" + phase_run "upgrade.verify-main-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$(git rev-parse --short=7 HEAD)" + phase_run "upgrade.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard + UPGRADE_GATEWAY_STATUS="skipped-no-detached-linux-gateway" + phase_run "upgrade.first-local-agent-turn" "$TIMEOUT_AGENT_S" verify_local_turn + UPGRADE_AGENT_STATUS="pass" +} + +SNAPSHOT_ID="$(resolve_snapshot_id)" +LATEST_VERSION="$(resolve_latest_version)" +HOST_IP="$(resolve_host_ip)" +HOST_PORT="$(resolve_host_port)" + +say "VM: $VM_NAME" +say "Snapshot hint: $SNAPSHOT_HINT" +say "Latest npm version: $LATEST_VERSION" +say "Current head: $(git rev-parse --short HEAD)" +say "Run logs: $RUN_DIR" + +pack_main_tgz +start_server "$HOST_IP" + +if [[ "$MODE" == "fresh" || "$MODE" == "both" ]]; then + set +e + run_fresh_main_lane "$SNAPSHOT_ID" "$HOST_IP" + fresh_rc=$? + set -e + if [[ $fresh_rc -eq 0 ]]; then + FRESH_MAIN_STATUS="pass" + else + FRESH_MAIN_STATUS="fail" + fi +fi + +if [[ "$MODE" == "upgrade" || "$MODE" == "both" ]]; then + set +e + run_upgrade_lane "$SNAPSHOT_ID" "$HOST_IP" + upgrade_rc=$? 
+ set -e + if [[ $upgrade_rc -eq 0 ]]; then + UPGRADE_STATUS="pass" + else + UPGRADE_STATUS="fail" + fi +fi + +if [[ "$KEEP_SERVER" -eq 0 && -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" +fi + +SUMMARY_JSON_PATH="$( + SUMMARY_VM="$VM_NAME" \ + SUMMARY_SNAPSHOT_HINT="$SNAPSHOT_HINT" \ + SUMMARY_SNAPSHOT_ID="$SNAPSHOT_ID" \ + SUMMARY_MODE="$MODE" \ + SUMMARY_LATEST_VERSION="$LATEST_VERSION" \ + SUMMARY_CURRENT_HEAD="$(git rev-parse --short HEAD)" \ + SUMMARY_RUN_DIR="$RUN_DIR" \ + SUMMARY_DAEMON_STATUS="$DAEMON_STATUS" \ + SUMMARY_FRESH_MAIN_STATUS="$FRESH_MAIN_STATUS" \ + SUMMARY_FRESH_MAIN_VERSION="$FRESH_MAIN_VERSION" \ + SUMMARY_FRESH_GATEWAY_STATUS="$FRESH_GATEWAY_STATUS" \ + SUMMARY_FRESH_AGENT_STATUS="$FRESH_AGENT_STATUS" \ + SUMMARY_UPGRADE_STATUS="$UPGRADE_STATUS" \ + SUMMARY_LATEST_INSTALLED_VERSION="$LATEST_INSTALLED_VERSION" \ + SUMMARY_UPGRADE_MAIN_VERSION="$UPGRADE_MAIN_VERSION" \ + SUMMARY_UPGRADE_GATEWAY_STATUS="$UPGRADE_GATEWAY_STATUS" \ + SUMMARY_UPGRADE_AGENT_STATUS="$UPGRADE_AGENT_STATUS" \ + write_summary_json +)" + +if [[ "$JSON_OUTPUT" -eq 1 ]]; then + cat "$SUMMARY_JSON_PATH" +else + printf '\nSummary:\n' + printf ' daemon: %s\n' "$DAEMON_STATUS" + printf ' fresh-main: %s (%s)\n' "$FRESH_MAIN_STATUS" "$FRESH_MAIN_VERSION" + printf ' latest->main: %s (%s)\n' "$UPGRADE_STATUS" "$UPGRADE_MAIN_VERSION" + printf ' logs: %s\n' "$RUN_DIR" + printf ' summary: %s\n' "$SUMMARY_JSON_PATH" +fi + +if [[ "$FRESH_MAIN_STATUS" == "fail" || "$UPGRADE_STATUS" == "fail" ]]; then + exit 1 +fi diff --git a/scripts/e2e/parallels-macos-smoke.sh b/scripts/e2e/parallels-macos-smoke.sh new file mode 100644 index 00000000000..4de2fb19ae3 --- /dev/null +++ b/scripts/e2e/parallels-macos-smoke.sh @@ -0,0 +1,752 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)"

VM_NAME="macOS Tahoe"
SNAPSHOT_HINT="macOS 26.3.1 fresh"
MODE="both"
OPENAI_API_KEY_ENV="OPENAI_API_KEY"
INSTALL_URL="https://openclaw.ai/install.sh"
HOST_PORT="18425"
HOST_PORT_EXPLICIT=0
HOST_IP=""
LATEST_VERSION=""
KEEP_SERVER=0
CHECK_LATEST_REF=1
JSON_OUTPUT=0
GUEST_OPENCLAW_BIN="/opt/homebrew/bin/openclaw"
GUEST_OPENCLAW_ENTRY="/opt/homebrew/lib/node_modules/openclaw/openclaw.mjs"
GUEST_NODE_BIN="/opt/homebrew/bin/node"
GUEST_NPM_BIN="/opt/homebrew/bin/npm"

MAIN_TGZ_DIR="$(mktemp -d)"
MAIN_TGZ_PATH=""
SERVER_PID=""
RUN_DIR="$(mktemp -d /tmp/openclaw-parallels-smoke.XXXXXX)"
BUILD_LOCK_DIR="${TMPDIR:-/tmp}/openclaw-parallels-build.lock"

# Per-phase timeouts (seconds).
TIMEOUT_INSTALL_S=900
TIMEOUT_VERIFY_S=60
TIMEOUT_ONBOARD_S=180
TIMEOUT_GATEWAY_S=60
TIMEOUT_AGENT_S=120
TIMEOUT_PERMISSION_S=60
TIMEOUT_SNAPSHOT_S=180

# Lane result slots; "skip" until the corresponding phase actually runs.
FRESH_MAIN_VERSION="skip"
LATEST_INSTALLED_VERSION="skip"
UPGRADE_MAIN_VERSION="skip"
FRESH_GATEWAY_STATUS="skip"
UPGRADE_GATEWAY_STATUS="skip"
FRESH_AGENT_STATUS="skip"
UPGRADE_AGENT_STATUS="skip"

say() {
  printf '==> %s\n' "$*"
}

warn() {
  printf 'warn: %s\n' "$*" >&2
}

die() {
  printf 'error: %s\n' "$*" >&2
  exit 1
}

# EXIT-trap cleanup.
# BUG FIX: this previously killed $SERVER_PID and removed $MAIN_TGZ_DIR
# unconditionally (the trailing no-op `if [[ KEEP_SERVER -eq 0 ]]; then :; fi`
# was dead code), which made --keep-server useless: the trap tore down the
# HTTP server and deleted the directory it serves from. Honor --keep-server
# by leaving both in place.
cleanup() {
  if [[ "${KEEP_SERVER:-0}" -eq 1 ]]; then
    return
  fi
  if [[ -n "${SERVER_PID:-}" ]]; then
    kill "$SERVER_PID" >/dev/null 2>&1 || true
  fi
  rm -rf "$MAIN_TGZ_DIR"
}

trap cleanup EXIT

# Single-quote a value for safe interpolation into a guest shell command.
shell_quote() {
  local value="$1"
  printf "'%s'" "$(printf '%s' "$value" | sed "s/'/'\"'\"'/g")"
}

usage() {
  cat <<'EOF'
Usage: bash scripts/e2e/parallels-macos-smoke.sh [options]

Options:
  --vm                     Parallels VM name. Default: "macOS Tahoe"
  --snapshot-hint          Snapshot name substring/fuzzy match.
                           Default: "macOS 26.3.1 fresh"
  --mode 
      fresh   = fresh snapshot -> current main tgz -> onboard smoke
      upgrade = fresh snapshot -> latest release -> current main tgz -> onboard smoke
      both    = run both lanes
  --openai-api-key-env     Host env var name for OpenAI API key.
+ Default: OPENAI_API_KEY + --install-url Installer URL for latest release. Default: https://openclaw.ai/install.sh + --host-port Host HTTP port for current-main tgz. Default: 18425 + --host-ip Override Parallels host IP. + --latest-version Override npm latest version lookup. + --skip-latest-ref-check Skip the known latest-release ref-mode precheck in upgrade lane. + --keep-server Leave temp host HTTP server running. + --json Print machine-readable JSON summary. + -h, --help Show help. +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --vm) + VM_NAME="$2" + shift 2 + ;; + --snapshot-hint) + SNAPSHOT_HINT="$2" + shift 2 + ;; + --mode) + MODE="$2" + shift 2 + ;; + --openai-api-key-env) + OPENAI_API_KEY_ENV="$2" + shift 2 + ;; + --install-url) + INSTALL_URL="$2" + shift 2 + ;; + --host-port) + HOST_PORT="$2" + HOST_PORT_EXPLICIT=1 + shift 2 + ;; + --host-ip) + HOST_IP="$2" + shift 2 + ;; + --latest-version) + LATEST_VERSION="$2" + shift 2 + ;; + --skip-latest-ref-check) + CHECK_LATEST_REF=0 + shift + ;; + --keep-server) + KEEP_SERVER=1 + shift + ;; + --json) + JSON_OUTPUT=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown arg: $1" + ;; + esac +done + +case "$MODE" in + fresh|upgrade|both) ;; + *) + die "invalid --mode: $MODE" + ;; +esac + +OPENAI_API_KEY_VALUE="${!OPENAI_API_KEY_ENV:-}" +[[ -n "$OPENAI_API_KEY_VALUE" ]] || die "$OPENAI_API_KEY_ENV is required" + +resolve_snapshot_id() { + local json hint + json="$(prlctl snapshot-list "$VM_NAME" --json)" + hint="$SNAPSHOT_HINT" + SNAPSHOT_JSON="$json" SNAPSHOT_HINT="$hint" python3 - <<'PY' +import difflib +import json +import os +import sys + +payload = json.loads(os.environ["SNAPSHOT_JSON"]) +hint = os.environ["SNAPSHOT_HINT"].strip().lower() +best_id = None +best_score = -1.0 +for snapshot_id, meta in payload.items(): + name = str(meta.get("name", "")).strip() + lowered = name.lower() + score = 0.0 + if lowered == hint: + score = 10.0 + elif hint and hint in lowered: + score = 5.0 + len(hint) / 
max(len(lowered), 1) + else: + score = difflib.SequenceMatcher(None, hint, lowered).ratio() + if score > best_score: + best_score = score + best_id = snapshot_id +if not best_id: + sys.exit("no snapshot matched") +print(best_id) +PY +} + +resolve_host_ip() { + if [[ -n "$HOST_IP" ]]; then + printf '%s\n' "$HOST_IP" + return + fi + + local detected + detected="$(ifconfig | awk '/inet 10\.211\./ { print $2; exit }')" + [[ -n "$detected" ]] || die "failed to detect Parallels host IP; pass --host-ip" + printf '%s\n' "$detected" +} + +is_host_port_free() { + local port="$1" + python3 - "$port" <<'PY' +import socket +import sys + +port = int(sys.argv[1]) +sock = socket.socket() +try: + sock.bind(("0.0.0.0", port)) +except OSError: + raise SystemExit(1) +finally: + sock.close() +PY +} + +allocate_host_port() { + python3 - <<'PY' +import socket + +sock = socket.socket() +sock.bind(("0.0.0.0", 0)) +print(sock.getsockname()[1]) +sock.close() +PY +} + +resolve_host_port() { + if is_host_port_free "$HOST_PORT"; then + printf '%s\n' "$HOST_PORT" + return + fi + if [[ "$HOST_PORT_EXPLICIT" -eq 1 ]]; then + die "host port $HOST_PORT already in use" + fi + HOST_PORT="$(allocate_host_port)" + warn "host port 18425 busy; using $HOST_PORT" + printf '%s\n' "$HOST_PORT" +} + +wait_for_current_user() { + local deadline + deadline=$((SECONDS + TIMEOUT_SNAPSHOT_S)) + while (( SECONDS < deadline )); do + if prlctl exec "$VM_NAME" --current-user whoami >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + return 1 +} + +guest_current_user_exec() { + prlctl exec "$VM_NAME" --current-user /usr/bin/env \ + PATH=/opt/homebrew/bin:/opt/homebrew/sbin:/usr/bin:/bin:/usr/sbin:/sbin \ + "$@" +} + +guest_script() { + local mode script + mode="$1" + script="$2" + PRL_GUEST_VM_NAME="$VM_NAME" PRL_GUEST_MODE="$mode" PRL_GUEST_SCRIPT="$script" /opt/homebrew/bin/expect <<'EOF' +log_user 1 +set timeout -1 +match_max 1048576 + +set vm $env(PRL_GUEST_VM_NAME) +set mode $env(PRL_GUEST_MODE) +set script 
$env(PRL_GUEST_SCRIPT) +set cmd [list prlctl enter $vm] +if {$mode eq "current-user"} { + lappend cmd --current-user +} + +spawn {*}$cmd +send -- "printf '__OPENCLAW_READY__\\n'\r" +expect "__OPENCLAW_READY__" +log_user 0 +send -- "export PS1='' PROMPT='' PROMPT2='' RPROMPT=''\r" +send -- "stty -echo\r" + +send -- "cat >/tmp/openclaw-prl.sh <<'__OPENCLAW_SCRIPT__'\r" +send -- $script +if {![string match "*\n" $script]} { + send -- "\r" +} +send -- "__OPENCLAW_SCRIPT__\r" +send -- "/bin/bash /tmp/openclaw-prl.sh; rc=\$?; rm -f /tmp/openclaw-prl.sh; printf '__OPENCLAW_RC__:%s\\n' \"\$rc\"; exit \"\$rc\"\r" +log_user 1 + +set rc 1 +expect { + -re {__OPENCLAW_RC__:(-?[0-9]+)} { + set rc $expect_out(1,string) + exp_continue + } + eof {} +} +catch wait result +exit $rc +EOF +} + +guest_current_user_sh() { + local script + script=$'set -eu\n' + script+=$'set -o pipefail\n' + script+=$'trap "" PIPE\n' + script+=$'umask 022\n' + script+=$'export PATH="/opt/homebrew/bin:/opt/homebrew/sbin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH:-}"\n' + script+=$'if [ -z "${HOME:-}" ]; then export HOME="/Users/$(id -un)"; fi\n' + script+=$'cd "$HOME"\n' + script+="$1" + guest_script current-user "$script" +} + +restore_snapshot() { + local snapshot_id="$1" + say "Restore snapshot $SNAPSHOT_HINT ($snapshot_id)" + prlctl snapshot-switch "$VM_NAME" --id "$snapshot_id" >/dev/null + wait_for_current_user || die "desktop user did not become ready in $VM_NAME" +} + +resolve_latest_version() { + if [[ -n "$LATEST_VERSION" ]]; then + printf '%s\n' "$LATEST_VERSION" + return + fi + npm view openclaw version --userconfig "$(mktemp)" +} + +install_latest_release() { + local install_url_q + install_url_q="$(shell_quote "$INSTALL_URL")" + guest_current_user_sh "$(cat <&2 + return 1 + ;; + esac +} + +pack_main_tgz() { + say "Pack current main tgz" + ensure_current_build + local short_head pkg + short_head="$(git rev-parse --short HEAD)" + pkg="$( + npm pack --ignore-scripts --json --pack-destination 
"$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/openclaw-main-$short_head.tgz" + cp "$MAIN_TGZ_DIR/$pkg" "$MAIN_TGZ_PATH" + say "Packed $MAIN_TGZ_PATH" + tar -xOf "$MAIN_TGZ_PATH" package/dist/build-info.json +} + +current_build_commit() { + python3 - <<'PY' +import json +import pathlib + +path = pathlib.Path("dist/build-info.json") +if not path.exists(): + print("") +else: + print(json.loads(path.read_text()).get("commit", "")) +PY +} + +acquire_build_lock() { + local owner_pid="" + while ! mkdir "$BUILD_LOCK_DIR" 2>/dev/null; do + if [[ -f "$BUILD_LOCK_DIR/pid" ]]; then + owner_pid="$(cat "$BUILD_LOCK_DIR/pid" 2>/dev/null || true)" + if [[ -n "$owner_pid" ]] && ! kill -0 "$owner_pid" >/dev/null 2>&1; then + warn "Removing stale Parallels build lock" + rm -rf "$BUILD_LOCK_DIR" + continue + fi + fi + sleep 1 + done + printf '%s\n' "$$" >"$BUILD_LOCK_DIR/pid" +} + +release_build_lock() { + if [[ -d "$BUILD_LOCK_DIR" ]]; then + rm -rf "$BUILD_LOCK_DIR" + fi +} + +ensure_current_build() { + local head build_commit + acquire_build_lock + head="$(git rev-parse HEAD)" + build_commit="$(current_build_commit)" + if [[ "$build_commit" == "$head" ]]; then + release_build_lock + return + fi + say "Build dist for current head" + pnpm build + build_commit="$(current_build_commit)" + release_build_lock + [[ "$build_commit" == "$head" ]] || die "dist/build-info.json still does not match HEAD after build" +} + +start_server() { + local host_ip="$1" + say "Serve current main tgz on $host_ip:$HOST_PORT" + ( + cd "$MAIN_TGZ_DIR" + exec python3 -m http.server "$HOST_PORT" --bind 0.0.0.0 + ) >/tmp/openclaw-parallels-http.log 2>&1 & + SERVER_PID=$! 
+ sleep 1 + kill -0 "$SERVER_PID" >/dev/null 2>&1 || die "failed to start host HTTP server" +} + +install_main_tgz() { + local host_ip="$1" + local temp_name="$2" + local tgz_url_q + tgz_url_q="$(shell_quote "http://$host_ip:$HOST_PORT/$(basename "$MAIN_TGZ_PATH")")" + guest_current_user_sh "$(cat <&2; exit 1; fi; }; check_path "\$root/openclaw"; check_path "\$root/openclaw/extensions"; if [ -d "\$root/openclaw/extensions" ]; then while IFS= read -r -d '' extension_dir; do check_path "\$extension_dir"; done < <(/usr/bin/find "\$root/openclaw/extensions" -mindepth 1 -maxdepth 1 -type d -print0); fi +EOF +)" + guest_current_user_exec /bin/bash -lc "$cmd" +} + +run_ref_onboard() { + guest_current_user_exec \ + /usr/bin/env "OPENAI_API_KEY=$OPENAI_API_KEY_VALUE" \ + "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" onboard \ + --non-interactive \ + --mode local \ + --auth-choice openai-api-key \ + --secret-input-mode ref \ + --gateway-port 18789 \ + --gateway-bind loopback \ + --install-daemon \ + --skip-skills \ + --accept-risk \ + --json +} + +verify_gateway() { + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --deep --require-rpc +} + +show_gateway_status_compat() { + if guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --help | grep -Fq -- "--require-rpc"; then + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --deep --require-rpc + return + fi + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --deep +} + +verify_turn() { + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" agent --agent main --message ping --json +} + +phase_log_path() { + printf '%s/%s.log\n' "$RUN_DIR" "$1" +} + +extract_last_version() { + local log_path="$1" + python3 - "$log_path" <<'PY' +import pathlib +import re +import sys + +text = pathlib.Path(sys.argv[1]).read_text(errors="replace") +matches = re.findall(r"OpenClaw [^\r\n]+ \([0-9a-f]{7,}\)", text) 
+print(matches[-1] if matches else "") +PY +} + +show_log_excerpt() { + local log_path="$1" + warn "log tail: $log_path" + tail -n 80 "$log_path" >&2 || true +} + +phase_run() { + local phase_id="$1" + local timeout_s="$2" + shift 2 + + local log_path pid start rc timed_out + log_path="$(phase_log_path "$phase_id")" + say "$phase_id" + start=$SECONDS + timed_out=0 + + ( + "$@" + ) >"$log_path" 2>&1 & + pid=$! + + while kill -0 "$pid" >/dev/null 2>&1; do + if (( SECONDS - start >= timeout_s )); then + timed_out=1 + kill "$pid" >/dev/null 2>&1 || true + sleep 2 + kill -9 "$pid" >/dev/null 2>&1 || true + break + fi + sleep 1 + done + + set +e + wait "$pid" + rc=$? + set -e + + if (( timed_out )); then + warn "$phase_id timed out after ${timeout_s}s" + printf 'timeout after %ss\n' "$timeout_s" >>"$log_path" + show_log_excerpt "$log_path" + return 124 + fi + + if [[ $rc -ne 0 ]]; then + warn "$phase_id failed (rc=$rc)" + show_log_excerpt "$log_path" + return "$rc" + fi + + return 0 +} + +write_summary_json() { + local summary_path="$RUN_DIR/summary.json" + python3 - "$summary_path" <<'PY' +import json +import os +import sys + +summary = { + "vm": os.environ["SUMMARY_VM"], + "snapshotHint": os.environ["SUMMARY_SNAPSHOT_HINT"], + "snapshotId": os.environ["SUMMARY_SNAPSHOT_ID"], + "mode": os.environ["SUMMARY_MODE"], + "latestVersion": os.environ["SUMMARY_LATEST_VERSION"], + "currentHead": os.environ["SUMMARY_CURRENT_HEAD"], + "runDir": os.environ["SUMMARY_RUN_DIR"], + "freshMain": { + "status": os.environ["SUMMARY_FRESH_MAIN_STATUS"], + "version": os.environ["SUMMARY_FRESH_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_FRESH_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_FRESH_AGENT_STATUS"], + }, + "upgrade": { + "precheck": os.environ["SUMMARY_UPGRADE_PRECHECK_STATUS"], + "status": os.environ["SUMMARY_UPGRADE_STATUS"], + "latestVersionInstalled": os.environ["SUMMARY_LATEST_INSTALLED_VERSION"], + "mainVersion": os.environ["SUMMARY_UPGRADE_MAIN_VERSION"], + "gateway": 
os.environ["SUMMARY_UPGRADE_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_UPGRADE_AGENT_STATUS"], + }, +} +with open(sys.argv[1], "w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2, sort_keys=True) +print(sys.argv[1]) +PY +} + +capture_latest_ref_failure() { + set +e + run_ref_onboard + local rc=$? + set -e + if [[ $rc -eq 0 ]]; then + say "Latest release ref-mode onboard passed" + return 0 + fi + warn "Latest release ref-mode onboard failed pre-upgrade" + set +e + show_gateway_status_compat || true + set -e + return 1 +} + +run_fresh_main_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "fresh.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" + phase_run "fresh.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-fresh.tgz" + FRESH_MAIN_VERSION="$(extract_last_version "$(phase_log_path fresh.install-main)")" + phase_run "fresh.verify-main-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$(git rev-parse --short=7 HEAD)" + phase_run "fresh.verify-bundle-permissions" "$TIMEOUT_PERMISSION_S" verify_bundle_permissions + phase_run "fresh.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard + phase_run "fresh.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway + FRESH_GATEWAY_STATUS="pass" + phase_run "fresh.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn + FRESH_AGENT_STATUS="pass" +} + +run_upgrade_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "upgrade.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" + phase_run "upgrade.install-latest" "$TIMEOUT_INSTALL_S" install_latest_release + LATEST_INSTALLED_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-latest)")" + phase_run "upgrade.verify-latest-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$LATEST_VERSION" + if [[ "$CHECK_LATEST_REF" -eq 1 ]]; then + if phase_run "upgrade.latest-ref-precheck" "$TIMEOUT_ONBOARD_S" capture_latest_ref_failure; then + 
UPGRADE_PRECHECK_STATUS="latest-ref-pass" + else + UPGRADE_PRECHECK_STATUS="latest-ref-fail" + fi + else + UPGRADE_PRECHECK_STATUS="skipped" + fi + phase_run "upgrade.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-upgrade.tgz" + UPGRADE_MAIN_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-main)")" + phase_run "upgrade.verify-main-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$(git rev-parse --short=7 HEAD)" + phase_run "upgrade.verify-bundle-permissions" "$TIMEOUT_PERMISSION_S" verify_bundle_permissions + phase_run "upgrade.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard + phase_run "upgrade.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway + UPGRADE_GATEWAY_STATUS="pass" + phase_run "upgrade.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn + UPGRADE_AGENT_STATUS="pass" +} + +FRESH_MAIN_STATUS="skip" +UPGRADE_STATUS="skip" +UPGRADE_PRECHECK_STATUS="skip" + +SNAPSHOT_ID="$(resolve_snapshot_id)" +LATEST_VERSION="$(resolve_latest_version)" +HOST_IP="$(resolve_host_ip)" +HOST_PORT="$(resolve_host_port)" + +say "VM: $VM_NAME" +say "Snapshot hint: $SNAPSHOT_HINT" +say "Latest npm version: $LATEST_VERSION" +say "Current head: $(git rev-parse --short HEAD)" +say "Run logs: $RUN_DIR" + +pack_main_tgz +start_server "$HOST_IP" + +if [[ "$MODE" == "fresh" || "$MODE" == "both" ]]; then + set +e + run_fresh_main_lane "$SNAPSHOT_ID" "$HOST_IP" + fresh_rc=$? + set -e + if [[ $fresh_rc -eq 0 ]]; then + FRESH_MAIN_STATUS="pass" + else + FRESH_MAIN_STATUS="fail" + fi +fi + +if [[ "$MODE" == "upgrade" || "$MODE" == "both" ]]; then + set +e + run_upgrade_lane "$SNAPSHOT_ID" "$HOST_IP" + upgrade_rc=$? 
+ set -e + if [[ $upgrade_rc -eq 0 ]]; then + UPGRADE_STATUS="pass" + else + UPGRADE_STATUS="fail" + fi +fi + +if [[ "$KEEP_SERVER" -eq 0 && -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" +fi + +SUMMARY_JSON_PATH="$( + SUMMARY_VM="$VM_NAME" \ + SUMMARY_SNAPSHOT_HINT="$SNAPSHOT_HINT" \ + SUMMARY_SNAPSHOT_ID="$SNAPSHOT_ID" \ + SUMMARY_MODE="$MODE" \ + SUMMARY_LATEST_VERSION="$LATEST_VERSION" \ + SUMMARY_CURRENT_HEAD="$(git rev-parse --short HEAD)" \ + SUMMARY_RUN_DIR="$RUN_DIR" \ + SUMMARY_FRESH_MAIN_STATUS="$FRESH_MAIN_STATUS" \ + SUMMARY_FRESH_MAIN_VERSION="$FRESH_MAIN_VERSION" \ + SUMMARY_FRESH_GATEWAY_STATUS="$FRESH_GATEWAY_STATUS" \ + SUMMARY_FRESH_AGENT_STATUS="$FRESH_AGENT_STATUS" \ + SUMMARY_UPGRADE_PRECHECK_STATUS="$UPGRADE_PRECHECK_STATUS" \ + SUMMARY_UPGRADE_STATUS="$UPGRADE_STATUS" \ + SUMMARY_LATEST_INSTALLED_VERSION="$LATEST_INSTALLED_VERSION" \ + SUMMARY_UPGRADE_MAIN_VERSION="$UPGRADE_MAIN_VERSION" \ + SUMMARY_UPGRADE_GATEWAY_STATUS="$UPGRADE_GATEWAY_STATUS" \ + SUMMARY_UPGRADE_AGENT_STATUS="$UPGRADE_AGENT_STATUS" \ + write_summary_json +)" + +if [[ "$JSON_OUTPUT" -eq 1 ]]; then + cat "$SUMMARY_JSON_PATH" +else + printf '\nSummary:\n' + printf ' fresh-main: %s (%s)\n' "$FRESH_MAIN_STATUS" "$FRESH_MAIN_VERSION" + printf ' latest->main precheck: %s (%s)\n' "$UPGRADE_PRECHECK_STATUS" "$LATEST_INSTALLED_VERSION" + printf ' latest->main: %s (%s)\n' "$UPGRADE_STATUS" "$UPGRADE_MAIN_VERSION" + printf ' logs: %s\n' "$RUN_DIR" + printf ' summary: %s\n' "$SUMMARY_JSON_PATH" +fi + +if [[ "$FRESH_MAIN_STATUS" == "fail" || "$UPGRADE_STATUS" == "fail" ]]; then + exit 1 +fi diff --git a/scripts/e2e/parallels-windows-smoke.sh b/scripts/e2e/parallels-windows-smoke.sh new file mode 100644 index 00000000000..3b9ec366790 --- /dev/null +++ b/scripts/e2e/parallels-windows-smoke.sh @@ -0,0 +1,856 @@ +#!/usr/bin/env bash +set -euo pipefail + +VM_NAME="Windows 11" +SNAPSHOT_HINT="pre-openclaw-native-e2e-2026-03-12" +MODE="both" 
+OPENAI_API_KEY_ENV="OPENAI_API_KEY" +INSTALL_URL="https://openclaw.ai/install.ps1" +HOST_PORT="18426" +HOST_PORT_EXPLICIT=0 +HOST_IP="" +LATEST_VERSION="" +JSON_OUTPUT=0 +KEEP_SERVER=0 +CHECK_LATEST_REF=1 + +MAIN_TGZ_DIR="$(mktemp -d)" +MAIN_TGZ_PATH="" +MINGIT_ZIP_PATH="" +MINGIT_ZIP_NAME="" +SERVER_PID="" +RUN_DIR="$(mktemp -d /tmp/openclaw-parallels-windows.XXXXXX)" +BUILD_LOCK_DIR="${TMPDIR:-/tmp}/openclaw-parallels-build.lock" + +TIMEOUT_SNAPSHOT_S=240 +TIMEOUT_INSTALL_S=1200 +TIMEOUT_VERIFY_S=120 +TIMEOUT_ONBOARD_S=240 +TIMEOUT_GATEWAY_S=120 +TIMEOUT_AGENT_S=180 + +FRESH_MAIN_STATUS="skip" +FRESH_MAIN_VERSION="skip" +FRESH_GATEWAY_STATUS="skip" +FRESH_AGENT_STATUS="skip" +UPGRADE_STATUS="skip" +UPGRADE_PRECHECK_STATUS="skip" +LATEST_INSTALLED_VERSION="skip" +UPGRADE_MAIN_VERSION="skip" +UPGRADE_GATEWAY_STATUS="skip" +UPGRADE_AGENT_STATUS="skip" + +say() { + printf '==> %s\n' "$*" +} + +warn() { + printf 'warn: %s\n' "$*" >&2 +} + +die() { + printf 'error: %s\n' "$*" >&2 + exit 1 +} + +cleanup() { + if [[ -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + fi + rm -rf "$MAIN_TGZ_DIR" +} + +trap cleanup EXIT + +usage() { + cat <<'EOF' +Usage: bash scripts/e2e/parallels-windows-smoke.sh [options] + +Options: + --vm Parallels VM name. Default: "Windows 11" + --snapshot-hint Snapshot name substring/fuzzy match. + Default: "pre-openclaw-native-e2e-2026-03-12" + --mode + --openai-api-key-env Host env var name for OpenAI API key. + Default: OPENAI_API_KEY + --install-url Installer URL for latest release. Default: https://openclaw.ai/install.ps1 + --host-port Host HTTP port for current-main tgz. Default: 18426 + --host-ip Override Parallels host IP. + --latest-version Override npm latest version lookup. + --skip-latest-ref-check Skip latest-release ref-mode precheck. + --keep-server Leave temp host HTTP server running. + --json Print machine-readable JSON summary. + -h, --help Show help. 
+EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --vm) + VM_NAME="$2" + shift 2 + ;; + --snapshot-hint) + SNAPSHOT_HINT="$2" + shift 2 + ;; + --mode) + MODE="$2" + shift 2 + ;; + --openai-api-key-env) + OPENAI_API_KEY_ENV="$2" + shift 2 + ;; + --install-url) + INSTALL_URL="$2" + shift 2 + ;; + --host-port) + HOST_PORT="$2" + HOST_PORT_EXPLICIT=1 + shift 2 + ;; + --host-ip) + HOST_IP="$2" + shift 2 + ;; + --latest-version) + LATEST_VERSION="$2" + shift 2 + ;; + --skip-latest-ref-check) + CHECK_LATEST_REF=0 + shift + ;; + --keep-server) + KEEP_SERVER=1 + shift + ;; + --json) + JSON_OUTPUT=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown arg: $1" + ;; + esac +done + +case "$MODE" in + fresh|upgrade|both) ;; + *) + die "invalid --mode: $MODE" + ;; +esac + +OPENAI_API_KEY_VALUE="${!OPENAI_API_KEY_ENV:-}" +[[ -n "$OPENAI_API_KEY_VALUE" ]] || die "$OPENAI_API_KEY_ENV is required" + +ps_single_quote() { + printf "%s" "$1" | sed "s/'/''/g" +} + +ps_array_literal() { + local arg quoted parts=() + for arg in "$@"; do + quoted="$(ps_single_quote "$arg")" + parts+=("'$quoted'") + done + local joined="" + local part + for part in "${parts[@]}"; do + if [[ -n "$joined" ]]; then + joined+=", " + fi + joined+="$part" + done + printf '@(%s)' "$joined" +} + +resolve_snapshot_id() { + local json hint + json="$(prlctl snapshot-list "$VM_NAME" --json)" + hint="$SNAPSHOT_HINT" + SNAPSHOT_JSON="$json" SNAPSHOT_HINT="$hint" python3 - <<'PY' +import difflib +import json +import os +import sys + +payload = json.loads(os.environ["SNAPSHOT_JSON"]) +hint = os.environ["SNAPSHOT_HINT"].strip().lower() +best_id = None +best_score = -1.0 +for snapshot_id, meta in payload.items(): + name = str(meta.get("name", "")).strip() + lowered = name.lower() + score = 0.0 + if lowered == hint: + score = 10.0 + elif hint and hint in lowered: + score = 5.0 + len(hint) / max(len(lowered), 1) + else: + score = difflib.SequenceMatcher(None, hint, lowered).ratio() + if score > best_score: + 
best_score = score + best_id = snapshot_id +if not best_id: + sys.exit("no snapshot matched") +print(best_id) +PY +} + +resolve_host_ip() { + if [[ -n "$HOST_IP" ]]; then + printf '%s\n' "$HOST_IP" + return + fi + local detected + detected="$(ifconfig | awk '/inet 10\.211\./ { print $2; exit }')" + [[ -n "$detected" ]] || die "failed to detect Parallels host IP; pass --host-ip" + printf '%s\n' "$detected" +} + +is_host_port_free() { + local port="$1" + python3 - "$port" <<'PY' +import socket +import sys + +port = int(sys.argv[1]) +sock = socket.socket() +try: + sock.bind(("0.0.0.0", port)) +except OSError: + raise SystemExit(1) +finally: + sock.close() +PY +} + +allocate_host_port() { + python3 - <<'PY' +import socket + +sock = socket.socket() +sock.bind(("0.0.0.0", 0)) +print(sock.getsockname()[1]) +sock.close() +PY +} + +resolve_host_port() { + if is_host_port_free "$HOST_PORT"; then + printf '%s\n' "$HOST_PORT" + return + fi + if [[ "$HOST_PORT_EXPLICIT" -eq 1 ]]; then + die "host port $HOST_PORT already in use" + fi + HOST_PORT="$(allocate_host_port)" + warn "host port 18426 busy; using $HOST_PORT" + printf '%s\n' "$HOST_PORT" +} + +guest_exec() { + prlctl exec "$VM_NAME" --current-user "$@" +} + +guest_powershell() { + local script="$1" + local encoded + encoded="$( + SCRIPT_CONTENT="$script" python3 - <<'PY' +import base64 +import os + +script = "$ProgressPreference = 'SilentlyContinue'\n" + os.environ["SCRIPT_CONTENT"] +payload = script.encode("utf-16le") +print(base64.b64encode(payload).decode("ascii")) +PY + )" + guest_exec powershell.exe -NoProfile -ExecutionPolicy Bypass -EncodedCommand "$encoded" +} + +guest_run_openclaw() { + local env_name="${1:-}" + local env_value="${2:-}" + shift 2 + + local args_literal stdout_name stderr_name env_name_q env_value_q + args_literal="$(ps_array_literal "$@")" + stdout_name="openclaw-stdout-$RANDOM-$RANDOM.log" + stderr_name="openclaw-stderr-$RANDOM-$RANDOM.log" + env_name_q="$(ps_single_quote "$env_name")" + 
env_value_q="$(ps_single_quote "$env_value")" + + guest_powershell "$(cat </dev/null +} + +verify_windows_user_ready() { + guest_exec cmd.exe /d /s /c "echo ready" +} + +wait_for_guest_ready() { + local deadline + deadline=$((SECONDS + TIMEOUT_SNAPSHOT_S)) + while (( SECONDS < deadline )); do + if verify_windows_user_ready >/dev/null 2>&1; then + return 0 + fi + sleep 3 + done + return 1 +} + +phase_log_path() { + printf '%s/%s.log\n' "$RUN_DIR" "$1" +} + +show_log_excerpt() { + local log_path="$1" + warn "log tail: $log_path" + tail -n 80 "$log_path" >&2 || true +} + +phase_run() { + local phase_id="$1" + local timeout_s="$2" + shift 2 + + local log_path pid rc timed_out + log_path="$(phase_log_path "$phase_id")" + say "$phase_id" + timed_out=0 + + ( + "$@" + ) >"$log_path" 2>&1 & + pid=$! + + ( + sleep "$timeout_s" + kill "$pid" >/dev/null 2>&1 || true + sleep 2 + kill -9 "$pid" >/dev/null 2>&1 || true + ) & + local killer_pid=$! + + set +e + wait "$pid" + rc=$? + set -e + + if kill -0 "$killer_pid" >/dev/null 2>&1; then + kill "$killer_pid" >/dev/null 2>&1 || true + wait "$killer_pid" >/dev/null 2>&1 || true + else + timed_out=1 + fi + + if (( timed_out )); then + warn "$phase_id timed out after ${timeout_s}s" + printf 'timeout after %ss\n' "$timeout_s" >>"$log_path" + show_log_excerpt "$log_path" + return 124 + fi + + if [[ $rc -ne 0 ]]; then + warn "$phase_id failed (rc=$rc)" + show_log_excerpt "$log_path" + return "$rc" + fi + + return 0 +} + +extract_last_version() { + local log_path="$1" + python3 - "$log_path" <<'PY' +import pathlib +import re +import sys + +text = pathlib.Path(sys.argv[1]).read_text(errors="replace") +matches = re.findall(r"OpenClaw [^\r\n]+ \([0-9a-f]{7,}\)", text) +print(matches[-1] if matches else "") +PY +} + +write_summary_json() { + local summary_path="$RUN_DIR/summary.json" + python3 - "$summary_path" <<'PY' +import json +import os +import sys + +summary = { + "vm": os.environ["SUMMARY_VM"], + "snapshotHint": 
os.environ["SUMMARY_SNAPSHOT_HINT"], + "snapshotId": os.environ["SUMMARY_SNAPSHOT_ID"], + "mode": os.environ["SUMMARY_MODE"], + "latestVersion": os.environ["SUMMARY_LATEST_VERSION"], + "currentHead": os.environ["SUMMARY_CURRENT_HEAD"], + "runDir": os.environ["SUMMARY_RUN_DIR"], + "freshMain": { + "status": os.environ["SUMMARY_FRESH_MAIN_STATUS"], + "version": os.environ["SUMMARY_FRESH_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_FRESH_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_FRESH_AGENT_STATUS"], + }, + "upgrade": { + "precheck": os.environ["SUMMARY_UPGRADE_PRECHECK_STATUS"], + "status": os.environ["SUMMARY_UPGRADE_STATUS"], + "latestVersionInstalled": os.environ["SUMMARY_LATEST_INSTALLED_VERSION"], + "mainVersion": os.environ["SUMMARY_UPGRADE_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_UPGRADE_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_UPGRADE_AGENT_STATUS"], + }, +} +with open(sys.argv[1], "w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2, sort_keys=True) +print(sys.argv[1]) +PY +} + +resolve_latest_version() { + if [[ -n "$LATEST_VERSION" ]]; then + printf '%s\n' "$LATEST_VERSION" + return + fi + npm view openclaw version --userconfig "$(mktemp)" +} + +resolve_mingit_download() { + python3 - <<'PY' +import json +import urllib.request + +req = urllib.request.Request( + "https://api.github.com/repos/git-for-windows/git/releases/latest", + headers={ + "User-Agent": "openclaw-parallels-smoke", + "Accept": "application/vnd.github+json", + }, +) +with urllib.request.urlopen(req, timeout=30) as response: + data = json.load(response) + +assets = data.get("assets", []) +preferred_names = [ + "MinGit-2.53.0.2-arm64.zip", + "MinGit-2.53.0.2-64-bit.zip", +] + +best = None +for wanted in preferred_names: + for asset in assets: + if asset.get("name") == wanted: + best = asset + break + if best: + break + +if best is None: + for asset in assets: + name = asset.get("name", "") + if name.startswith("MinGit-") and name.endswith(".zip") 
and "busybox" not in name: + best = asset + break + +if best is None: + raise SystemExit("no MinGit asset found") + +print(best["name"]) +print(best["browser_download_url"]) +PY +} + +current_build_commit() { + python3 - <<'PY' +import json +import pathlib + +path = pathlib.Path("dist/build-info.json") +if not path.exists(): + print("") +else: + print(json.loads(path.read_text()).get("commit", "")) +PY +} + +acquire_build_lock() { + local owner_pid="" + while ! mkdir "$BUILD_LOCK_DIR" 2>/dev/null; do + if [[ -f "$BUILD_LOCK_DIR/pid" ]]; then + owner_pid="$(cat "$BUILD_LOCK_DIR/pid" 2>/dev/null || true)" + if [[ -n "$owner_pid" ]] && ! kill -0 "$owner_pid" >/dev/null 2>&1; then + warn "Removing stale Parallels build lock" + rm -rf "$BUILD_LOCK_DIR" + continue + fi + fi + sleep 1 + done + printf '%s\n' "$$" >"$BUILD_LOCK_DIR/pid" +} + +release_build_lock() { + if [[ -d "$BUILD_LOCK_DIR" ]]; then + rm -rf "$BUILD_LOCK_DIR" + fi +} + +ensure_current_build() { + local head build_commit + acquire_build_lock + head="$(git rev-parse HEAD)" + build_commit="$(current_build_commit)" + if [[ "$build_commit" == "$head" ]]; then + release_build_lock + return + fi + say "Build dist for current head" + pnpm build + build_commit="$(current_build_commit)" + release_build_lock + [[ "$build_commit" == "$head" ]] || die "dist/build-info.json still does not match HEAD after build" +} + +ensure_guest_git() { + local host_ip="$1" + local mingit_url + mingit_url="http://$host_ip:$HOST_PORT/$MINGIT_ZIP_NAME" + if guest_exec cmd.exe /d /s /c "where git.exe >nul 2>nul && git.exe --version"; then + return + fi + guest_exec cmd.exe /d /s /c "if exist \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\" rmdir /s /q \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\"" + guest_exec cmd.exe /d /s /c "mkdir \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\"" + guest_exec cmd.exe /d /s /c "curl.exe -fsSL \"$mingit_url\" -o \"%TEMP%\\$MINGIT_ZIP_NAME\"" + guest_exec cmd.exe /d /s /c "tar.exe -xf 
\"%TEMP%\\$MINGIT_ZIP_NAME\" -C \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\"" + guest_exec cmd.exe /d /s /c "del /q \"%TEMP%\\$MINGIT_ZIP_NAME\" & set \"PATH=%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\\cmd;%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\\mingw64\\bin;%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\\usr\\bin;%PATH%\" && git.exe --version" +} + +pack_main_tgz() { + say "Pack current main tgz" + ensure_current_build + local mingit_name mingit_url + mapfile -t mingit_meta < <(resolve_mingit_download) + mingit_name="${mingit_meta[0]}" + mingit_url="${mingit_meta[1]}" + MINGIT_ZIP_NAME="$mingit_name" + MINGIT_ZIP_PATH="$MAIN_TGZ_DIR/$mingit_name" + if [[ ! -f "$MINGIT_ZIP_PATH" ]]; then + say "Download $MINGIT_ZIP_NAME" + curl -fsSL "$mingit_url" -o "$MINGIT_ZIP_PATH" + fi + local short_head pkg + short_head="$(git rev-parse --short HEAD)" + pkg="$( + npm pack --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/openclaw-main-$short_head.tgz" + cp "$MAIN_TGZ_DIR/$pkg" "$MAIN_TGZ_PATH" + say "Packed $MAIN_TGZ_PATH" + tar -xOf "$MAIN_TGZ_PATH" package/dist/build-info.json +} + +start_server() { + local host_ip="$1" + local artifact probe_url attempt + artifact="$(basename "$MAIN_TGZ_PATH")" + attempt=0 + while :; do + attempt=$((attempt + 1)) + say "Serve current main tgz on $host_ip:$HOST_PORT" + ( + cd "$MAIN_TGZ_DIR" + exec python3 -m http.server "$HOST_PORT" --bind 0.0.0.0 + ) >/tmp/openclaw-parallels-windows-http.log 2>&1 & + SERVER_PID=$! 
+ sleep 1 + probe_url="http://127.0.0.1:$HOST_PORT/$artifact" + if kill -0 "$SERVER_PID" >/dev/null 2>&1 && curl -fsSI "$probe_url" >/dev/null 2>&1; then + return 0 + fi + kill "$SERVER_PID" >/dev/null 2>&1 || true + wait "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" + if [[ "$HOST_PORT_EXPLICIT" -eq 1 || $attempt -ge 3 ]]; then + die "failed to start reachable host HTTP server on port $HOST_PORT" + fi + HOST_PORT="$(allocate_host_port)" + warn "retrying host HTTP server on port $HOST_PORT" + done +} + +install_latest_release() { + local install_url_q + install_url_q="$(ps_single_quote "$INSTALL_URL")" + guest_powershell "$(cat <&2 + return 1 + ;; + esac +} + +run_ref_onboard() { + local openai_key_q runner_name log_name done_name done_status + openai_key_q="$(ps_single_quote "$OPENAI_API_KEY_VALUE")" + runner_name="openclaw-onboard-$RANDOM-$RANDOM.ps1" + log_name="openclaw-onboard-$RANDOM-$RANDOM.log" + done_name="openclaw-onboard-$RANDOM-$RANDOM.done" + + guest_powershell "$(cat < "{1}" 2>&1' -f \$openclaw, \$log) + & cmd.exe /d /s /c \$cmdLine + Set-Content -Path \$done -Value ([string]\$LASTEXITCODE) +} catch { + if (Test-Path \$log) { + Add-Content -Path \$log -Value (\$_ | Out-String) + } else { + (\$_ | Out-String) | Set-Content -Path \$log + } + Set-Content -Path \$done -Value '1' +} +'@ | Set-Content -Path \$runner +Start-Process powershell.exe -ArgumentList @('-NoProfile', '-ExecutionPolicy', 'Bypass', '-File', \$runner) -WindowStyle Hidden | Out-Null +EOF +)" + + while :; do + done_status="$( + guest_powershell "\$done = Join-Path \$env:TEMP '$done_name'; if (Test-Path \$done) { (Get-Content \$done -Raw).Trim() }" + )" + done_status="${done_status//$'\r'/}" + if [[ -n "$done_status" ]]; then + guest_powershell "\$log = Join-Path \$env:TEMP '$log_name'; if (Test-Path \$log) { Get-Content \$log }" + [[ "$done_status" == "0" ]] + return $? 
+ fi + sleep 2 + done +} + +verify_gateway() { + guest_run_openclaw "" "" gateway status --deep --require-rpc +} + +show_gateway_status_compat() { + if guest_run_openclaw "" "" gateway status --help | grep -Fq -- "--require-rpc"; then + guest_run_openclaw "" "" gateway status --deep --require-rpc + return + fi + guest_run_openclaw "" "" gateway status --deep +} + +verify_turn() { + guest_run_openclaw "" "" agent --agent main --message ping --json +} + +capture_latest_ref_failure() { + set +e + run_ref_onboard + local rc=$? + set -e + if [[ $rc -eq 0 ]]; then + say "Latest release ref-mode onboard passed" + return 0 + fi + warn "Latest release ref-mode onboard failed pre-upgrade" + set +e + show_gateway_status_compat || true + set -e + return 1 +} + +run_fresh_main_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "fresh.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" || return $? + phase_run "fresh.wait-for-user" "$TIMEOUT_SNAPSHOT_S" wait_for_guest_ready || return $? + phase_run "fresh.ensure-git" "$TIMEOUT_INSTALL_S" ensure_guest_git "$host_ip" || return $? + phase_run "fresh.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-fresh.tgz" || return $? + FRESH_MAIN_VERSION="$(extract_last_version "$(phase_log_path fresh.install-main)")" + phase_run "fresh.verify-main-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$(git rev-parse --short=7 HEAD)" || return $? + phase_run "fresh.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard || return $? + phase_run "fresh.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway || return $? + FRESH_GATEWAY_STATUS="pass" + phase_run "fresh.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn || return $? + FRESH_AGENT_STATUS="pass" +} + +run_upgrade_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "upgrade.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" || return $? 
+ phase_run "upgrade.wait-for-user" "$TIMEOUT_SNAPSHOT_S" wait_for_guest_ready || return $? + phase_run "upgrade.install-latest" "$TIMEOUT_INSTALL_S" install_latest_release || return $? + LATEST_INSTALLED_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-latest)")" + phase_run "upgrade.verify-latest-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$LATEST_VERSION" || return $? + if [[ "$CHECK_LATEST_REF" -eq 1 ]]; then + if phase_run "upgrade.latest-ref-precheck" "$TIMEOUT_ONBOARD_S" capture_latest_ref_failure; then + UPGRADE_PRECHECK_STATUS="latest-ref-pass" + else + UPGRADE_PRECHECK_STATUS="latest-ref-fail" + fi + else + UPGRADE_PRECHECK_STATUS="skipped" + fi + phase_run "upgrade.ensure-git" "$TIMEOUT_INSTALL_S" ensure_guest_git "$host_ip" || return $? + phase_run "upgrade.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-upgrade.tgz" || return $? + UPGRADE_MAIN_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-main)")" + phase_run "upgrade.verify-main-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$(git rev-parse --short=7 HEAD)" || return $? + phase_run "upgrade.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard || return $? + phase_run "upgrade.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway || return $? + UPGRADE_GATEWAY_STATUS="pass" + phase_run "upgrade.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn || return $? + UPGRADE_AGENT_STATUS="pass" +} + +SNAPSHOT_ID="$(resolve_snapshot_id)" +LATEST_VERSION="$(resolve_latest_version)" +HOST_IP="$(resolve_host_ip)" +HOST_PORT="$(resolve_host_port)" + +say "VM: $VM_NAME" +say "Snapshot hint: $SNAPSHOT_HINT" +say "Latest npm version: $LATEST_VERSION" +say "Current head: $(git rev-parse --short HEAD)" +say "Run logs: $RUN_DIR" + +pack_main_tgz +start_server "$HOST_IP" + +if [[ "$MODE" == "fresh" || "$MODE" == "both" ]]; then + set +e + run_fresh_main_lane "$SNAPSHOT_ID" "$HOST_IP" + fresh_rc=$? 
+ set -e + if [[ $fresh_rc -eq 0 ]]; then + FRESH_MAIN_STATUS="pass" + else + FRESH_MAIN_STATUS="fail" + fi +fi + +if [[ "$MODE" == "upgrade" || "$MODE" == "both" ]]; then + set +e + run_upgrade_lane "$SNAPSHOT_ID" "$HOST_IP" + upgrade_rc=$? + set -e + if [[ $upgrade_rc -eq 0 ]]; then + UPGRADE_STATUS="pass" + else + UPGRADE_STATUS="fail" + fi +fi + +if [[ "$KEEP_SERVER" -eq 0 && -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" +fi + +SUMMARY_JSON_PATH="$( + SUMMARY_VM="$VM_NAME" \ + SUMMARY_SNAPSHOT_HINT="$SNAPSHOT_HINT" \ + SUMMARY_SNAPSHOT_ID="$SNAPSHOT_ID" \ + SUMMARY_MODE="$MODE" \ + SUMMARY_LATEST_VERSION="$LATEST_VERSION" \ + SUMMARY_CURRENT_HEAD="$(git rev-parse --short HEAD)" \ + SUMMARY_RUN_DIR="$RUN_DIR" \ + SUMMARY_FRESH_MAIN_STATUS="$FRESH_MAIN_STATUS" \ + SUMMARY_FRESH_MAIN_VERSION="$FRESH_MAIN_VERSION" \ + SUMMARY_FRESH_GATEWAY_STATUS="$FRESH_GATEWAY_STATUS" \ + SUMMARY_FRESH_AGENT_STATUS="$FRESH_AGENT_STATUS" \ + SUMMARY_UPGRADE_PRECHECK_STATUS="$UPGRADE_PRECHECK_STATUS" \ + SUMMARY_UPGRADE_STATUS="$UPGRADE_STATUS" \ + SUMMARY_LATEST_INSTALLED_VERSION="$LATEST_INSTALLED_VERSION" \ + SUMMARY_UPGRADE_MAIN_VERSION="$UPGRADE_MAIN_VERSION" \ + SUMMARY_UPGRADE_GATEWAY_STATUS="$UPGRADE_GATEWAY_STATUS" \ + SUMMARY_UPGRADE_AGENT_STATUS="$UPGRADE_AGENT_STATUS" \ + write_summary_json +)" + +if [[ "$JSON_OUTPUT" -eq 1 ]]; then + cat "$SUMMARY_JSON_PATH" +else + printf '\nSummary:\n' + printf ' fresh-main: %s (%s)\n' "$FRESH_MAIN_STATUS" "$FRESH_MAIN_VERSION" + printf ' latest->main precheck: %s (%s)\n' "$UPGRADE_PRECHECK_STATUS" "$LATEST_INSTALLED_VERSION" + printf ' latest->main: %s (%s)\n' "$UPGRADE_STATUS" "$UPGRADE_MAIN_VERSION" + printf ' logs: %s\n' "$RUN_DIR" + printf ' summary: %s\n' "$SUMMARY_JSON_PATH" +fi + +if [[ "$FRESH_MAIN_STATUS" == "fail" || "$UPGRADE_STATUS" == "fail" ]]; then + exit 1 +fi diff --git a/scripts/install.sh b/scripts/install.sh index ea02c48b6db..2abfbad9935 100755 --- 
a/scripts/install.sh +++ b/scripts/install.sh @@ -995,6 +995,7 @@ SHARP_IGNORE_GLOBAL_LIBVIPS="${SHARP_IGNORE_GLOBAL_LIBVIPS:-1}" NPM_LOGLEVEL="${OPENCLAW_NPM_LOGLEVEL:-error}" NPM_SILENT_FLAG="--silent" VERBOSE="${OPENCLAW_VERBOSE:-0}" +VERIFY_INSTALL="${OPENCLAW_VERIFY_INSTALL:-0}" OPENCLAW_BIN="" PNPM_CMD=() HELP=0 @@ -1016,6 +1017,7 @@ Options: --no-git-update Skip git pull for existing checkout --no-onboard Skip onboarding (non-interactive) --no-prompt Disable prompts (required in CI/automation) + --verify Run a post-install smoke verify --dry-run Print what would happen (no changes) --verbose Print debug output (set -x, npm verbose) --help, -h Show this help @@ -1027,6 +1029,7 @@ Environment variables: OPENCLAW_GIT_DIR=... OPENCLAW_GIT_UPDATE=0|1 OPENCLAW_NO_PROMPT=1 + OPENCLAW_VERIFY_INSTALL=1 OPENCLAW_DRY_RUN=1 OPENCLAW_NO_ONBOARD=1 OPENCLAW_VERBOSE=1 @@ -1036,6 +1039,7 @@ Environment variables: Examples: curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-onboard + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-onboard --verify curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --install-method git --no-onboard EOF } @@ -1059,6 +1063,10 @@ parse_args() { VERBOSE=1 shift ;; + --verify) + VERIFY_INSTALL=1 + shift + ;; --no-prompt) NO_PROMPT=1 shift @@ -2196,7 +2204,38 @@ refresh_gateway_service_if_loaded() { return 0 fi - run_quiet_step "Probing gateway service" "$claw" gateway status --probe --deep || true + run_quiet_step "Probing gateway service" "$claw" gateway status --deep || true +} + +verify_installation() { + if [[ "${VERIFY_INSTALL}" != "1" ]]; then + return 0 + fi + + ui_stage "Verifying installation" + local claw="${OPENCLAW_BIN:-}" + if [[ -z "$claw" ]]; then + claw="$(resolve_openclaw_bin || true)" + fi + if [[ -z "$claw" ]]; then + ui_error "Install verify failed: 
openclaw not on PATH yet" + warn_openclaw_not_found + return 1 + fi + + run_quiet_step "Checking OpenClaw version" "$claw" --version || return 1 + + if is_gateway_daemon_loaded "$claw"; then + run_quiet_step "Checking gateway service" "$claw" gateway status --deep || { + ui_error "Install verify failed: gateway service unhealthy" + ui_info "Run: openclaw gateway status --deep" + return 1 + } + else + ui_info "Gateway service not loaded; skipping gateway deep probe" + fi + + ui_success "Install verify complete" } # Main installation flow @@ -2485,6 +2524,10 @@ main() { fi fi + if ! verify_installation; then + exit 1 + fi + if [[ "$should_open_dashboard" == "true" ]]; then maybe_open_dashboard fi diff --git a/scripts/test-parallel.mjs b/scripts/test-parallel.mjs index ca7636394bb..021ff1f905e 100644 --- a/scripts/test-parallel.mjs +++ b/scripts/test-parallel.mjs @@ -1,6 +1,7 @@ import { spawn } from "node:child_process"; import fs from "node:fs"; import os from "node:os"; +import path from "node:path"; // On Windows, `.cmd` launchers can fail with `spawn EINVAL` when invoked without a shell // (especially under GitHub Actions + Git Bash). Use `shell: true` and let the shell resolve pnpm. @@ -104,11 +105,11 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3); const highMemLocalHost = !isCI && hostMemoryGiB >= 96; const lowMemLocalHost = !isCI && hostMemoryGiB < 64; const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10); -// vmForks is a big win for transform/import heavy suites, but Node 24+ -// regressed with Vitest's vm runtime in this repo, and low-memory local hosts -// are more likely to hit per-worker V8 heap ceilings. Keep it opt-out via -// OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1. -const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor < 24 : true; +// vmForks is a big win for transform/import heavy suites. 
Node 24 is stable again +// for the default unit-fast lane after moving the known flaky files to fork-only +// isolation, but Node 25+ still falls back to process forks until re-validated. +// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1. +const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true; const useVmForks = process.env.OPENCLAW_TEST_VM_FORKS === "1" || (process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost); @@ -205,6 +206,45 @@ const shardIndexOverride = (() => { const parsed = Number.parseInt(process.env.OPENCLAW_TEST_SHARD_INDEX ?? "", 10); return Number.isFinite(parsed) && parsed > 0 ? parsed : null; })(); +const OPTION_TAKES_VALUE = new Set([ + "-t", + "-c", + "-r", + "--testNamePattern", + "--config", + "--root", + "--dir", + "--reporter", + "--outputFile", + "--pool", + "--execArgv", + "--vmMemoryLimit", + "--maxWorkers", + "--environment", + "--shard", + "--changed", + "--sequence", + "--inspect", + "--inspectBrk", + "--testTimeout", + "--hookTimeout", + "--bail", + "--retry", + "--diff", + "--exclude", + "--project", + "--slowTestThreshold", + "--teardownTimeout", + "--attachmentsDir", + "--mode", + "--api", + "--browser", + "--maxConcurrency", + "--mergeReports", + "--configLoader", + "--experimental", +]); +const SINGLE_RUN_ONLY_FLAGS = new Set(["--coverage", "--outputFile", "--mergeReports"]); if (shardIndexOverride !== null && shardCount <= 1) { console.error( @@ -229,6 +269,219 @@ const silentArgs = const rawPassthroughArgs = process.argv.slice(2); const passthroughArgs = rawPassthroughArgs[0] === "--" ? 
rawPassthroughArgs.slice(1) : rawPassthroughArgs; +const parsePassthroughArgs = (args) => { + const fileFilters = []; + const optionArgs = []; + let consumeNextAsOptionValue = false; + + for (const arg of args) { + if (consumeNextAsOptionValue) { + optionArgs.push(arg); + consumeNextAsOptionValue = false; + continue; + } + if (arg === "--") { + optionArgs.push(arg); + continue; + } + if (arg.startsWith("-")) { + optionArgs.push(arg); + consumeNextAsOptionValue = !arg.includes("=") && OPTION_TAKES_VALUE.has(arg); + continue; + } + fileFilters.push(arg); + } + + return { fileFilters, optionArgs }; +}; +const { fileFilters: passthroughFileFilters, optionArgs: passthroughOptionArgs } = + parsePassthroughArgs(passthroughArgs); +const passthroughRequiresSingleRun = passthroughOptionArgs.some((arg) => { + if (!arg.startsWith("-")) { + return false; + } + const [flag] = arg.split("=", 1); + return SINGLE_RUN_ONLY_FLAGS.has(flag); +}); +const channelPrefixes = ["src/telegram/", "src/discord/", "src/web/", "src/browser/", "src/line/"]; +const baseConfigPrefixes = ["src/agents/", "src/auto-reply/", "src/commands/", "test/", "ui/"]; +const normalizeRepoPath = (value) => value.split(path.sep).join("/"); +const walkTestFiles = (rootDir) => { + if (!fs.existsSync(rootDir)) { + return []; + } + const entries = fs.readdirSync(rootDir, { withFileTypes: true }); + const files = []; + for (const entry of entries) { + const fullPath = path.join(rootDir, entry.name); + if (entry.isDirectory()) { + files.push(...walkTestFiles(fullPath)); + continue; + } + if (!entry.isFile()) { + continue; + } + if ( + fullPath.endsWith(".test.ts") || + fullPath.endsWith(".live.test.ts") || + fullPath.endsWith(".e2e.test.ts") + ) { + files.push(normalizeRepoPath(fullPath)); + } + } + return files; +}; +const allKnownTestFiles = [ + ...new Set([ + ...walkTestFiles("src"), + ...walkTestFiles("extensions"), + ...walkTestFiles("test"), + ...walkTestFiles(path.join("ui", "src", "ui")), + ]), +]; +const 
inferTarget = (fileFilter) => { + const isolated = unitIsolatedFiles.includes(fileFilter); + if (fileFilter.endsWith(".live.test.ts")) { + return { owner: "live", isolated }; + } + if (fileFilter.endsWith(".e2e.test.ts")) { + return { owner: "e2e", isolated }; + } + if (fileFilter.startsWith("extensions/")) { + return { owner: "extensions", isolated }; + } + if (fileFilter.startsWith("src/gateway/")) { + return { owner: "gateway", isolated }; + } + if (channelPrefixes.some((prefix) => fileFilter.startsWith(prefix))) { + return { owner: "channels", isolated }; + } + if (baseConfigPrefixes.some((prefix) => fileFilter.startsWith(prefix))) { + return { owner: "base", isolated }; + } + if (fileFilter.startsWith("src/")) { + return { owner: "unit", isolated }; + } + return { owner: "base", isolated }; +}; +const resolveFilterMatches = (fileFilter) => { + const normalizedFilter = normalizeRepoPath(fileFilter); + if (fs.existsSync(fileFilter)) { + const stats = fs.statSync(fileFilter); + if (stats.isFile()) { + return [normalizedFilter]; + } + if (stats.isDirectory()) { + const prefix = normalizedFilter.endsWith("/") ? normalizedFilter : `${normalizedFilter}/`; + return allKnownTestFiles.filter((file) => file.startsWith(prefix)); + } + } + if (/[*?[\]{}]/.test(normalizedFilter)) { + return allKnownTestFiles.filter((file) => path.matchesGlob(file, normalizedFilter)); + } + return allKnownTestFiles.filter((file) => file.includes(normalizedFilter)); +}; +const createTargetedEntry = (owner, isolated, filters) => { + const name = isolated ? `${owner}-isolated` : owner; + const forceForks = isolated; + if (owner === "unit") { + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.unit.config.ts", + `--pool=${forceForks ? "forks" : useVmForks ? "vmForks" : "forks"}`, + ...(disableIsolation ? 
["--isolate=false"] : []), + ...filters, + ], + }; + } + if (owner === "extensions") { + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.extensions.config.ts", + ...(forceForks ? ["--pool=forks"] : useVmForks ? ["--pool=vmForks"] : []), + ...filters, + ], + }; + } + if (owner === "gateway") { + return { + name, + args: ["vitest", "run", "--config", "vitest.gateway.config.ts", "--pool=forks", ...filters], + }; + } + if (owner === "channels") { + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.channels.config.ts", + ...(forceForks ? ["--pool=forks"] : []), + ...filters, + ], + }; + } + if (owner === "live") { + return { + name, + args: ["vitest", "run", "--config", "vitest.live.config.ts", ...filters], + }; + } + if (owner === "e2e") { + return { + name, + args: ["vitest", "run", "--config", "vitest.e2e.config.ts", ...filters], + }; + } + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.config.ts", + ...(forceForks ? ["--pool=forks"] : []), + ...filters, + ], + }; +}; +const targetedEntries = (() => { + if (passthroughFileFilters.length === 0) { + return []; + } + const groups = passthroughFileFilters.reduce((acc, fileFilter) => { + const matchedFiles = resolveFilterMatches(fileFilter); + if (matchedFiles.length === 0) { + const target = inferTarget(normalizeRepoPath(fileFilter)); + const key = `${target.owner}:${target.isolated ? "isolated" : "default"}`; + const files = acc.get(key) ?? []; + files.push(normalizeRepoPath(fileFilter)); + acc.set(key, files); + return acc; + } + for (const matchedFile of matchedFiles) { + const target = inferTarget(matchedFile); + const key = `${target.owner}:${target.isolated ? "isolated" : "default"}`; + const files = acc.get(key) ?? 
[]; + files.push(matchedFile); + acc.set(key, files); + } + return acc; + }, new Map()); + return Array.from(groups, ([key, filters]) => { + const [owner, mode] = key.split(":"); + return createTargetedEntry(owner, mode === "isolated", [...new Set(filters)]); + }); +})(); const topLevelParallelEnabled = testProfile !== "low" && testProfile !== "serial"; const overrideWorkers = Number.parseInt(process.env.OPENCLAW_TEST_WORKERS ?? "", 10); const resolvedOverride = @@ -311,7 +564,7 @@ const maxWorkersForRun = (name) => { if (isCI && isMacOS) { return 1; } - if (name === "unit-isolated") { + if (name === "unit-isolated" || name.endsWith("-isolated")) { return defaultWorkerBudget.unitIsolated; } if (name === "extensions") { @@ -397,16 +650,16 @@ const runOnce = (entry, extraArgs = []) => }); }); -const run = async (entry) => { +const run = async (entry, extraArgs = []) => { if (shardCount <= 1) { - return runOnce(entry); + return runOnce(entry, extraArgs); } if (shardIndexOverride !== null) { - return runOnce(entry, ["--shard", `${shardIndexOverride}/${shardCount}`]); + return runOnce(entry, ["--shard", `${shardIndexOverride}/${shardCount}`, ...extraArgs]); } for (let shardIndex = 1; shardIndex <= shardCount; shardIndex += 1) { // eslint-disable-next-line no-await-in-loop - const code = await runOnce(entry, ["--shard", `${shardIndex}/${shardCount}`]); + const code = await runOnce(entry, ["--shard", `${shardIndex}/${shardCount}`, ...extraArgs]); if (code !== 0) { return code; } @@ -414,15 +667,15 @@ const run = async (entry) => { return 0; }; -const runEntries = async (entries) => { +const runEntries = async (entries, extraArgs = []) => { if (topLevelParallelEnabled) { - const codes = await Promise.all(entries.map(run)); + const codes = await Promise.all(entries.map((entry) => run(entry, extraArgs))); return codes.find((code) => code !== 0); } for (const entry of entries) { // eslint-disable-next-line no-await-in-loop - const code = await run(entry); + const code = await 
run(entry, extraArgs); if (code !== 0) { return code; } @@ -440,57 +693,48 @@ const shutdown = (signal) => { process.on("SIGINT", () => shutdown("SIGINT")); process.on("SIGTERM", () => shutdown("SIGTERM")); -if (passthroughArgs.length > 0) { - const maxWorkers = maxWorkersForRun("unit"); - const args = maxWorkers - ? [ - "vitest", - "run", - "--maxWorkers", - String(maxWorkers), - ...silentArgs, - ...windowsCiArgs, - ...passthroughArgs, - ] - : ["vitest", "run", ...silentArgs, ...windowsCiArgs, ...passthroughArgs]; - const nodeOptions = process.env.NODE_OPTIONS ?? ""; - const nextNodeOptions = WARNING_SUPPRESSION_FLAGS.reduce( - (acc, flag) => (acc.includes(flag) ? acc : `${acc} ${flag}`.trim()), - nodeOptions, - ); - const code = await new Promise((resolve) => { - let child; - try { - child = spawn(pnpm, args, { - stdio: "inherit", - env: { ...process.env, NODE_OPTIONS: nextNodeOptions }, - shell: isWindows, - }); - } catch (err) { - console.error(`[test-parallel] spawn failed: ${String(err)}`); - resolve(1); - return; +if (targetedEntries.length > 0) { + if (passthroughRequiresSingleRun && targetedEntries.length > 1) { + console.error( + "[test-parallel] The provided Vitest args require a single run, but the selected test filters span multiple wrapper configs. Run one target/config at a time.", + ); + process.exit(2); + } + const targetedParallelRuns = keepGatewaySerial + ? targetedEntries.filter((entry) => entry.name !== "gateway") + : targetedEntries; + const targetedSerialRuns = keepGatewaySerial + ? 
targetedEntries.filter((entry) => entry.name === "gateway") + : []; + const failedTargetedParallel = await runEntries(targetedParallelRuns, passthroughOptionArgs); + if (failedTargetedParallel !== undefined) { + process.exit(failedTargetedParallel); + } + for (const entry of targetedSerialRuns) { + // eslint-disable-next-line no-await-in-loop + const code = await run(entry, passthroughOptionArgs); + if (code !== 0) { + process.exit(code); } - children.add(child); - child.on("error", (err) => { - console.error(`[test-parallel] child error: ${String(err)}`); - }); - child.on("exit", (exitCode, signal) => { - children.delete(child); - resolve(exitCode ?? (signal ? 1 : 0)); - }); - }); - process.exit(Number(code) || 0); + } + process.exit(0); } -const failedParallel = await runEntries(parallelRuns); +if (passthroughRequiresSingleRun && passthroughOptionArgs.length > 0) { + console.error( + "[test-parallel] The provided Vitest args require a single run. Use the dedicated npm script for that workflow (for example `pnpm test:coverage`) or target a single test file/filter.", + ); + process.exit(2); +} + +const failedParallel = await runEntries(parallelRuns, passthroughOptionArgs); if (failedParallel !== undefined) { process.exit(failedParallel); } for (const entry of serialRuns) { // eslint-disable-next-line no-await-in-loop - const code = await run(entry); + const code = await run(entry, passthroughOptionArgs); if (code !== 0) { process.exit(code); } diff --git a/skills/node-connect/SKILL.md b/skills/node-connect/SKILL.md new file mode 100644 index 00000000000..ea468f19096 --- /dev/null +++ b/skills/node-connect/SKILL.md @@ -0,0 +1,142 @@ +--- +name: node-connect +description: Diagnose OpenClaw node connection and pairing failures for Android, iOS, and macOS companion apps. 
Use when QR/setup code/manual connect fails, local Wi-Fi works but VPS/tailnet does not, or errors mention pairing required, unauthorized, bootstrap token invalid or expired, gateway.bind, gateway.remote.url, Tailscale, or plugins.entries.device-pair.config.publicUrl. +--- + +# Node Connect + +Goal: find the one real route from node -> gateway, verify OpenClaw is advertising that route, then fix pairing/auth. + +## Topology first + +Decide which case you are in before proposing fixes: + +- same machine / emulator / USB tunnel +- same LAN / local Wi-Fi +- same Tailscale tailnet +- public URL / reverse proxy + +Do not mix them. + +- Local Wi-Fi problem: do not switch to Tailscale unless remote access is actually needed. +- VPS / remote gateway problem: do not keep debugging `localhost` or LAN IPs. + +## If ambiguous, ask first + +If the setup is unclear or the failure report is vague, ask short clarifying questions before diagnosing. + +Ask for: + +- which route they intend: same machine, same LAN, Tailscale tailnet, or public URL +- whether they used QR/setup code or manual host/port +- the exact app text/status/error, quoted exactly if possible +- whether `openclaw devices list` shows a pending pairing request + +Do not guess from `can't connect`. + +## Canonical checks + +Prefer `openclaw qr --json`. It uses the same setup-code payload Android scans. 
+ +```bash +openclaw config get gateway.mode +openclaw config get gateway.bind +openclaw config get gateway.tailscale.mode +openclaw config get gateway.remote.url +openclaw config get gateway.auth.mode +openclaw config get gateway.auth.allowTailscale +openclaw config get plugins.entries.device-pair.config.publicUrl +openclaw qr --json +openclaw devices list +openclaw nodes status +``` + +If this OpenClaw instance is pointed at a remote gateway, also run: + +```bash +openclaw qr --remote --json +``` + +If Tailscale is part of the story: + +```bash +tailscale status --json +``` + +## Read the result, not guesses + +`openclaw qr --json` success means: + +- `gatewayUrl`: this is the actual endpoint the app should use. +- `urlSource`: this tells you which config path won. + +Common good sources: + +- `gateway.bind=lan`: same Wi-Fi / LAN only +- `gateway.bind=tailnet`: direct tailnet access +- `gateway.tailscale.mode=serve` or `gateway.tailscale.mode=funnel`: Tailscale route +- `plugins.entries.device-pair.config.publicUrl`: explicit public/reverse-proxy route +- `gateway.remote.url`: remote gateway route + +## Root-cause map + +If `openclaw qr --json` says `Gateway is only bound to loopback`: + +- remote node cannot connect yet +- fix the route, then generate a fresh setup code +- `gateway.bind=auto` is not enough if the effective QR route is still loopback +- same LAN: use `gateway.bind=lan` +- same tailnet: prefer `gateway.tailscale.mode=serve` or use `gateway.bind=tailnet` +- public internet: set a real `plugins.entries.device-pair.config.publicUrl` or `gateway.remote.url` + +If `gateway.bind=tailnet set, but no tailnet IP was found`: + +- gateway host is not actually on Tailscale + +If `qr --remote requires gateway.remote.url`: + +- remote-mode config is incomplete + +If the app says `pairing required`: + +- network route and auth worked +- approve the pending device + +```bash +openclaw devices list +openclaw devices approve --latest +``` + +If the app says 
`bootstrap token invalid or expired`: + +- old setup code +- generate a fresh one and rescan +- do this after any URL/auth fix too + +If the app says `unauthorized`: + +- wrong token/password, or wrong Tailscale expectation +- for Tailscale Serve, `gateway.auth.allowTailscale` must match the intended flow +- otherwise use explicit token/password + +## Fast heuristics + +- Same Wi-Fi setup + gateway advertises `127.0.0.1`, `localhost`, or loopback-only config: wrong. +- Remote setup + setup/manual uses private LAN IP: wrong. +- Tailnet setup + gateway advertises LAN IP instead of MagicDNS / tailnet route: wrong. +- Public URL set but QR still advertises something else: inspect `urlSource`; config is not what you think. +- `openclaw devices list` shows pending requests: stop changing network config and approve first. + +## Fix style + +Reply with one concrete diagnosis and one route. + +If there is not enough signal yet, ask for setup + exact app text instead of guessing. + +Good: + +- `The gateway is still loopback-only, so a node on another network can never reach it. 
Enable Tailscale Serve, restart the gateway, run openclaw qr again, rescan, then approve the pending device pairing.` + +Bad: + +- `Maybe LAN, maybe Tailscale, maybe port forwarding, maybe public URL.` diff --git a/src/acp/conversation-id.ts b/src/acp/conversation-id.ts index 7281fef4924..9cf17c9a579 100644 --- a/src/acp/conversation-id.ts +++ b/src/acp/conversation-id.ts @@ -4,7 +4,7 @@ export type ParsedTelegramTopicConversation = { canonicalConversationId: string; }; -function normalizeText(value: unknown): string { +export function normalizeConversationText(value: unknown): string { if (typeof value === "string") { return value.trim(); } @@ -15,7 +15,7 @@ function normalizeText(value: unknown): string { } export function parseTelegramChatIdFromTarget(raw: unknown): string | undefined { - const text = normalizeText(raw); + const text = normalizeConversationText(raw); if (!text) { return undefined; } diff --git a/src/acp/persistent-bindings.resolve.ts b/src/acp/persistent-bindings.resolve.ts index c69f1afe5af..84f052797ad 100644 --- a/src/acp/persistent-bindings.resolve.ts +++ b/src/acp/persistent-bindings.resolve.ts @@ -117,6 +117,70 @@ function toConfiguredBindingSpec(params: { }; } +function resolveConfiguredBindingRecord(params: { + cfg: OpenClawConfig; + bindings: AgentAcpBinding[]; + channel: ConfiguredAcpBindingChannel; + accountId: string; + selectConversation: ( + binding: AgentAcpBinding, + ) => { conversationId: string; parentConversationId?: string } | null; +}): ResolvedConfiguredAcpBinding | null { + let wildcardMatch: { + binding: AgentAcpBinding; + conversationId: string; + parentConversationId?: string; + } | null = null; + for (const binding of params.bindings) { + if (normalizeBindingChannel(binding.match.channel) !== params.channel) { + continue; + } + const accountMatchPriority = resolveAccountMatchPriority( + binding.match.accountId, + params.accountId, + ); + if (accountMatchPriority === 0) { + continue; + } + const conversation = 
params.selectConversation(binding); + if (!conversation) { + continue; + } + const spec = toConfiguredBindingSpec({ + cfg: params.cfg, + channel: params.channel, + accountId: params.accountId, + conversationId: conversation.conversationId, + parentConversationId: conversation.parentConversationId, + binding, + }); + if (accountMatchPriority === 2) { + return { + spec, + record: toConfiguredAcpBindingRecord(spec), + }; + } + if (!wildcardMatch) { + wildcardMatch = { binding, ...conversation }; + } + } + if (!wildcardMatch) { + return null; + } + const spec = toConfiguredBindingSpec({ + cfg: params.cfg, + channel: params.channel, + accountId: params.accountId, + conversationId: wildcardMatch.conversationId, + parentConversationId: wildcardMatch.parentConversationId, + binding: wildcardMatch.binding, + }); + return { + spec, + record: toConfiguredAcpBindingRecord(spec), + }; +} + export function resolveConfiguredAcpBindingSpecBySessionKey(params: { cfg: OpenClawConfig; sessionKey: string; @@ -207,57 +271,20 @@ export function resolveConfiguredAcpBindingRecord(params: { if (channel === "discord") { const bindings = listAcpBindings(params.cfg); - const resolveDiscordBindingForConversation = ( - targetConversationId: string, - ): ResolvedConfiguredAcpBinding | null => { - let wildcardMatch: AgentAcpBinding | null = null; - for (const binding of bindings) { - if (normalizeBindingChannel(binding.match.channel) !== "discord") { - continue; - } - const accountMatchPriority = resolveAccountMatchPriority( - binding.match.accountId, - accountId, - ); - if (accountMatchPriority === 0) { - continue; - } - const bindingConversationId = resolveBindingConversationId(binding); - if (!bindingConversationId || bindingConversationId !== targetConversationId) { - continue; - } - if (accountMatchPriority === 2) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "discord", - accountId, - conversationId: targetConversationId, - binding, - }); - return { - spec, - 
record: toConfiguredAcpBindingRecord(spec), - }; - } - if (!wildcardMatch) { - wildcardMatch = binding; - } - } - if (wildcardMatch) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "discord", - accountId, - conversationId: targetConversationId, - binding: wildcardMatch, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), - }; - } - return null; - }; + const resolveDiscordBindingForConversation = (targetConversationId: string) => + resolveConfiguredBindingRecord({ + cfg: params.cfg, + bindings, + channel: "discord", + accountId, + selectConversation: (binding) => { + const bindingConversationId = resolveBindingConversationId(binding); + if (!bindingConversationId || bindingConversationId !== targetConversationId) { + return null; + } + return { conversationId: targetConversationId }; + }, + }); const directMatch = resolveDiscordBindingForConversation(conversationId); if (directMatch) { @@ -280,61 +307,31 @@ export function resolveConfiguredAcpBindingRecord(params: { if (!parsed || !parsed.chatId.startsWith("-")) { return null; } - let wildcardMatch: AgentAcpBinding | null = null; - for (const binding of listAcpBindings(params.cfg)) { - if (normalizeBindingChannel(binding.match.channel) !== "telegram") { - continue; - } - const accountMatchPriority = resolveAccountMatchPriority(binding.match.accountId, accountId); - if (accountMatchPriority === 0) { - continue; - } - const targetConversationId = resolveBindingConversationId(binding); - if (!targetConversationId) { - continue; - } - const targetParsed = parseTelegramTopicConversation({ - conversationId: targetConversationId, - }); - if (!targetParsed || !targetParsed.chatId.startsWith("-")) { - continue; - } - if (targetParsed.canonicalConversationId !== parsed.canonicalConversationId) { - continue; - } - if (accountMatchPriority === 2) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "telegram", - accountId, + return resolveConfiguredBindingRecord({ 
+ cfg: params.cfg, + bindings: listAcpBindings(params.cfg), + channel: "telegram", + accountId, + selectConversation: (binding) => { + const targetConversationId = resolveBindingConversationId(binding); + if (!targetConversationId) { + return null; + } + const targetParsed = parseTelegramTopicConversation({ + conversationId: targetConversationId, + }); + if (!targetParsed || !targetParsed.chatId.startsWith("-")) { + return null; + } + if (targetParsed.canonicalConversationId !== parsed.canonicalConversationId) { + return null; + } + return { conversationId: parsed.canonicalConversationId, parentConversationId: parsed.chatId, - binding, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), }; - } - if (!wildcardMatch) { - wildcardMatch = binding; - } - } - if (wildcardMatch) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "telegram", - accountId, - conversationId: parsed.canonicalConversationId, - parentConversationId: parsed.chatId, - binding: wildcardMatch, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), - }; - } - return null; + }, + }); } return null; diff --git a/src/acp/persistent-bindings.test.ts b/src/acp/persistent-bindings.test.ts index deafbc53e15..30e74c05082 100644 --- a/src/acp/persistent-bindings.test.ts +++ b/src/acp/persistent-bindings.test.ts @@ -30,6 +30,10 @@ import { resolveConfiguredAcpBindingSpecBySessionKey, } from "./persistent-bindings.js"; +type ConfiguredBinding = NonNullable[number]; +type BindingRecordInput = Parameters[0]; +type BindingSpec = Parameters[0]["spec"]; + const baseCfg = { session: { mainKey: "main", scope: "per-sender" }, agents: { @@ -37,6 +41,105 @@ const baseCfg = { }, } satisfies OpenClawConfig; +const defaultDiscordConversationId = "1478836151241412759"; +const defaultDiscordAccountId = "default"; + +function createCfgWithBindings( + bindings: ConfiguredBinding[], + overrides?: Partial, +): OpenClawConfig { + return { + ...baseCfg, + ...overrides, + 
bindings, + } as OpenClawConfig; +} + +function createDiscordBinding(params: { + agentId: string; + conversationId: string; + accountId?: string; + acp?: Record; +}): ConfiguredBinding { + return { + type: "acp", + agentId: params.agentId, + match: { + channel: "discord", + accountId: params.accountId ?? defaultDiscordAccountId, + peer: { kind: "channel", id: params.conversationId }, + }, + ...(params.acp ? { acp: params.acp } : {}), + } as ConfiguredBinding; +} + +function createTelegramGroupBinding(params: { + agentId: string; + conversationId: string; + acp?: Record; +}): ConfiguredBinding { + return { + type: "acp", + agentId: params.agentId, + match: { + channel: "telegram", + accountId: defaultDiscordAccountId, + peer: { kind: "group", id: params.conversationId }, + }, + ...(params.acp ? { acp: params.acp } : {}), + } as ConfiguredBinding; +} + +function resolveBindingRecord(cfg: OpenClawConfig, overrides: Partial = {}) { + return resolveConfiguredAcpBindingRecord({ + cfg, + channel: "discord", + accountId: defaultDiscordAccountId, + conversationId: defaultDiscordConversationId, + ...overrides, + }); +} + +function resolveDiscordBindingSpecBySession( + cfg: OpenClawConfig, + conversationId = defaultDiscordConversationId, +) { + const resolved = resolveBindingRecord(cfg, { conversationId }); + return resolveConfiguredAcpBindingSpecBySessionKey({ + cfg, + sessionKey: resolved?.record.targetSessionKey ?? 
"", + }); +} + +function createDiscordPersistentSpec(overrides: Partial = {}): BindingSpec { + return { + channel: "discord", + accountId: defaultDiscordAccountId, + conversationId: defaultDiscordConversationId, + agentId: "codex", + mode: "persistent", + ...overrides, + } as BindingSpec; +} + +function mockReadySession(params: { spec: BindingSpec; cwd: string }) { + const sessionKey = buildConfiguredAcpSessionKey(params.spec); + managerMocks.resolveSession.mockReturnValue({ + kind: "ready", + sessionKey, + meta: { + backend: "acpx", + agent: params.spec.acpAgentId ?? params.spec.agentId, + runtimeSessionName: "existing", + mode: params.spec.mode, + runtimeOptions: { cwd: params.cwd }, + state: "idle", + lastActivityAt: Date.now(), + }, + }); + return sessionKey; +} + beforeEach(() => { managerMocks.resolveSession.mockReset(); managerMocks.closeSession.mockReset().mockResolvedValue({ @@ -50,58 +153,30 @@ beforeEach(() => { describe("resolveConfiguredAcpBindingRecord", () => { it("resolves discord channel ACP binding from top-level typed bindings", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - cwd: "/repo/openclaw", - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + acp: { cwd: "/repo/openclaw" }, + }), + ]); + const resolved = resolveBindingRecord(cfg); expect(resolved?.spec.channel).toBe("discord"); - expect(resolved?.spec.conversationId).toBe("1478836151241412759"); + expect(resolved?.spec.conversationId).toBe(defaultDiscordConversationId); expect(resolved?.spec.agentId).toBe("codex"); 
expect(resolved?.record.targetSessionKey).toContain("agent:codex:acp:binding:discord:default:"); expect(resolved?.record.metadata?.source).toBe("config"); }); it("falls back to parent discord channel when conversation is a thread id", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "channel-parent-1" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: "channel-parent-1", + }), + ]); + const resolved = resolveBindingRecord(cfg, { conversationId: "thread-123", parentConversationId: "channel-parent-1", }); @@ -111,34 +186,17 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("prefers direct discord thread binding over parent channel fallback", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "channel-parent-1" }, - }, - }, - { - type: "acp", - agentId: "claude", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "thread-123" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: "channel-parent-1", + }), + createDiscordBinding({ + agentId: "claude", + conversationId: "thread-123", + }), + ]); + const resolved = resolveBindingRecord(cfg, { conversationId: "thread-123", parentConversationId: "channel-parent-1", }); @@ -148,60 +206,30 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("prefers exact account binding over wildcard for the same discord 
conversation", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "*", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - }, - { - type: "acp", - agentId: "claude", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + accountId: "*", + }), + createDiscordBinding({ + agentId: "claude", + conversationId: defaultDiscordConversationId, + }), + ]); + const resolved = resolveBindingRecord(cfg); expect(resolved?.spec.agentId).toBe("claude"); }); it("returns null when no top-level ACP binding matches the conversation", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "different-channel" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: "different-channel", + }), + ]); + const resolved = resolveBindingRecord(cfg, { conversationId: "thread-123", parentConversationId: "channel-parent-1", }); @@ -210,23 +238,13 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("resolves telegram forum topic bindings using canonical conversation ids", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "claude", - match: { - channel: "telegram", - accountId: "default", - peer: { kind: "group", id: "-1001234567890:topic:42" }, - }, - 
acp: { - backend: "acpx", - }, - }, - ], - } satisfies OpenClawConfig; + const cfg = createCfgWithBindings([ + createTelegramGroupBinding({ + agentId: "claude", + conversationId: "-1001234567890:topic:42", + acp: { backend: "acpx" }, + }), + ]); const canonical = resolveConfiguredAcpBindingRecord({ cfg, @@ -250,20 +268,12 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("skips telegram non-group topic configs", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "claude", - match: { - channel: "telegram", - accountId: "default", - peer: { kind: "group", id: "123456789:topic:42" }, - }, - }, - ], - } satisfies OpenClawConfig; + const cfg = createCfgWithBindings([ + createTelegramGroupBinding({ + agentId: "claude", + conversationId: "123456789:topic:42", + }), + ]); const resolved = resolveConfiguredAcpBindingRecord({ cfg, @@ -275,44 +285,34 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("applies agent runtime ACP defaults for bound conversations", () => { - const cfg = { - ...baseCfg, - agents: { - list: [ - { id: "main" }, - { - id: "coding", - runtime: { - type: "acp", - acp: { - agent: "codex", - backend: "acpx", - mode: "oneshot", - cwd: "/workspace/repo-a", + const cfg = createCfgWithBindings( + [ + createDiscordBinding({ + agentId: "coding", + conversationId: defaultDiscordConversationId, + }), + ], + { + agents: { + list: [ + { id: "main" }, + { + id: "coding", + runtime: { + type: "acp", + acp: { + agent: "codex", + backend: "acpx", + mode: "oneshot", + cwd: "/workspace/repo-a", + }, }, }, - }, - ], - }, - bindings: [ - { - type: "acp", - agentId: "coding", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, + ], }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); + }, + ); + const resolved = 
resolveBindingRecord(cfg); expect(resolved?.spec.agentId).toBe("coding"); expect(resolved?.spec.acpAgentId).toBe("codex"); @@ -324,37 +324,17 @@ describe("resolveConfiguredAcpBindingRecord", () => { describe("resolveConfiguredAcpBindingSpecBySessionKey", () => { it("maps a configured discord binding session key back to its spec", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - backend: "acpx", - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); - const spec = resolveConfiguredAcpBindingSpecBySessionKey({ - cfg, - sessionKey: resolved?.record.targetSessionKey ?? "", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + acp: { backend: "acpx" }, + }), + ]); + const spec = resolveDiscordBindingSpecBySession(cfg); expect(spec?.channel).toBe("discord"); - expect(spec?.conversationId).toBe("1478836151241412759"); + expect(spec?.conversationId).toBe(defaultDiscordConversationId); expect(spec?.agentId).toBe("codex"); expect(spec?.backend).toBe("acpx"); }); @@ -368,46 +348,20 @@ describe("resolveConfiguredAcpBindingSpecBySessionKey", () => { }); it("prefers exact account ACP settings over wildcard when session keys collide", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "*", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - backend: "wild", - }, - }, - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - backend: "exact", - }, - }, - ], - } satisfies 
OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); - const spec = resolveConfiguredAcpBindingSpecBySessionKey({ - cfg, - sessionKey: resolved?.record.targetSessionKey ?? "", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + accountId: "*", + acp: { backend: "wild" }, + }), + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + acp: { backend: "exact" }, + }), + ]); + const spec = resolveDiscordBindingSpecBySession(cfg); expect(spec?.backend).toBe("exact"); }); @@ -435,26 +389,10 @@ describe("buildConfiguredAcpSessionKey", () => { describe("ensureConfiguredAcpBindingSession", () => { it("keeps an existing ready session when configured binding omits cwd", async () => { - const spec = { - channel: "discord" as const, - accountId: "default", - conversationId: "1478836151241412759", - agentId: "codex", - mode: "persistent" as const, - }; - const sessionKey = buildConfiguredAcpSessionKey(spec); - managerMocks.resolveSession.mockReturnValue({ - kind: "ready", - sessionKey, - meta: { - backend: "acpx", - agent: "codex", - runtimeSessionName: "existing", - mode: "persistent", - runtimeOptions: { cwd: "/workspace/openclaw" }, - state: "idle", - lastActivityAt: Date.now(), - }, + const spec = createDiscordPersistentSpec(); + const sessionKey = mockReadySession({ + spec, + cwd: "/workspace/openclaw", }); const ensured = await ensureConfiguredAcpBindingSession({ @@ -468,27 +406,12 @@ describe("ensureConfiguredAcpBindingSession", () => { }); it("reinitializes a ready session when binding config explicitly sets mismatched cwd", async () => { - const spec = { - channel: "discord" as const, - accountId: "default", - conversationId: "1478836151241412759", - agentId: "codex", - mode: "persistent" as const, + const spec = 
createDiscordPersistentSpec({ cwd: "/workspace/repo-a", - }; - const sessionKey = buildConfiguredAcpSessionKey(spec); - managerMocks.resolveSession.mockReturnValue({ - kind: "ready", - sessionKey, - meta: { - backend: "acpx", - agent: "codex", - runtimeSessionName: "existing", - mode: "persistent", - runtimeOptions: { cwd: "/workspace/other-repo" }, - state: "idle", - lastActivityAt: Date.now(), - }, + }); + const sessionKey = mockReadySession({ + spec, + cwd: "/workspace/other-repo", }); const ensured = await ensureConfiguredAcpBindingSession({ @@ -508,14 +431,10 @@ describe("ensureConfiguredAcpBindingSession", () => { }); it("initializes ACP session with runtime agent override when provided", async () => { - const spec = { - channel: "discord" as const, - accountId: "default", - conversationId: "1478836151241412759", + const spec = createDiscordPersistentSpec({ agentId: "coding", acpAgentId: "codex", - mode: "persistent" as const, - }; + }); managerMocks.resolveSession.mockReturnValue({ kind: "none" }); const ensured = await ensureConfiguredAcpBindingSession({ @@ -534,24 +453,16 @@ describe("ensureConfiguredAcpBindingSession", () => { describe("resetAcpSessionInPlace", () => { it("reinitializes from configured binding when ACP metadata is missing", async () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "claude", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478844424791396446" }, - }, - acp: { - mode: "persistent", - backend: "acpx", - }, + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "claude", + conversationId: "1478844424791396446", + acp: { + mode: "persistent", + backend: "acpx", }, - ], - } satisfies OpenClawConfig; + }), + ]); const sessionKey = buildConfiguredAcpSessionKey({ channel: "discord", accountId: "default", diff --git a/src/acp/server.startup.test.ts b/src/acp/server.startup.test.ts index 2f9b96d8511..35c43478ec9 100644 --- 
a/src/acp/server.startup.test.ts +++ b/src/acp/server.startup.test.ts @@ -129,6 +129,22 @@ describe("serveAcpGateway startup", () => { return { signalHandlers, onceSpy }; } + async function emitHelloAndWaitForAgentSideConnection() { + const gateway = getMockGateway(); + gateway.emitHello(); + await vi.waitFor(() => { + expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); + }); + } + + async function stopServeWithSigint( + signalHandlers: Map void>, + servePromise: Promise, + ) { + signalHandlers.get("SIGINT")?.(); + await servePromise; + } + beforeAll(async () => { ({ serveAcpGateway } = await import("./server.js")); }); @@ -153,14 +169,8 @@ describe("serveAcpGateway startup", () => { await Promise.resolve(); expect(mockState.agentSideConnectionCtor).not.toHaveBeenCalled(); - const gateway = getMockGateway(); - gateway.emitHello(); - await vi.waitFor(() => { - expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); - }); - - signalHandlers.get("SIGINT")?.(); - await servePromise; + await emitHelloAndWaitForAgentSideConnection(); + await stopServeWithSigint(signalHandlers, servePromise); } finally { onceSpy.mockRestore(); } @@ -207,13 +217,8 @@ describe("serveAcpGateway startup", () => { password: "resolved-secret-password", // pragma: allowlist secret }); - const gateway = getMockGateway(); - gateway.emitHello(); - await vi.waitFor(() => { - expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); - }); - signalHandlers.get("SIGINT")?.(); - await servePromise; + await emitHelloAndWaitForAgentSideConnection(); + await stopServeWithSigint(signalHandlers, servePromise); } finally { onceSpy.mockRestore(); } @@ -236,13 +241,8 @@ describe("serveAcpGateway startup", () => { }), ); - const gateway = getMockGateway(); - gateway.emitHello(); - await vi.waitFor(() => { - expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); - }); - signalHandlers.get("SIGINT")?.(); - await servePromise; + await 
emitHelloAndWaitForAgentSideConnection(); + await stopServeWithSigint(signalHandlers, servePromise); } finally { onceSpy.mockRestore(); } diff --git a/src/acp/translator.cancel-scoping.test.ts b/src/acp/translator.cancel-scoping.test.ts index c84832369a0..e862222f7a0 100644 --- a/src/acp/translator.cancel-scoping.test.ts +++ b/src/acp/translator.cancel-scoping.test.ts @@ -91,19 +91,45 @@ async function startPendingPrompt( }; } +async function cancelAndExpectAbortForPendingRun( + harness: Harness, + sessionId: string, + sessionKey: string, + pending: { promptPromise: Promise; runId: string }, +) { + await harness.agent.cancel({ sessionId } as CancelNotification); + + expect(harness.requestSpy).toHaveBeenCalledWith("chat.abort", { + sessionKey, + runId: pending.runId, + }); + await expect(pending.promptPromise).resolves.toEqual({ stopReason: "cancelled" }); +} + +async function deliverFinalChatEventAndExpectEndTurn( + harness: Harness, + sessionKey: string, + pending: { promptPromise: Promise; runId: string }, + seq: number, +) { + await harness.agent.handleGatewayEvent( + createChatEvent({ + runId: pending.runId, + sessionKey, + seq, + state: "final", + }), + ); + await expect(pending.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); +} + describe("acp translator cancel and run scoping", () => { it("cancel passes active runId to chat.abort", async () => { const sessionKey = "agent:main:shared"; const harness = createHarness([{ sessionId: "session-1", sessionKey }]); const pending = await startPendingPrompt(harness, "session-1"); - await harness.agent.cancel({ sessionId: "session-1" } as CancelNotification); - - expect(harness.requestSpy).toHaveBeenCalledWith("chat.abort", { - sessionKey, - runId: pending.runId, - }); - await expect(pending.promptPromise).resolves.toEqual({ stopReason: "cancelled" }); + await cancelAndExpectAbortForPendingRun(harness, "session-1", sessionKey, pending); }); it("cancel uses pending runId when there is no active run", async 
() => { @@ -112,13 +138,7 @@ describe("acp translator cancel and run scoping", () => { const pending = await startPendingPrompt(harness, "session-1"); harness.sessionStore.clearActiveRun("session-1"); - await harness.agent.cancel({ sessionId: "session-1" } as CancelNotification); - - expect(harness.requestSpy).toHaveBeenCalledWith("chat.abort", { - sessionKey, - runId: pending.runId, - }); - await expect(pending.promptPromise).resolves.toEqual({ stopReason: "cancelled" }); + await cancelAndExpectAbortForPendingRun(harness, "session-1", sessionKey, pending); }); it("cancel skips chat.abort when there is no active run and no pending prompt", async () => { @@ -145,15 +165,7 @@ describe("acp translator cancel and run scoping", () => { expect(abortCalls).toHaveLength(0); expect(harness.sessionStore.getSession("session-2")?.activeRunId).toBe(pending2.runId); - await harness.agent.handleGatewayEvent( - createChatEvent({ - runId: pending2.runId, - sessionKey, - seq: 1, - state: "final", - }), - ); - await expect(pending2.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + await deliverFinalChatEventAndExpectEndTurn(harness, sessionKey, pending2, 1); }); it("drops chat events when runId does not match the active prompt", async () => { @@ -250,15 +262,7 @@ describe("acp translator cancel and run scoping", () => { ); expect(harness.sessionUpdateSpy).toHaveBeenCalledTimes(1); - await harness.agent.handleGatewayEvent( - createChatEvent({ - runId: pending2.runId, - sessionKey, - seq: 1, - state: "final", - }), - ); - await expect(pending2.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + await deliverFinalChatEventAndExpectEndTurn(harness, sessionKey, pending2, 1); expect(harness.sessionStore.getSession("session-1")?.activeRunId).toBe(pending1.runId); await harness.agent.handleGatewayEvent( diff --git a/src/acp/translator.prompt-prefix.test.ts b/src/acp/translator.prompt-prefix.test.ts index 38c186519c0..9d53e3aa103 100644 --- 
a/src/acp/translator.prompt-prefix.test.ts +++ b/src/acp/translator.prompt-prefix.test.ts @@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js"; import { AcpGatewayAgent } from "./translator.js"; import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js"; +const TEST_SESSION_ID = "session-1"; +const TEST_SESSION_KEY = "agent:main:main"; +const TEST_PROMPT = { + sessionId: TEST_SESSION_ID, + prompt: [{ type: "text", text: "hello" }], + _meta: {}, +} as unknown as PromptRequest; + describe("acp prompt cwd prefix", () => { + const createStopAfterSendSpy = () => + vi.fn(async (method: string) => { + if (method === "chat.send") { + throw new Error("stop-after-send"); + } + return {}; + }); + + async function runPromptAndCaptureRequest( + options: { + cwd?: string; + prefixCwd?: boolean; + provenanceMode?: "meta" | "meta+receipt"; + } = {}, + ) { + const sessionStore = createInMemorySessionStore(); + sessionStore.createSession({ + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + cwd: options.cwd ?? 
path.join(os.homedir(), "openclaw-test"), + }); + + const requestSpy = createStopAfterSendSpy(); + const agent = new AcpGatewayAgent( + createAcpConnection(), + createAcpGateway(requestSpy as unknown as GatewayClient["request"]), + { + sessionStore, + prefixCwd: options.prefixCwd, + provenanceMode: options.provenanceMode, + }, + ); + + await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send"); + return requestSpy; + } + async function runPromptWithCwd(cwd: string) { const pinnedHome = os.homedir(); const previousOpenClawHome = process.env.OPENCLAW_HOME; @@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => { delete process.env.OPENCLAW_HOME; process.env.HOME = pinnedHome; - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd, - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - prefixCwd: true, - }, - ); - try { - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - return requestSpy; + return await runPromptAndCaptureRequest({ cwd, prefixCwd: true }); } finally { if (previousOpenClawHome === undefined) { delete process.env.OPENCLAW_HOME; @@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects system provenance metadata when enabled", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw 
new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects a system provenance receipt when requested", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta+receipt", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -182,14 
+140,14 @@ describe("acp prompt cwd prefix", () => { expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"), + systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`), }), { expectFinal: true }, ); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"), + systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`), }), { expectFinal: true }, ); diff --git a/src/agents/auth-profiles/oauth.test.ts b/src/agents/auth-profiles/oauth.test.ts index c38d043c549..d4161b0d8ad 100644 --- a/src/agents/auth-profiles/oauth.test.ts +++ b/src/agents/auth-profiles/oauth.test.ts @@ -32,6 +32,20 @@ function tokenStore(params: { }; } +function githubCopilotTokenStore(profileId: string, includeInlineToken = true): AuthProfileStore { + return { + version: 1, + profiles: { + [profileId]: { + type: "token", + provider: "github-copilot", + ...(includeInlineToken ? 
{ token: "" } : {}), + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + }, + }; +} + async function resolveWithConfig(params: { profileId: string; provider: string; @@ -59,6 +73,25 @@ async function withEnvVar(key: string, value: string, run: () => Promise): } } +async function expectResolvedApiKey(params: { + profileId: string; + provider: string; + mode: "api_key" | "token" | "oauth"; + store: AuthProfileStore; + expectedApiKey: string; +}) { + const result = await resolveApiKeyForProfile({ + cfg: cfgFor(params.profileId, params.provider, params.mode), + store: params.store, + profileId: params.profileId, + }); + expect(result).toEqual({ + apiKey: params.expectedApiKey, // pragma: allowlist secret + provider: params.provider, + email: undefined, + }); +} + describe("resolveApiKeyForProfile config compatibility", () => { it("accepts token credentials when config mode is oauth", async () => { const profileId = "anthropic:token"; @@ -278,25 +311,12 @@ describe("resolveApiKeyForProfile secret refs", () => { it("resolves token tokenRef from env", async () => { const profileId = "github-copilot:default"; await withEnvVar("GITHUB_TOKEN", "gh-ref-token", async () => { - const result = await resolveApiKeyForProfile({ - cfg: cfgFor(profileId, "github-copilot", "token"), - store: { - version: 1, - profiles: { - [profileId]: { - type: "token", - provider: "github-copilot", - token: "", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }, - }, + await expectResolvedApiKey({ profileId, - }); - expect(result).toEqual({ - apiKey: "gh-ref-token", // pragma: allowlist secret provider: "github-copilot", - email: undefined, + mode: "token", + store: githubCopilotTokenStore(profileId), + expectedApiKey: "gh-ref-token", // pragma: allowlist secret }); }); }); @@ -304,24 +324,12 @@ describe("resolveApiKeyForProfile secret refs", () => { it("resolves token tokenRef without inline token when expires is absent", async () => { const 
profileId = "github-copilot:no-inline-token"; await withEnvVar("GITHUB_TOKEN", "gh-ref-token", async () => { - const result = await resolveApiKeyForProfile({ - cfg: cfgFor(profileId, "github-copilot", "token"), - store: { - version: 1, - profiles: { - [profileId]: { - type: "token", - provider: "github-copilot", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }, - }, + await expectResolvedApiKey({ profileId, - }); - expect(result).toEqual({ - apiKey: "gh-ref-token", // pragma: allowlist secret provider: "github-copilot", - email: undefined, + mode: "token", + store: githubCopilotTokenStore(profileId, false), + expectedApiKey: "gh-ref-token", // pragma: allowlist secret }); }); }); diff --git a/src/agents/bash-tools.exec-host-gateway.ts b/src/agents/bash-tools.exec-host-gateway.ts index 6b43fbe8663..149a4785dd5 100644 --- a/src/agents/bash-tools.exec-host-gateway.ts +++ b/src/agents/bash-tools.exec-host-gateway.ts @@ -1,10 +1,4 @@ import type { AgentToolResult } from "@mariozechner/pi-agent-core"; -import { loadConfig } from "../config/config.js"; -import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js"; -import { - hasConfiguredExecApprovalDmRoute, - resolveExecApprovalInitiatingSurfaceState, -} from "../infra/exec-approval-surface.js"; import { addAllowlistEntry, type ExecAsk, @@ -19,20 +13,22 @@ import { detectCommandObfuscation } from "../infra/exec-obfuscation-detect.js"; import type { SafeBinProfile } from "../infra/exec-safe-bin-policy.js"; import { logInfo } from "../logger.js"; import { markBackgrounded, tail } from "./bash-process-registry.js"; -import { sendExecApprovalFollowup } from "./bash-tools.exec-approval-followup.js"; import { buildExecApprovalRequesterContext, buildExecApprovalTurnSourceContext, registerExecApprovalRequestForHostOrThrow, } from "./bash-tools.exec-approval-request.js"; import { - createDefaultExecApprovalRequestContext, - resolveBaseExecApprovalDecision, + 
buildDefaultExecApprovalRequestArgs, + buildExecApprovalFollowupTarget, + buildExecApprovalPendingToolResult, + createExecApprovalDecisionState, + createAndRegisterDefaultExecApprovalRequest, resolveApprovalDecisionOrUndefined, resolveExecHostApprovalContext, + sendExecApprovalFollowupResult, } from "./bash-tools.exec-host-shared.js"; import { - buildApprovalPendingMessage, DEFAULT_NOTIFY_TAIL_CHARS, createApprovalSlug, normalizeNotifyOutput, @@ -145,83 +141,78 @@ export async function processGatewayAllowlist( } if (requiresAsk) { + const requestArgs = buildDefaultExecApprovalRequestArgs({ + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + }); + const registerGatewayApproval = async (approvalId: string) => + await registerExecApprovalRequestForHostOrThrow({ + approvalId, + command: params.command, + workdir: params.workdir, + host: "gateway", + security: hostSecurity, + ask: hostAsk, + ...buildExecApprovalRequesterContext({ + agentId: params.agentId, + sessionKey: params.sessionKey, + }), + resolvedPath: allowlistEval.segments[0]?.resolution?.resolvedPath, + ...buildExecApprovalTurnSourceContext(params), + }); const { approvalId, approvalSlug, warningText, - expiresAtMs: defaultExpiresAtMs, - preResolvedDecision: defaultPreResolvedDecision, - } = createDefaultExecApprovalRequestContext({ - warnings: params.warnings, - approvalRunningNoticeMs: params.approvalRunningNoticeMs, - createApprovalSlug, + expiresAtMs, + preResolvedDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + } = await createAndRegisterDefaultExecApprovalRequest({ + ...requestArgs, + register: registerGatewayApproval, }); const resolvedPath = allowlistEval.segments[0]?.resolution?.resolvedPath; const effectiveTimeout = typeof params.timeoutSec === "number" ? 
params.timeoutSec : params.defaultTimeoutSec; - let expiresAtMs = defaultExpiresAtMs; - let preResolvedDecision = defaultPreResolvedDecision; - - // Register first so the returned approval ID is actionable immediately. - const registration = await registerExecApprovalRequestForHostOrThrow({ + const followupTarget = buildExecApprovalFollowupTarget({ approvalId, - command: params.command, - workdir: params.workdir, - host: "gateway", - security: hostSecurity, - ask: hostAsk, - ...buildExecApprovalRequesterContext({ - agentId: params.agentId, - sessionKey: params.sessionKey, - }), - resolvedPath, - ...buildExecApprovalTurnSourceContext(params), + sessionKey: params.notifySessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); - expiresAtMs = registration.expiresAtMs; - preResolvedDecision = registration.finalDecision; - const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ - channel: params.turnSourceChannel, - accountId: params.turnSourceAccountId, - }); - const cfg = loadConfig(); - const sentApproverDms = - (initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") && - hasConfiguredExecApprovalDmRoute(cfg); - const unavailableReason = - preResolvedDecision === null - ? "no-approval-route" - : initiatingSurface.kind === "disabled" - ? "initiating-platform-disabled" - : initiatingSurface.kind === "unsupported" - ? 
"initiating-platform-unsupported" - : null; void (async () => { const decision = await resolveApprovalDecisionOrUndefined({ approvalId, preResolvedDecision, onFailure: () => - void sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: `Exec denied (gateway id=${approvalId}, approval-request-failed): ${params.command}`, - }), + void sendExecApprovalFollowupResult( + followupTarget, + `Exec denied (gateway id=${approvalId}, approval-request-failed): ${params.command}`, + ), }); if (decision === undefined) { return; } - const baseDecision = resolveBaseExecApprovalDecision({ + const { + baseDecision, + approvedByAsk: initialApprovedByAsk, + deniedReason: initialDeniedReason, + } = createExecApprovalDecisionState({ decision, askFallback, obfuscationDetected: obfuscation.detected, }); - let approvedByAsk = baseDecision.approvedByAsk; - let deniedReason = baseDecision.deniedReason; + let approvedByAsk = initialApprovedByAsk; + let deniedReason = initialDeniedReason; if (baseDecision.timedOut && askFallback === "allowlist") { if (!analysisOk || !allowlistSatisfied) { @@ -253,15 +244,10 @@ export async function processGatewayAllowlist( } if (deniedReason) { - await sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: `Exec denied (gateway id=${approvalId}, ${deniedReason}): ${params.command}`, - }).catch(() => {}); + await sendExecApprovalFollowupResult( + followupTarget, + `Exec denied (gateway id=${approvalId}, ${deniedReason}): ${params.command}`, + ); return; } @@ -287,15 +273,10 @@ export async function processGatewayAllowlist( 
timeoutSec: effectiveTimeout, }); } catch { - await sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: `Exec denied (gateway id=${approvalId}, spawn-failed): ${params.command}`, - }).catch(() => {}); + await sendExecApprovalFollowupResult( + followupTarget, + `Exec denied (gateway id=${approvalId}, spawn-failed): ${params.command}`, + ); return; } @@ -309,63 +290,22 @@ export async function processGatewayAllowlist( const summary = output ? `Exec finished (gateway id=${approvalId}, session=${run.session.id}, ${exitLabel})\n${output}` : `Exec finished (gateway id=${approvalId}, session=${run.session.id}, ${exitLabel})`; - await sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: summary, - }).catch(() => {}); + await sendExecApprovalFollowupResult(followupTarget, summary); })(); return { - pendingResult: { - content: [ - { - type: "text", - text: - unavailableReason !== null - ? (buildExecApprovalUnavailableReplyPayload({ - warningText, - reason: unavailableReason, - channelLabel: initiatingSurface.channelLabel, - sentApproverDms, - }).text ?? "") - : buildApprovalPendingMessage({ - warningText, - approvalSlug, - approvalId, - command: params.command, - cwd: params.workdir, - host: "gateway", - }), - }, - ], - details: - unavailableReason !== null - ? 
({ - status: "approval-unavailable", - reason: unavailableReason, - channelLabel: initiatingSurface.channelLabel, - sentApproverDms, - host: "gateway", - command: params.command, - cwd: params.workdir, - warningText, - } satisfies ExecToolDetails) - : ({ - status: "approval-pending", - approvalId, - approvalSlug, - expiresAtMs, - host: "gateway", - command: params.command, - cwd: params.workdir, - warningText, - } satisfies ExecToolDetails), - }, + pendingResult: buildExecApprovalPendingToolResult({ + host: "gateway", + command: params.command, + cwd: params.workdir, + warningText, + approvalId, + approvalSlug, + expiresAtMs, + initiatingSurface, + sentApproverDms, + unavailableReason, + }), }; } diff --git a/src/agents/bash-tools.exec-host-node.ts b/src/agents/bash-tools.exec-host-node.ts index c3a23197f0a..16af23590b4 100644 --- a/src/agents/bash-tools.exec-host-node.ts +++ b/src/agents/bash-tools.exec-host-node.ts @@ -1,11 +1,5 @@ import crypto from "node:crypto"; import type { AgentToolResult } from "@mariozechner/pi-agent-core"; -import { loadConfig } from "../config/config.js"; -import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js"; -import { - hasConfiguredExecApprovalDmRoute, - resolveExecApprovalInitiatingSurfaceState, -} from "../infra/exec-approval-surface.js"; import { type ExecApprovalsFile, type ExecAsk, @@ -18,20 +12,13 @@ import { detectCommandObfuscation } from "../infra/exec-obfuscation-detect.js"; import { buildNodeShellCommand } from "../infra/node-shell.js"; import { parsePreparedSystemRunPayload } from "../infra/system-run-approval-context.js"; import { logInfo } from "../logger.js"; -import { sendExecApprovalFollowup } from "./bash-tools.exec-approval-followup.js"; import { buildExecApprovalRequesterContext, buildExecApprovalTurnSourceContext, registerExecApprovalRequestForHostOrThrow, } from "./bash-tools.exec-approval-request.js"; +import * as execHostShared from "./bash-tools.exec-host-shared.js"; 
import { - createDefaultExecApprovalRequestContext, - resolveBaseExecApprovalDecision, - resolveApprovalDecisionOrUndefined, - resolveExecHostApprovalContext, -} from "./bash-tools.exec-host-shared.js"; -import { - buildApprovalPendingMessage, DEFAULT_NOTIFY_TAIL_CHARS, createApprovalSlug, normalizeNotifyOutput, @@ -66,7 +53,7 @@ export type ExecuteNodeHostCommandParams = { export async function executeNodeHostCommand( params: ExecuteNodeHostCommandParams, ): Promise> { - const { hostSecurity, hostAsk, askFallback } = resolveExecHostApprovalContext({ + const { hostSecurity, hostAsk, askFallback } = execHostShared.resolveExecHostApprovalContext({ agentId: params.agentId, security: params.security, ask: params.ask, @@ -221,82 +208,77 @@ export async function executeNodeHostCommand( }) satisfies Record; if (requiresAsk) { + const requestArgs = execHostShared.buildDefaultExecApprovalRequestArgs({ + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + }); + const registerNodeApproval = async (approvalId: string) => + await registerExecApprovalRequestForHostOrThrow({ + approvalId, + systemRunPlan: prepared.plan, + env: nodeEnv, + workdir: runCwd, + host: "node", + nodeId, + security: hostSecurity, + ask: hostAsk, + ...buildExecApprovalRequesterContext({ + agentId: runAgentId, + sessionKey: runSessionKey, + }), + ...buildExecApprovalTurnSourceContext(params), + }); const { approvalId, approvalSlug, warningText, - expiresAtMs: defaultExpiresAtMs, - preResolvedDecision: defaultPreResolvedDecision, - } = createDefaultExecApprovalRequestContext({ - warnings: params.warnings, - approvalRunningNoticeMs: params.approvalRunningNoticeMs, - createApprovalSlug, + expiresAtMs, + preResolvedDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + } = await execHostShared.createAndRegisterDefaultExecApprovalRequest({ 
+ ...requestArgs, + register: registerNodeApproval, }); - let expiresAtMs = defaultExpiresAtMs; - let preResolvedDecision = defaultPreResolvedDecision; - - // Register first so the returned approval ID is actionable immediately. - const registration = await registerExecApprovalRequestForHostOrThrow({ + const followupTarget = execHostShared.buildExecApprovalFollowupTarget({ approvalId, - systemRunPlan: prepared.plan, - env: nodeEnv, - workdir: runCwd, - host: "node", - nodeId, - security: hostSecurity, - ask: hostAsk, - ...buildExecApprovalRequesterContext({ - agentId: runAgentId, - sessionKey: runSessionKey, - }), - ...buildExecApprovalTurnSourceContext(params), + sessionKey: params.notifySessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); - expiresAtMs = registration.expiresAtMs; - preResolvedDecision = registration.finalDecision; - const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ - channel: params.turnSourceChannel, - accountId: params.turnSourceAccountId, - }); - const cfg = loadConfig(); - const sentApproverDms = - (initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") && - hasConfiguredExecApprovalDmRoute(cfg); - const unavailableReason = - preResolvedDecision === null - ? "no-approval-route" - : initiatingSurface.kind === "disabled" - ? "initiating-platform-disabled" - : initiatingSurface.kind === "unsupported" - ? 
"initiating-platform-unsupported" - : null; void (async () => { - const decision = await resolveApprovalDecisionOrUndefined({ + const decision = await execHostShared.resolveApprovalDecisionOrUndefined({ approvalId, preResolvedDecision, onFailure: () => - void sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: `Exec denied (node=${nodeId} id=${approvalId}, approval-request-failed): ${params.command}`, - }), + void execHostShared.sendExecApprovalFollowupResult( + followupTarget, + `Exec denied (node=${nodeId} id=${approvalId}, approval-request-failed): ${params.command}`, + ), }); if (decision === undefined) { return; } - const baseDecision = resolveBaseExecApprovalDecision({ + const { + baseDecision, + approvedByAsk: initialApprovedByAsk, + deniedReason: initialDeniedReason, + } = execHostShared.createExecApprovalDecisionState({ decision, askFallback, obfuscationDetected: obfuscation.detected, }); - let approvedByAsk = baseDecision.approvedByAsk; + let approvedByAsk = initialApprovedByAsk; let approvalDecision: "allow-once" | "allow-always" | null = null; - let deniedReason = baseDecision.deniedReason; + let deniedReason = initialDeniedReason; if (baseDecision.timedOut && askFallback === "full" && approvedByAsk) { approvalDecision = "allow-once"; @@ -309,15 +291,10 @@ export async function executeNodeHostCommand( } if (deniedReason) { - await sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: `Exec denied (node=${nodeId} id=${approvalId}, ${deniedReason}): ${params.command}`, - }).catch(() => {}); + await 
execHostShared.sendExecApprovalFollowupResult( + followupTarget, + `Exec denied (node=${nodeId} id=${approvalId}, ${deniedReason}): ${params.command}`, + ); return; } @@ -351,76 +328,28 @@ export async function executeNodeHostCommand( const summary = output ? `Exec finished (node=${nodeId} id=${approvalId}, ${exitLabel})\n${output}` : `Exec finished (node=${nodeId} id=${approvalId}, ${exitLabel})`; - await sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: summary, - }).catch(() => {}); + await execHostShared.sendExecApprovalFollowupResult(followupTarget, summary); } catch { - await sendExecApprovalFollowup({ - approvalId, - sessionKey: params.notifySessionKey, - turnSourceChannel: params.turnSourceChannel, - turnSourceTo: params.turnSourceTo, - turnSourceAccountId: params.turnSourceAccountId, - turnSourceThreadId: params.turnSourceThreadId, - resultText: `Exec denied (node=${nodeId} id=${approvalId}, invoke-failed): ${params.command}`, - }).catch(() => {}); + await execHostShared.sendExecApprovalFollowupResult( + followupTarget, + `Exec denied (node=${nodeId} id=${approvalId}, invoke-failed): ${params.command}`, + ); } })(); - return { - content: [ - { - type: "text", - text: - unavailableReason !== null - ? (buildExecApprovalUnavailableReplyPayload({ - warningText, - reason: unavailableReason, - channelLabel: initiatingSurface.channelLabel, - sentApproverDms, - }).text ?? "") - : buildApprovalPendingMessage({ - warningText, - approvalSlug, - approvalId, - command: prepared.plan.commandText, - cwd: runCwd, - host: "node", - nodeId, - }), - }, - ], - details: - unavailableReason !== null - ? 
({ - status: "approval-unavailable", - reason: unavailableReason, - channelLabel: initiatingSurface.channelLabel, - sentApproverDms, - host: "node", - command: params.command, - cwd: params.workdir, - nodeId, - warningText, - } satisfies ExecToolDetails) - : ({ - status: "approval-pending", - approvalId, - approvalSlug, - expiresAtMs, - host: "node", - command: params.command, - cwd: params.workdir, - nodeId, - warningText, - } satisfies ExecToolDetails), - }; + return execHostShared.buildExecApprovalPendingToolResult({ + host: "node", + command: params.command, + cwd: params.workdir, + warningText, + approvalId, + approvalSlug, + expiresAtMs, + initiatingSurface, + sentApproverDms, + unavailableReason, + nodeId, + }); } const startedAt = Date.now(); diff --git a/src/agents/bash-tools.exec-host-shared.ts b/src/agents/bash-tools.exec-host-shared.ts index c24e0a2f1fa..a9adaff17ee 100644 --- a/src/agents/bash-tools.exec-host-shared.ts +++ b/src/agents/bash-tools.exec-host-shared.ts @@ -1,4 +1,12 @@ import crypto from "node:crypto"; +import type { AgentToolResult } from "@mariozechner/pi-agent-core"; +import { loadConfig } from "../config/config.js"; +import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js"; +import { + hasConfiguredExecApprovalDmRoute, + type ExecApprovalInitiatingSurfaceState, + resolveExecApprovalInitiatingSurfaceState, +} from "../infra/exec-approval-surface.js"; import { maxAsk, minSecurity, @@ -6,8 +14,14 @@ import { type ExecAsk, type ExecSecurity, } from "../infra/exec-approvals.js"; -import { resolveRegisteredExecApprovalDecision } from "./bash-tools.exec-approval-request.js"; +import { sendExecApprovalFollowup } from "./bash-tools.exec-approval-followup.js"; +import { + type ExecApprovalRegistration, + resolveRegisteredExecApprovalDecision, +} from "./bash-tools.exec-approval-request.js"; +import { buildApprovalPendingMessage } from "./bash-tools.exec-runtime.js"; import { DEFAULT_APPROVAL_TIMEOUT_MS } from 
"./bash-tools.exec-runtime.js"; +import type { ExecToolDetails } from "./bash-tools.exec-types.js"; type ResolvedExecApprovals = ReturnType; @@ -28,6 +42,39 @@ export type ExecApprovalRequestState = ExecApprovalPendingState & { noticeSeconds: number; }; +export type ExecApprovalUnavailableReason = + | "no-approval-route" + | "initiating-platform-disabled" + | "initiating-platform-unsupported"; + +export type RegisteredExecApprovalRequestContext = { + approvalId: string; + approvalSlug: string; + warningText: string; + expiresAtMs: number; + preResolvedDecision: string | null | undefined; + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; +}; + +export type ExecApprovalFollowupTarget = { + approvalId: string; + sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; +}; + +export type DefaultExecApprovalRequestArgs = { + warnings: string[]; + approvalRunningNoticeMs: number; + createApprovalSlug: (approvalId: string) => string; + turnSourceChannel?: string; + turnSourceAccountId?: string; +}; + export function createExecApprovalPendingState(params: { warnings: string[]; timeoutMs: number; @@ -158,3 +205,197 @@ export async function resolveApprovalDecisionOrUndefined(params: { return undefined; } } + +export function resolveExecApprovalUnavailableState(params: { + turnSourceChannel?: string; + turnSourceAccountId?: string; + preResolvedDecision: string | null | undefined; +}): { + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; +} { + const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ + channel: params.turnSourceChannel, + accountId: params.turnSourceAccountId, + }); + const sentApproverDms = + (initiatingSurface.kind === "disabled" || initiatingSurface.kind === 
"unsupported") && + hasConfiguredExecApprovalDmRoute(loadConfig()); + const unavailableReason = + params.preResolvedDecision === null + ? "no-approval-route" + : initiatingSurface.kind === "disabled" + ? "initiating-platform-disabled" + : initiatingSurface.kind === "unsupported" + ? "initiating-platform-unsupported" + : null; + return { + initiatingSurface, + sentApproverDms, + unavailableReason, + }; +} + +export async function createAndRegisterDefaultExecApprovalRequest(params: { + warnings: string[]; + approvalRunningNoticeMs: number; + createApprovalSlug: (approvalId: string) => string; + turnSourceChannel?: string; + turnSourceAccountId?: string; + register: (approvalId: string) => Promise; +}): Promise { + const { + approvalId, + approvalSlug, + warningText, + expiresAtMs: defaultExpiresAtMs, + preResolvedDecision: defaultPreResolvedDecision, + } = createDefaultExecApprovalRequestContext({ + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug: params.createApprovalSlug, + }); + const registration = await params.register(approvalId); + const preResolvedDecision = registration.finalDecision; + const { initiatingSurface, sentApproverDms, unavailableReason } = + resolveExecApprovalUnavailableState({ + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + preResolvedDecision, + }); + + return { + approvalId, + approvalSlug, + warningText, + expiresAtMs: registration.expiresAtMs ?? defaultExpiresAtMs, + preResolvedDecision: + registration.finalDecision === undefined + ? 
defaultPreResolvedDecision + : registration.finalDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + }; +} + +export function buildDefaultExecApprovalRequestArgs( + params: DefaultExecApprovalRequestArgs, +): DefaultExecApprovalRequestArgs { + return { + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug: params.createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + }; +} + +export function buildExecApprovalFollowupTarget( + params: ExecApprovalFollowupTarget, +): ExecApprovalFollowupTarget { + return { + approvalId: params.approvalId, + sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, + }; +} + +export function createExecApprovalDecisionState(params: { + decision: string | null | undefined; + askFallback: ResolvedExecApprovals["agent"]["askFallback"]; + obfuscationDetected: boolean; +}) { + const baseDecision = resolveBaseExecApprovalDecision({ + decision: params.decision ?? 
null, + askFallback: params.askFallback, + obfuscationDetected: params.obfuscationDetected, + }); + return { + baseDecision, + approvedByAsk: baseDecision.approvedByAsk, + deniedReason: baseDecision.deniedReason, + }; +} + +export async function sendExecApprovalFollowupResult( + target: ExecApprovalFollowupTarget, + resultText: string, +): Promise { + await sendExecApprovalFollowup({ + approvalId: target.approvalId, + sessionKey: target.sessionKey, + turnSourceChannel: target.turnSourceChannel, + turnSourceTo: target.turnSourceTo, + turnSourceAccountId: target.turnSourceAccountId, + turnSourceThreadId: target.turnSourceThreadId, + resultText, + }).catch(() => {}); +} + +export function buildExecApprovalPendingToolResult(params: { + host: "gateway" | "node"; + command: string; + cwd: string; + warningText: string; + approvalId: string; + approvalSlug: string; + expiresAtMs: number; + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; + nodeId?: string; +}): AgentToolResult { + return { + content: [ + { + type: "text", + text: + params.unavailableReason !== null + ? (buildExecApprovalUnavailableReplyPayload({ + warningText: params.warningText, + reason: params.unavailableReason, + channelLabel: params.initiatingSurface.channelLabel, + sentApproverDms: params.sentApproverDms, + }).text ?? "") + : buildApprovalPendingMessage({ + warningText: params.warningText, + approvalSlug: params.approvalSlug, + approvalId: params.approvalId, + command: params.command, + cwd: params.cwd, + host: params.host, + nodeId: params.nodeId, + }), + }, + ], + details: + params.unavailableReason !== null + ? 
({ + status: "approval-unavailable", + reason: params.unavailableReason, + channelLabel: params.initiatingSurface.channelLabel, + sentApproverDms: params.sentApproverDms, + host: params.host, + command: params.command, + cwd: params.cwd, + nodeId: params.nodeId, + warningText: params.warningText, + } satisfies ExecToolDetails) + : ({ + status: "approval-pending", + approvalId: params.approvalId, + approvalSlug: params.approvalSlug, + expiresAtMs: params.expiresAtMs, + host: params.host, + command: params.command, + cwd: params.cwd, + nodeId: params.nodeId, + warningText: params.warningText, + } satisfies ExecToolDetails), + }; +} diff --git a/src/agents/bash-tools.exec.approval-id.test.ts b/src/agents/bash-tools.exec.approval-id.test.ts index cc94f83d665..211d8e3dcaa 100644 --- a/src/agents/bash-tools.exec.approval-id.test.ts +++ b/src/agents/bash-tools.exec.approval-id.test.ts @@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) { return buildSystemRunPreparePayload(params); } +function getTestConfigPath() { + return path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json"); +} + +async function writeOpenClawConfig(config: Record, pretty = false) { + const configPath = getTestConfigPath(); + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined)); +} + +async function writeExecApprovalsConfig(config: Record) { + const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); + await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); + await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2)); +} + +function acceptedApprovalResponse(params: unknown) { + return { status: "accepted", id: (params as { id?: string })?.id }; +} + +function getResultText(result: { content: Array<{ type?: string; text?: string }> }) { + return result.content.find((part) => part.type === "text")?.text ?? 
""; +} + +function expectPendingApprovalText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + options: { + command: string; + host: "gateway" | "node"; + nodeId?: string; + interactive?: boolean; + }, +) { + expect(result.details.status).toBe("approval-pending"); + const details = result.details as { approvalId: string; approvalSlug: string }; + const pendingText = getResultText(result); + expect(pendingText).toContain( + `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, + ); + expect(pendingText).toContain(`full ${details.approvalId}`); + expect(pendingText).toContain(`Host: ${options.host}`); + if (options.nodeId) { + expect(pendingText).toContain(`Node: ${options.nodeId}`); + } + expect(pendingText).toContain(`CWD: ${process.cwd()}`); + expect(pendingText).toContain("Command:\n```sh\n"); + expect(pendingText).toContain(options.command); + if (options.interactive) { + expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); + expect(pendingText).toContain("Background mode requires pre-approved policy"); + } + return details; +} + +function expectPendingCommandText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + command: string, +) { + expect(result.details.status).toBe("approval-pending"); + const text = getResultText(result); + expect(text).toContain("Command:\n```sh\n"); + expect(text).toContain(command); +} + +function mockGatewayOkCalls(calls: string[]) { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + calls.push(method); + return { ok: true }; + }); +} + +function createElevatedAllowlistExecTool() { + return createExecTool({ + ask: "on-miss", + security: "allowlist", + approvalRunningNoticeMs: 0, + elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, + }); +} + +async function expectGatewayExecWithoutApproval(options: { + config: Record; + command: string; + 
ask?: "always" | "on-miss" | "off"; +}) { + await writeExecApprovalsConfig(options.config); + const calls: string[] = []; + mockGatewayOkCalls(calls); + + const tool = createExecTool({ + host: "gateway", + ask: options.ask, + security: "full", + approvalRunningNoticeMs: 0, + }); + + const result = await tool.execute("call-no-approval", { command: options.command }); + expect(result.details.status).toBe("completed"); + expect(calls).not.toContain("exec.approval.request"); + expect(calls).not.toContain("exec.approval.waitDecision"); +} + +function mockAcceptedApprovalFlow(options: { + onAgent?: (params: Record) => void; + onNodeInvoke?: (params: unknown) => unknown; +}) { + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + if (method === "exec.approval.request") { + return acceptedApprovalResponse(params); + } + if (method === "exec.approval.waitDecision") { + return { decision: "allow-once" }; + } + if (method === "agent" && options.onAgent) { + options.onAgent(params as Record); + return { status: "ok" }; + } + if (method === "node.invoke" && options.onNodeInvoke) { + return await options.onNodeInvoke(params); + } + return { ok: true }; + }); +} + +function mockPendingApprovalRegistration() { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + if (method === "exec.approval.request") { + return { status: "accepted", id: "approval-id" }; + } + if (method === "exec.approval.waitDecision") { + return { decision: null }; + } + return { ok: true }; + }); +} + +function expectApprovalUnavailableText(result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; +}) { + expect(result.details.status).toBe("approval-unavailable"); + const text = result.content.find((part) => part.type === "text")?.text ?? 
""; + expect(text).not.toContain("/approve"); + expect(text).not.toContain("npm view diver name version description"); + expect(text).not.toContain("Pending command:"); + expect(text).not.toContain("Host:"); + expect(text).not.toContain("CWD:"); + return text; +} + describe("exec approvals", () => { let previousHome: string | undefined; let previousUserProfile: string | undefined; @@ -81,18 +237,11 @@ describe("exec approvals", () => { let invokeParams: unknown; let agentParams: unknown; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "agent") { + mockAcceptedApprovalFlow({ + onAgent: (params) => { agentParams = params; - return { status: "ok" }; - } - if (method === "node.invoke") { + }, + onNodeInvoke: (params) => { const invoke = params as { command?: string }; if (invoke.command === "system.run.prepare") { return buildPreparedSystemRunPayload(params); @@ -101,8 +250,7 @@ describe("exec approvals", () => { invokeParams = params; return { payload: { success: true, stdout: "ok" } }; } - } - return { ok: true }; + }, }); const tool = createExecTool({ @@ -113,19 +261,12 @@ describe("exec approvals", () => { }); const result = await tool.execute("call1", { command: "ls -la" }); - expect(result.details.status).toBe("approval-pending"); - const details = result.details as { approvalId: string; approvalSlug: string }; - const pendingText = result.content.find((part) => part.type === "text")?.text ?? 
""; - expect(pendingText).toContain( - `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, - ); - expect(pendingText).toContain(`full ${details.approvalId}`); - expect(pendingText).toContain("Host: node"); - expect(pendingText).toContain("Node: node-1"); - expect(pendingText).toContain(`CWD: ${process.cwd()}`); - expect(pendingText).toContain("Command:\n```sh\nls -la\n```"); - expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); - expect(pendingText).toContain("Background mode requires pre-approved policy"); + const details = expectPendingApprovalText(result, { + command: "ls -la", + host: "node", + nodeId: "node-1", + interactive: true, + }); const approvalId = details.approvalId; await expect @@ -214,74 +355,28 @@ describe("exec approvals", () => { }); it("uses exec-approvals ask=off to suppress gateway prompts", async () => { - const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: { - main: { security: "full", ask: "off", askFallback: "full" }, - }, + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: { + main: { security: "full", ask: "off", askFallback: "full" }, }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; - }); - - const tool = createExecTool({ - host: "gateway", + }, + command: "echo ok", ask: "on-miss", - security: "full", - approvalRunningNoticeMs: 0, }); - - const result = await tool.execute("call3b", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - 
expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => { - const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: {}, - }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: {}, + }, + command: "echo ok", }); - - const tool = createExecTool({ - host: "gateway", - security: "full", - approvalRunningNoticeMs: 0, - }); - - const result = await tool.execute("call3c", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("requires approval for elevated ask when allowlist misses", async () => { @@ -296,7 +391,7 @@ describe("exec approvals", () => { if (method === "exec.approval.request") { resolveApproval?.(); // Return registration confirmation - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -304,24 +399,10 @@ describe("exec approvals", () => { return { ok: true }; }); - const tool = createExecTool({ - ask: "on-miss", - security: "allowlist", - approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, - }); + const tool = createElevatedAllowlistExecTool(); const result = await 
tool.execute("call4", { command: "echo ok", elevated: true }); - expect(result.details.status).toBe("approval-pending"); - const details = result.details as { approvalId: string; approvalSlug: string }; - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, - ); - expect(pendingText).toContain(`full ${details.approvalId}`); - expect(pendingText).toContain("Host: gateway"); - expect(pendingText).toContain(`CWD: ${process.cwd()}`); - expect(pendingText).toContain("Command:\n```sh\necho ok\n```"); + expectPendingApprovalText(result, { command: "echo ok", host: "gateway" }); await approvalSeen; expect(calls).toContain("exec.approval.request"); expect(calls).toContain("exec.approval.waitDecision"); @@ -330,18 +411,10 @@ describe("exec approvals", () => { it("starts a direct agent follow-up after approved gateway exec completes", async () => { const agentCalls: Array> = []; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "agent") { - agentCalls.push(params as Record); - return { status: "ok" }; - } - return { ok: true }; + mockAcceptedApprovalFlow({ + onAgent: (params) => { + agentCalls.push(params); + }, }); const tool = createExecTool({ @@ -388,7 +461,7 @@ describe("exec approvals", () => { if (typeof request.id === "string") { requestIds.push(request.id); } - return { status: "accepted", id: request.id }; + return acceptedApprovalResponse(request); } if (method === "exec.approval.waitDecision") { const wait = params as { id?: string }; @@ -400,12 +473,7 @@ describe("exec approvals", () => { return { ok: true }; }); - const tool = createExecTool({ - ask: "on-miss", - 
security: "allowlist", - approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, - }); + const tool = createElevatedAllowlistExecTool(); const first = await tool.execute("call-seq-1", { command: "npm view diver --json", @@ -429,7 +497,7 @@ describe("exec approvals", () => { vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { calls.push(method); if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -448,11 +516,7 @@ describe("exec approvals", () => { command: "npm view diver --json | jq .name && brew outdated", }); - expect(result.details.status).toBe("approval-pending"); - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - "Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```", - ); + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); }); @@ -480,11 +544,7 @@ describe("exec approvals", () => { command: "npm view diver --json | jq .name && brew outdated", }); - expect(result.details.status).toBe("approval-pending"); - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - "Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```", - ); + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); }); @@ -551,30 +611,17 @@ describe("exec approvals", () => { }); it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => { - const configPath = path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); - await fs.mkdir(path.dirname(configPath), { recursive: true }); - await fs.writeFile( - configPath, - JSON.stringify({ - channels: { - discord: { - enabled: true, - execApprovals: { enabled: false }, - }, + await writeOpenClawConfig({ + channels: { + discord: { + enabled: true, + execApprovals: { enabled: false }, }, - }), - ); - - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: "approval-id" }; - } - if (method === "exec.approval.waitDecision") { - return { decision: null }; - } - return { ok: true }; + }, }); + mockPendingApprovalRegistration(); + const tool = createExecTool({ host: "gateway", ask: "always", @@ -588,49 +635,29 @@ describe("exec approvals", () => { command: "npm view diver name version description", }); - expect(result.details.status).toBe("approval-unavailable"); - const text = result.content.find((part) => part.type === "text")?.text ?? ""; + const text = expectApprovalUnavailableText(result); expect(text).toContain("chat exec approvals are not enabled on Discord"); expect(text).toContain("Web UI or terminal UI"); - expect(text).not.toContain("/approve"); - expect(text).not.toContain("npm view diver name version description"); - expect(text).not.toContain("Pending command:"); - expect(text).not.toContain("Host:"); - expect(text).not.toContain("CWD:"); }); it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => { - const configPath = path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); - await fs.mkdir(path.dirname(configPath), { recursive: true }); - await fs.writeFile( - configPath, - JSON.stringify( - { - channels: { - telegram: { - enabled: true, - execApprovals: { enabled: false }, - }, - discord: { - enabled: true, - execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, - }, + await writeOpenClawConfig( + { + channels: { + telegram: { + enabled: true, + execApprovals: { enabled: false }, + }, + discord: { + enabled: true, + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, }, }, - null, - 2, - ), + }, + true, ); - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: "approval-id" }; - } - if (method === "exec.approval.waitDecision") { - return { decision: null }; - } - return { ok: true }; - }); + mockPendingApprovalRegistration(); const tool = createExecTool({ host: "gateway", @@ -645,14 +672,8 @@ describe("exec approvals", () => { command: "npm view diver name version description", }); - expect(result.details.status).toBe("approval-unavailable"); - const text = result.content.find((part) => part.type === "text")?.text ?? ""; + const text = expectApprovalUnavailableText(result); expect(text).toContain("Approval required. 
I sent the allowed approvers DMs."); - expect(text).not.toContain("/approve"); - expect(text).not.toContain("npm view diver name version description"); - expect(text).not.toContain("Pending command:"); - expect(text).not.toContain("Host:"); - expect(text).not.toContain("CWD:"); }); it("denies node obfuscated command when approval request times out", async () => { diff --git a/src/agents/context.lookup.test.ts b/src/agents/context.lookup.test.ts index 428d47759bc..a395f0b3089 100644 --- a/src/agents/context.lookup.test.ts +++ b/src/agents/context.lookup.test.ts @@ -1,8 +1,13 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; -function mockContextModuleDeps(loadConfigImpl: () => unknown) { +type DiscoveredModel = { id: string; contextWindow: number }; + +function mockContextDeps(params: { + loadConfig: () => unknown; + discoveredModels?: DiscoveredModel[]; +}) { vi.doMock("../config/config.js", () => ({ - loadConfig: loadConfigImpl, + loadConfig: params.loadConfig, })); vi.doMock("./models-config.js", () => ({ ensureOpenClawModelsJson: vi.fn(async () => {}), @@ -13,29 +18,42 @@ function mockContextModuleDeps(loadConfigImpl: () => unknown) { vi.doMock("./pi-model-discovery.js", () => ({ discoverAuthStorage: vi.fn(() => ({})), discoverModels: vi.fn(() => ({ - getAll: () => [], + getAll: () => params.discoveredModels ?? [], })), })); } +function mockContextModuleDeps(loadConfigImpl: () => unknown) { + mockContextDeps({ loadConfig: loadConfigImpl }); +} + // Shared mock setup used by multiple tests. function mockDiscoveryDeps( - models: Array<{ id: string; contextWindow: number }>, + models: DiscoveredModel[], configModels?: Record }>, ) { - vi.doMock("../config/config.js", () => ({ + mockContextDeps({ loadConfig: () => ({ models: configModels ? 
{ providers: configModels } : {} }), - })); - vi.doMock("./models-config.js", () => ({ - ensureOpenClawModelsJson: vi.fn(async () => {}), - })); - vi.doMock("./agent-paths.js", () => ({ - resolveOpenClawAgentDir: () => "/tmp/openclaw-agent", - })); - vi.doMock("./pi-model-discovery.js", () => ({ - discoverAuthStorage: vi.fn(() => ({})), - discoverModels: vi.fn(() => ({ getAll: () => models })), - })); + discoveredModels: models, + }); +} + +function createContextOverrideConfig(provider: string, model: string, contextWindow: number) { + return { + models: { + providers: { + [provider]: { + models: [{ id: model, contextWindow }], + }, + }, + }, + }; +} + +async function importResolveContextTokensForModel() { + const { resolveContextTokensForModel } = await import("./context.js"); + await new Promise((r) => setTimeout(r, 0)); + return resolveContextTokensForModel; } describe("lookupContextTokens", () => { @@ -150,18 +168,8 @@ describe("lookupContextTokens", () => { { id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 }, ]); - const cfg = { - models: { - providers: { - "google-gemini-cli": { - models: [{ id: "gemini-3.1-pro-preview", contextWindow: 200_000 }], - }, - }, - }, - }; - - const { resolveContextTokensForModel } = await import("./context.js"); - await new Promise((r) => setTimeout(r, 0)); + const cfg = createContextOverrideConfig("google-gemini-cli", "gemini-3.1-pro-preview", 200_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); const result = resolveContextTokensForModel({ cfg: cfg as never, @@ -174,18 +182,8 @@ describe("lookupContextTokens", () => { it("resolveContextTokensForModel honors configured overrides when provider keys use mixed case", async () => { mockDiscoveryDeps([{ id: "openrouter/anthropic/claude-sonnet-4-5", contextWindow: 1_048_576 }]); - const cfg = { - models: { - providers: { - " OpenRouter ": { - models: [{ id: "anthropic/claude-sonnet-4-5", contextWindow: 200_000 }], - }, - 
}, - }, - }; - - const { resolveContextTokensForModel } = await import("./context.js"); - await new Promise((r) => setTimeout(r, 0)); + const cfg = createContextOverrideConfig(" OpenRouter ", "anthropic/claude-sonnet-4-5", 200_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); const result = resolveContextTokensForModel({ cfg: cfg as never, @@ -202,16 +200,8 @@ describe("lookupContextTokens", () => { // Real callers (status.summary.ts) always pass cfg when provider is explicit. mockDiscoveryDeps([{ id: "google/gemini-2.5-pro", contextWindow: 999_000 }]); - const cfg = { - models: { - providers: { - google: { models: [{ id: "gemini-2.5-pro", contextWindow: 2_000_000 }] }, - }, - }, - }; - - const { resolveContextTokensForModel } = await import("./context.js"); - await new Promise((r) => setTimeout(r, 0)); + const cfg = createContextOverrideConfig("google", "gemini-2.5-pro", 2_000_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); // Google with explicit cfg: config direct scan wins before any cache lookup. const googleResult = resolveContextTokensForModel({ @@ -272,16 +262,8 @@ describe("lookupContextTokens", () => { // window and misreport context limits for the OpenRouter session. mockDiscoveryDeps([{ id: "google/gemini-2.5-pro", contextWindow: 999_000 }]); - const cfg = { - models: { - providers: { - google: { models: [{ id: "gemini-2.5-pro", contextWindow: 2_000_000 }] }, - }, - }, - }; - - const { resolveContextTokensForModel } = await import("./context.js"); - await new Promise((r) => setTimeout(r, 0)); + const cfg = createContextOverrideConfig("google", "gemini-2.5-pro", 2_000_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); // model-only call (no explicit provider) must NOT apply config direct scan. // Falls through to bare cache lookup: "google/gemini-2.5-pro" → 999k ✓. 
diff --git a/src/agents/memory-search.test.ts b/src/agents/memory-search.test.ts index 8b1b4bc3494..feb0054b302 100644 --- a/src/agents/memory-search.test.ts +++ b/src/agents/memory-search.test.ts @@ -29,6 +29,56 @@ describe("memory search config", () => { }); } + function expectEmptyMultimodalConfig(resolved: ReturnType) { + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: [], + maxFileBytes: 10 * 1024 * 1024, + }); + } + + function configWithRemoteDefaults(remote: Record) { + return asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + remote, + }, + }, + list: [ + { + id: "main", + default: true, + memorySearch: { + remote: { + baseUrl: "https://agent.example/v1", + }, + }, + }, + ], + }, + }); + } + + function expectMergedRemoteConfig( + resolved: ReturnType, + apiKey: unknown, + ) { + expect(resolved?.remote).toEqual({ + baseUrl: "https://agent.example/v1", + apiKey, + headers: { "X-Default": "on" }, + batch: { + enabled: false, + wait: true, + concurrency: 2, + pollIntervalMs: 2000, + timeoutMinutes: 60, + }, + }); + } + it("returns null when disabled", () => { const cfg = asConfig({ agents: { @@ -171,11 +221,7 @@ describe("memory search config", () => { }, }); const resolved = resolveMemorySearchConfig(cfg, "main"); - expect(resolved?.multimodal).toEqual({ - enabled: true, - modalities: [], - maxFileBytes: 10 * 1024 * 1024, - }); + expectEmptyMultimodalConfig(resolved); expect(resolved?.provider).toBe("gemini"); }); @@ -196,11 +242,7 @@ describe("memory search config", () => { }, }); const resolved = resolveMemorySearchConfig(cfg, "main"); - expect(resolved?.multimodal).toEqual({ - enabled: true, - modalities: [], - maxFileBytes: 10 * 1024 * 1024, - }); + expectEmptyMultimodalConfig(resolved); }); it("rejects multimodal memory on unsupported providers", () => { @@ -289,85 +331,27 @@ describe("memory search config", () => { }); it("merges remote defaults with agent overrides", () => { - const cfg = asConfig({ - 
agents: { - defaults: { - memorySearch: { - provider: "openai", - remote: { - baseUrl: "https://default.example/v1", - apiKey: "default-key", // pragma: allowlist secret - headers: { "X-Default": "on" }, - }, - }, - }, - list: [ - { - id: "main", - default: true, - memorySearch: { - remote: { - baseUrl: "https://agent.example/v1", - }, - }, - }, - ], - }, - }); - const resolved = resolveMemorySearchConfig(cfg, "main"); - expect(resolved?.remote).toEqual({ - baseUrl: "https://agent.example/v1", + const cfg = configWithRemoteDefaults({ + baseUrl: "https://default.example/v1", apiKey: "default-key", // pragma: allowlist secret headers: { "X-Default": "on" }, - batch: { - enabled: false, - wait: true, - concurrency: 2, - pollIntervalMs: 2000, - timeoutMinutes: 60, - }, }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expectMergedRemoteConfig(resolved, "default-key"); // pragma: allowlist secret }); it("preserves SecretRef remote apiKey when merging defaults with agent overrides", () => { - const cfg = asConfig({ - agents: { - defaults: { - memorySearch: { - provider: "openai", - remote: { - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - headers: { "X-Default": "on" }, - }, - }, - }, - list: [ - { - id: "main", - default: true, - memorySearch: { - remote: { - baseUrl: "https://agent.example/v1", - }, - }, - }, - ], - }, + const cfg = configWithRemoteDefaults({ + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + headers: { "X-Default": "on" }, }); const resolved = resolveMemorySearchConfig(cfg, "main"); - expect(resolved?.remote).toEqual({ - baseUrl: "https://agent.example/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, - headers: { "X-Default": "on" }, - batch: { - enabled: false, - wait: true, - concurrency: 2, - pollIntervalMs: 2000, - timeoutMinutes: 60, - }, + expectMergedRemoteConfig(resolved, { + source: "env", + 
provider: "default", + id: "OPENAI_API_KEY", }); }); diff --git a/src/agents/model-auth-markers.ts b/src/agents/model-auth-markers.ts index e888f06d0c5..8a890d3a694 100644 --- a/src/agents/model-auth-markers.ts +++ b/src/agents/model-auth-markers.ts @@ -4,6 +4,7 @@ import { listKnownProviderEnvApiKeyNames } from "./model-auth-env-vars.js"; export const MINIMAX_OAUTH_MARKER = "minimax-oauth"; export const QWEN_OAUTH_MARKER = "qwen-oauth"; export const OLLAMA_LOCAL_AUTH_MARKER = "ollama-local"; +export const CUSTOM_LOCAL_AUTH_MARKER = "custom-local"; export const NON_ENV_SECRETREF_MARKER = "secretref-managed"; // pragma: allowlist secret export const SECRETREF_ENV_HEADER_MARKER_PREFIX = "secretref-env:"; // pragma: allowlist secret @@ -71,6 +72,7 @@ export function isNonSecretApiKeyMarker( trimmed === MINIMAX_OAUTH_MARKER || trimmed === QWEN_OAUTH_MARKER || trimmed === OLLAMA_LOCAL_AUTH_MARKER || + trimmed === CUSTOM_LOCAL_AUTH_MARKER || trimmed === NON_ENV_SECRETREF_MARKER || isAwsSdkAuthMarker(trimmed); if (isKnownMarker) { diff --git a/src/agents/model-auth.test.ts b/src/agents/model-auth.test.ts index 2deaeb7dbf6..de8f0f1b752 100644 --- a/src/agents/model-auth.test.ts +++ b/src/agents/model-auth.test.ts @@ -1,9 +1,12 @@ -import { describe, expect, it } from "vitest"; +import { streamSimpleOpenAICompletions, type Model } from "@mariozechner/pi-ai"; +import { afterEach, describe, expect, it, vi } from "vitest"; import type { AuthProfileStore } from "./auth-profiles.js"; -import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; +import { CUSTOM_LOCAL_AUTH_MARKER, NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; import { + applyLocalNoAuthHeaderOverride, hasUsableCustomProviderApiKey, requireApiKey, + resolveApiKeyForProvider, resolveAwsSdkEnvVarName, resolveModelAuthMode, resolveUsableCustomProviderApiKey, @@ -223,3 +226,334 @@ describe("resolveUsableCustomProviderApiKey", () => { } }); }); + +describe("resolveApiKeyForProvider – synthetic local 
auth for custom providers", () => { + it("synthesizes a local auth marker for custom providers with a local baseUrl and no apiKey", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "custom-127-0-0-1-8080", + cfg: { + models: { + providers: { + "custom-127-0-0-1-8080": { + baseUrl: "http://127.0.0.1:8080/v1", + api: "openai-completions", + models: [ + { + id: "qwen-3.5", + name: "Qwen 3.5", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + expect(auth.source).toContain("synthetic local key"); + }); + + it("synthesizes a local auth marker for localhost custom providers", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-local", + cfg: { + models: { + providers: { + "my-local": { + baseUrl: "http://localhost:11434/v1", + api: "openai-completions", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("synthesizes a local auth marker for IPv6 loopback (::1)", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-ipv6", + cfg: { + models: { + providers: { + "my-ipv6": { + baseUrl: "http://[::1]:8080/v1", + api: "openai-completions", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("synthesizes a local auth marker for 0.0.0.0", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-wildcard", + cfg: { + 
models: { + providers: { + "my-wildcard": { + baseUrl: "http://0.0.0.0:11434/v1", + api: "openai-completions", + models: [ + { + id: "qwen", + name: "Qwen", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("synthesizes a local auth marker for IPv4-mapped IPv6 (::ffff:127.0.0.1)", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-mapped", + cfg: { + models: { + providers: { + "my-mapped": { + baseUrl: "http://[::ffff:127.0.0.1]:8080/v1", + api: "openai-completions", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("does not synthesize auth for remote custom providers without apiKey", async () => { + await expect( + resolveApiKeyForProvider({ + provider: "my-remote", + cfg: { + models: { + providers: { + "my-remote": { + baseUrl: "https://api.example.com/v1", + api: "openai-completions", + models: [ + { + id: "gpt-5", + name: "GPT-5", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }), + ).rejects.toThrow("No API key found"); + }); + + it("does not synthesize local auth when apiKey is explicitly configured but unresolved", async () => { + const previous = process.env.OPENAI_API_KEY; + delete process.env.OPENAI_API_KEY; + try { + await expect( + resolveApiKeyForProvider({ + provider: "custom", + cfg: { + models: { + providers: { + custom: { + baseUrl: "http://127.0.0.1:8080/v1", + api: "openai-completions", + apiKey: "OPENAI_API_KEY", + models: [ + { + id: "llama3", + name: 
"Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }), + ).rejects.toThrow('No API key found for provider "custom"'); + } finally { + if (previous === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = previous; + } + } + }); + + it("does not synthesize local auth when auth mode explicitly requires oauth", async () => { + await expect( + resolveApiKeyForProvider({ + provider: "custom", + cfg: { + models: { + providers: { + custom: { + baseUrl: "http://127.0.0.1:8080/v1", + api: "openai-completions", + auth: "oauth", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }), + ).rejects.toThrow('No API key found for provider "custom"'); + }); + + it("keeps built-in aws-sdk fallback for local baseUrl overrides", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "amazon-bedrock", + cfg: { + models: { + providers: { + "amazon-bedrock": { + baseUrl: "http://127.0.0.1:8080/v1", + models: [], + }, + }, + }, + }, + }); + + expect(auth.mode).toBe("aws-sdk"); + expect(auth.apiKey).toBeUndefined(); + }); +}); + +describe("applyLocalNoAuthHeaderOverride", () => { + const originalFetch = globalThis.fetch; + + afterEach(() => { + globalThis.fetch = originalFetch; + vi.restoreAllMocks(); + }); + + it("clears Authorization for synthetic local OpenAI-compatible auth markers", async () => { + let capturedAuthorization: string | null | undefined; + let capturedXTest: string | null | undefined; + let resolveRequest: (() => void) | undefined; + const requestSeen = new Promise((resolve) => { + resolveRequest = resolve; + }); + globalThis.fetch = vi.fn(async (_input, init) => { + const headers = new Headers(init?.headers); 
+ capturedAuthorization = headers.get("Authorization"); + capturedXTest = headers.get("X-Test"); + resolveRequest?.(); + return new Response(JSON.stringify({ error: { message: "unauthorized" } }), { + status: 401, + headers: { "content-type": "application/json" }, + }); + }) as typeof fetch; + + const model = applyLocalNoAuthHeaderOverride( + { + id: "local-llm", + name: "local-llm", + api: "openai-completions", + provider: "custom", + baseUrl: "http://127.0.0.1:8080/v1", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + headers: { "X-Test": "1" }, + } as Model<"openai-completions">, + { + apiKey: CUSTOM_LOCAL_AUTH_MARKER, + source: "models.providers.custom (synthetic local key)", + mode: "api-key", + }, + ); + + streamSimpleOpenAICompletions( + model, + { + messages: [ + { + role: "user", + content: "hello", + timestamp: Date.now(), + }, + ], + }, + { + apiKey: CUSTOM_LOCAL_AUTH_MARKER, + }, + ); + + await requestSeen; + + expect(capturedAuthorization).toBeNull(); + expect(capturedXTest).toBe("1"); + }); +}); diff --git a/src/agents/model-auth.ts b/src/agents/model-auth.ts index ffc7c1e2e9d..fb3abd1571e 100644 --- a/src/agents/model-auth.ts +++ b/src/agents/model-auth.ts @@ -3,6 +3,7 @@ import { type Api, getEnvApiKey, type Model } from "@mariozechner/pi-ai"; import { formatCliCommand } from "../cli/command-format.js"; import type { OpenClawConfig } from "../config/config.js"; import type { ModelProviderAuthMode, ModelProviderConfig } from "../config/types.js"; +import { coerceSecretRef } from "../config/types.secrets.js"; import { getShellEnvAppliedKeys } from "../infra/shell-env.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { @@ -19,6 +20,7 @@ import { } from "./auth-profiles.js"; import { PROVIDER_ENV_API_KEY_CANDIDATES } from "./model-auth-env-vars.js"; import { + CUSTOM_LOCAL_AUTH_MARKER, isKnownEnvApiKeyMarker, 
isNonSecretApiKeyMarker, OLLAMA_LOCAL_AUTH_MARKER, @@ -119,15 +121,44 @@ function resolveProviderAuthOverride( return undefined; } +function isLocalBaseUrl(baseUrl: string): boolean { + try { + const host = new URL(baseUrl).hostname.toLowerCase(); + return ( + host === "localhost" || + host === "127.0.0.1" || + host === "0.0.0.0" || + host === "[::1]" || + host === "[::ffff:7f00:1]" || + host === "[::ffff:127.0.0.1]" + ); + } catch { + return false; + } +} + +function hasExplicitProviderApiKeyConfig(providerConfig: ModelProviderConfig): boolean { + return ( + normalizeOptionalSecretInput(providerConfig.apiKey) !== undefined || + coerceSecretRef(providerConfig.apiKey) !== null + ); +} + +function isCustomLocalProviderConfig(providerConfig: ModelProviderConfig): boolean { + return ( + typeof providerConfig.baseUrl === "string" && + providerConfig.baseUrl.trim().length > 0 && + typeof providerConfig.api === "string" && + providerConfig.api.trim().length > 0 && + Array.isArray(providerConfig.models) && + providerConfig.models.length > 0 + ); +} + function resolveSyntheticLocalProviderAuth(params: { cfg: OpenClawConfig | undefined; provider: string; }): ResolvedProviderAuth | null { - const normalizedProvider = normalizeProviderId(params.provider); - if (normalizedProvider !== "ollama") { - return null; - } - const providerConfig = resolveProviderConfig(params.cfg, params.provider); if (!providerConfig) { return null; @@ -141,11 +172,38 @@ function resolveSyntheticLocalProviderAuth(params: { return null; } - return { - apiKey: OLLAMA_LOCAL_AUTH_MARKER, - source: "models.providers.ollama (synthetic local key)", - mode: "api-key", - }; + const normalizedProvider = normalizeProviderId(params.provider); + if (normalizedProvider === "ollama") { + return { + apiKey: OLLAMA_LOCAL_AUTH_MARKER, + source: "models.providers.ollama (synthetic local key)", + mode: "api-key", + }; + } + + const authOverride = resolveProviderAuthOverride(params.cfg, params.provider); + if 
(authOverride && authOverride !== "api-key") { + return null; + } + if (!isCustomLocalProviderConfig(providerConfig)) { + return null; + } + if (hasExplicitProviderApiKeyConfig(providerConfig)) { + return null; + } + + // Custom providers pointing at a local server (e.g. llama.cpp, vLLM, LocalAI) + // typically don't require auth. Synthesize a local key so the auth resolver + // doesn't reject them when the user left the API key blank during onboarding. + if (providerConfig.baseUrl && isLocalBaseUrl(providerConfig.baseUrl)) { + return { + apiKey: CUSTOM_LOCAL_AUTH_MARKER, + source: `models.providers.${params.provider} (synthetic local key)`, + mode: "api-key", + }; + } + + return null; } function resolveEnvSourceLabel(params: { @@ -439,3 +497,25 @@ export function requireApiKey(auth: ResolvedProviderAuth, provider: string): str } throw new Error(`No API key resolved for provider "${provider}" (auth mode: ${auth.mode}).`); } + +export function applyLocalNoAuthHeaderOverride>( + model: T, + auth: ResolvedProviderAuth | null | undefined, +): T { + if (auth?.apiKey !== CUSTOM_LOCAL_AUTH_MARKER || model.api !== "openai-completions") { + return model; + } + + // OpenAI's SDK always generates Authorization from apiKey. Keep the non-secret + // placeholder so construction succeeds, then clear the header at request build + // time for local servers that intentionally do not require auth. 
+ const headers = { + ...model.headers, + Authorization: null, + } as unknown as Record; + + return { + ...model, + headers, + }; +} diff --git a/src/agents/model-fallback.probe.test.ts b/src/agents/model-fallback.probe.test.ts index d08bd0d4beb..3969416cd38 100644 --- a/src/agents/model-fallback.probe.test.ts +++ b/src/agents/model-fallback.probe.test.ts @@ -46,6 +46,20 @@ function expectFallbackUsed( expect(result.attempts[0]?.reason).toBe("rate_limit"); } +function expectPrimarySkippedForReason( + result: { result: unknown; attempts: Array<{ reason?: string }> }, + run: { + (...args: unknown[]): unknown; + mock: { calls: unknown[][] }; + }, + reason: string, +) { + expect(result.result).toBe("ok"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); + expect(result.attempts[0]?.reason).toBe(reason); +} + function expectPrimaryProbeSuccess( result: { result: unknown }, run: { @@ -183,11 +197,7 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); it("probes primary model when within 2-min margin of cooldown expiry", async () => { @@ -540,10 +550,6 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); }); diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts index 
35ac52dcf26..7fa8832e0e7 100644 --- a/src/agents/model-selection.test.ts +++ b/src/agents/model-selection.test.ts @@ -50,6 +50,60 @@ function resolveAnthropicOpusThinking(cfg: OpenClawConfig) { }); } +function createAgentFallbackConfig(params: { + primary?: string; + fallbacks?: string[]; + agentFallbacks?: string[]; +}) { + return { + agents: { + defaults: { + models: { + "openai/gpt-4o": {}, + }, + model: { + primary: params.primary ?? "openai/gpt-4o", + fallbacks: params.fallbacks ?? [], + }, + }, + ...(params.agentFallbacks + ? { + list: [ + { + id: "coder", + model: { + primary: params.primary ?? "openai/gpt-4o", + fallbacks: params.agentFallbacks, + }, + }, + ], + } + : {}), + }, + } as OpenClawConfig; +} + +function createProviderWithModelsConfig(provider: string, models: Array>) { + return { + models: { + providers: { + [provider]: { + baseUrl: `https://${provider}.example.com`, + models, + }, + }, + }, + } as Partial; +} + +function resolveConfiguredRefForTest(cfg: Partial) { + return resolveConfiguredModelRef({ + cfg: cfg as OpenClawConfig, + defaultProvider: "anthropic", + defaultModel: "claude-opus-4-6", + }); +} + describe("model-selection", () => { describe("normalizeProviderId", () => { it("should normalize provider names", () => { @@ -121,6 +175,12 @@ describe("model-selection", () => { defaultProvider: "anthropic", expected: { provider: "anthropic", model: "claude-sonnet-4-6" }, }, + { + name: "keeps dated anthropic model ids unchanged", + variants: ["anthropic/claude-sonnet-4-20250514", "claude-sonnet-4-20250514"], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-sonnet-4-20250514" }, + }, { name: "normalizes deprecated google flash preview ids", variants: ["google/gemini-3.1-flash-preview", "gemini-3.1-flash-preview"], @@ -181,6 +241,12 @@ describe("model-selection", () => { defaultProvider: "anthropic", expected: { provider: "openai", model: "gpt-5.3-codex-codex" }, }, + { + name: "normalizes gemini 3.1 
flash-lite ids for google-vertex", + variants: ["google-vertex/gemini-3.1-flash-lite", "gemini-3.1-flash-lite"], + defaultProvider: "google-vertex", + expected: { provider: "google-vertex", model: "gemini-3.1-flash-lite-preview" }, + }, ])("$name", ({ variants, defaultProvider, expected }) => { expectParsedModelVariants(variants, defaultProvider, expected); }); @@ -192,7 +258,6 @@ describe("model-selection", () => { "anthropic/claude-opus-4-6", ); }); - it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => { expect(parseModelRef(raw, "anthropic")).toBeNull(); }); @@ -320,19 +385,9 @@ describe("model-selection", () => { }); it("includes fallback models in allowed set", () => { - const cfg: OpenClawConfig = { - agents: { - defaults: { - models: { - "openai/gpt-4o": {}, - }, - model: { - primary: "openai/gpt-4o", - fallbacks: ["anthropic/claude-sonnet-4-6", "google/gemini-3-pro"], - }, - }, - }, - } as OpenClawConfig; + const cfg = createAgentFallbackConfig({ + fallbacks: ["anthropic/claude-sonnet-4-6", "google/gemini-3-pro"], + }); const result = buildAllowedModelSet({ cfg, @@ -348,19 +403,7 @@ describe("model-selection", () => { }); it("handles empty fallbacks gracefully", () => { - const cfg: OpenClawConfig = { - agents: { - defaults: { - models: { - "openai/gpt-4o": {}, - }, - model: { - primary: "openai/gpt-4o", - fallbacks: [], - }, - }, - }, - } as OpenClawConfig; + const cfg = createAgentFallbackConfig({}); const result = buildAllowedModelSet({ cfg, @@ -374,28 +417,10 @@ describe("model-selection", () => { }); it("prefers per-agent fallback overrides when agentId is provided", () => { - const cfg: OpenClawConfig = { - agents: { - defaults: { - models: { - "openai/gpt-4o": {}, - }, - model: { - primary: "openai/gpt-4o", - fallbacks: ["google/gemini-3-pro"], - }, - }, - list: [ - { - id: "coder", - model: { - primary: "openai/gpt-4o", - fallbacks: ["anthropic/claude-sonnet-4-6"], - }, - }, - ], - }, - } as OpenClawConfig; + 
const cfg = createAgentFallbackConfig({ + fallbacks: ["google/gemini-3-pro"], + agentFallbacks: ["anthropic/claude-sonnet-4-6"], + }); const result = buildAllowedModelSet({ cfg, @@ -626,79 +651,40 @@ describe("model-selection", () => { }); it("should prefer configured custom provider when default provider is not in models.providers", () => { - const cfg: Partial = { - models: { - providers: { - n1n: { - baseUrl: "https://n1n.example.com", - models: [ - { - id: "gpt-5.4", - name: "GPT 5.4", - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 4096, - }, - ], - }, - }, + const cfg = createProviderWithModelsConfig("n1n", [ + { + id: "gpt-5.4", + name: "GPT 5.4", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 4096, }, - }; - const result = resolveConfiguredModelRef({ - cfg: cfg as OpenClawConfig, - defaultProvider: "anthropic", - defaultModel: "claude-opus-4-6", - }); + ]); + const result = resolveConfiguredRefForTest(cfg); expect(result).toEqual({ provider: "n1n", model: "gpt-5.4" }); }); it("should keep default provider when it is in models.providers", () => { - const cfg: Partial = { - models: { - providers: { - anthropic: { - baseUrl: "https://api.anthropic.com", - models: [ - { - id: "claude-opus-4-6", - name: "Claude Opus 4.6", - reasoning: true, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 200000, - maxTokens: 4096, - }, - ], - }, - }, + const cfg = createProviderWithModelsConfig("anthropic", [ + { + id: "claude-opus-4-6", + name: "Claude Opus 4.6", + reasoning: true, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 200000, + maxTokens: 4096, }, - }; - const result = resolveConfiguredModelRef({ - cfg: cfg as OpenClawConfig, - defaultProvider: "anthropic", - 
defaultModel: "claude-opus-4-6", - }); + ]); + const result = resolveConfiguredRefForTest(cfg); expect(result).toEqual({ provider: "anthropic", model: "claude-opus-4-6" }); }); it("should fall back to hardcoded default when no custom providers have models", () => { - const cfg: Partial = { - models: { - providers: { - "empty-provider": { - baseUrl: "https://example.com", - models: [], - }, - }, - }, - }; - const result = resolveConfiguredModelRef({ - cfg: cfg as OpenClawConfig, - defaultProvider: "anthropic", - defaultModel: "claude-opus-4-6", - }); + const cfg = createProviderWithModelsConfig("empty-provider", []); + const result = resolveConfiguredRefForTest(cfg); expect(result).toEqual({ provider: "anthropic", model: "claude-opus-4-6" }); }); diff --git a/src/agents/model-selection.ts b/src/agents/model-selection.ts index 7bbd8ed8ba7..72cd5951292 100644 --- a/src/agents/model-selection.ts +++ b/src/agents/model-selection.ts @@ -31,13 +31,6 @@ export type ModelAliasIndex = { byKey: Map; }; -const ANTHROPIC_MODEL_ALIASES: Record = { - "opus-4.6": "claude-opus-4-6", - "opus-4.5": "claude-opus-4-5", - "sonnet-4.6": "claude-sonnet-4-6", - "sonnet-4.5": "claude-sonnet-4-5", -}; - function normalizeAliasKey(value: string): string { return value.trim().toLowerCase(); } @@ -151,7 +144,20 @@ function normalizeAnthropicModelId(model: string): string { return trimmed; } const lower = trimmed.toLowerCase(); - return ANTHROPIC_MODEL_ALIASES[lower] ?? trimmed; + // Keep alias resolution local so bundled startup paths cannot trip a TDZ on + // a module-level alias table while config parsing is still initializing. 
+ switch (lower) { + case "opus-4.6": + return "claude-opus-4-6"; + case "opus-4.5": + return "claude-opus-4-5"; + case "sonnet-4.6": + return "claude-sonnet-4-6"; + case "sonnet-4.5": + return "claude-sonnet-4-5"; + default: + return trimmed; + } } function normalizeProviderModelId(provider: string, model: string): string { @@ -165,7 +171,7 @@ function normalizeProviderModelId(provider: string, model: string): string { return `anthropic/${normalizedAnthropicModel}`; } } - if (provider === "google") { + if (provider === "google" || provider === "google-vertex") { return normalizeGoogleModelId(model); } // OpenRouter-native models (e.g. "openrouter/aurora-alpha") need the full diff --git a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts index 1d214e2cc1a..036f4d00824 100644 --- a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts +++ b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts @@ -60,13 +60,31 @@ function createMergeConfigProvider() { }; } -async function runCustomProviderMergeTest(params: { - seedProvider: { - baseUrl: string; - apiKey: string; - api: string; - models: Array<{ id: string; name: string; input: string[]; api?: string }>; +type MergeSeedProvider = { + baseUrl: string; + apiKey: string; + api: string; + models: Array<{ id: string; name: string; input: string[]; api?: string }>; +}; + +type MergeConfigApiKeyRef = { + source: "env"; + provider: "default"; + id: string; +}; + +function createAgentSeedProvider(overrides: Partial = {}): MergeSeedProvider { + return { + baseUrl: "https://agent.example/v1", + apiKey: "AGENT_KEY", // pragma: allowlist secret + api: "openai-responses", + models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], + ...overrides, }; +} + +async function runCustomProviderMergeTest(params: { + seedProvider: MergeSeedProvider; existingProviderKey?: string; 
configProviderKey?: string; }) { @@ -86,6 +104,56 @@ async function runCustomProviderMergeTest(params: { }>(); } +async function expectCustomProviderMergeResult(params: { + seedProvider?: MergeSeedProvider; + existingProviderKey?: string; + configProviderKey?: string; + expectedApiKey: string; + expectedBaseUrl: string; +}) { + await withTempHome(async () => { + const parsed = await runCustomProviderMergeTest({ + seedProvider: params.seedProvider ?? createAgentSeedProvider(), + existingProviderKey: params.existingProviderKey, + configProviderKey: params.configProviderKey, + }); + expect(parsed.providers.custom?.apiKey).toBe(params.expectedApiKey); + expect(parsed.providers.custom?.baseUrl).toBe(params.expectedBaseUrl); + }); +} + +async function expectCustomProviderApiKeyRewrite(params: { + existingApiKey: string; + configuredApiKey: string | MergeConfigApiKeyRef; + expectedApiKey: string; +}) { + await withTempHome(async () => { + await writeAgentModelsJson({ + providers: { + custom: createAgentSeedProvider({ apiKey: params.existingApiKey }), + }, + }); + + await ensureOpenClawModelsJson({ + models: { + mode: "merge", + providers: { + custom: { + ...createMergeConfigProvider(), + apiKey: params.configuredApiKey, + }, + }, + }, + }); + + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(parsed.providers.custom?.apiKey).toBe(params.expectedApiKey); + expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + }); +} + function createMoonshotConfig(overrides: { contextWindow: number; maxTokens: number; @@ -113,6 +181,92 @@ function createMoonshotConfig(overrides: { }; } +function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig { + return { + models: { + ...(mergeMode ? 
{ mode: "merge" as const } : {}), + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} + api: "openai-completions", + models: [ + { + id: "gpt-4.1", + name: "GPT-4.1", + input: ["text"], + reasoning: false, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 16384, + }, + ], + }, + }, + }, + }; +} + +async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) { + await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { + await withTempHome(async () => { + if (options?.seedMergedProvider) { + await writeAgentModelsJson({ + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret + api: "openai-completions", + models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], + }, + }, + }); + } + + await ensureOpenClawModelsJson( + createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider), + ); + const result = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + }); + }); +} + +async function expectMoonshotTokenLimits(params: { + contextWindow: number; + maxTokens: number; + expectedContextWindow: number; + expectedMaxTokens: number; +}) { + await withTempHome(async () => { + await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { + await ensureOpenClawModelsJson( + createMoonshotConfig({ + contextWindow: params.contextWindow, + maxTokens: params.maxTokens, + }), + ); + const parsed = await readGeneratedModelsJson<{ + providers: Record< + string, + { + models?: Array<{ + id: string; + contextWindow?: number; + maxTokens?: number; + }>; + } + >; + }>(); + const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); + 
expect(kimi?.contextWindow).toBe(params.expectedContextWindow); + expect(kimi?.maxTokens).toBe(params.expectedMaxTokens); + }); + }); +} + describe("models-config", () => { it("keeps anthropic api defaults when model entries omit api", async () => { await withTempHome(async () => { @@ -215,49 +369,26 @@ describe("models-config", () => { }); it("preserves non-empty agent apiKey but lets explicit config baseUrl win in merge mode", async () => { - await withTempHome(async () => { - const parsed = await runCustomProviderMergeTest({ - seedProvider: { - baseUrl: "https://agent.example/v1", - apiKey: "AGENT_KEY", // pragma: allowlist secret - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }); - expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderMergeResult({ + expectedApiKey: "AGENT_KEY", + expectedBaseUrl: "https://config.example/v1", }); }); it("lets explicit config baseUrl win in merge mode when the config provider key is normalized", async () => { - await withTempHome(async () => { - const parsed = await runCustomProviderMergeTest({ - seedProvider: { - baseUrl: "https://agent.example/v1", - apiKey: "AGENT_KEY", // pragma: allowlist secret - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - existingProviderKey: "custom", - configProviderKey: " custom ", - }); - expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderMergeResult({ + existingProviderKey: "custom", + configProviderKey: " custom ", + expectedApiKey: "AGENT_KEY", + expectedBaseUrl: "https://config.example/v1", }); }); it("replaces stale merged baseUrl when the provider api changes", async () => { - await withTempHome(async () => { - const parsed = await 
runCustomProviderMergeTest({ - seedProvider: { - baseUrl: "https://agent.example/v1", - apiKey: "AGENT_KEY", // pragma: allowlist secret - api: "openai-completions", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }); - expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderMergeResult({ + seedProvider: createAgentSeedProvider({ api: "openai-completions" }), + expectedApiKey: "AGENT_KEY", + expectedBaseUrl: "https://config.example/v1", }); }); @@ -284,34 +415,14 @@ describe("models-config", () => { }); it("replaces stale merged apiKey when provider is SecretRef-managed in current config", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - custom: { - baseUrl: "https://agent.example/v1", - apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }, - }); - await ensureOpenClawModelsJson({ - models: { - mode: "merge", - providers: { - custom: { - ...createMergeConfigProvider(), - apiKey: { source: "env", provider: "default", id: "CUSTOM_PROVIDER_API_KEY" }, // pragma: allowlist secret - }, - }, - }, - }); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.custom?.apiKey).toBe("CUSTOM_PROVIDER_API_KEY"); // pragma: allowlist secret - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderApiKeyRewrite({ + existingApiKey: "STALE_AGENT_KEY", // pragma: allowlist secret + configuredApiKey: { + source: "env", + provider: "default", + id: "CUSTOM_PROVIDER_API_KEY", // pragma: allowlist secret + }, + expectedApiKey: "CUSTOM_PROVIDER_API_KEY", // pragma: allowlist secret }); }); @@ -363,34 +474,10 @@ describe("models-config", () => { }); it("replaces stale non-env marker when provider 
transitions back to plaintext config", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - custom: { - baseUrl: "https://agent.example/v1", - apiKey: NON_ENV_SECRETREF_MARKER, - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }, - }); - - await ensureOpenClawModelsJson({ - models: { - mode: "merge", - providers: { - custom: { - ...createMergeConfigProvider(), - apiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret - }, - }, - }, - }); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.custom?.apiKey).toBe("ALLCAPS_SAMPLE"); + await expectCustomProviderApiKeyRewrite({ + existingApiKey: NON_ENV_SECRETREF_MARKER, + configuredApiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret + expectedApiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret }); }); @@ -444,131 +531,28 @@ describe("models-config", () => { }); it("does not persist resolved env var value as plaintext in models.json", async () => { - await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig - api: "openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); - }); - }); + await expectOpenAiEnvMarkerApiKey(); }); it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => { - await 
withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret - api: "openai-completions", - models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], - }, - }, - }); - const cfg: OpenClawConfig = { - models: { - mode: "merge", - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} - api: "openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret - }); - }); + await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true }); }); it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(350000); - expect(kimi?.maxTokens).toBe(16384); - }); + await expectMoonshotTokenLimits({ + contextWindow: 350000, + maxTokens: 16384, + expectedContextWindow: 350000, + 
expectedMaxTokens: 16384, }); }); it("falls back to implicit token limits when explicit values are invalid", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(256000); - expect(kimi?.maxTokens).toBe(8192); - }); + await expectMoonshotTokenLimits({ + contextWindow: 0, + maxTokens: -1, + expectedContextWindow: 256000, + expectedMaxTokens: 8192, }); }); }); diff --git a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts index 8414fb10d08..890be151c6f 100644 --- a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts +++ b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts @@ -1,91 +1,82 @@ import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import type { ModelDefinitionConfig } from "../config/types.models.js"; import { installModelsConfigTestHooks, withModelsTempHome } from "./models-config.e2e-harness.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; import { readGeneratedModelsJson } from "./models-config.test-utils.js"; +function createGoogleModelsConfig(models: ModelDefinitionConfig[]): OpenClawConfig { + return { + models: { + providers: { + google: { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + apiKey: "GEMINI_KEY", // pragma: allowlist secret + api: "google-generative-ai", + models, + }, + }, + }, + }; +} 
+ +async function expectGeneratedGoogleModelIds(ids: string[]) { + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids); +} + describe("models-config", () => { installModelsConfigTestHooks(); it("normalizes gemini 3 ids to preview for google providers", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3-pro", - name: "Gemini 3 Pro", - api: "google-generative-ai", - reasoning: true, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - { - id: "gemini-3-flash", - name: "Gemini 3 Flash", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3-pro", + name: "Gemini 3 Pro", + api: "google-generative-ai", + reasoning: true, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + { + id: "gemini-3-flash", + name: "Gemini 3 Flash", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, + }, + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]); + await 
expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]); }); }); it("normalizes the deprecated google flash preview id to the working preview id", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3.1-flash-preview", - name: "Gemini 3.1 Flash Preview", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3.1-flash-preview", + name: "Gemini 3.1 Flash Preview", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]); }); }); }); diff --git a/src/agents/models-config.providers.discovery-auth.test.ts b/src/agents/models-config.providers.discovery-auth.test.ts index e6aebc0d7cb..6fc492c1565 100644 --- a/src/agents/models-config.providers.discovery-auth.test.ts +++ b/src/agents/models-config.providers.discovery-auth.test.ts @@ -6,6 +6,11 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js"; +type AuthProfilesFile = { + version: 1; + profiles: Record>; +}; + describe("provider discovery 
auth marker guardrails", () => { let originalVitest: string | undefined; let originalNodeEnv: string | undefined; @@ -35,33 +40,35 @@ describe("provider discovery auth marker guardrails", () => { delete process.env.NODE_ENV; } - it("does not send marker value as vLLM bearer token during discovery", async () => { - enableDiscovery(); - const fetchMock = vi.fn().mockResolvedValue({ - ok: true, - json: async () => ({ data: [] }), - }); + function installFetchMock(response?: unknown) { + const fetchMock = + response === undefined + ? vi.fn() + : vi.fn().mockResolvedValue({ ok: true, json: async () => response }); globalThis.fetch = fetchMock as unknown as typeof fetch; + return fetchMock; + } + async function createAgentDirWithAuthProfiles(profiles: AuthProfilesFile["profiles"]) { const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); await writeFile( join(agentDir, "auth-profiles.json"), - JSON.stringify( - { - version: 1, - profiles: { - "vllm:default": { - type: "api_key", - provider: "vllm", - keyRef: { source: "file", provider: "vault", id: "/vllm/apiKey" }, - }, - }, - }, - null, - 2, - ), + JSON.stringify({ version: 1, profiles } satisfies AuthProfilesFile, null, 2), "utf8", ); + return agentDir; + } + + it("does not send marker value as vLLM bearer token during discovery", async () => { + enableDiscovery(); + const fetchMock = installFetchMock({ data: [] }); + const agentDir = await createAgentDirWithAuthProfiles({ + "vllm:default": { + type: "api_key", + provider: "vllm", + keyRef: { source: "file", provider: "vault", id: "/vllm/apiKey" }, + }, + }); const providers = await resolveImplicitProvidersForTest({ agentDir, env: {} }); expect(providers?.vllm?.apiKey).toBe(NON_ENV_SECRETREF_MARKER); @@ -73,28 +80,14 @@ describe("provider discovery auth marker guardrails", () => { it("does not call Hugging Face discovery with marker-backed credentials", async () => { enableDiscovery(); - const fetchMock = vi.fn(); - globalThis.fetch = fetchMock as unknown as 
typeof fetch; - - const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); - await writeFile( - join(agentDir, "auth-profiles.json"), - JSON.stringify( - { - version: 1, - profiles: { - "huggingface:default": { - type: "api_key", - provider: "huggingface", - keyRef: { source: "exec", provider: "vault", id: "providers/hf/token" }, - }, - }, - }, - null, - 2, - ), - "utf8", - ); + const fetchMock = installFetchMock(); + const agentDir = await createAgentDirWithAuthProfiles({ + "huggingface:default": { + type: "api_key", + provider: "huggingface", + keyRef: { source: "exec", provider: "vault", id: "providers/hf/token" }, + }, + }); const providers = await resolveImplicitProvidersForTest({ agentDir, env: {} }); expect(providers?.huggingface?.apiKey).toBe(NON_ENV_SECRETREF_MARKER); @@ -106,31 +99,14 @@ describe("provider discovery auth marker guardrails", () => { it("keeps all-caps plaintext API keys for authenticated discovery", async () => { enableDiscovery(); - const fetchMock = vi.fn().mockResolvedValue({ - ok: true, - json: async () => ({ data: [{ id: "vllm/test-model" }] }), + const fetchMock = installFetchMock({ data: [{ id: "vllm/test-model" }] }); + const agentDir = await createAgentDirWithAuthProfiles({ + "vllm:default": { + type: "api_key", + provider: "vllm", + key: "ALLCAPS_SAMPLE", + }, }); - globalThis.fetch = fetchMock as unknown as typeof fetch; - - const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); - await writeFile( - join(agentDir, "auth-profiles.json"), - JSON.stringify( - { - version: 1, - profiles: { - "vllm:default": { - type: "api_key", - provider: "vllm", - key: "ALLCAPS_SAMPLE", - }, - }, - }, - null, - 2, - ), - "utf8", - ); await resolveImplicitProvidersForTest({ agentDir, env: {} }); const vllmCall = fetchMock.mock.calls.find(([url]) => String(url).includes(":8000")); diff --git a/src/agents/models-config.providers.google-antigravity.test.ts b/src/agents/models-config.providers.google-antigravity.test.ts index 
3886b237e27..ea20608b866 100644 --- a/src/agents/models-config.providers.google-antigravity.test.ts +++ b/src/agents/models-config.providers.google-antigravity.test.ts @@ -97,3 +97,33 @@ describe("google-antigravity provider normalization", () => { expect(normalized).toBe(providers); }); }); + +describe("google-vertex provider normalization", () => { + it("normalizes gemini flash-lite IDs for google-vertex providers", () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const providers = { + "google-vertex": buildProvider(["gemini-3.1-flash-lite", "gemini-3-flash-preview"]), + openai: buildProvider(["gpt-5"]), + }; + + const normalized = normalizeProviders({ providers, agentDir }); + + expect(normalized).not.toBe(providers); + expect(normalized?.["google-vertex"]?.models.map((model) => model.id)).toEqual([ + "gemini-3.1-flash-lite-preview", + "gemini-3-flash-preview", + ]); + expect(normalized?.openai).toBe(providers.openai); + }); + + it("returns original providers object when no google-vertex IDs need normalization", () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const providers = { + "google-vertex": buildProvider(["gemini-3.1-flash-lite-preview", "gemini-3-flash-preview"]), + }; + + const normalized = normalizeProviders({ providers, agentDir }); + + expect(normalized).toBe(providers); + }); +}); diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts index 4c9febf2ef1..b4ef8f4b0b1 100644 --- a/src/agents/models-config.providers.ts +++ b/src/agents/models-config.providers.ts @@ -545,7 +545,7 @@ export function normalizeProviders(params: { } } - if (normalizedKey === "google") { + if (normalizedKey === "google" || normalizedKey === "google-vertex") { const googleNormalized = normalizeGoogleProvider(normalizedProvider); if (googleNormalized !== normalizedProvider) { mutated = true; diff --git a/src/agents/models-config.runtime-source-snapshot.test.ts 
b/src/agents/models-config.runtime-source-snapshot.test.ts index cc033fb56a6..a80ac010e86 100644 --- a/src/agents/models-config.runtime-source-snapshot.test.ts +++ b/src/agents/models-config.runtime-source-snapshot.test.ts @@ -16,47 +16,137 @@ import { readGeneratedModelsJson } from "./models-config.test-utils.js"; installModelsConfigTestHooks(); +function createOpenAiApiKeySourceConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; +} + +function createOpenAiApiKeyRuntimeConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; +} + +function createOpenAiHeaderSourceConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: { + source: "env", + provider: "default", + id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret + }, + "X-Tenant-Token": { + source: "file", + provider: "vault", + id: "/providers/openai/tenantToken", + }, + }, + models: [], + }, + }, + }, + }; +} + +function createOpenAiHeaderRuntimeConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + }; +} + +function withGatewayTokenMode(config: OpenClawConfig): OpenClawConfig { + return { + ...config, + gateway: { + auth: { + mode: "token", + }, + }, + }; +} + +async function withGeneratedModelsFromRuntimeSource( + params: 
{ + sourceConfig: OpenClawConfig; + runtimeConfig: OpenClawConfig; + candidateConfig?: OpenClawConfig; + }, + runAssertions: () => Promise, +) { + await withTempHome(async () => { + try { + setRuntimeConfigSnapshot(params.runtimeConfig, params.sourceConfig); + await ensureOpenClawModelsJson(params.candidateConfig ?? loadConfig()); + await runAssertions(); + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); +} + +async function expectGeneratedProviderApiKey(providerId: string, expected: string) { + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(parsed.providers[providerId]?.apiKey).toBe(expected); +} + +async function expectGeneratedOpenAiHeaderMarkers() { + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.openai?.headers?.Authorization).toBe( + "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret + ); + expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); +} + describe("models-config runtime source snapshot", () => { it("uses runtime source snapshot markers when passed the active runtime config", async () => { - await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - - try { - setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); - await ensureOpenClawModelsJson(loadConfig()); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - 
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret - } finally { - clearRuntimeConfigSnapshot(); - clearConfigCache(); - } - }); + await withGeneratedModelsFromRuntimeSource( + { + sourceConfig: createOpenAiApiKeySourceConfig(), + runtimeConfig: createOpenAiApiKeyRuntimeConfig(), + }, + async () => expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"), // pragma: allowlist secret + ); }); it("uses non-env marker from runtime source snapshot for file refs", async () => { @@ -103,30 +193,8 @@ describe("models-config runtime source snapshot", () => { it("projects cloned runtime configs onto source snapshot when preserving provider auth", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; + const sourceConfig = createOpenAiApiKeySourceConfig(); + const runtimeConfig = createOpenAiApiKeyRuntimeConfig(); const clonedRuntimeConfig: OpenClawConfig = { ...runtimeConfig, agents: { @@ -139,11 +207,7 @@ describe("models-config runtime source snapshot", () => { try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); await ensureOpenClawModelsJson(clonedRuntimeConfig); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); @@ 
-152,121 +216,27 @@ describe("models-config runtime source snapshot", () => { }); it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => { - await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: { - source: "env", - provider: "default", - id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret - }, - "X-Tenant-Token": { - source: "file", - provider: "vault", - id: "/providers/openai/tenantToken", - }, - }, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": "runtime-tenant-token", - }, - models: [], - }, - }, - }, - }; - - try { - setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); - await ensureOpenClawModelsJson(loadConfig()); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - expect(parsed.providers.openai?.headers?.Authorization).toBe( - "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret - ); - expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); - } finally { - clearRuntimeConfigSnapshot(); - clearConfigCache(); - } - }); + await withGeneratedModelsFromRuntimeSource( + { + sourceConfig: createOpenAiHeaderSourceConfig(), + runtimeConfig: createOpenAiHeaderRuntimeConfig(), + }, + expectGeneratedOpenAiHeaderMarkers, + ); }); it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: 
"OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; + const sourceConfig = withGatewayTokenMode(createOpenAiApiKeySourceConfig()); + const runtimeConfig = withGatewayTokenMode(createOpenAiApiKeyRuntimeConfig()); const incompatibleCandidate: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, + ...createOpenAiApiKeyRuntimeConfig(), }; try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); await ensureOpenClawModelsJson(incompatibleCandidate); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); @@ -276,81 +246,16 @@ describe("models-config runtime source snapshot", () => { it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: { - source: "env", - provider: "default", - id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret - }, - "X-Tenant-Token": { - source: "file", - provider: "vault", - id: "/providers/openai/tenantToken", 
- }, - }, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": "runtime-tenant-token", - }, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; + const sourceConfig = withGatewayTokenMode(createOpenAiHeaderSourceConfig()); + const runtimeConfig = withGatewayTokenMode(createOpenAiHeaderRuntimeConfig()); const incompatibleCandidate: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": "runtime-tenant-token", - }, - models: [], - }, - }, - }, + ...createOpenAiHeaderRuntimeConfig(), }; try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); await ensureOpenClawModelsJson(incompatibleCandidate); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - expect(parsed.providers.openai?.headers?.Authorization).toBe( - "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret - ); - expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); + await expectGeneratedOpenAiHeaderMarkers(); } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); diff --git a/src/agents/ollama-models.test.ts b/src/agents/ollama-models.test.ts index 7877d40bdf9..d7b7d066c6f 100644 --- a/src/agents/ollama-models.test.ts +++ b/src/agents/ollama-models.test.ts @@ -1,31 +1,11 @@ import { afterEach, describe, expect, it, vi } from "vitest"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; import { enrichOllamaModelsWithContext, resolveOllamaApiBase, type OllamaTagModel, } from "./ollama-models.js"; -function jsonResponse(body: 
unknown, status = 200): Response { - return new Response(JSON.stringify(body), { - status, - headers: { "Content-Type": "application/json" }, - }); -} - -function requestUrl(input: string | URL | Request): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - return input.url; -} - -function requestBody(body: BodyInit | null | undefined): string { - return typeof body === "string" ? body : "{}"; -} - describe("ollama-models", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -43,7 +23,7 @@ describe("ollama-models", () => { if (!url.endsWith("/api/show")) { throw new Error(`Unexpected fetch: ${url}`); } - const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; if (body.name === "llama3:8b") { return jsonResponse({ model_info: { "llama.context_length": 65536 } }); } diff --git a/src/agents/ollama-stream.test.ts b/src/agents/ollama-stream.test.ts index 241c7a0f858..ded8064ea19 100644 --- a/src/agents/ollama-stream.test.ts +++ b/src/agents/ollama-stream.test.ts @@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader; } +async function expectDoneEventContent(lines: string[], expectedContent: unknown) { + await withMockNdjsonFetch(lines, async () => { + const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); + const events = await collectStreamEvents(stream); + + const doneEvent = events.at(-1); + if (!doneEvent || doneEvent.type !== "done") { + throw new Error("Expected done event"); + } + + expect(doneEvent.message.content).toEqual(expectedContent); + }); +} + describe("parseNdjsonStream", () => { it("parses text-only streaming chunks", async () => { const reader = mockNdjsonReader([ @@ -486,88 +500,48 @@ describe("createOllamaStreamFn", () => { }); it("drops thinking chunks when no final content is emitted", async () => { - await 
withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([]); - }, + [], ); }); it("prefers streamed content over earlier thinking chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); it("drops reasoning chunks when no final content is emitted", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ 
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([]); - }, + [], ); }); it("prefers streamed content over earlier reasoning chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); }); diff --git a/src/agents/openclaw-tools.session-status.test.ts b/src/agents/openclaw-tools.session-status.test.ts index 8b2d9fc467f..0bc079d4ced 100644 --- a/src/agents/openclaw-tools.session-status.test.ts +++ b/src/agents/openclaw-tools.session-status.test.ts @@ -115,6 
+115,50 @@ function resetSessionStore(store: Record) { mockConfig = createMockConfig(); } +function installSandboxedSessionStatusConfig() { + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; +} + +function mockSpawnedSessionList( + resolveSessions: (spawnedBy: string | undefined) => Array>, +) { + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { sessions: resolveSessions(request.params?.spawnedBy as string | undefined) }; + } + return {}; + }); +} + +function expectSpawnedSessionLookupCalls(spawnedBy: string) { + const expectedCall = { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy, + }, + }; + expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, expectedCall); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, expectedCall); +} + function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) { const tool = createOpenClawTools({ agentSessionKey, @@ -242,27 +286,8 @@ describe("session_status tool", () => { updatedAt: 10, }, }); - mockConfig = { - session: { mainKey: "main", scope: "per-sender" }, - tools: { - sessions: { visibility: "all" }, - agentToAgent: { enabled: true, allow: ["*"] }, - }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - sandbox: { sessionToolsVisibility: "spawned" }, - }, - }, - }; - callGatewayMock.mockImplementation(async (opts: unknown) => { - const request = opts as { method?: string; params?: Record }; - if (request.method === "sessions.list") { - 
return { sessions: [] }; - } - return {}; - }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList(() => []); const tool = getSessionStatusTool("agent:main:subagent:child", { sandboxed: true, @@ -284,25 +309,7 @@ describe("session_status tool", () => { expect(loadSessionStoreMock).not.toHaveBeenCalled(); expect(updateSessionStoreMock).not.toHaveBeenCalled(); - expect(callGatewayMock).toHaveBeenCalledTimes(2); - expect(callGatewayMock).toHaveBeenNthCalledWith(1, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "agent:main:subagent:child", - }, - }); - expect(callGatewayMock).toHaveBeenNthCalledWith(2, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "agent:main:subagent:child", - }, - }); + expectSpawnedSessionLookupCalls("agent:main:subagent:child"); }); it("keeps legacy main requester keys for sandboxed session tree checks", async () => { @@ -316,30 +323,10 @@ describe("session_status tool", () => { updatedAt: 20, }, }); - mockConfig = { - session: { mainKey: "main", scope: "per-sender" }, - tools: { - sessions: { visibility: "all" }, - agentToAgent: { enabled: true, allow: ["*"] }, - }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - sandbox: { sessionToolsVisibility: "spawned" }, - }, - }, - }; - callGatewayMock.mockImplementation(async (opts: unknown) => { - const request = opts as { method?: string; params?: Record }; - if (request.method === "sessions.list") { - return { - sessions: - request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [], - }; - } - return {}; - }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList((spawnedBy) => + spawnedBy === "main" ? 
[{ key: "agent:main:subagent:child" }] : [], + ); const tool = getSessionStatusTool("main", { sandboxed: true, @@ -357,25 +344,7 @@ describe("session_status tool", () => { expect(childDetails.ok).toBe(true); expect(childDetails.sessionKey).toBe("agent:main:subagent:child"); - expect(callGatewayMock).toHaveBeenCalledTimes(2); - expect(callGatewayMock).toHaveBeenNthCalledWith(1, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "main", - }, - }); - expect(callGatewayMock).toHaveBeenNthCalledWith(2, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "main", - }, - }); + expectSpawnedSessionLookupCalls("main"); }); it("scopes bare session keys to the requester agent", async () => { diff --git a/src/agents/openclaw-tools.subagents.scope.test.ts b/src/agents/openclaw-tools.subagents.scope.test.ts index c985f1712e1..fc233015064 100644 --- a/src/agents/openclaw-tools.subagents.scope.test.ts +++ b/src/agents/openclaw-tools.subagents.scope.test.ts @@ -17,6 +17,63 @@ function writeStore(storePath: string, store: Record) { fs.writeFileSync(storePath, JSON.stringify(store, null, 2), "utf-8"); } +function seedLeafOwnedChildSession(storePath: string, leafKey = "agent:main:subagent:leaf") { + const childKey = `${leafKey}:subagent:child`; + writeStore(storePath, { + [leafKey]: { + sessionId: "leaf-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + subagentRole: "leaf", + subagentControlScope: "none", + }, + [childKey]: { + sessionId: "child-session", + updatedAt: Date.now(), + spawnedBy: leafKey, + subagentRole: "leaf", + subagentControlScope: "none", + }, + }); + + addSubagentRunForTests({ + runId: "run-child", + childSessionKey: childKey, + controllerSessionKey: leafKey, + requesterSessionKey: leafKey, + requesterDisplayKey: leafKey, + task: "impossible child", + cleanup: "keep", + createdAt: Date.now() - 30_000, + startedAt: 
Date.now() - 30_000, + }); + + return { + childKey, + tool: createSubagentsTool({ agentSessionKey: leafKey }), + }; +} + +async function expectLeafSubagentControlForbidden(params: { + storePath: string; + action: "kill" | "steer"; + callId: string; + message?: string; +}) { + const { childKey, tool } = seedLeafOwnedChildSession(params.storePath); + const result = await tool.execute(params.callId, { + action: params.action, + target: childKey, + ...(params.message ? { message: params.message } : {}), + }); + + expect(result.details).toMatchObject({ + status: "forbidden", + error: "Leaf subagents cannot control other sessions.", + }); + expect(callGatewayMock).not.toHaveBeenCalled(); +} + describe("openclaw-tools: subagents scope isolation", () => { let storePath = ""; @@ -151,95 +208,19 @@ describe("openclaw-tools: subagents scope isolation", () => { }); it("leaf subagents cannot kill even explicitly-owned child sessions", async () => { - const leafKey = "agent:main:subagent:leaf"; - const childKey = `${leafKey}:subagent:child`; - - writeStore(storePath, { - [leafKey]: { - sessionId: "leaf-session", - updatedAt: Date.now(), - spawnedBy: "agent:main:main", - subagentRole: "leaf", - subagentControlScope: "none", - }, - [childKey]: { - sessionId: "child-session", - updatedAt: Date.now(), - spawnedBy: leafKey, - subagentRole: "leaf", - subagentControlScope: "none", - }, - }); - - addSubagentRunForTests({ - runId: "run-child", - childSessionKey: childKey, - controllerSessionKey: leafKey, - requesterSessionKey: leafKey, - requesterDisplayKey: leafKey, - task: "impossible child", - cleanup: "keep", - createdAt: Date.now() - 30_000, - startedAt: Date.now() - 30_000, - }); - - const tool = createSubagentsTool({ agentSessionKey: leafKey }); - const result = await tool.execute("call-leaf-kill", { + await expectLeafSubagentControlForbidden({ + storePath, action: "kill", - target: childKey, + callId: "call-leaf-kill", }); - - expect(result.details).toMatchObject({ - status: 
"forbidden", - error: "Leaf subagents cannot control other sessions.", - }); - expect(callGatewayMock).not.toHaveBeenCalled(); }); it("leaf subagents cannot steer even explicitly-owned child sessions", async () => { - const leafKey = "agent:main:subagent:leaf"; - const childKey = `${leafKey}:subagent:child`; - - writeStore(storePath, { - [leafKey]: { - sessionId: "leaf-session", - updatedAt: Date.now(), - spawnedBy: "agent:main:main", - subagentRole: "leaf", - subagentControlScope: "none", - }, - [childKey]: { - sessionId: "child-session", - updatedAt: Date.now(), - spawnedBy: leafKey, - subagentRole: "leaf", - subagentControlScope: "none", - }, - }); - - addSubagentRunForTests({ - runId: "run-child", - childSessionKey: childKey, - controllerSessionKey: leafKey, - requesterSessionKey: leafKey, - requesterDisplayKey: leafKey, - task: "impossible child", - cleanup: "keep", - createdAt: Date.now() - 30_000, - startedAt: Date.now() - 30_000, - }); - - const tool = createSubagentsTool({ agentSessionKey: leafKey }); - const result = await tool.execute("call-leaf-steer", { + await expectLeafSubagentControlForbidden({ + storePath, action: "steer", - target: childKey, + callId: "call-leaf-steer", message: "continue", }); - - expect(result.details).toMatchObject({ - status: "forbidden", - error: "Leaf subagents cannot control other sessions.", - }); - expect(callGatewayMock).not.toHaveBeenCalled(); }); }); diff --git a/src/agents/openclaw-tools.ts b/src/agents/openclaw-tools.ts index 58b3570eb89..25b5cae0f59 100644 --- a/src/agents/openclaw-tools.ts +++ b/src/agents/openclaw-tools.ts @@ -87,15 +87,16 @@ export function createOpenClawTools( options?.spawnWorkspaceDir ?? options?.workspaceDir, ); const runtimeWebTools = getActiveRuntimeWebToolsMetadata(); + const sandbox = + options?.sandboxRoot && options?.sandboxFsBridge + ? { root: options.sandboxRoot, bridge: options.sandboxFsBridge } + : undefined; const imageTool = options?.agentDir?.trim() ? 
createImageTool({ config: options?.config, agentDir: options.agentDir, workspaceDir, - sandbox: - options?.sandboxRoot && options?.sandboxFsBridge - ? { root: options.sandboxRoot, bridge: options.sandboxFsBridge } - : undefined, + sandbox, fsPolicy: options?.fsPolicy, modelHasVision: options?.modelHasVision, }) @@ -105,10 +106,7 @@ export function createOpenClawTools( config: options?.config, agentDir: options.agentDir, workspaceDir, - sandbox: - options?.sandboxRoot && options?.sandboxFsBridge - ? { root: options.sandboxRoot, bridge: options.sandboxFsBridge } - : undefined, + sandbox, fsPolicy: options?.fsPolicy, }) : null; @@ -174,15 +172,18 @@ export function createOpenClawTools( createSessionsListTool({ agentSessionKey: options?.agentSessionKey, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsHistoryTool({ agentSessionKey: options?.agentSessionKey, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsSendTool({ agentSessionKey: options?.agentSessionKey, agentChannel: options?.agentChannel, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsYieldTool({ sessionId: options?.sessionId, diff --git a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts index e8578c7feb2..8c0a0b1994d 100644 --- a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts +++ b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts @@ -55,6 +55,14 @@ function expectMessageMatches( } } +function expectTimeoutFailoverSamples(samples: readonly string[]) { + for (const sample of samples) { + expect(isTimeoutErrorMessage(sample)).toBe(true); + expect(classifyFailoverReason(sample)).toBe("timeout"); + expect(isFailoverErrorMessage(sample)).toBe(true); + } +} + describe("isAuthPermanentErrorMessage", () => { it.each([ { @@ -567,36 +575,26 @@ describe("isFailoverErrorMessage", () => { }); it("matches abort stop-reason timeout variants", () 
=> { - const samples = [ + expectTimeoutFailoverSamples([ "Unhandled stop reason: abort", "Unhandled stop reason: error", "stop reason: abort", "stop reason: error", "reason: abort", "reason: error", - ]; - for (const sample of samples) { - expect(isTimeoutErrorMessage(sample)).toBe(true); - expect(classifyFailoverReason(sample)).toBe("timeout"); - expect(isFailoverErrorMessage(sample)).toBe(true); - } + ]); }); it("matches Gemini MALFORMED_RESPONSE stop reason as timeout (#42149)", () => { - const samples = [ + expectTimeoutFailoverSamples([ "Unhandled stop reason: MALFORMED_RESPONSE", "Unhandled stop reason: malformed_response", "stop reason: MALFORMED_RESPONSE", - ]; - for (const sample of samples) { - expect(isTimeoutErrorMessage(sample)).toBe(true); - expect(classifyFailoverReason(sample)).toBe("timeout"); - expect(isFailoverErrorMessage(sample)).toBe(true); - } + ]); }); it("matches network errno codes in serialized error messages", () => { - const samples = [ + expectTimeoutFailoverSamples([ "Error: connect ETIMEDOUT 10.0.0.1:443", "Error: connect ESOCKETTIMEDOUT 10.0.0.1:443", "Error: connect EHOSTUNREACH 10.0.0.1:443", @@ -604,25 +602,15 @@ describe("isFailoverErrorMessage", () => { "Error: write EPIPE", "Error: read ENETRESET", "Error: connect EHOSTDOWN 192.168.1.1:443", - ]; - for (const sample of samples) { - expect(isTimeoutErrorMessage(sample)).toBe(true); - expect(classifyFailoverReason(sample)).toBe("timeout"); - expect(isFailoverErrorMessage(sample)).toBe(true); - } + ]); }); it("matches z.ai network_error stop reason as timeout", () => { - const samples = [ + expectTimeoutFailoverSamples([ "Unhandled stop reason: network_error", "stop reason: network_error", "reason: network_error", - ]; - for (const sample of samples) { - expect(isTimeoutErrorMessage(sample)).toBe(true); - expect(classifyFailoverReason(sample)).toBe("timeout"); - expect(isFailoverErrorMessage(sample)).toBe(true); - } + ]); }); it("does not classify MALFORMED_FUNCTION_CALL as 
timeout", () => { diff --git a/src/agents/pi-embedded-runner.e2e.test.ts b/src/agents/pi-embedded-runner.e2e.test.ts index 31056f6ffe1..5c7722b5d16 100644 --- a/src/agents/pi-embedded-runner.e2e.test.ts +++ b/src/agents/pi-embedded-runner.e2e.test.ts @@ -1,9 +1,14 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => { let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: string; let workspaceDir: string; let sessionCounter = 0; @@ -98,50 +103,21 @@ beforeAll(async () => { vi.useRealTimers(); ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); ({ SessionManager } = await import("@mariozechner/pi-coding-agent")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true 
}); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeOpenAiConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: "sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - const nextSessionFile = () => { sessionCounter += 1; return path.join(workspaceDir, `session-${sessionCounter}.jsonl`); }; const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`; const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`; -const immediateEnqueue = async (task: () => Promise) => task(); const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => { const sessionFile = nextSessionFile(); @@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string timestamp: Date.now(), }); - const cfg = makeOpenAiConfig(["mock-1"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]); return await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => { }; const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => { - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi describe("runEmbeddedPiAgent", () => { it("handles prompt error paths without dropping user state", async () => { const sessionFile = nextSessionFile(); - const cfg = 
makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); const sessionKey = nextSessionKey(); const result = await runEmbeddedPiAgent({ sessionId: "session:test", diff --git a/src/agents/pi-embedded-runner.sanitize-session-history.test.ts b/src/agents/pi-embedded-runner.sanitize-session-history.test.ts index 2a71e0c95a3..2003523e03f 100644 --- a/src/agents/pi-embedded-runner.sanitize-session-history.test.ts +++ b/src/agents/pi-embedded-runner.sanitize-session-history.test.ts @@ -177,6 +177,14 @@ describe("sanitizeSessionHistory", () => { AgentMessage & { usage?: unknown; content?: unknown } >; + const getSingleAssistantUsage = async (messages: AgentMessage[]) => { + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); + const result = await sanitizeOpenAIHistory(messages); + return result.find((message) => message.role === "assistant") as + | (AgentMessage & { usage?: unknown }) + | undefined; + }; + beforeEach(async () => { testTimestamp = 1; const harness = await loadSanitizeSessionHistoryWithCleanMocks(); @@ -358,43 +366,33 @@ describe("sanitizeSessionHistory", () => { }); it("adds a zeroed assistant usage snapshot when usage is missing", async () => { - vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer without usage" }], - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer without usage" }], + }, + ]), + ); expect(assistant?.usage).toEqual(makeZeroUsageSnapshot()); }); it("normalizes mixed partial assistant usage fields 
to numeric totals", async () => { - vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer with partial usage" }], - usage: { - output: 3, - cache_read_input_tokens: 9, + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer with partial usage" }], + usage: { + output: 3, + cache_read_input_tokens: 9, + }, }, - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + ]), + ); expect(assistant?.usage).toEqual({ input: 0, @@ -406,31 +404,26 @@ describe("sanitizeSessionHistory", () => { }); it("preserves existing usage cost while normalizing token fields", async () => { - vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer with partial usage and cost" }], - usage: { - output: 3, - cache_read_input_tokens: 9, - cost: { - input: 1.25, - output: 2.5, - cacheRead: 0.25, - cacheWrite: 0, - total: 4, + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer with partial usage and cost" }], + usage: { + output: 3, + cache_read_input_tokens: 9, + cost: { + input: 1.25, + output: 2.5, + cacheRead: 0.25, + cacheWrite: 0, + total: 4, + }, }, }, - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + ]), + ); 
expect(assistant?.usage).toEqual({ ...makeZeroUsageSnapshot(), @@ -450,27 +443,22 @@ describe("sanitizeSessionHistory", () => { }); it("preserves unknown cost when token fields already match", async () => { - vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer with complete numeric usage but no cost" }], - usage: { - input: 1, - output: 2, - cacheRead: 3, - cacheWrite: 4, - totalTokens: 10, + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer with complete numeric usage but no cost" }], + usage: { + input: 1, + output: 2, + cacheRead: 3, + cacheWrite: 4, + totalTokens: 10, + }, }, - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + ]), + ); expect(assistant?.usage).toEqual({ input: 1, diff --git a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts index 18f439cd01f..d91cf63539b 100644 --- a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts +++ b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts @@ -8,12 +8,17 @@ * Follows the same pattern as pi-embedded-runner.e2e.test.ts. 
*/ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => { }); let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: string; let workspaceDir: string; @@ -136,45 +141,15 @@ beforeAll(async () => { responsePlan = []; observedContexts = []; ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true }); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: "sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - 
id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - -const immediateEnqueue = async (task: () => Promise) => task(); - const readSessionMessages = async (sessionFile: string) => { const raw = await fs.readFile(sessionFile, "utf-8"); return raw @@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => { const sessionId = "yield-e2e-parent"; const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl"); - const cfg = makeConfig(["mock-yield"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]); const result = await runEmbeddedPiAgent({ sessionId, @@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => { const sessionId = "yield-e2e-abort"; const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl"); - const cfg = makeConfig(["mock-yield-abort"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]); const result = await runEmbeddedPiAgent({ sessionId, diff --git a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts index efed941762d..e04de8a5d6b 100644 --- a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts @@ -7,6 +7,7 @@ import { usesOpenAiStringModeAnthropicToolChoice, } from "../provider-capabilities.js"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07"; const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const; @@ -74,6 +75,17 @@ function resolveAnthropicFastServiceTier(enabled: boolean): AnthropicServiceTier return enabled ? 
"auto" : "standard_only"; } +function hasOpenAiAnthropicToolPayloadCompatFlag(model: { compat?: unknown }): boolean { + if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { + return false; + } + + return ( + (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) + .requiresOpenAiAnthropicToolPayload === true + ); +} + function requiresAnthropicToolPayloadCompatibilityForModel(model: { api?: unknown; provider?: unknown; @@ -89,15 +101,7 @@ function requiresAnthropicToolPayloadCompatibilityForModel(model: { ) { return true; } - - if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { - return false; - } - - return ( - (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) - .requiresOpenAiAnthropicToolPayload === true - ); + return hasOpenAiAnthropicToolPayloadCompatFlag(model); } function usesOpenAiFunctionAnthropicToolSchemaForModel(model: { @@ -107,13 +111,7 @@ function usesOpenAiFunctionAnthropicToolSchemaForModel(model: { if (typeof model.provider === "string" && usesOpenAiFunctionAnthropicToolSchema(model.provider)) { return true; } - if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { - return false; - } - return ( - (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) - .requiresOpenAiAnthropicToolPayload === true - ); + return hasOpenAiAnthropicToolPayloadCompatFlag(model); } function usesOpenAiStringModeAnthropicToolChoiceForModel(model: { @@ -126,13 +124,7 @@ function usesOpenAiStringModeAnthropicToolChoiceForModel(model: { ) { return true; } - if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { - return false; - } - return ( - (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) - .requiresOpenAiAnthropicToolPayload === true - ); + return hasOpenAiAnthropicToolPayloadCompatFlag(model); } function normalizeOpenAiFunctionAnthropicToolDefinition( @@ -341,18 +333,10 @@ 
export function createAnthropicFastModeWrapper( return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return underlying(model, context, { - ...options, - onPayload: (payload) => { - if (payload && typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, model); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts index e3ef243b429..af7cfd7e1bf 100644 --- a/src/agents/pi-embedded-runner/compact.hooks.test.ts +++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts @@ -93,6 +93,8 @@ vi.mock("@mariozechner/pi-ai/oauth", () => ({ vi.mock("@mariozechner/pi-coding-agent", () => { return { + AuthStorage: class AuthStorage {}, + ModelRegistry: class ModelRegistry {}, createAgentSession: vi.fn(async () => { const session = { sessionId: "session-1", @@ -278,6 +280,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({ })); vi.mock("../../utils/message-channel.js", () => ({ + INTERNAL_MESSAGE_CHANNEL: "webchat", normalizeMessageChannel: vi.fn(() => undefined), })); @@ -323,6 +326,57 @@ import { getApiProvider, unregisterApiProviders } from "@mariozechner/pi-ai"; import { getCustomApiRegistrySourceId } from "../custom-api-registry.js"; import { compactEmbeddedPiSessionDirect, compactEmbeddedPiSession } from "./compact.js"; +const TEST_SESSION_ID = "session-1"; +const TEST_SESSION_KEY = "agent:main:session-1"; +const TEST_SESSION_FILE = "/tmp/session.jsonl"; +const TEST_WORKSPACE_DIR = "/tmp"; +const TEST_CUSTOM_INSTRUCTIONS = "focus on decisions"; + +function mockResolvedModel() { + resolveModelMock.mockReset(); + 
resolveModelMock.mockReturnValue({ + model: { provider: "openai", api: "responses", id: "fake", input: [] }, + error: null, + authStorage: { setRuntimeApiKey: vi.fn() }, + modelRegistry: {}, + }); +} + +function compactionConfig(mode: "await" | "off" | "async") { + return { + agents: { + defaults: { + compaction: { + postIndexSync: mode, + }, + }, + }, + } as never; +} + +function directCompactionArgs(overrides: Record = {}) { + return { + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + sessionFile: TEST_SESSION_FILE, + workspaceDir: TEST_WORKSPACE_DIR, + customInstructions: TEST_CUSTOM_INSTRUCTIONS, + ...overrides, + }; +} + +function wrappedCompactionArgs(overrides: Record = {}) { + return { + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + sessionFile: TEST_SESSION_FILE, + workspaceDir: TEST_WORKSPACE_DIR, + customInstructions: TEST_CUSTOM_INSTRUCTIONS, + enqueue: async (task: () => Promise | T) => await task(), + ...overrides, + }; +} + const sessionHook = (action: string) => triggerInternalHook.mock.calls.find( (call) => call[0]?.type === "session" && call[0]?.action === action, @@ -335,13 +389,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { hookRunner.hasHooks.mockReset(); hookRunner.runBeforeCompaction.mockReset(); hookRunner.runAfterCompaction.mockReset(); - resolveModelMock.mockReset(); - resolveModelMock.mockReturnValue({ - model: { provider: "openai", api: "responses", id: "fake", input: [] }, - error: null, - authStorage: { setRuntimeApiKey: vi.fn() }, - modelRegistry: {}, - }); + mockResolvedModel(); sessionCompactImpl.mockReset(); sessionCompactImpl.mockResolvedValue({ summary: "summary", @@ -375,6 +423,14 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { unregisterApiProviders(getCustomApiRegistrySourceId("ollama")); }); + async function runDirectCompaction(customInstructions = TEST_CUSTOM_INSTRUCTIONS) { + return await compactEmbeddedPiSessionDirect( + directCompactionArgs({ + customInstructions, + 
}), + ); + } + it("bootstraps runtime plugins with the resolved workspace", async () => { await compactEmbeddedPiSessionDirect({ sessionId: "session-1", @@ -472,13 +528,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { hookRunner.hasHooks.mockReturnValue(true); sanitizeSessionHistoryMock.mockResolvedValue([]); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await runDirectCompaction(); expect(result.ok).toBe(true); const beforeContext = sessionHook("compact:before")?.context; @@ -528,13 +578,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { details: { ok: true }, }); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await runDirectCompaction(); expect(result).toMatchObject({ ok: true, @@ -595,26 +639,15 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { }, }); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - config: { - agents: { - defaults: { - compaction: { - postIndexSync: "await", - }, - }, - }, - } as never, - }); + const result = await compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("await"), + }), + ); expect(result.ok).toBe(true); expect(resolveSessionAgentIdMock).toHaveBeenCalledWith({ - sessionKey: "agent:main:session-1", + sessionKey: TEST_SESSION_KEY, config: expect.any(Object), }); expect(getMemorySearchManagerMock).not.toHaveBeenCalled(); @@ -630,22 +663,11 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { 
getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); let settled = false; - const resultPromise = compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - config: { - agents: { - defaults: { - compaction: { - postIndexSync: "await", - }, - }, - }, - } as never, - }); + const resultPromise = compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("await"), + }), + ); void resultPromise.then(() => { settled = true; @@ -653,7 +675,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { await vi.waitFor(() => { expect(sync).toHaveBeenCalledWith({ reason: "post-compaction", - sessionFiles: ["/tmp/session.jsonl"], + sessionFiles: [TEST_SESSION_FILE], }); }); expect(settled).toBe(false); @@ -667,22 +689,11 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { const sync = vi.fn(async () => {}); getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - config: { - agents: { - defaults: { - compaction: { - postIndexSync: "off", - }, - }, - }, - } as never, - }); + const result = await compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("off"), + }), + ); expect(result.ok).toBe(true); expect(resolveSessionAgentIdMock).not.toHaveBeenCalled(); @@ -699,22 +710,11 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { getMemorySearchManagerMock.mockImplementation(() => managerGate); let settled = false; - const resultPromise = compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus 
on decisions", - config: { - agents: { - defaults: { - compaction: { - postIndexSync: "async", - }, - }, - }, - } as never, - }); + const resultPromise = compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("async"), + }), + ); await vi.waitFor(() => { expect(getMemorySearchManagerMock).toHaveBeenCalledTimes(1); @@ -731,7 +731,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { await vi.waitFor(() => { expect(sync).toHaveBeenCalledWith({ reason: "post-compaction", - sessionFiles: ["/tmp/session.jsonl"], + sessionFiles: [TEST_SESSION_FILE], }); }); const result = await resultPromise; @@ -791,35 +791,25 @@ describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { reason: undefined, result: { summary: "engine-summary", tokensAfter: 50 }, }); - resolveModelMock.mockReset(); - resolveModelMock.mockReturnValue({ - model: { provider: "openai", api: "responses", id: "fake", input: [] }, - error: null, - authStorage: { setRuntimeApiKey: vi.fn() }, - modelRegistry: {}, - }); + mockResolvedModel(); }); it("fires before_compaction with sentinel -1 and after_compaction on success", async () => { hookRunner.hasHooks.mockReturnValue(true); - const result = await compactEmbeddedPiSession({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - messageChannel: "telegram", - customInstructions: "focus on decisions", - enqueue: (task) => task(), - }); + const result = await compactEmbeddedPiSession( + wrappedCompactionArgs({ + messageChannel: "telegram", + }), + ); expect(result.ok).toBe(true); expect(result.compacted).toBe(true); expect(hookRunner.runBeforeCompaction).toHaveBeenCalledWith( - { messageCount: -1, sessionFile: "/tmp/session.jsonl" }, + { messageCount: -1, sessionFile: TEST_SESSION_FILE }, expect.objectContaining({ - sessionKey: "agent:main:session-1", + sessionKey: TEST_SESSION_KEY, messageProvider: "telegram", }), ); @@ -828,10 +818,10 @@ 
describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { messageCount: -1, compactedCount: -1, tokenCount: 50, - sessionFile: "/tmp/session.jsonl", + sessionFile: TEST_SESSION_FILE, }, expect.objectContaining({ - sessionKey: "agent:main:session-1", + sessionKey: TEST_SESSION_KEY, messageProvider: "telegram", }), ); @@ -844,30 +834,19 @@ describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); try { - const result = await compactEmbeddedPiSession({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: " /tmp/session.jsonl ", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - enqueue: (task) => task(), - config: { - agents: { - defaults: { - compaction: { - postIndexSync: "await", - }, - }, - }, - } as never, - }); + const result = await compactEmbeddedPiSession( + wrappedCompactionArgs({ + sessionFile: ` ${TEST_SESSION_FILE} `, + config: compactionConfig("await"), + }), + ); expect(result.ok).toBe(true); expect(listener).toHaveBeenCalledTimes(1); - expect(listener).toHaveBeenCalledWith({ sessionFile: "/tmp/session.jsonl" }); + expect(listener).toHaveBeenCalledWith({ sessionFile: TEST_SESSION_FILE }); expect(sync).toHaveBeenCalledWith({ reason: "post-compaction", - sessionFiles: ["/tmp/session.jsonl"], + sessionFiles: [TEST_SESSION_FILE], }); } finally { cleanup(); @@ -885,14 +864,7 @@ describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { result: undefined, }); - const result = await compactEmbeddedPiSession({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - enqueue: (task) => task(), - }); + const result = await compactEmbeddedPiSession(wrappedCompactionArgs()); expect(result.ok).toBe(false); expect(hookRunner.runBeforeCompaction).toHaveBeenCalled(); @@ -911,23 +883,11 @@ 
describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { }); try { - const result = await compactEmbeddedPiSession({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - enqueue: (task) => task(), - config: { - agents: { - defaults: { - compaction: { - postIndexSync: "await", - }, - }, - }, - } as never, - }); + const result = await compactEmbeddedPiSession( + wrappedCompactionArgs({ + config: compactionConfig("await"), + }), + ); expect(result.ok).toBe(true); expect(listener).not.toHaveBeenCalled(); @@ -941,14 +901,7 @@ describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { hookRunner.hasHooks.mockReturnValue(true); hookRunner.runBeforeCompaction.mockRejectedValue(new Error("hook boom")); - const result = await compactEmbeddedPiSession({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - enqueue: (task) => task(), - }); + const result = await compactEmbeddedPiSession(wrappedCompactionArgs()); expect(result.ok).toBe(true); expect(result.compacted).toBe(true); diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts index b465ea7dc9c..8c490e113d4 100644 --- a/src/agents/pi-embedded-runner/compact.ts +++ b/src/agents/pi-embedded-runner/compact.ts @@ -41,7 +41,11 @@ import { formatUserTime, resolveUserTimeFormat, resolveUserTimezone } from "../d import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../defaults.js"; import { resolveOpenClawDocsPath } from "../docs-path.js"; import { resolveMemorySearchConfig } from "../memory-search.js"; -import { getApiKeyForModel, resolveModelAuthMode } from "../model-auth.js"; +import { + applyLocalNoAuthHeaderOverride, + getApiKeyForModel, + resolveModelAuthMode, +} from "../model-auth.js"; import { 
supportsModelTools } from "../model-tool-support.js"; import { ensureOpenClawModelsJson } from "../models-config.js"; import { createConfiguredOllamaStreamFn } from "../ollama-stream.js"; @@ -429,8 +433,9 @@ export async function compactEmbeddedPiSessionDirect( const reason = error ?? `Unknown model: ${provider}/${modelId}`; return fail(reason); } + let apiKeyInfo: Awaited> | null = null; try { - const apiKeyInfo = await getApiKeyForModel({ + apiKeyInfo = await getApiKeyForModel({ model, cfg: params.config, profileId: authProfileId, @@ -518,10 +523,12 @@ export async function compactEmbeddedPiSessionDirect( modelContextWindow: model.contextWindow, defaultTokens: DEFAULT_CONTEXT_TOKENS, }); - const effectiveModel = + const effectiveModel = applyLocalNoAuthHeaderOverride( ctxInfo.tokens < (model.contextWindow ?? Infinity) ? { ...model, contextWindow: ctxInfo.tokens } - : model; + : model, + apiKeyInfo, + ); const runAbortController = new AbortController(); const toolsRaw = createOpenClawCodingTools({ diff --git a/src/agents/pi-embedded-runner/lanes.test.ts b/src/agents/pi-embedded-runner/lanes.test.ts new file mode 100644 index 00000000000..f3625ddc6ec --- /dev/null +++ b/src/agents/pi-embedded-runner/lanes.test.ts @@ -0,0 +1,44 @@ +import { describe, expect, it } from "vitest"; +import { CommandLane } from "../../process/lanes.js"; +import { resolveGlobalLane, resolveSessionLane } from "./lanes.js"; + +describe("resolveGlobalLane", () => { + it("defaults to main lane when no lane is provided", () => { + expect(resolveGlobalLane()).toBe(CommandLane.Main); + expect(resolveGlobalLane("")).toBe(CommandLane.Main); + expect(resolveGlobalLane(" ")).toBe(CommandLane.Main); + }); + + it("maps cron lane to nested lane to prevent deadlocks", () => { + // When cron jobs trigger nested agent runs, the outer execution holds + // the cron lane slot. Inner work must use a separate lane to avoid + // deadlock. 
See: https://github.com/openclaw/openclaw/issues/44805 + expect(resolveGlobalLane("cron")).toBe(CommandLane.Nested); + expect(resolveGlobalLane(" cron ")).toBe(CommandLane.Nested); + }); + + it("preserves other lanes as-is", () => { + expect(resolveGlobalLane("main")).toBe(CommandLane.Main); + expect(resolveGlobalLane("subagent")).toBe(CommandLane.Subagent); + expect(resolveGlobalLane("nested")).toBe(CommandLane.Nested); + expect(resolveGlobalLane("custom-lane")).toBe("custom-lane"); + expect(resolveGlobalLane(" custom ")).toBe("custom"); + }); +}); + +describe("resolveSessionLane", () => { + it("defaults to main lane and prefixes with session:", () => { + expect(resolveSessionLane("")).toBe("session:main"); + expect(resolveSessionLane(" ")).toBe("session:main"); + }); + + it("adds session: prefix if not present", () => { + expect(resolveSessionLane("abc123")).toBe("session:abc123"); + expect(resolveSessionLane(" xyz ")).toBe("session:xyz"); + }); + + it("preserves existing session: prefix", () => { + expect(resolveSessionLane("session:abc")).toBe("session:abc"); + expect(resolveSessionLane("session:main")).toBe("session:main"); + }); +}); diff --git a/src/agents/pi-embedded-runner/lanes.ts b/src/agents/pi-embedded-runner/lanes.ts index 81b742ded9f..57ffd1b4255 100644 --- a/src/agents/pi-embedded-runner/lanes.ts +++ b/src/agents/pi-embedded-runner/lanes.ts @@ -7,6 +7,10 @@ export function resolveSessionLane(key: string) { export function resolveGlobalLane(lane?: string) { const cleaned = lane?.trim(); + // Cron jobs hold the cron lane slot; inner operations must use nested to avoid deadlock. + if (cleaned === CommandLane.Cron) { + return CommandLane.Nested; + } return cleaned ? 
cleaned : CommandLane.Main; } diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts index d0b483e83ec..8542f329cbe 100644 --- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts @@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core"; import type { SimpleStreamOptions } from "@mariozechner/pi-ai"; import { streamSimple } from "@mariozechner/pi-ai"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; type OpenAIServiceTier = "auto" | "default" | "flex" | "priority"; type OpenAIReasoningEffort = "low" | "medium" | "high"; @@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper( ) { return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return underlying(model, context, { - ...options, - onPayload: (payload) => { - if (payload && typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, model); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } diff --git a/src/agents/pi-embedded-runner/run.ts b/src/agents/pi-embedded-runner/run.ts index dce7ff919d4..1839a9df1bb 100644 --- a/src/agents/pi-embedded-runner/run.ts +++ b/src/agents/pi-embedded-runner/run.ts @@ -30,6 +30,7 @@ import { import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../defaults.js"; import { FailoverError, resolveFailoverStatus } from "../failover-error.js"; import { + applyLocalNoAuthHeaderOverride, ensureAuthProfileStore, getApiKeyForModel, resolveAuthProfileOrder, @@ -884,7 +885,7 @@ export async function runEmbeddedPiAgent( 
disableTools: params.disableTools, provider, modelId, - model: effectiveModel, + model: applyLocalNoAuthHeaderOverride(effectiveModel, apiKeyInfo), authProfileId: lastProfileId, authProfileIdSource: lockedProfileId ? "user" : "auto", authStorage, diff --git a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts index c18d439e632..e67bb20d88d 100644 --- a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts @@ -249,6 +249,96 @@ function createSubscriptionMock() { }; } +function resetEmbeddedAttemptHarness( + params: { + includeSpawnSubagent?: boolean; + subscribeImpl?: () => ReturnType; + sessionMessages?: AgentMessage[]; + } = {}, +) { + if (params.includeSpawnSubagent) { + hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({ + status: "accepted", + childSessionKey: "agent:main:subagent:child", + runId: "run-child", + }); + } + hoisted.createAgentSessionMock.mockReset(); + hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); + hoisted.resolveSandboxContextMock.mockReset(); + hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ + release: async () => {}, + }); + hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); + hoisted.sessionManager.branch.mockReset(); + hoisted.sessionManager.resetLeaf.mockReset(); + hoisted.sessionManager.buildSessionContext + .mockReset() + .mockReturnValue({ messages: params.sessionMessages ?? 
[] }); + hoisted.sessionManager.appendCustomEntry.mockReset(); + if (params.subscribeImpl) { + hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(params.subscribeImpl); + } +} + +async function cleanupTempPaths(tempPaths: string[]) { + while (tempPaths.length > 0) { + const target = tempPaths.pop(); + if (target) { + await fs.rm(target, { recursive: true, force: true }); + } + } +} + +function createDefaultEmbeddedSession(params?: { + prompt?: (session: MutableSession) => Promise; +}): MutableSession { + const session: MutableSession = { + sessionId: "embedded-session", + messages: [], + isCompacting: false, + isStreaming: false, + agent: { + replaceMessages: (messages: unknown[]) => { + session.messages = [...messages]; + }, + }, + prompt: async () => { + if (params?.prompt) { + await params.prompt(session); + return; + } + session.messages = [ + ...session.messages, + { role: "assistant", content: "done", timestamp: 2 }, + ]; + }, + abort: async () => {}, + dispose: () => {}, + steer: async () => {}, + }; + + return session; +} + +function createContextEngineBootstrapAndAssemble() { + return { + bootstrap: vi.fn(async (_params: { sessionKey?: string }) => ({ bootstrapped: true })), + assemble: vi.fn(async ({ messages }: { messages: AgentMessage[]; sessionKey?: string }) => ({ + messages, + estimatedTokens: 1, + })), + }; +} + +function expectCalledWithSessionKey(mock: ReturnType, sessionKey: string) { + expect(mock).toHaveBeenCalledWith( + expect.objectContaining({ + sessionKey, + }), + ); +} + const testModel = { api: "openai-completions", provider: "openai", @@ -269,32 +359,14 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => { const tempPaths: string[] = []; beforeEach(() => { - hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({ - status: "accepted", - childSessionKey: "agent:main:subagent:child", - runId: "run-child", + resetEmbeddedAttemptHarness({ + includeSpawnSubagent: true, + subscribeImpl: 
createSubscriptionMock, }); - hoisted.createAgentSessionMock.mockReset(); - hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); - hoisted.resolveSandboxContextMock.mockReset(); - hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(createSubscriptionMock); - hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ - release: async () => {}, - }); - hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); - hoisted.sessionManager.branch.mockReset(); - hoisted.sessionManager.resetLeaf.mockReset(); - hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] }); - hoisted.sessionManager.appendCustomEntry.mockReset(); }); afterEach(async () => { - while (tempPaths.length > 0) { - const target = tempPaths.pop(); - if (target) { - await fs.rm(target, { recursive: true, force: true }); - } - } + await cleanupTempPaths(tempPaths); }); it("passes the real workspace to sessions_spawn when workspaceAccess is ro", async () => { @@ -318,16 +390,7 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => { hoisted.createAgentSessionMock.mockImplementation( async (params: { customTools: ToolDefinition[] }) => { - const session: MutableSession = { - sessionId: "embedded-session", - messages: [], - isCompacting: false, - isStreaming: false, - agent: { - replaceMessages: (messages: unknown[]) => { - session.messages = [...messages]; - }, - }, + const session = createDefaultEmbeddedSession({ prompt: async () => { const spawnTool = params.customTools.find((tool) => tool.name === "sessions_spawn"); expect(spawnTool).toBeDefined(); @@ -342,10 +405,7 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => { {} as unknown as ExtensionContext, ); }, - abort: async () => {}, - dispose: () => {}, - steer: async () => {}, - }; + }); return { session }; }, @@ -394,26 +454,11 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => { 
const tempPaths: string[] = []; beforeEach(() => { - hoisted.createAgentSessionMock.mockReset(); - hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); - hoisted.resolveSandboxContextMock.mockReset(); - hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ - release: async () => {}, - }); - hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); - hoisted.sessionManager.branch.mockReset(); - hoisted.sessionManager.resetLeaf.mockReset(); - hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] }); - hoisted.sessionManager.appendCustomEntry.mockReset(); + resetEmbeddedAttemptHarness(); }); afterEach(async () => { - while (tempPaths.length > 0) { - const target = tempPaths.pop(); - if (target) { - await fs.rm(target, { recursive: true, force: true }); - } - } + await cleanupTempPaths(tempPaths); }); async function runAttemptWithCacheTtl(compactionCount: number) { @@ -428,30 +473,9 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => { getCompactionCount: () => compactionCount, })); - hoisted.createAgentSessionMock.mockImplementation(async () => { - const session: MutableSession = { - sessionId: "embedded-session", - messages: [], - isCompacting: false, - isStreaming: false, - agent: { - replaceMessages: (messages: unknown[]) => { - session.messages = [...messages]; - }, - }, - prompt: async () => { - session.messages = [ - ...session.messages, - { role: "assistant", content: "done", timestamp: 2 }, - ]; - }, - abort: async () => {}, - dispose: () => {}, - steer: async () => {}, - }; - - return { session }; - }); + hoisted.createAgentSessionMock.mockImplementation(async () => ({ + session: createDefaultEmbeddedSession(), + })); return await runEmbeddedAttempt({ sessionId: "embedded-session", @@ -591,30 +615,9 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => { .mockReset() .mockReturnValue({ messages: seedMessages }); - 
hoisted.createAgentSessionMock.mockImplementation(async () => { - const session: MutableSession = { - sessionId: "embedded-session", - messages: [], - isCompacting: false, - isStreaming: false, - agent: { - replaceMessages: (messages: unknown[]) => { - session.messages = [...messages]; - }, - }, - prompt: async () => { - session.messages = [ - ...session.messages, - { role: "assistant", content: "done", timestamp: 2 }, - ]; - }, - abort: async () => {}, - dispose: () => {}, - steer: async () => {}, - }; - - return { session }; - }); + hoisted.createAgentSessionMock.mockImplementation(async () => ({ + session: createDefaultEmbeddedSession(), + })); return await runEmbeddedAttempt({ sessionId: "embedded-session", @@ -659,13 +662,7 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => { } it("forwards sessionKey to bootstrap, assemble, and afterTurn", async () => { - const bootstrap = vi.fn(async (_params: { sessionKey?: string }) => ({ bootstrapped: true })); - const assemble = vi.fn( - async ({ messages }: { messages: AgentMessage[]; sessionKey?: string }) => ({ - messages, - estimatedTokens: 1, - }), - ); + const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble(); const afterTurn = vi.fn(async (_params: { sessionKey?: string }) => {}); const result = await runAttemptWithContextEngine({ @@ -675,31 +672,13 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => { }); expect(result.promptError).toBeNull(); - expect(bootstrap).toHaveBeenCalledWith( - expect.objectContaining({ - sessionKey, - }), - ); - expect(assemble).toHaveBeenCalledWith( - expect.objectContaining({ - sessionKey, - }), - ); - expect(afterTurn).toHaveBeenCalledWith( - expect.objectContaining({ - sessionKey, - }), - ); + expectCalledWithSessionKey(bootstrap, sessionKey); + expectCalledWithSessionKey(assemble, sessionKey); + expectCalledWithSessionKey(afterTurn, sessionKey); }); it("forwards sessionKey to ingestBatch when afterTurn is absent", 
async () => { - const bootstrap = vi.fn(async (_params: { sessionKey?: string }) => ({ bootstrapped: true })); - const assemble = vi.fn( - async ({ messages }: { messages: AgentMessage[]; sessionKey?: string }) => ({ - messages, - estimatedTokens: 1, - }), - ); + const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble(); const ingestBatch = vi.fn( async (_params: { sessionKey?: string; messages: AgentMessage[] }) => ({ ingestedCount: 1 }), ); @@ -711,21 +690,11 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => { }); expect(result.promptError).toBeNull(); - expect(ingestBatch).toHaveBeenCalledWith( - expect.objectContaining({ - sessionKey, - }), - ); + expectCalledWithSessionKey(ingestBatch, sessionKey); }); it("forwards sessionKey to per-message ingest when ingestBatch is absent", async () => { - const bootstrap = vi.fn(async (_params: { sessionKey?: string }) => ({ bootstrapped: true })); - const assemble = vi.fn( - async ({ messages }: { messages: AgentMessage[]; sessionKey?: string }) => ({ - messages, - estimatedTokens: 1, - }), - ); + const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble(); const ingest = vi.fn(async (_params: { sessionKey?: string; message: AgentMessage }) => ({ ingested: true, })); diff --git a/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts b/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts index 9a38127c84a..5e1088c3155 100644 --- a/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts +++ b/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts @@ -1,10 +1,28 @@ import { describe, expect, it, vi } from "vitest"; import { waitForCompactionRetryWithAggregateTimeout } from "./compaction-retry-aggregate-timeout.js"; +async function withFakeTimers(run: () => Promise) { + vi.useFakeTimers(); + try { + await run(); + } finally { + await vi.runOnlyPendingTimersAsync(); + 
vi.useRealTimers(); + } +} + +function expectClearedTimeoutState(onTimeout: ReturnType, timedOut: boolean) { + if (timedOut) { + expect(onTimeout).toHaveBeenCalledTimes(1); + } else { + expect(onTimeout).not.toHaveBeenCalled(); + } + expect(vi.getTimerCount()).toBe(0); +} + describe("waitForCompactionRetryWithAggregateTimeout", () => { it("times out and fires callback when compaction retry never resolves", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); const waitForCompactionRetry = vi.fn(async () => await new Promise(() => {})); @@ -19,17 +37,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { const result = await resultPromise; expect(result.timedOut).toBe(true); - expect(onTimeout).toHaveBeenCalledTimes(1); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, true); + }); }); it("keeps waiting while compaction remains in flight", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); let compactionInFlight = true; const waitForCompactionRetry = vi.fn( @@ -54,17 +67,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { const result = await resultPromise; expect(result.timedOut).toBe(false); - expect(onTimeout).not.toHaveBeenCalled(); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, false); + }); }); it("times out after an idle timeout window", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); let compactionInFlight = true; const waitForCompactionRetry = vi.fn(async () => await new Promise(() => {})); @@ -84,17 +92,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { const result = await resultPromise; expect(result.timedOut).toBe(true); - 
expect(onTimeout).toHaveBeenCalledTimes(1); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, true); + }); }); it("does not time out when compaction retry resolves", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); const waitForCompactionRetry = vi.fn(async () => {}); @@ -106,17 +109,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { }); expect(result.timedOut).toBe(false); - expect(onTimeout).not.toHaveBeenCalled(); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, false); + }); }); it("propagates abort errors from abortable and clears timer", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const abortError = new Error("aborted"); abortError.name = "AbortError"; const onTimeout = vi.fn(); @@ -133,11 +131,7 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { }), ).rejects.toThrow("aborted"); - expect(onTimeout).not.toHaveBeenCalled(); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, false); + }); }); }); diff --git a/src/agents/pi-embedded-runner/stream-payload-utils.ts b/src/agents/pi-embedded-runner/stream-payload-utils.ts new file mode 100644 index 00000000000..580bf5b1391 --- /dev/null +++ b/src/agents/pi-embedded-runner/stream-payload-utils.ts @@ -0,0 +1,20 @@ +import type { StreamFn } from "@mariozechner/pi-agent-core"; + +export function streamWithPayloadPatch( + underlying: StreamFn, + model: Parameters[0], + context: Parameters[1], + options: Parameters[2], + patchPayload: (payload: Record) => void, +) { + const originalOnPayload = options?.onPayload; + return underlying(model, context, { + ...options, + onPayload: 
(payload) => { + if (payload && typeof payload === "object") { + patchPayload(payload as Record); + } + return originalOnPayload?.(payload, model); + }, + }); +} diff --git a/src/agents/pi-embedded-runner/tool-result-char-estimator.ts b/src/agents/pi-embedded-runner/tool-result-char-estimator.ts index 16bdc5e43eb..6d022d62289 100644 --- a/src/agents/pi-embedded-runner/tool-result-char-estimator.ts +++ b/src/agents/pi-embedded-runner/tool-result-char-estimator.ts @@ -46,6 +46,20 @@ function getToolResultContent(msg: AgentMessage): unknown[] { return Array.isArray(content) ? content : []; } +function estimateContentBlockChars(content: unknown[]): number { + let chars = 0; + for (const block of content) { + if (isTextBlock(block)) { + chars += block.text.length; + } else if (isImageBlock(block)) { + chars += IMAGE_CHAR_ESTIMATE; + } else { + chars += estimateUnknownChars(block); + } + } + return chars; +} + export function getToolResultText(msg: AgentMessage): string { const content = getToolResultContent(msg); const chunks: string[] = []; @@ -67,19 +81,10 @@ function estimateMessageChars(msg: AgentMessage): number { if (typeof content === "string") { return content.length; } - let chars = 0; if (Array.isArray(content)) { - for (const block of content) { - if (isTextBlock(block)) { - chars += block.text.length; - } else if (isImageBlock(block)) { - chars += IMAGE_CHAR_ESTIMATE; - } else { - chars += estimateUnknownChars(block); - } - } + return estimateContentBlockChars(content); } - return chars; + return 0; } if (msg.role === "assistant") { @@ -115,17 +120,8 @@ function estimateMessageChars(msg: AgentMessage): number { } if (isToolResultMessage(msg)) { - let chars = 0; const content = getToolResultContent(msg); - for (const block of content) { - if (isTextBlock(block)) { - chars += block.text.length; - } else if (isImageBlock(block)) { - chars += IMAGE_CHAR_ESTIMATE; - } else { - chars += estimateUnknownChars(block); - } - } + let chars = 
estimateContentBlockChars(content); const details = (msg as { details?: unknown }).details; chars += estimateUnknownChars(details); const weightedChars = Math.ceil( diff --git a/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts b/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts index 5a7cb72ccb7..ed705842ada 100644 --- a/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts +++ b/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts @@ -160,7 +160,8 @@ describe("createOpenClawCodingTools", () => { it("mentions Chrome extension relay in browser tool description", () => { const browser = createBrowserTool(); expect(browser.description).toMatch(/Chrome extension/i); - expect(browser.description).toMatch(/profile="chrome"/i); + expect(browser.description).toMatch(/profile="user"/i); + expect(browser.description).toMatch(/profile="chrome-relay"/i); }); it("keeps browser tool schema properties after normalization", () => { const browser = defaultTools.find((tool) => tool.name === "browser"); diff --git a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts index 57f22cc84b6..973c81341d1 100644 --- a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts +++ b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts @@ -1,22 +1,13 @@ import { spawnSync } from "node:child_process"; import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from "../../test-helpers/temp-dir.js"; import { buildPinnedWritePlan, SANDBOX_PINNED_MUTATION_PYTHON, } from "./fs-bridge-mutation-helper.js"; -async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { - const root = await 
fs.mkdtemp(path.join(os.tmpdir(), prefix)); - try { - return await run(root); - } finally { - await fs.rm(root, { recursive: true, force: true }); - } -} - function runMutation(args: string[], input?: string) { return spawnSync("python3", ["-c", SANDBOX_PINNED_MUTATION_PYTHON, ...args], { input, @@ -56,7 +47,7 @@ function runWritePlan(args: string[], input?: string) { describe("sandbox pinned mutation helper", () => { it("writes through a pinned directory fd", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); await fs.mkdir(workspace, { recursive: true }); @@ -72,7 +63,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "preserves stdin payload bytes when the pinned write plan runs through sh", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); await fs.mkdir(workspace, { recursive: true }); @@ -92,7 +83,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "rejects symlink-parent writes instead of materializing a temp file outside the mount", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -108,7 +99,7 @@ describe("sandbox pinned mutation helper", () => { ); it.runIf(process.platform !== "win32")("rejects symlink segments during mkdirp", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = 
path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -123,7 +114,7 @@ describe("sandbox pinned mutation helper", () => { }); it.runIf(process.platform !== "win32")("remove unlinks the symlink itself", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -144,7 +135,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "rejects symlink destination parents during rename", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -175,7 +166,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "copies directories across different mount roots during rename fallback", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const sourceRoot = path.join(root, "source"); const destRoot = path.join(root, "dest"); await fs.mkdir(path.join(sourceRoot, "dir", "nested"), { recursive: true }); diff --git a/src/agents/sandbox/fs-bridge-path-safety.ts b/src/agents/sandbox/fs-bridge-path-safety.ts index 9ca4c52e537..83fa4149974 100644 --- a/src/agents/sandbox/fs-bridge-path-safety.ts +++ b/src/agents/sandbox/fs-bridge-path-safety.ts @@ -87,6 +87,26 @@ export class SandboxFsPathGuard { return lexicalMount; } + private finalizePinnedEntry(params: { + mount: SandboxFsMount; + parentPath: string; + basename: string; + targetPath: string; + 
action: string; + }): PinnedSandboxEntry { + const relativeParentPath = path.posix.relative(params.mount.containerRoot, params.parentPath); + if (relativeParentPath.startsWith("..") || path.posix.isAbsolute(relativeParentPath)) { + throw new Error( + `Sandbox path escapes allowed mounts; cannot ${params.action}: ${params.targetPath}`, + ); + } + return { + mountRootPath: params.mount.containerRoot, + relativeParentPath: relativeParentPath === "." ? "" : relativeParentPath, + basename: params.basename, + }; + } + private async assertGuardedPathSafety( target: SandboxResolvedFsPath, options: PathSafetyOptions, @@ -146,17 +166,13 @@ export class SandboxFsPathGuard { } const parentPath = normalizeContainerPath(path.posix.dirname(target.containerPath)); const mount = this.resolveRequiredMount(parentPath, action); - const relativeParentPath = path.posix.relative(mount.containerRoot, parentPath); - if (relativeParentPath.startsWith("..") || path.posix.isAbsolute(relativeParentPath)) { - throw new Error( - `Sandbox path escapes allowed mounts; cannot ${action}: ${target.containerPath}`, - ); - } - return { - mountRootPath: mount.containerRoot, - relativeParentPath: relativeParentPath === "." ? 
"" : relativeParentPath, + return this.finalizePinnedEntry({ + mount, + parentPath, basename, - }; + targetPath: target.containerPath, + action, + }); } async resolveAnchoredSandboxEntry( @@ -185,20 +201,13 @@ export class SandboxFsPathGuard { ): Promise { const anchoredTarget = await this.resolveAnchoredSandboxEntry(target, action); const mount = this.resolveRequiredMount(anchoredTarget.canonicalParentPath, action); - const relativeParentPath = path.posix.relative( - mount.containerRoot, - anchoredTarget.canonicalParentPath, - ); - if (relativeParentPath.startsWith("..") || path.posix.isAbsolute(relativeParentPath)) { - throw new Error( - `Sandbox path escapes allowed mounts; cannot ${action}: ${target.containerPath}`, - ); - } - return { - mountRootPath: mount.containerRoot, - relativeParentPath: relativeParentPath === "." ? "" : relativeParentPath, + return this.finalizePinnedEntry({ + mount, + parentPath: anchoredTarget.canonicalParentPath, basename: anchoredTarget.basename, - }; + targetPath: target.containerPath, + action, + }); } resolvePinnedDirectoryEntry( diff --git a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts index 48e7e9e23f8..f92e99cc3c6 100644 --- a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts +++ b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts @@ -4,6 +4,7 @@ import { describe, expect, it } from "vitest"; import { createSandbox, createSandboxFsBridge, + createSeededSandboxFsBridge, dockerExecResult, findCallsByScriptFragment, findCallByDockerArg, @@ -103,17 +104,7 @@ describe("sandbox fs bridge anchored ops", () => { it.each(pinnedCases)("$name", async (testCase) => { await withTempDir("openclaw-fs-bridge-contract-write-", async (stateDir) => { - const workspaceDir = path.join(stateDir, "workspace"); - await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); - await fs.writeFile(path.join(workspaceDir, "from.txt"), "hello", "utf8"); - await 
fs.writeFile(path.join(workspaceDir, "nested", "file.txt"), "bye", "utf8"); - - const bridge = createSandboxFsBridge({ - sandbox: createSandbox({ - workspaceDir, - agentWorkspaceDir: workspaceDir, - }), - }); + const { bridge } = await createSeededSandboxFsBridge(stateDir); await testCase.invoke(bridge); diff --git a/src/agents/sandbox/fs-bridge.shell.test.ts b/src/agents/sandbox/fs-bridge.shell.test.ts index 1685759ad38..1e870ef0268 100644 --- a/src/agents/sandbox/fs-bridge.shell.test.ts +++ b/src/agents/sandbox/fs-bridge.shell.test.ts @@ -4,6 +4,7 @@ import { describe, expect, it } from "vitest"; import { createSandbox, createSandboxFsBridge, + createSeededSandboxFsBridge, getScriptsFromCalls, installFsBridgeTestHarness, mockedExecDockerRaw, @@ -140,16 +141,8 @@ describe("sandbox fs bridge shell compatibility", () => { it("routes mkdirp, remove, and rename through the pinned mutation helper", async () => { await withTempDir("openclaw-fs-bridge-shell-write-", async (stateDir) => { - const workspaceDir = path.join(stateDir, "workspace"); - await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); - await fs.writeFile(path.join(workspaceDir, "a.txt"), "hello", "utf8"); - await fs.writeFile(path.join(workspaceDir, "nested", "file.txt"), "bye", "utf8"); - - const bridge = createSandboxFsBridge({ - sandbox: createSandbox({ - workspaceDir, - agentWorkspaceDir: workspaceDir, - }), + const { bridge } = await createSeededSandboxFsBridge(stateDir, { + rootFileName: "a.txt", }); await bridge.mkdirp({ filePath: "nested" }); diff --git a/src/agents/sandbox/fs-bridge.test-helpers.ts b/src/agents/sandbox/fs-bridge.test-helpers.ts index 87a184154af..0747371478d 100644 --- a/src/agents/sandbox/fs-bridge.test-helpers.ts +++ b/src/agents/sandbox/fs-bridge.test-helpers.ts @@ -79,6 +79,36 @@ export function createSandbox(overrides?: Partial): SandboxConte }); } +export async function createSeededSandboxFsBridge( + stateDir: string, + params?: { + rootFileName?: string; + 
rootContents?: string; + nestedFileName?: string; + nestedContents?: string; + }, +) { + const workspaceDir = path.join(stateDir, "workspace"); + await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); + await fs.writeFile( + path.join(workspaceDir, params?.rootFileName ?? "from.txt"), + params?.rootContents ?? "hello", + "utf8", + ); + await fs.writeFile( + path.join(workspaceDir, "nested", params?.nestedFileName ?? "file.txt"), + params?.nestedContents ?? "bye", + "utf8", + ); + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + return { workspaceDir, bridge }; +} + export async function withTempDir( prefix: string, run: (stateDir: string) => Promise, diff --git a/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts b/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts index 0ee8a39a0b0..1f4da5163e1 100644 --- a/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts +++ b/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts @@ -25,6 +25,33 @@ async function createCaseDir(prefix: string): Promise { return dir; } +async function syncSourceSkillsToTarget(sourceWorkspace: string, targetWorkspace: string) { + await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => + syncSkillsToWorkspace({ + sourceWorkspaceDir: sourceWorkspace, + targetWorkspaceDir: targetWorkspace, + bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), + managedSkillsDir: path.join(sourceWorkspace, ".managed"), + }), + ); +} + +async function expectSyncedSkillConfinement(params: { + sourceWorkspace: string; + targetWorkspace: string; + safeSkillDirName: string; + escapedDest: string; +}) { + expect(await pathExists(params.escapedDest)).toBe(false); + await syncSourceSkillsToTarget(params.sourceWorkspace, 
params.targetWorkspace); + expect( + await pathExists( + path.join(params.targetWorkspace, "skills", params.safeSkillDirName, "SKILL.md"), + ), + ).toBe(true); + expect(await pathExists(params.escapedDest)).toBe(false); +} + beforeAll(async () => { fixtureRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-skills-sync-suite-")); syncSourceTemplateDir = await createCaseDir("source-template"); @@ -115,14 +142,7 @@ describe("buildWorkspaceSkillsPrompt", () => { "dir", ); - await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => - syncSkillsToWorkspace({ - sourceWorkspaceDir: sourceWorkspace, - targetWorkspaceDir: targetWorkspace, - bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), - managedSkillsDir: path.join(sourceWorkspace, ".managed"), - }), - ); + await syncSourceSkillsToTarget(sourceWorkspace, targetWorkspace); const prompt = buildPrompt(targetWorkspace, { bundledSkillsDir: path.join(targetWorkspace, ".bundled"), @@ -151,21 +171,12 @@ describe("buildWorkspaceSkillsPrompt", () => { expect(path.relative(path.join(targetWorkspace, "skills"), escapedDest).startsWith("..")).toBe( true, ); - expect(await pathExists(escapedDest)).toBe(false); - - await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => - syncSkillsToWorkspace({ - sourceWorkspaceDir: sourceWorkspace, - targetWorkspaceDir: targetWorkspace, - bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), - managedSkillsDir: path.join(sourceWorkspace, ".managed"), - }), - ); - - expect( - await pathExists(path.join(targetWorkspace, "skills", "safe-traversal-skill", "SKILL.md")), - ).toBe(true); - expect(await pathExists(escapedDest)).toBe(false); + await expectSyncedSkillConfinement({ + sourceWorkspace, + targetWorkspace, + safeSkillDirName: "safe-traversal-skill", + escapedDest, + }); }); it("keeps synced skills confined under target workspace when frontmatter name is absolute", async () => { const sourceWorkspace = await createCaseDir("source"); @@ -180,21 +191,12 @@ 
describe("buildWorkspaceSkillsPrompt", () => { description: "Absolute skill", }); - expect(await pathExists(absoluteDest)).toBe(false); - - await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => - syncSkillsToWorkspace({ - sourceWorkspaceDir: sourceWorkspace, - targetWorkspaceDir: targetWorkspace, - bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), - managedSkillsDir: path.join(sourceWorkspace, ".managed"), - }), - ); - - expect( - await pathExists(path.join(targetWorkspace, "skills", "safe-absolute-skill", "SKILL.md")), - ).toBe(true); - expect(await pathExists(absoluteDest)).toBe(false); + await expectSyncedSkillConfinement({ + sourceWorkspace, + targetWorkspace, + safeSkillDirName: "safe-absolute-skill", + escapedDest: absoluteDest, + }); }); it("filters skills based on env/config gates", async () => { const workspaceDir = await createCaseDir("workspace"); diff --git a/src/agents/skills.buildworkspaceskillsnapshot.test.ts b/src/agents/skills.buildworkspaceskillsnapshot.test.ts index aec0da8b49a..1292841ed13 100644 --- a/src/agents/skills.buildworkspaceskillsnapshot.test.ts +++ b/src/agents/skills.buildworkspaceskillsnapshot.test.ts @@ -43,22 +43,44 @@ function withWorkspaceHome(workspaceDir: string, cb: () => T): T { return withEnv({ HOME: workspaceDir, PATH: "" }, cb); } +function buildSnapshot( + workspaceDir: string, + options?: Parameters[1], +) { + return withWorkspaceHome(workspaceDir, () => + buildWorkspaceSkillSnapshot(workspaceDir, { + managedSkillsDir: path.join(workspaceDir, ".managed"), + bundledSkillsDir: path.join(workspaceDir, ".bundled"), + ...options, + }), + ); +} + async function cloneTemplateDir(templateDir: string, prefix: string): Promise { const cloned = await fixtureSuite.createCaseDir(prefix); await fs.cp(templateDir, cloned, { recursive: true }); return cloned; } +function expectSnapshotNamesAndPrompt( + snapshot: ReturnType, + params: { contains?: string[]; omits?: string[] }, +) { + for (const name of params.contains ?? 
[]) { + expect(snapshot.skills.map((skill) => skill.name)).toContain(name); + expect(snapshot.prompt).toContain(name); + } + for (const name of params.omits ?? []) { + expect(snapshot.skills.map((skill) => skill.name)).not.toContain(name); + expect(snapshot.prompt).not.toContain(name); + } +} + describe("buildWorkspaceSkillSnapshot", () => { it("returns an empty snapshot when skills dirs are missing", async () => { const workspaceDir = await fixtureSuite.createCaseDir("workspace"); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + const snapshot = buildSnapshot(workspaceDir); expect(snapshot.prompt).toBe(""); expect(snapshot.skills).toEqual([]); @@ -78,12 +100,7 @@ describe("buildWorkspaceSkillSnapshot", () => { frontmatterExtra: "disable-model-invocation: true", }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + const snapshot = buildSnapshot(workspaceDir); expect(snapshot.prompt).toContain("visible-skill"); expect(snapshot.prompt).not.toContain("hidden-skill"); @@ -204,24 +221,20 @@ describe("buildWorkspaceSkillSnapshot", () => { body: "x".repeat(5_000), }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - config: { - skills: { - limits: { - maxSkillFileBytes: 1000, - }, + const snapshot = buildSnapshot(workspaceDir, { + config: { + skills: { + limits: { + maxSkillFileBytes: 1000, }, }, - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + }, + }); - expect(snapshot.skills.map((s) => s.name)).toContain("small-skill"); - expect(snapshot.skills.map((s) => 
s.name)).not.toContain("big-skill"); - expect(snapshot.prompt).toContain("small-skill"); - expect(snapshot.prompt).not.toContain("big-skill"); + expectSnapshotNamesAndPrompt(snapshot, { + contains: ["small-skill"], + omits: ["big-skill"], + }); }); it("detects nested skills roots beyond the first 25 entries", async () => { @@ -241,26 +254,23 @@ describe("buildWorkspaceSkillSnapshot", () => { description: "Nested skill discovered late", }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - config: { - skills: { - load: { - extraDirs: [repoDir], - }, - limits: { - maxCandidatesPerRoot: 30, - maxSkillsLoadedPerSource: 30, - }, + const snapshot = buildSnapshot(workspaceDir, { + config: { + skills: { + load: { + extraDirs: [repoDir], + }, + limits: { + maxCandidatesPerRoot: 30, + maxSkillsLoadedPerSource: 30, }, }, - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + }, + }); - expect(snapshot.skills.map((s) => s.name)).toContain("late-skill"); - expect(snapshot.prompt).toContain("late-skill"); + expectSnapshotNamesAndPrompt(snapshot, { + contains: ["late-skill"], + }); }); it("enforces maxSkillFileBytes for root-level SKILL.md", async () => { @@ -274,24 +284,21 @@ describe("buildWorkspaceSkillSnapshot", () => { body: "x".repeat(5_000), }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - config: { - skills: { - load: { - extraDirs: [rootSkillDir], - }, - limits: { - maxSkillFileBytes: 1000, - }, + const snapshot = buildSnapshot(workspaceDir, { + config: { + skills: { + load: { + extraDirs: [rootSkillDir], + }, + limits: { + maxSkillFileBytes: 1000, }, }, - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + }, + }); - expect(snapshot.skills.map((s) => s.name)).not.toContain("root-big-skill"); - 
expect(snapshot.prompt).not.toContain("root-big-skill"); + expectSnapshotNamesAndPrompt(snapshot, { + omits: ["root-big-skill"], + }); }); }); diff --git a/src/agents/skills.test.ts b/src/agents/skills.test.ts index 394f476ffa8..c5c8c2077d9 100644 --- a/src/agents/skills.test.ts +++ b/src/agents/skills.test.ts @@ -49,6 +49,16 @@ const withClearedEnv = ( } }; +async function writeEnvSkill(workspaceDir: string) { + const skillDir = path.join(workspaceDir, "skills", "env-skill"); + await writeSkill({ + dir: skillDir, + name: "env-skill", + description: "Needs env", + metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', + }); +} + beforeAll(async () => { await fixtureSuite.setup(); tempHome = await createTempHomeEnv("openclaw-skills-home-"); @@ -240,13 +250,7 @@ describe("buildWorkspaceSkillsPrompt", () => { describe("applySkillEnvOverrides", () => { it("sets and restores env vars", async () => { const workspaceDir = await makeWorkspace(); - const skillDir = path.join(workspaceDir, "skills", "env-skill"); - await writeSkill({ - dir: skillDir, - name: "env-skill", - description: "Needs env", - metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', - }); + await writeEnvSkill(workspaceDir); const entries = loadWorkspaceSkillEntries(workspaceDir, resolveTestSkillDirs(workspaceDir)); @@ -269,13 +273,7 @@ describe("applySkillEnvOverrides", () => { it("keeps env keys tracked until all overlapping overrides restore", async () => { const workspaceDir = await makeWorkspace(); - const skillDir = path.join(workspaceDir, "skills", "env-skill"); - await writeSkill({ - dir: skillDir, - name: "env-skill", - description: "Needs env", - metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', - }); + await writeEnvSkill(workspaceDir); const entries = loadWorkspaceSkillEntries(workspaceDir, resolveTestSkillDirs(workspaceDir)); @@ -301,13 +299,7 @@ describe("applySkillEnvOverrides", () => { it("applies env 
overrides from snapshots", async () => { const workspaceDir = await makeWorkspace(); - const skillDir = path.join(workspaceDir, "skills", "env-skill"); - await writeSkill({ - dir: skillDir, - name: "env-skill", - description: "Needs env", - metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', - }); + await writeEnvSkill(workspaceDir); const snapshot = buildWorkspaceSkillSnapshot(workspaceDir, { ...resolveTestSkillDirs(workspaceDir), diff --git a/src/agents/subagent-announce.timeout.test.ts b/src/agents/subagent-announce.timeout.test.ts index b003276e56e..5fae988fe73 100644 --- a/src/agents/subagent-announce.timeout.test.ts +++ b/src/agents/subagent-announce.timeout.test.ts @@ -120,6 +120,21 @@ function findGatewayCall(predicate: (call: GatewayCall) => boolean): GatewayCall return gatewayCalls.find(predicate); } +function findFinalDirectAgentCall(): GatewayCall | undefined { + return findGatewayCall((call) => call.method === "agent" && call.expectFinal === true); +} + +function setupParentSessionFallback(parentSessionKey: string): void { + requesterDepthResolver = (sessionKey?: string) => + sessionKey === parentSessionKey ? 1 : sessionKey?.includes(":subagent:") ? 
1 : 0; + subagentSessionRunActive = false; + shouldIgnorePostCompletion = false; + fallbackRequesterResolution = { + requesterSessionKey: "agent:main:main", + requesterOrigin: { channel: "discord", to: "chan-main", accountId: "acct-main" }, + }; +} + describe("subagent announce timeout config", () => { beforeEach(() => { gatewayCalls.length = 0; @@ -244,9 +259,7 @@ describe("subagent announce timeout config", () => { requesterOrigin: { channel: "discord", to: "channel:cron-results", accountId: "acct-1" }, }); - const directAgentCall = findGatewayCall( - (call) => call.method === "agent" && call.expectFinal === true, - ); + const directAgentCall = findFinalDirectAgentCall(); expect(directAgentCall?.params?.sessionKey).toBe(cronSessionKey); expect(directAgentCall?.params?.deliver).toBe(false); expect(directAgentCall?.params?.channel).toBeUndefined(); @@ -256,14 +269,7 @@ describe("subagent announce timeout config", () => { it("regression, routes child announce to parent session instead of grandparent when parent session still exists", async () => { const parentSessionKey = "agent:main:subagent:parent"; - requesterDepthResolver = (sessionKey?: string) => - sessionKey === parentSessionKey ? 1 : sessionKey?.includes(":subagent:") ? 1 : 0; - subagentSessionRunActive = false; - shouldIgnorePostCompletion = false; - fallbackRequesterResolution = { - requesterSessionKey: "agent:main:main", - requesterOrigin: { channel: "discord", to: "chan-main", accountId: "acct-main" }, - }; + setupParentSessionFallback(parentSessionKey); // No sessionId on purpose: existence in store should still count as alive. 
sessionStore[parentSessionKey] = { updatedAt: Date.now() }; @@ -273,23 +279,14 @@ describe("subagent announce timeout config", () => { childSessionKey: `${parentSessionKey}:subagent:child`, }); - const directAgentCall = findGatewayCall( - (call) => call.method === "agent" && call.expectFinal === true, - ); + const directAgentCall = findFinalDirectAgentCall(); expect(directAgentCall?.params?.sessionKey).toBe(parentSessionKey); expect(directAgentCall?.params?.deliver).toBe(false); }); it("regression, falls back to grandparent only when parent subagent session is missing", async () => { const parentSessionKey = "agent:main:subagent:parent-missing"; - requesterDepthResolver = (sessionKey?: string) => - sessionKey === parentSessionKey ? 1 : sessionKey?.includes(":subagent:") ? 1 : 0; - subagentSessionRunActive = false; - shouldIgnorePostCompletion = false; - fallbackRequesterResolution = { - requesterSessionKey: "agent:main:main", - requesterOrigin: { channel: "discord", to: "chan-main", accountId: "acct-main" }, - }; + setupParentSessionFallback(parentSessionKey); await runAnnounceFlowForTest("run-parent-fallback", { requesterSessionKey: parentSessionKey, @@ -297,9 +294,7 @@ describe("subagent announce timeout config", () => { childSessionKey: `${parentSessionKey}:subagent:child`, }); - const directAgentCall = findGatewayCall( - (call) => call.method === "agent" && call.expectFinal === true, - ); + const directAgentCall = findFinalDirectAgentCall(); expect(directAgentCall?.params?.sessionKey).toBe("agent:main:main"); expect(directAgentCall?.params?.deliver).toBe(true); expect(directAgentCall?.params?.channel).toBe("discord"); diff --git a/src/agents/subagent-spawn.workspace.test.ts b/src/agents/subagent-spawn.workspace.test.ts index fef6bc7515c..9955e587c89 100644 --- a/src/agents/subagent-spawn.workspace.test.ts +++ b/src/agents/subagent-spawn.workspace.test.ts @@ -1,5 +1,6 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import { spawnSubagentDirect 
} from "./subagent-spawn.js"; +import { installAcceptedSubagentGatewayMock } from "./test-helpers/subagent-gateway.js"; type TestAgentConfig = { id?: string; @@ -100,20 +101,7 @@ function createConfigOverride(overrides?: Record) { } function setupGatewayMock() { - hoisted.callGatewayMock.mockImplementation( - async (opts: { method?: string; params?: Record }) => { - if (opts.method === "sessions.patch") { - return { ok: true }; - } - if (opts.method === "sessions.delete") { - return { ok: true }; - } - if (opts.method === "agent") { - return { runId: "run-1" }; - } - return {}; - }, - ); + installAcceptedSubagentGatewayMock(hoisted.callGatewayMock); } function getRegisteredRun() { @@ -122,6 +110,27 @@ function getRegisteredRun() { | undefined; } +async function expectAcceptedWorkspace(params: { agentId: string; expectedWorkspaceDir: string }) { + const result = await spawnSubagentDirect( + { + task: "inspect workspace", + agentId: params.agentId, + }, + { + agentSessionKey: "agent:main:main", + agentChannel: "telegram", + agentAccountId: "123", + agentTo: "456", + workspaceDir: "/tmp/requester-workspace", + }, + ); + + expect(result.status).toBe("accepted"); + expect(getRegisteredRun()).toMatchObject({ + workspaceDir: params.expectedWorkspaceDir, + }); +} + describe("spawnSubagentDirect workspace inheritance", () => { beforeEach(() => { hoisted.callGatewayMock.mockClear(); @@ -149,44 +158,16 @@ describe("spawnSubagentDirect workspace inheritance", () => { }, }); - const result = await spawnSubagentDirect( - { - task: "inspect workspace", - agentId: "ops", - }, - { - agentSessionKey: "agent:main:main", - agentChannel: "telegram", - agentAccountId: "123", - agentTo: "456", - workspaceDir: "/tmp/requester-workspace", - }, - ); - - expect(result.status).toBe("accepted"); - expect(getRegisteredRun()).toMatchObject({ - workspaceDir: "/tmp/workspace-ops", + await expectAcceptedWorkspace({ + agentId: "ops", + expectedWorkspaceDir: "/tmp/workspace-ops", }); }); 
it("preserves the inherited workspace for same-agent spawns", async () => { - const result = await spawnSubagentDirect( - { - task: "inspect workspace", - agentId: "main", - }, - { - agentSessionKey: "agent:main:main", - agentChannel: "telegram", - agentAccountId: "123", - agentTo: "456", - workspaceDir: "/tmp/requester-workspace", - }, - ); - - expect(result.status).toBe("accepted"); - expect(getRegisteredRun()).toMatchObject({ - workspaceDir: "/tmp/requester-workspace", + await expectAcceptedWorkspace({ + agentId: "main", + expectedWorkspaceDir: "/tmp/requester-workspace", }); }); }); diff --git a/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts new file mode 100644 index 00000000000..1d987c44d1a --- /dev/null +++ b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts @@ -0,0 +1,57 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { OpenClawConfig } from "../../config/config.js"; + +export type EmbeddedPiRunnerTestWorkspace = { + tempRoot: string; + agentDir: string; + workspaceDir: string; +}; + +export async function createEmbeddedPiRunnerTestWorkspace( + prefix: string, +): Promise { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const agentDir = path.join(tempRoot, "agent"); + const workspaceDir = path.join(tempRoot, "workspace"); + await fs.mkdir(agentDir, { recursive: true }); + await fs.mkdir(workspaceDir, { recursive: true }); + return { tempRoot, agentDir, workspaceDir }; +} + +export async function cleanupEmbeddedPiRunnerTestWorkspace( + workspace: EmbeddedPiRunnerTestWorkspace | undefined, +): Promise { + if (!workspace) { + return; + } + await fs.rm(workspace.tempRoot, { recursive: true, force: true }); +} + +export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig { + return { + models: { + providers: { + openai: { + api: "openai-responses", + apiKey: "sk-test", 
+ baseUrl: "https://example.com", + models: modelIds.map((id) => ({ + id, + name: `Mock ${id}`, + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 16_000, + maxTokens: 2048, + })), + }, + }, + }, + }; +} + +export async function immediateEnqueue(task: () => Promise): Promise { + return await task(); +} diff --git a/src/agents/test-helpers/subagent-gateway.ts b/src/agents/test-helpers/subagent-gateway.ts new file mode 100644 index 00000000000..9491d971c33 --- /dev/null +++ b/src/agents/test-helpers/subagent-gateway.ts @@ -0,0 +1,9 @@ +export function installAcceptedSubagentGatewayMock(mock: { + mockImplementation: ( + impl: (opts: { method?: string; params?: unknown }) => Promise, + ) => unknown; +}) { + mock.mockImplementation(async ({ method }) => + method === "agent" ? { runId: "run-1" } : method?.startsWith("sessions.") ? { ok: true } : {}, + ); +} diff --git a/src/agents/tool-display-common.ts b/src/agents/tool-display-common.ts index a7564c98052..f5d231fd898 100644 --- a/src/agents/tool-display-common.ts +++ b/src/agents/tool-display-common.ts @@ -1081,9 +1081,10 @@ export function resolveExecDetail(args: unknown): string | undefined { const displaySummary = cwd ? `${summary} (in ${cwd})` : summary; - // Append the raw command when the summary differs meaningfully from the command itself. + // Keep the raw command inline so chat surfaces do not break "Exec:" onto a + // separate paragraph/code block. 
if (compact && compact !== displaySummary && compact !== summary) { - return `${displaySummary}\n\n\`${compact}\``; + return `${displaySummary}, \`${compact}\``; } return displaySummary; diff --git a/src/agents/tool-display.test.ts b/src/agents/tool-display.test.ts index b41db4d0552..19ef7652ffb 100644 --- a/src/agents/tool-display.test.ts +++ b/src/agents/tool-display.test.ts @@ -112,9 +112,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe( - "install dependencies (in ~/my-project)\n\n`cd ~/my-project && npm install`", - ); + expect(detail).toBe("install dependencies (in ~/my-project), `cd ~/my-project && npm install`"); }); it("moves cd path to context suffix with multiple stages and raw command", () => { @@ -126,7 +124,7 @@ describe("tool display details", () => { ); expect(detail).toBe( - "install dependencies → run tests (in ~/my-project)\n\n`cd ~/my-project && npm install && npm test`", + "install dependencies → run tests (in ~/my-project), `cd ~/my-project && npm install && npm test`", ); }); @@ -138,7 +136,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe("check git status (in /tmp)\n\n`pushd /tmp && git status`"); + expect(detail).toBe("check git status (in /tmp), `pushd /tmp && git status`"); }); it("clears inferred cwd when popd is stripped from preamble", () => { @@ -149,7 +147,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe("install dependencies\n\n`pushd /tmp && popd && npm install`"); + expect(detail).toBe("install dependencies, `pushd /tmp && popd && npm install`"); }); it("moves cd path to context suffix with || separator", () => { @@ -173,7 +171,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe("install dependencies (in /app)\n\n`cd /tmp && npm install`"); + expect(detail).toBe("install dependencies (in /app), `cd /tmp && npm install`"); }); it("summarizes all stages and appends raw command", () => { @@ -185,7 +183,7 @@ describe("tool display
details", () => { ); expect(detail).toBe( - "fetch git changes → rebase git branch\n\n`git fetch && git rebase origin/main`", + "fetch git changes → rebase git branch, `git fetch && git rebase origin/main`", ); }); diff --git a/src/agents/tools/browser-tool.actions.ts b/src/agents/tools/browser-tool.actions.ts index 673585d16b3..a4b6cb456af 100644 --- a/src/agents/tools/browser-tool.actions.ts +++ b/src/agents/tools/browser-tool.actions.ts @@ -54,8 +54,27 @@ function formatTabsToolResult(tabs: unknown[]): AgentToolResult { }; } +function formatConsoleToolResult(result: { + targetId?: string; + messages?: unknown[]; +}): AgentToolResult { + const wrapped = wrapBrowserExternalJson({ + kind: "console", + payload: result, + includeWarning: false, + }); + return { + content: [{ type: "text" as const, text: wrapped.wrappedText }], + details: { + ...wrapped.safeDetails, + targetId: typeof result.targetId === "string" ? result.targetId : undefined, + messageCount: Array.isArray(result.messages) ? result.messages.length : undefined, + }, + }; +} + function isChromeStaleTargetError(profile: string | undefined, err: unknown): boolean { - if (profile !== "chrome") { + if (profile !== "chrome-relay" && profile !== "chrome") { return false; } const msg = String(err); @@ -258,34 +277,10 @@ export async function executeConsoleAction(params: { targetId, }, })) as { ok?: boolean; targetId?: string; messages?: unknown[] }; - const wrapped = wrapBrowserExternalJson({ - kind: "console", - payload: result, - includeWarning: false, - }); - return { - content: [{ type: "text" as const, text: wrapped.wrappedText }], - details: { - ...wrapped.safeDetails, - targetId: typeof result.targetId === "string" ? result.targetId : undefined, - messageCount: Array.isArray(result.messages) ? 
result.messages.length : undefined, - }, - }; + return formatConsoleToolResult(result); } const result = await browserConsoleMessages(baseUrl, { level, targetId, profile }); - const wrapped = wrapBrowserExternalJson({ - kind: "console", - payload: result, - includeWarning: false, - }); - return { - content: [{ type: "text" as const, text: wrapped.wrappedText }], - details: { - ...wrapped.safeDetails, - targetId: result.targetId, - messageCount: result.messages.length, - }, - }; + return formatConsoleToolResult(result); } export async function executeActAction(params: { @@ -345,7 +340,7 @@ export async function executeActAction(params: { ); } throw new Error( - `Chrome tab not found (stale targetId?). Run action=tabs profile="chrome" and use one of the returned targetIds.`, + `Chrome tab not found (stale targetId?). Run action=tabs profile="chrome-relay" and use one of the returned targetIds.`, { cause: err }, ); } diff --git a/src/agents/tools/browser-tool.test.ts b/src/agents/tools/browser-tool.test.ts index 81996afb419..adaaea78221 100644 --- a/src/agents/tools/browser-tool.test.ts +++ b/src/agents/tools/browser-tool.test.ts @@ -54,7 +54,45 @@ const browserConfigMocks = vi.hoisted(() => ({ resolveBrowserConfig: vi.fn(() => ({ enabled: true, controlPort: 18791, + profiles: {}, + defaultProfile: "openclaw", })), + resolveProfile: vi.fn((resolved: Record, name: string) => { + const profile = (resolved.profiles as Record> | undefined)?.[ + name + ]; + if (!profile) { + return null; + } + const driver = + profile.driver === "extension" + ? "extension" + : profile.driver === "existing-session" + ? "existing-session" + : "openclaw"; + if (driver === "existing-session") { + return { + name, + driver, + cdpPort: 0, + cdpUrl: "", + cdpHost: "", + cdpIsLoopback: true, + color: typeof profile.color === "string" ? profile.color : "#FF4500", + attachOnly: true, + }; + } + return { + name, + driver, + cdpPort: typeof profile.cdpPort === "number" ? 
profile.cdpPort : 18792, + cdpUrl: typeof profile.cdpUrl === "string" ? profile.cdpUrl : "http://127.0.0.1:18792", + cdpHost: "127.0.0.1", + cdpIsLoopback: true, + color: typeof profile.color === "string" ? profile.color : "#FF4500", + attachOnly: profile.attachOnly === true, + }; + }), })); vi.mock("../../browser/config.js", () => browserConfigMocks); @@ -117,9 +155,27 @@ function mockSingleBrowserProxyNode() { function resetBrowserToolMocks() { vi.clearAllMocks(); configMocks.loadConfig.mockReturnValue({ browser: {} }); + browserConfigMocks.resolveBrowserConfig.mockReturnValue({ + enabled: true, + controlPort: 18791, + profiles: {}, + defaultProfile: "openclaw", + }); nodesUtilsMocks.listNodes.mockResolvedValue([]); } +function setResolvedBrowserProfiles( + profiles: Record>, + defaultProfile = "openclaw", +) { + browserConfigMocks.resolveBrowserConfig.mockReturnValue({ + enabled: true, + controlPort: 18791, + profiles, + defaultProfile, + }); +} + function registerBrowserToolAfterEachReset() { afterEach(() => { resetBrowserToolMocks(); @@ -231,26 +287,91 @@ describe("browser tool snapshot maxChars", () => { expect(opts?.mode).toBeUndefined(); }); - it("defaults to host when using profile=chrome (even in sandboxed sessions)", async () => { + it("defaults to host when using profile=chrome-relay (even in sandboxed sessions)", async () => { + setResolvedBrowserProfiles({ + "chrome-relay": { + driver: "extension", + cdpUrl: "http://127.0.0.1:18792", + color: "#0066CC", + }, + }); const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); - await tool.execute?.("call-1", { action: "snapshot", profile: "chrome", snapshotFormat: "ai" }); + await tool.execute?.("call-1", { + action: "snapshot", + profile: "chrome-relay", + snapshotFormat: "ai", + }); expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( undefined, expect.objectContaining({ - profile: "chrome", + profile: "chrome-relay", }), ); }); - it("lets the server choose snapshot 
format when the user does not request one", async () => { - const tool = createBrowserTool(); - await tool.execute?.("call-1", { action: "snapshot", profile: "chrome" }); + it("defaults to host when using profile=user (even in sandboxed sessions)", async () => { + setResolvedBrowserProfiles({ + user: { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + }); + const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); + await tool.execute?.("call-1", { + action: "snapshot", + profile: "user", + snapshotFormat: "ai", + }); expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( undefined, expect.objectContaining({ - profile: "chrome", + profile: "user", + }), + ); + }); + + it("defaults to host for custom existing-session profiles too", async () => { + setResolvedBrowserProfiles({ + "chrome-live": { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + }); + const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); + await tool.execute?.("call-1", { + action: "snapshot", + profile: "chrome-live", + snapshotFormat: "ai", + }); + + expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( + undefined, + expect.objectContaining({ + profile: "chrome-live", + }), + ); + }); + + it('rejects profile="user" with target="sandbox"', async () => { + setResolvedBrowserProfiles({ + user: { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + }); + const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); + + await expect( + tool.execute?.("call-1", { + action: "snapshot", + profile: "user", + target: "sandbox", + snapshotFormat: "ai", + }), + ).rejects.toThrow(/profile="user" cannot use the sandbox browser/i); + }); + + it("lets the server choose snapshot format when the user does not request one", async () => { + const tool = createBrowserTool(); + await tool.execute?.("call-1", { action: "snapshot", profile: "chrome-relay" }); + + 
expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( + undefined, + expect.objectContaining({ + profile: "chrome-relay", }), ); const opts = browserClientMocks.browserSnapshot.mock.calls.at(-1)?.[1] as @@ -317,14 +438,21 @@ describe("browser tool snapshot maxChars", () => { expect(gatewayMocks.callGatewayTool).not.toHaveBeenCalled(); }); - it("keeps chrome profile on host when node proxy is available", async () => { + it("keeps chrome-relay profile on host when node proxy is available", async () => { mockSingleBrowserProxyNode(); + setResolvedBrowserProfiles({ + "chrome-relay": { + driver: "extension", + cdpUrl: "http://127.0.0.1:18792", + color: "#0066CC", + }, + }); const tool = createBrowserTool(); - await tool.execute?.("call-1", { action: "status", profile: "chrome" }); + await tool.execute?.("call-1", { action: "status", profile: "chrome-relay" }); expect(browserClientMocks.browserStatus).toHaveBeenCalledWith( undefined, - expect.objectContaining({ profile: "chrome" }), + expect.objectContaining({ profile: "chrome-relay" }), ); expect(gatewayMocks.callGatewayTool).not.toHaveBeenCalled(); }); @@ -617,7 +745,7 @@ describe("browser tool external content wrapping", () => { describe("browser tool act stale target recovery", () => { registerBrowserToolAfterEachReset(); - it("retries safe chrome act once without targetId when exactly one tab remains", async () => { + it("retries safe chrome-relay act once without targetId when exactly one tab remains", async () => { browserActionsMocks.browserAct .mockRejectedValueOnce(new Error("404: tab not found")) .mockResolvedValueOnce({ ok: true }); @@ -626,7 +754,7 @@ describe("browser tool act stale target recovery", () => { const tool = createBrowserTool(); const result = await tool.execute?.("call-1", { action: "act", - profile: "chrome", + profile: "chrome-relay", request: { kind: "hover", targetId: "stale-tab", @@ -639,18 +767,18 @@ describe("browser tool act stale target recovery", () => { 1, undefined, 
expect.objectContaining({ targetId: "stale-tab", kind: "hover", ref: "btn-1" }), - expect.objectContaining({ profile: "chrome" }), + expect.objectContaining({ profile: "chrome-relay" }), ); expect(browserActionsMocks.browserAct).toHaveBeenNthCalledWith( 2, undefined, expect.not.objectContaining({ targetId: expect.anything() }), - expect.objectContaining({ profile: "chrome" }), + expect.objectContaining({ profile: "chrome-relay" }), ); expect(result?.details).toMatchObject({ ok: true }); }); - it("does not retry mutating chrome act requests without targetId", async () => { + it("does not retry mutating chrome-relay act requests without targetId", async () => { browserActionsMocks.browserAct.mockRejectedValueOnce(new Error("404: tab not found")); browserClientMocks.browserTabs.mockResolvedValueOnce([{ targetId: "only-tab" }]); @@ -658,14 +786,14 @@ describe("browser tool act stale target recovery", () => { await expect( tool.execute?.("call-1", { action: "act", - profile: "chrome", + profile: "chrome-relay", request: { kind: "click", targetId: "stale-tab", ref: "btn-1", }, }), - ).rejects.toThrow(/Run action=tabs profile="chrome"/i); + ).rejects.toThrow(/Run action=tabs profile="chrome-relay"/i); expect(browserActionsMocks.browserAct).toHaveBeenCalledTimes(1); }); diff --git a/src/agents/tools/browser-tool.ts b/src/agents/tools/browser-tool.ts index 200013ff1a7..8cb57435100 100644 --- a/src/agents/tools/browser-tool.ts +++ b/src/agents/tools/browser-tool.ts @@ -16,8 +16,9 @@ import { browserStatus, browserStop, } from "../../browser/client.js"; -import { resolveBrowserConfig } from "../../browser/config.js"; +import { resolveBrowserConfig, resolveProfile } from "../../browser/config.js"; import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "../../browser/paths.js"; +import { getBrowserProfileCapabilities } from "../../browser/profile-capabilities.js"; import { applyBrowserProxyPaths, persistBrowserProxyFiles } from "../../browser/proxy-files.js"; import 
{ trackSessionBrowserTab, @@ -278,6 +279,24 @@ function resolveBrowserBaseUrl(params: { return undefined; } +function shouldPreferHostForProfile(profileName: string | undefined) { + if (!profileName) { + return false; + } + const cfg = loadConfig(); + const resolved = resolveBrowserConfig(cfg.browser, cfg); + const profile = resolveProfile(resolved, profileName); + if (!profile) { + return false; + } + const capabilities = getBrowserProfileCapabilities(profile); + return capabilities.requiresRelay || capabilities.usesChromeMcp; +} + +function isHostOnlyProfileName(profileName: string | undefined) { + return profileName === "user" || profileName === "chrome-relay"; +} + export function createBrowserTool(opts?: { sandboxBridgeUrl?: string; allowHostControl?: boolean; @@ -291,10 +310,12 @@ export function createBrowserTool(opts?: { name: "browser", description: [ "Control the browser via OpenClaw's browser control server (status/start/stop/profiles/tabs/open/snapshot/screenshot/actions).", - 'Profiles: use profile="chrome" for Chrome extension relay takeover (your existing Chrome tabs). Use profile="openclaw" for the isolated openclaw-managed browser.', - 'If the user mentions the Chrome extension / Browser Relay / toolbar button / “attach tab”, ALWAYS use profile="chrome" (do not ask which profile).', + "Browser choice: omit profile by default for the isolated OpenClaw-managed browser (`openclaw`).", + 'For the logged-in user browser on the local host, prefer profile="user". Use it only when existing logins/cookies matter and the user is present to click/approve any browser attach prompt.', + 'Use profile="chrome-relay" only for the Chrome extension / Browser Relay / toolbar-button attach-tab flow, or when the user explicitly asks for the extension relay.', + 'If the user mentions the Chrome extension / Browser Relay / toolbar button / “attach tab”, ALWAYS prefer profile="chrome-relay". 
Otherwise prefer profile="user" over the extension relay for user-browser work.', 'When a node-hosted browser proxy is available, the tool may auto-route to it. Pin a node with node= or target="node".', - "Chrome extension relay needs an attached tab: user must click the OpenClaw Browser Relay toolbar icon on the tab (badge ON). If no tab is connected, ask them to attach it.", + 'User-browser flows need user interaction: profile="user" may require approving a browser attach prompt; profile="chrome-relay" needs the user to click the OpenClaw Browser Relay toolbar icon on the tab (badge ON). If user presence is unclear, ask first.', "When using refs from snapshot (e.g. e12), keep the same tab: prefer passing targetId from the snapshot response into subsequent actions (act/click/type/etc).", 'For stable, self-resolving refs across calls, use snapshot with refs="aria" (Playwright aria-ref ids). Default refs="role" are role+name-based.', "Use snapshot+act for UI automation. Avoid act:wait by default; use only in exceptional cases when no reliable UI state exists.", @@ -312,9 +333,18 @@ export function createBrowserTool(opts?: { if (requestedNode && target && target !== "node") { throw new Error('node is only supported with target="node".'); } - - if (!target && !requestedNode && profile === "chrome") { - // Chrome extension relay takeover is a host Chrome feature; prefer host unless explicitly targeting a node. + if (isHostOnlyProfileName(profile)) { + if (requestedNode || target === "node") { + throw new Error(`profile="${profile}" only supports the local host browser.`); + } + if (target === "sandbox") { + throw new Error( + `profile="${profile}" cannot use the sandbox browser; use target="host" or omit target.`, + ); + } + } + if (!target && !requestedNode && shouldPreferHostForProfile(profile)) { + // Local host user-browser profiles should not silently bind to sandbox/node browsers. 
target = "host"; } diff --git a/src/agents/tools/common.ts b/src/agents/tools/common.ts index 19cca2d7927..81d3f4efc00 100644 --- a/src/agents/tools/common.ts +++ b/src/agents/tools/common.ts @@ -1,6 +1,7 @@ import fs from "node:fs/promises"; import type { AgentTool, AgentToolResult } from "@mariozechner/pi-agent-core"; import { detectMime } from "../../media/mime.js"; +import { readSnakeCaseParamRaw } from "../../param-key.js"; import type { ImageSanitizationLimits } from "../image-sanitization.js"; import { sanitizeToolResultImages } from "../tool-images.js"; @@ -53,22 +54,8 @@ export function createActionGate>( }; } -function toSnakeCaseKey(key: string): string { - return key - .replace(/([A-Z]+)([A-Z][a-z])/g, "$1_$2") - .replace(/([a-z0-9])([A-Z])/g, "$1_$2") - .toLowerCase(); -} - function readParamRaw(params: Record, key: string): unknown { - if (Object.hasOwn(params, key)) { - return params[key]; - } - const snakeKey = toSnakeCaseKey(key); - if (snakeKey !== key && Object.hasOwn(params, snakeKey)) { - return params[snakeKey]; - } - return undefined; + return readSnakeCaseParamRaw(params, key); } export function readStringParam( diff --git a/src/agents/tools/discord-actions-guild.ts b/src/agents/tools/discord-actions-guild.ts index 5fb10c87820..ba0ba300985 100644 --- a/src/agents/tools/discord-actions-guild.ts +++ b/src/agents/tools/discord-actions-guild.ts @@ -60,6 +60,13 @@ async function runRoleMutation(params: { await params.mutate({ guildId, userId, roleId }); } +function readChannelPermissionTarget(params: Record) { + return { + channelId: readStringParam(params, "channelId", { required: true }), + targetId: readStringParam(params, "targetId", { required: true }), + }; +} + export async function handleDiscordGuildAction( action: string, params: Record, @@ -453,10 +460,7 @@ export async function handleDiscordGuildAction( if (!isActionEnabled("channels")) { throw new Error("Discord channel management is disabled."); } - const channelId = 
readStringParam(params, "channelId", { - required: true, - }); - const targetId = readStringParam(params, "targetId", { required: true }); + const { channelId, targetId } = readChannelPermissionTarget(params); const targetTypeRaw = readStringParam(params, "targetType", { required: true, }); @@ -489,10 +493,7 @@ export async function handleDiscordGuildAction( if (!isActionEnabled("channels")) { throw new Error("Discord channel management is disabled."); } - const channelId = readStringParam(params, "channelId", { - required: true, - }); - const targetId = readStringParam(params, "targetId", { required: true }); + const { channelId, targetId } = readChannelPermissionTarget(params); if (accountId) { await removeChannelPermissionDiscord(channelId, targetId, { accountId }); } else { diff --git a/src/agents/tools/image-tool.test.ts b/src/agents/tools/image-tool.test.ts index 78a7754e84a..bcec7f32de7 100644 --- a/src/agents/tools/image-tool.test.ts +++ b/src/agents/tools/image-tool.test.ts @@ -48,6 +48,19 @@ async function withTempWorkspacePng( } } +function registerImageToolEnvReset(priorFetch: typeof global.fetch, keys: string[]) { + beforeEach(() => { + for (const key of keys) { + vi.stubEnv(key, ""); + } + }); + + afterEach(() => { + vi.unstubAllEnvs(); + global.fetch = priorFetch; + }); +} + function stubMinimaxOkFetch() { const fetch = vi.fn().mockResolvedValue({ ok: true, @@ -229,24 +242,18 @@ function findSchemaUnionKeywords(schema: unknown, path = "root"): string[] { describe("image tool implicit imageModel config", () => { const priorFetch = global.fetch; - - beforeEach(() => { - vi.stubEnv("OPENAI_API_KEY", ""); - vi.stubEnv("ANTHROPIC_API_KEY", ""); - vi.stubEnv("ANTHROPIC_OAUTH_TOKEN", ""); - vi.stubEnv("MINIMAX_API_KEY", ""); - vi.stubEnv("ZAI_API_KEY", ""); - vi.stubEnv("Z_AI_API_KEY", ""); + registerImageToolEnvReset(priorFetch, [ + "OPENAI_API_KEY", + "ANTHROPIC_API_KEY", + "ANTHROPIC_OAUTH_TOKEN", + "MINIMAX_API_KEY", + "ZAI_API_KEY", + "Z_AI_API_KEY", 
// Avoid implicit Copilot provider discovery hitting the network in tests. - vi.stubEnv("COPILOT_GITHUB_TOKEN", ""); - vi.stubEnv("GH_TOKEN", ""); - vi.stubEnv("GITHUB_TOKEN", ""); - }); - - afterEach(() => { - vi.unstubAllEnvs(); - global.fetch = priorFetch; - }); + "COPILOT_GITHUB_TOKEN", + "GH_TOKEN", + "GITHUB_TOKEN", + ]); it("stays disabled without auth when no pairing is possible", async () => { await withTempAgentDir(async (agentDir) => { @@ -683,18 +690,12 @@ describe("image tool MiniMax VLM routing", () => { const pngB64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/woAAn8B9FD5fHAAAAAASUVORK5CYII="; const priorFetch = global.fetch; - - beforeEach(() => { - vi.stubEnv("MINIMAX_API_KEY", ""); - vi.stubEnv("COPILOT_GITHUB_TOKEN", ""); - vi.stubEnv("GH_TOKEN", ""); - vi.stubEnv("GITHUB_TOKEN", ""); - }); - - afterEach(() => { - vi.unstubAllEnvs(); - global.fetch = priorFetch; - }); + registerImageToolEnvReset(priorFetch, [ + "MINIMAX_API_KEY", + "COPILOT_GITHUB_TOKEN", + "GH_TOKEN", + "GITHUB_TOKEN", + ]); async function createMinimaxVlmFixture(baseResp: { status_code: number; status_msg: string }) { const fetch = stubMinimaxFetch(baseResp, baseResp.status_code === 0 ? 
"ok" : ""); diff --git a/src/agents/tools/memory-tool.citations.test.ts b/src/agents/tools/memory-tool.citations.test.ts index 0fe84c6f5fa..ea097658ecf 100644 --- a/src/agents/tools/memory-tool.citations.test.ts +++ b/src/agents/tools/memory-tool.citations.test.ts @@ -6,24 +6,14 @@ import { setMemorySearchImpl, type MemoryReadParams, } from "../../../test/helpers/memory-tool-manager-mock.js"; -import type { OpenClawConfig } from "../../config/config.js"; -import { createMemoryGetTool, createMemorySearchTool } from "./memory-tool.js"; - -function asOpenClawConfig(config: Partial): OpenClawConfig { - return config as OpenClawConfig; -} - -function createToolConfig() { - return asOpenClawConfig({ agents: { list: [{ id: "main", default: true }] } }); -} - -function createMemoryGetToolOrThrow(config: OpenClawConfig = createToolConfig()) { - const tool = createMemoryGetTool({ config }); - if (!tool) { - throw new Error("tool missing"); - } - return tool; -} +import { + asOpenClawConfig, + createAutoCitationsMemorySearchTool, + createDefaultMemoryToolConfig, + createMemoryGetToolOrThrow, + createMemorySearchToolOrThrow, + expectUnavailableMemorySearchDetails, +} from "./memory-tool.test-helpers.js"; beforeEach(() => { resetMemoryToolMockState({ @@ -49,10 +39,7 @@ describe("memory search citations", () => { memory: { citations: "on" }, agents: { list: [{ id: "main", default: true }] }, }); - const tool = createMemorySearchTool({ config: cfg }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_citations_on", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string; citation?: string }> }; expect(details.results[0]?.snippet).toMatch(/Source: MEMORY.md#L5-L7/); @@ -65,10 +52,7 @@ describe("memory search citations", () => { memory: { citations: "off" }, agents: { list: [{ id: "main", default: true }] }, }); - const tool = 
createMemorySearchTool({ config: cfg }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_citations_off", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string; citation?: string }> }; expect(details.results[0]?.snippet).not.toMatch(/Source:/); @@ -81,10 +65,7 @@ describe("memory search citations", () => { memory: { citations: "on", backend: "qmd", qmd: { limits: { maxInjectedChars: 20 } } }, agents: { list: [{ id: "main", default: true }] }, }); - const tool = createMemorySearchTool({ config: cfg }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_citations_qmd", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string; citation?: string }> }; expect(details.results[0]?.snippet.length).toBeLessThanOrEqual(20); @@ -92,17 +73,7 @@ describe("memory search citations", () => { it("honors auto mode for direct chats", async () => { setMemoryBackend("builtin"); - const cfg = asOpenClawConfig({ - memory: { citations: "auto" }, - agents: { list: [{ id: "main", default: true }] }, - }); - const tool = createMemorySearchTool({ - config: cfg, - agentSessionKey: "agent:main:discord:dm:u123", - }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createAutoCitationsMemorySearchTool("agent:main:discord:dm:u123"); const result = await tool.execute("auto_mode_direct", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string }> }; expect(details.results[0]?.snippet).toMatch(/Source:/); @@ -110,17 +81,7 @@ describe("memory search citations", () => { it("suppresses citations for auto mode in group chats", async () => { setMemoryBackend("builtin"); - const cfg = asOpenClawConfig({ - memory: { citations: "auto" }, - agents: { list: [{ id: "main", 
default: true }] }, - }); - const tool = createMemorySearchTool({ - config: cfg, - agentSessionKey: "agent:main:discord:group:c123", - }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createAutoCitationsMemorySearchTool("agent:main:discord:group:c123"); const result = await tool.execute("auto_mode_group", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string }> }; expect(details.results[0]?.snippet).not.toMatch(/Source:/); @@ -133,18 +94,11 @@ describe("memory tools", () => { throw new Error("openai embeddings failed: 429 insufficient_quota"); }); - const cfg = { agents: { list: [{ id: "main", default: true }] } }; - const tool = createMemorySearchTool({ config: cfg }); - expect(tool).not.toBeNull(); - if (!tool) { - throw new Error("tool missing"); - } + const cfg = createDefaultMemoryToolConfig(); + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_1", { query: "hello" }); - expect(result.details).toEqual({ - results: [], - disabled: true, - unavailable: true, + expectUnavailableMemorySearchDetails(result.details, { error: "openai embeddings failed: 429 insufficient_quota", warning: "Memory search is unavailable because the embedding provider quota is exhausted.", action: "Top up or switch embedding provider, then retry memory_search.", diff --git a/src/agents/tools/memory-tool.test-helpers.ts b/src/agents/tools/memory-tool.test-helpers.ts new file mode 100644 index 00000000000..9a1d0e455f3 --- /dev/null +++ b/src/agents/tools/memory-tool.test-helpers.ts @@ -0,0 +1,63 @@ +import { expect } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import { createMemoryGetTool, createMemorySearchTool } from "./memory-tool.js"; + +export function asOpenClawConfig(config: Partial): OpenClawConfig { + return config as OpenClawConfig; +} + +export function createDefaultMemoryToolConfig(): OpenClawConfig { + return asOpenClawConfig({ 
agents: { list: [{ id: "main", default: true }] } }); +} + +export function createMemorySearchToolOrThrow(params?: { + config?: OpenClawConfig; + agentSessionKey?: string; +}) { + const tool = createMemorySearchTool({ + config: params?.config ?? createDefaultMemoryToolConfig(), + ...(params?.agentSessionKey ? { agentSessionKey: params.agentSessionKey } : {}), + }); + if (!tool) { + throw new Error("tool missing"); + } + return tool; +} + +export function createMemoryGetToolOrThrow( + config: OpenClawConfig = createDefaultMemoryToolConfig(), +) { + const tool = createMemoryGetTool({ config }); + if (!tool) { + throw new Error("tool missing"); + } + return tool; +} + +export function createAutoCitationsMemorySearchTool(agentSessionKey: string) { + return createMemorySearchToolOrThrow({ + config: asOpenClawConfig({ + memory: { citations: "auto" }, + agents: { list: [{ id: "main", default: true }] }, + }), + agentSessionKey, + }); +} + +export function expectUnavailableMemorySearchDetails( + details: unknown, + params: { + error: string; + warning: string; + action: string; + }, +) { + expect(details).toEqual({ + results: [], + disabled: true, + unavailable: true, + error: params.error, + warning: params.warning, + action: params.action, + }); +} diff --git a/src/agents/tools/memory-tool.test.ts b/src/agents/tools/memory-tool.test.ts index de907c01632..e8764bd9f46 100644 --- a/src/agents/tools/memory-tool.test.ts +++ b/src/agents/tools/memory-tool.test.ts @@ -1,9 +1,12 @@ -import { beforeEach, describe, expect, it } from "vitest"; +import { beforeEach, describe, it } from "vitest"; import { resetMemoryToolMockState, setMemorySearchImpl, } from "../../../test/helpers/memory-tool-manager-mock.js"; -import { createMemorySearchTool } from "./memory-tool.js"; +import { + createMemorySearchToolOrThrow, + expectUnavailableMemorySearchDetails, +} from "./memory-tool.test-helpers.js"; describe("memory_search unavailable payloads", () => { beforeEach(() => { @@ -15,18 +18,9 @@ 
describe("memory_search unavailable payloads", () => { throw new Error("openai embeddings failed: 429 insufficient_quota"); }); - const tool = createMemorySearchTool({ - config: { agents: { list: [{ id: "main", default: true }] } }, - }); - if (!tool) { - throw new Error("tool missing"); - } - + const tool = createMemorySearchToolOrThrow(); const result = await tool.execute("quota", { query: "hello" }); - expect(result.details).toEqual({ - results: [], - disabled: true, - unavailable: true, + expectUnavailableMemorySearchDetails(result.details, { error: "openai embeddings failed: 429 insufficient_quota", warning: "Memory search is unavailable because the embedding provider quota is exhausted.", action: "Top up or switch embedding provider, then retry memory_search.", @@ -38,18 +32,9 @@ describe("memory_search unavailable payloads", () => { throw new Error("embedding provider timeout"); }); - const tool = createMemorySearchTool({ - config: { agents: { list: [{ id: "main", default: true }] } }, - }); - if (!tool) { - throw new Error("tool missing"); - } - + const tool = createMemorySearchToolOrThrow(); const result = await tool.execute("generic", { query: "hello" }); - expect(result.details).toEqual({ - results: [], - disabled: true, - unavailable: true, + expectUnavailableMemorySearchDetails(result.details, { error: "embedding provider timeout", warning: "Memory search is unavailable due to an embedding/provider error.", action: "Check embedding provider configuration and retry memory_search.", diff --git a/src/agents/tools/memory-tool.ts b/src/agents/tools/memory-tool.ts index c0d595b21a2..bb5086bdb15 100644 --- a/src/agents/tools/memory-tool.ts +++ b/src/agents/tools/memory-tool.ts @@ -37,106 +37,135 @@ function resolveMemoryToolContext(options: { config?: OpenClawConfig; agentSessi return { cfg, agentId }; } +async function getMemoryManagerContext(params: { cfg: OpenClawConfig; agentId: string }): Promise< + | { + manager: NonNullable>["manager"]>; + } + | { + 
error: string | undefined; + } +> { + const { manager, error } = await getMemorySearchManager({ + cfg: params.cfg, + agentId: params.agentId, + }); + return manager ? { manager } : { error }; +} + +function createMemoryTool(params: { + options: { + config?: OpenClawConfig; + agentSessionKey?: string; + }; + label: string; + name: string; + description: string; + parameters: typeof MemorySearchSchema | typeof MemoryGetSchema; + execute: (ctx: { cfg: OpenClawConfig; agentId: string }) => AnyAgentTool["execute"]; +}): AnyAgentTool | null { + const ctx = resolveMemoryToolContext(params.options); + if (!ctx) { + return null; + } + return { + label: params.label, + name: params.name, + description: params.description, + parameters: params.parameters, + execute: params.execute(ctx), + }; +} + export function createMemorySearchTool(options: { config?: OpenClawConfig; agentSessionKey?: string; }): AnyAgentTool | null { - const ctx = resolveMemoryToolContext(options); - if (!ctx) { - return null; - } - const { cfg, agentId } = ctx; - return { + return createMemoryTool({ + options, label: "Memory Search", name: "memory_search", description: "Mandatory recall step: semantically search MEMORY.md + memory/*.md (and optional session transcripts) before answering questions about prior work, decisions, dates, people, preferences, or todos; returns top snippets with path + lines. 
If response has disabled=true, memory retrieval is unavailable and should be surfaced to the user.", parameters: MemorySearchSchema, - execute: async (_toolCallId, params) => { - const query = readStringParam(params, "query", { required: true }); - const maxResults = readNumberParam(params, "maxResults"); - const minScore = readNumberParam(params, "minScore"); - const { manager, error } = await getMemorySearchManager({ - cfg, - agentId, - }); - if (!manager) { - return jsonResult(buildMemorySearchUnavailableResult(error)); - } - try { - const citationsMode = resolveMemoryCitationsMode(cfg); - const includeCitations = shouldIncludeCitations({ - mode: citationsMode, - sessionKey: options.agentSessionKey, - }); - const rawResults = await manager.search(query, { - maxResults, - minScore, - sessionKey: options.agentSessionKey, - }); - const status = manager.status(); - const decorated = decorateCitations(rawResults, includeCitations); - const resolved = resolveMemoryBackendConfig({ cfg, agentId }); - const results = - status.backend === "qmd" - ? clampResultsByInjectedChars(decorated, resolved.qmd?.limits.maxInjectedChars) - : decorated; - const searchMode = (status.custom as { searchMode?: string } | undefined)?.searchMode; - return jsonResult({ - results, - provider: status.provider, - model: status.model, - fallback: status.fallback, - citations: citationsMode, - mode: searchMode, - }); - } catch (err) { - const message = err instanceof Error ? 
err.message : String(err); - return jsonResult(buildMemorySearchUnavailableResult(message)); - } - }, - }; + execute: + ({ cfg, agentId }) => + async (_toolCallId, params) => { + const query = readStringParam(params, "query", { required: true }); + const maxResults = readNumberParam(params, "maxResults"); + const minScore = readNumberParam(params, "minScore"); + const memory = await getMemoryManagerContext({ cfg, agentId }); + if ("error" in memory) { + return jsonResult(buildMemorySearchUnavailableResult(memory.error)); + } + try { + const citationsMode = resolveMemoryCitationsMode(cfg); + const includeCitations = shouldIncludeCitations({ + mode: citationsMode, + sessionKey: options.agentSessionKey, + }); + const rawResults = await memory.manager.search(query, { + maxResults, + minScore, + sessionKey: options.agentSessionKey, + }); + const status = memory.manager.status(); + const decorated = decorateCitations(rawResults, includeCitations); + const resolved = resolveMemoryBackendConfig({ cfg, agentId }); + const results = + status.backend === "qmd" + ? clampResultsByInjectedChars(decorated, resolved.qmd?.limits.maxInjectedChars) + : decorated; + const searchMode = (status.custom as { searchMode?: string } | undefined)?.searchMode; + return jsonResult({ + results, + provider: status.provider, + model: status.model, + fallback: status.fallback, + citations: citationsMode, + mode: searchMode, + }); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + return jsonResult(buildMemorySearchUnavailableResult(message)); + } + }, + }); } export function createMemoryGetTool(options: { config?: OpenClawConfig; agentSessionKey?: string; }): AnyAgentTool | null { - const ctx = resolveMemoryToolContext(options); - if (!ctx) { - return null; - } - const { cfg, agentId } = ctx; - return { + return createMemoryTool({ + options, label: "Memory Get", name: "memory_get", description: "Safe snippet read from MEMORY.md or memory/*.md with optional from/lines; use after memory_search to pull only the needed lines and keep context small.", parameters: MemoryGetSchema, - execute: async (_toolCallId, params) => { - const relPath = readStringParam(params, "path", { required: true }); - const from = readNumberParam(params, "from", { integer: true }); - const lines = readNumberParam(params, "lines", { integer: true }); - const { manager, error } = await getMemorySearchManager({ - cfg, - agentId, - }); - if (!manager) { - return jsonResult({ path: relPath, text: "", disabled: true, error }); - } - try { - const result = await manager.readFile({ - relPath, - from: from ?? undefined, - lines: lines ?? undefined, - }); - return jsonResult(result); - } catch (err) { - const message = err instanceof Error ? err.message : String(err); - return jsonResult({ path: relPath, text: "", disabled: true, error: message }); - } - }, - }; + execute: + ({ cfg, agentId }) => + async (_toolCallId, params) => { + const relPath = readStringParam(params, "path", { required: true }); + const from = readNumberParam(params, "from", { integer: true }); + const lines = readNumberParam(params, "lines", { integer: true }); + const memory = await getMemoryManagerContext({ cfg, agentId }); + if ("error" in memory) { + return jsonResult({ path: relPath, text: "", disabled: true, error: memory.error }); + } + try { + const result = await memory.manager.readFile({ + relPath, + from: from ?? undefined, + lines: lines ?? 
undefined, + }); + return jsonResult(result); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + return jsonResult({ path: relPath, text: "", disabled: true, error: message }); + } + }, + }); } function resolveMemoryCitationsMode(cfg: OpenClawConfig): MemoryCitationsMode { diff --git a/src/agents/tools/sessions-helpers.ts b/src/agents/tools/sessions-helpers.ts index e638438758c..74393ef44ad 100644 --- a/src/agents/tools/sessions-helpers.ts +++ b/src/agents/tools/sessions-helpers.ts @@ -12,6 +12,7 @@ export { resolveSandboxedSessionToolContext, resolveSessionToolsVisibility, } from "./sessions-access.js"; +import { resolveSandboxedSessionToolContext } from "./sessions-access.js"; export type { SessionReferenceResolution } from "./sessions-resolution.js"; export { isRequesterSpawnedSessionVisible, @@ -27,6 +28,7 @@ export { shouldResolveSessionIdInput, shouldVerifyRequesterSpawnedSessionVisibility, } from "./sessions-resolution.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { extractTextFromChatContent } from "../../shared/chat-content.js"; import { sanitizeUserFacingText } from "../pi-embedded-helpers.js"; import { @@ -73,6 +75,22 @@ function normalizeKey(value?: string) { return trimmed ? trimmed : undefined; } +export function resolveSessionToolContext(opts?: { + agentSessionKey?: string; + sandboxed?: boolean; + config?: OpenClawConfig; +}) { + const cfg = opts?.config ?? 
loadConfig(); + return { + cfg, + ...resolveSandboxedSessionToolContext({ + cfg, + agentSessionKey: opts?.agentSessionKey, + sandboxed: opts?.sandboxed, + }), + }; +} + export function classifySessionKind(params: { key: string; gatewayKind?: string | null; diff --git a/src/agents/tools/sessions-history-tool.ts b/src/agents/tools/sessions-history-tool.ts index 3d5deeadcdb..a3e8d4d9461 100644 --- a/src/agents/tools/sessions-history-tool.ts +++ b/src/agents/tools/sessions-history-tool.ts @@ -1,5 +1,5 @@ import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { capArrayByJsonBytes } from "../../gateway/session-utils.fs.js"; import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js"; @@ -169,6 +169,7 @@ function enforceSessionsHistoryHardCap(params: { export function createSessionsHistoryTool(opts?: { agentSessionKey?: string; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Session History", @@ -180,7 +181,7 @@ export function createSessionsHistoryTool(opts?: { const sessionKeyParam = readStringParam(params, "sessionKey", { required: true, }); - const cfg = loadConfig(); + const cfg = opts?.config ?? 
loadConfig(); const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/agents/tools/sessions-list-tool.ts b/src/agents/tools/sessions-list-tool.ts index 0cba87e5653..ff3f56212d2 100644 --- a/src/agents/tools/sessions-list-tool.ts +++ b/src/agents/tools/sessions-list-tool.ts @@ -1,6 +1,6 @@ import path from "node:path"; import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { resolveSessionFilePath, resolveSessionFilePathOptions, @@ -33,6 +33,7 @@ const SessionsListToolSchema = Type.Object({ export function createSessionsListTool(opts?: { agentSessionKey?: string; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Sessions", @@ -41,7 +42,7 @@ export function createSessionsListTool(opts?: { parameters: SessionsListToolSchema, execute: async (_toolCallId, args) => { const params = args as Record; - const cfg = loadConfig(); + const cfg = opts?.config ?? loadConfig(); const { mainKey, alias, requesterInternalKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/agents/tools/sessions-send-helpers.ts b/src/agents/tools/sessions-send-helpers.ts index 94dc3fe0c6a..d987932bb60 100644 --- a/src/agents/tools/sessions-send-helpers.ts +++ b/src/agents/tools/sessions-send-helpers.ts @@ -70,13 +70,13 @@ export function resolveAnnounceTargetFromKey(sessionKey: string): AnnounceTarget }; } -export function buildAgentToAgentMessageContext(params: { +function buildAgentSessionLines(params: { requesterSessionKey?: string; requesterChannel?: string; targetSessionKey: string; -}) { - const lines = [ - "Agent-to-agent message context:", + targetChannel?: string; +}): string[] { + return [ params.requesterSessionKey ? 
`Agent 1 (requester) session: ${params.requesterSessionKey}.` : undefined, @@ -84,7 +84,18 @@ export function buildAgentToAgentMessageContext(params: { ? `Agent 1 (requester) channel: ${params.requesterChannel}.` : undefined, `Agent 2 (target) session: ${params.targetSessionKey}.`, - ].filter(Boolean); + params.targetChannel ? `Agent 2 (target) channel: ${params.targetChannel}.` : undefined, + ].filter((line): line is string => Boolean(line)); +} + +export function buildAgentToAgentMessageContext(params: { + requesterSessionKey?: string; + requesterChannel?: string; + targetSessionKey: string; +}) { + const lines = ["Agent-to-agent message context:", ...buildAgentSessionLines(params)].filter( + Boolean, + ); return lines.join("\n"); } @@ -103,14 +114,7 @@ export function buildAgentToAgentReplyContext(params: { "Agent-to-agent reply step:", `Current agent: ${currentLabel}.`, `Turn ${params.turn} of ${params.maxTurns}.`, - params.requesterSessionKey - ? `Agent 1 (requester) session: ${params.requesterSessionKey}.` - : undefined, - params.requesterChannel - ? `Agent 1 (requester) channel: ${params.requesterChannel}.` - : undefined, - `Agent 2 (target) session: ${params.targetSessionKey}.`, - params.targetChannel ? `Agent 2 (target) channel: ${params.targetChannel}.` : undefined, + ...buildAgentSessionLines(params), `If you want to stop the ping-pong, reply exactly "${REPLY_SKIP_TOKEN}".`, ].filter(Boolean); return lines.join("\n"); @@ -127,14 +131,7 @@ export function buildAgentToAgentAnnounceContext(params: { }) { const lines = [ "Agent-to-agent announce step:", - params.requesterSessionKey - ? `Agent 1 (requester) session: ${params.requesterSessionKey}.` - : undefined, - params.requesterChannel - ? `Agent 1 (requester) channel: ${params.requesterChannel}.` - : undefined, - `Agent 2 (target) session: ${params.targetSessionKey}.`, - params.targetChannel ? 
`Agent 2 (target) channel: ${params.targetChannel}.` : undefined, + ...buildAgentSessionLines(params), `Original request: ${params.originalMessage}`, params.roundOneReply ? `Round 1 reply: ${params.roundOneReply}` diff --git a/src/agents/tools/sessions-send-tool.ts b/src/agents/tools/sessions-send-tool.ts index 82eff0adf7a..b2873e5cd1f 100644 --- a/src/agents/tools/sessions-send-tool.ts +++ b/src/agents/tools/sessions-send-tool.ts @@ -1,6 +1,6 @@ import crypto from "node:crypto"; import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import type { OpenClawConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { normalizeAgentId, resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; import { SESSION_LABEL_MAX_LENGTH } from "../../sessions/session-label.js"; @@ -17,7 +17,7 @@ import { extractAssistantText, resolveEffectiveSessionToolsVisibility, resolveSessionReference, - resolveSandboxedSessionToolContext, + resolveSessionToolContext, resolveVisibleSessionReference, stripToolMessages, } from "./sessions-helpers.js"; @@ -32,10 +32,41 @@ const SessionsSendToolSchema = Type.Object({ timeoutSeconds: Type.Optional(Type.Number({ minimum: 0 })), }); +async function startAgentRun(params: { + runId: string; + sendParams: Record; + sessionKey: string; +}): Promise<{ ok: true; runId: string } | { ok: false; result: ReturnType }> { + try { + const response = await callGateway<{ runId: string }>({ + method: "agent", + params: params.sendParams, + timeoutMs: 10_000, + }); + return { + ok: true, + runId: typeof response?.runId === "string" && response.runId ? response.runId : params.runId, + }; + } catch (err) { + const messageText = + err instanceof Error ? err.message : typeof err === "string" ? 
err : "error"; + return { + ok: false, + result: jsonResult({ + runId: params.runId, + status: "error", + error: messageText, + sessionKey: params.sessionKey, + }), + }; + } +} + export function createSessionsSendTool(opts?: { agentSessionKey?: string; agentChannel?: GatewayMessageChannel; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Session Send", @@ -46,13 +77,8 @@ export function createSessionsSendTool(opts?: { execute: async (_toolCallId, args) => { const params = args as Record; const message = readStringParam(params, "message", { required: true }); - const cfg = loadConfig(); - const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } = - resolveSandboxedSessionToolContext({ - cfg, - agentSessionKey: opts?.agentSessionKey, - sandboxed: opts?.sandboxed, - }); + const { cfg, mainKey, alias, effectiveRequesterKey, restrictToSpawned } = + resolveSessionToolContext(opts); const a2aPolicy = createAgentToAgentPolicy(cfg); const sessionVisibility = resolveEffectiveSessionToolsVisibility({ @@ -251,54 +277,34 @@ export function createSessionsSendTool(opts?: { }; if (timeoutSeconds === 0) { - try { - const response = await callGateway<{ runId: string }>({ - method: "agent", - params: sendParams, - timeoutMs: 10_000, - }); - if (typeof response?.runId === "string" && response.runId) { - runId = response.runId; - } - startA2AFlow(undefined, runId); - return jsonResult({ - runId, - status: "accepted", - sessionKey: displayKey, - delivery, - }); - } catch (err) { - const messageText = - err instanceof Error ? err.message : typeof err === "string" ? 
err : "error"; - return jsonResult({ - runId, - status: "error", - error: messageText, - sessionKey: displayKey, - }); - } - } - - try { - const response = await callGateway<{ runId: string }>({ - method: "agent", - params: sendParams, - timeoutMs: 10_000, - }); - if (typeof response?.runId === "string" && response.runId) { - runId = response.runId; - } - } catch (err) { - const messageText = - err instanceof Error ? err.message : typeof err === "string" ? err : "error"; - return jsonResult({ + const start = await startAgentRun({ runId, - status: "error", - error: messageText, + sendParams, sessionKey: displayKey, }); + if (!start.ok) { + return start.result; + } + runId = start.runId; + startA2AFlow(undefined, runId); + return jsonResult({ + runId, + status: "accepted", + sessionKey: displayKey, + delivery, + }); } + const start = await startAgentRun({ + runId, + sendParams, + sessionKey: displayKey, + }); + if (!start.ok) { + return start.result; + } + runId = start.runId; + let waitStatus: string | undefined; let waitError: string | undefined; try { diff --git a/src/agents/tools/web-fetch.cf-markdown.test.ts b/src/agents/tools/web-fetch.cf-markdown.test.ts index f22dc10df52..4dd22714574 100644 --- a/src/agents/tools/web-fetch.cf-markdown.test.ts +++ b/src/agents/tools/web-fetch.cf-markdown.test.ts @@ -4,6 +4,7 @@ import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; import { createBaseWebFetchToolConfig, installWebFetchSsrfHarness, + makeFetchHeaders, } from "./web-fetch.test-harness.js"; import "./web-fetch.test-mocks.js"; import { createWebFetchTool } from "./web-tools.js"; @@ -11,17 +12,14 @@ import { createWebFetchTool } from "./web-tools.js"; const baseToolConfig = createBaseWebFetchToolConfig(); installWebFetchSsrfHarness(); -function makeHeaders(map: Record): { get: (key: string) => string | null } { - return { - get: (key) => map[key.toLowerCase()] ?? 
null, - }; -} - function markdownResponse(body: string, extraHeaders: Record = {}): Response { return { ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/markdown; charset=utf-8", ...extraHeaders }), + headers: makeFetchHeaders({ + "content-type": "text/markdown; charset=utf-8", + ...extraHeaders, + }), text: async () => body, } as Response; } @@ -30,7 +28,7 @@ function htmlResponse(body: string): Response { return { ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/html; charset=utf-8" }), + headers: makeFetchHeaders({ "content-type": "text/html; charset=utf-8" }), text: async () => body, } as Response; } diff --git a/src/agents/tools/web-fetch.ssrf.test.ts b/src/agents/tools/web-fetch.ssrf.test.ts index eb868068ece..c0489c9b5ba 100644 --- a/src/agents/tools/web-fetch.ssrf.test.ts +++ b/src/agents/tools/web-fetch.ssrf.test.ts @@ -1,21 +1,16 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; import { type FetchMock, withFetchPreconnect } from "../../test-utils/fetch-mock.js"; +import { makeFetchHeaders } from "./web-fetch.test-harness.js"; const lookupMock = vi.fn(); const resolvePinnedHostname = ssrf.resolvePinnedHostname; -function makeHeaders(map: Record): { get: (key: string) => string | null } { - return { - get: (key) => map[key.toLowerCase()] ?? 
null, - }; -} - function redirectResponse(location: string): Response { return { ok: false, status: 302, - headers: makeHeaders({ location }), + headers: makeFetchHeaders({ location }), body: { cancel: vi.fn() }, } as unknown as Response; } @@ -24,7 +19,7 @@ function textResponse(body: string): Response { return { ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => body, } as unknown as Response; } diff --git a/src/agents/tools/web-fetch.test-harness.ts b/src/agents/tools/web-fetch.test-harness.ts index c86a028e155..1bd8e33e89b 100644 --- a/src/agents/tools/web-fetch.test-harness.ts +++ b/src/agents/tools/web-fetch.test-harness.ts @@ -1,6 +1,14 @@ import { afterEach, beforeEach, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; +export function makeFetchHeaders(map: Record): { + get: (key: string) => string | null; +} { + return { + get: (key) => map[key.toLowerCase()] ?? 
null, + }; +} + export function installWebFetchSsrfHarness() { const lookupMock = vi.fn(); const resolvePinnedHostname = ssrf.resolvePinnedHostname; diff --git a/src/agents/tools/web-tools.fetch.test.ts b/src/agents/tools/web-tools.fetch.test.ts index 9da57a35b45..e9bfabbee7a 100644 --- a/src/agents/tools/web-tools.fetch.test.ts +++ b/src/agents/tools/web-tools.fetch.test.ts @@ -1,7 +1,9 @@ import { EnvHttpProxyAgent } from "undici"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; +import { resolveRequestUrl } from "../../plugin-sdk/request-url.js"; import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; +import { makeFetchHeaders } from "./web-fetch.test-harness.js"; import { createWebFetchTool } from "./web-tools.js"; type MockResponse = { @@ -13,18 +15,12 @@ type MockResponse = { json?: () => Promise; }; -function makeHeaders(map: Record): { get: (key: string) => string | null } { - return { - get: (key) => map[key.toLowerCase()] ?? null, - }; -} - function htmlResponse(html: string, url = "https://example.com/"): MockResponse { return { ok: true, status: 200, url, - headers: makeHeaders({ "content-type": "text/html; charset=utf-8" }), + headers: makeFetchHeaders({ "content-type": "text/html; charset=utf-8" }), text: async () => html, }; } @@ -62,7 +58,7 @@ function textResponse( ok: true, status: 200, url, - headers: makeHeaders({ "content-type": contentType }), + headers: makeFetchHeaders({ "content-type": contentType }), text: async () => text, }; } @@ -77,23 +73,10 @@ function errorHtmlResponse( ok: false, status, url, - headers: contentType ? makeHeaders({ "content-type": contentType }) : makeHeaders({}), + headers: contentType ? 
makeFetchHeaders({ "content-type": contentType }) : makeFetchHeaders({}), text: async () => html, }; } -function requestUrl(input: RequestInfo | URL): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - if ("url" in input && typeof input.url === "string") { - return input.url; - } - return ""; -} - function installMockFetch( impl: (input: RequestInfo | URL, init?: RequestInit) => Promise, ) { @@ -125,9 +108,9 @@ function installPlainTextFetch(text: string) { Promise.resolve({ ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => text, - url: requestUrl(input), + url: resolveRequestUrl(input), } as Response), ); } @@ -215,9 +198,9 @@ describe("web_fetch extraction fallbacks", () => { Promise.resolve({ ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => longText, - url: requestUrl(input), + url: resolveRequestUrl(input), } as Response), ); @@ -277,9 +260,9 @@ describe("web_fetch extraction fallbacks", () => { Promise.resolve({ ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => "proxy body", - url: requestUrl(input), + url: resolveRequestUrl(input), } as Response), ); const tool = createFetchTool({ firecrawl: { enabled: false } }); @@ -298,7 +281,7 @@ describe("web_fetch extraction fallbacks", () => { it("falls back to firecrawl when readability returns no content", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve(firecrawlResponse("firecrawl content")) as Promise; } @@ -316,7 +299,7 @@ describe("web_fetch extraction 
fallbacks", () => { it("normalizes firecrawl Authorization header values", async () => { const fetchSpy = installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev/v2/scrape")) { return Promise.resolve(firecrawlResponse("firecrawl normalized")) as Promise; } @@ -333,7 +316,7 @@ describe("web_fetch extraction fallbacks", () => { expect(result?.details).toMatchObject({ extractor: "firecrawl" }); const firecrawlCall = fetchSpy.mock.calls.find((call) => - requestUrl(call[0]).includes("/v2/scrape"), + resolveRequestUrl(call[0]).includes("/v2/scrape"), ); expect(firecrawlCall).toBeTruthy(); const init = firecrawlCall?.[1]; @@ -345,7 +328,7 @@ describe("web_fetch extraction fallbacks", () => { installMockFetch( (input: RequestInfo | URL) => Promise.resolve( - htmlResponse("hi", requestUrl(input)), + htmlResponse("hi", resolveRequestUrl(input)), ) as Promise, ); @@ -361,7 +344,7 @@ describe("web_fetch extraction fallbacks", () => { it("throws when readability is empty and firecrawl fails", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve(firecrawlError()) as Promise; } @@ -378,14 +361,14 @@ describe("web_fetch extraction fallbacks", () => { it("uses firecrawl when direct fetch fails", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve(firecrawlResponse("firecrawl fallback", url)) as Promise; } return Promise.resolve({ ok: false, status: 403, - headers: makeHeaders({ "content-type": "text/html" }), + headers: makeFetchHeaders({ "content-type": "text/html" }), text: async () => "blocked", } as Response); }); @@ -404,7 +387,7 @@ describe("web_fetch extraction fallbacks", () => { 
const large = "a".repeat(80_000); installMockFetch( (input: RequestInfo | URL) => - Promise.resolve(textResponse(large, requestUrl(input))) as Promise, + Promise.resolve(textResponse(large, resolveRequestUrl(input))) as Promise, ); const tool = createFetchTool({ @@ -432,7 +415,7 @@ describe("web_fetch extraction fallbacks", () => { installMockFetch( (input: RequestInfo | URL) => Promise.resolve( - errorHtmlResponse(html, 404, requestUrl(input), "Text/HTML; charset=utf-8"), + errorHtmlResponse(html, 404, resolveRequestUrl(input), "Text/HTML; charset=utf-8"), ) as Promise, ); @@ -455,7 +438,9 @@ describe("web_fetch extraction fallbacks", () => { "Oops

Oops

"; installMockFetch( (input: RequestInfo | URL) => - Promise.resolve(errorHtmlResponse(html, 500, requestUrl(input), null)) as Promise, + Promise.resolve( + errorHtmlResponse(html, 500, resolveRequestUrl(input), null), + ) as Promise, ); const tool = createFetchTool({ firecrawl: { enabled: false } }); @@ -471,7 +456,7 @@ describe("web_fetch extraction fallbacks", () => { it("wraps firecrawl error details", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve({ ok: false, diff --git a/src/agents/venice-models.test.ts b/src/agents/venice-models.test.ts index 5a93568f9b7..ed1769cf044 100644 --- a/src/agents/venice-models.test.ts +++ b/src/agents/venice-models.test.ts @@ -59,6 +59,55 @@ function makeModelsResponse(id: string): Response { ); } +type ModelSpecOverride = { + id: string; + availableContextTokens?: number; + maxCompletionTokens?: number; + capabilities?: { + supportsReasoning?: boolean; + supportsVision?: boolean; + supportsFunctionCalling?: boolean; + }; + includeModelSpec?: boolean; +}; + +function makeModelRow(params: ModelSpecOverride) { + if (params.includeModelSpec === false) { + return { id: params.id }; + } + return { + id: params.id, + model_spec: { + name: params.id, + privacy: "private", + ...(params.availableContextTokens === undefined + ? {} + : { availableContextTokens: params.availableContextTokens }), + ...(params.maxCompletionTokens === undefined + ? {} + : { maxCompletionTokens: params.maxCompletionTokens }), + ...(params.capabilities === undefined ? 
{} : { capabilities: params.capabilities }), + }, + }; +} + +function stubVeniceModelsFetch(rows: ModelSpecOverride[]) { + const fetchMock = vi.fn( + async () => + new Response( + JSON.stringify({ + data: rows.map((row) => makeModelRow(row)), + }), + { + status: 200, + headers: { "Content-Type": "application/json" }, + }, + ), + ); + vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + return fetchMock; +} + describe("venice-models", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -96,34 +145,18 @@ describe("venice-models", () => { }); it("uses API maxCompletionTokens for catalog models when present", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "llama-3.3-70b", - model_spec: { - name: "llama-3.3-70b", - privacy: "private", - availableContextTokens: 131072, - maxCompletionTokens: 2048, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "llama-3.3-70b", + availableContextTokens: 131072, + maxCompletionTokens: 2048, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const llama = models.find((m) => m.id === "llama-3.3-70b"); @@ -131,33 +164,17 @@ describe("venice-models", () => { }); it("retains catalog maxTokens when the API omits maxCompletionTokens", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "qwen3-235b-a22b-instruct-2507", - model_spec: { - name: "qwen3-235b-a22b-instruct-2507", - privacy: "private", - availableContextTokens: 131072, - capabilities: { - supportsReasoning: false, - supportsVision: false, - 
supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "qwen3-235b-a22b-instruct-2507", + availableContextTokens: 131072, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const qwen = models.find((m) => m.id === "qwen3-235b-a22b-instruct-2507"); @@ -172,34 +189,18 @@ describe("venice-models", () => { }); it("uses a conservative bounded maxTokens value for new models", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "new-model-2026", - model_spec: { - name: "new-model-2026", - privacy: "private", - availableContextTokens: 50_000, - maxCompletionTokens: 200_000, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: false, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "new-model-2026", + availableContextTokens: 50_000, + maxCompletionTokens: 200_000, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: false, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const newModel = models.find((m) => m.id === "new-model-2026"); @@ -209,33 +210,17 @@ describe("venice-models", () => { }); it("caps new-model maxTokens to the fallback context window when API context is missing", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "new-model-without-context", - model_spec: { - name: "new-model-without-context", - privacy: "private", - 
maxCompletionTokens: 200_000, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "new-model-without-context", + maxCompletionTokens: 200_000, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const newModel = models.find((m) => m.id === "new-model-without-context"); @@ -244,37 +229,17 @@ describe("venice-models", () => { }); it("ignores missing capabilities on partial metadata instead of aborting discovery", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "llama-3.3-70b", - model_spec: { - name: "llama-3.3-70b", - privacy: "private", - availableContextTokens: 131072, - maxCompletionTokens: 2048, - }, - }, - { - id: "new-model-partial", - model_spec: { - name: "new-model-partial", - privacy: "private", - maxCompletionTokens: 2048, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "llama-3.3-70b", + availableContextTokens: 131072, + maxCompletionTokens: 2048, + }, + { + id: "new-model-partial", + maxCompletionTokens: 2048, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const knownModel = models.find((m) => m.id === "llama-3.3-70b"); @@ -287,37 +252,19 @@ describe("venice-models", () => { }); it("keeps known models discoverable when a row omits model_spec", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "llama-3.3-70b", - }, - { - id: 
"new-model-valid", - model_spec: { - name: "new-model-valid", - privacy: "private", - availableContextTokens: 32_000, - maxCompletionTokens: 2_048, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { id: "llama-3.3-70b", includeModelSpec: false }, + { + id: "new-model-valid", + availableContextTokens: 32_000, + maxCompletionTokens: 2_048, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const knownModel = models.find((m) => m.id === "llama-3.3-70b"); diff --git a/src/auto-reply/reply.stage-sandbox-media.scp-remote-path.test.ts b/src/auto-reply/reply.stage-sandbox-media.scp-remote-path.test.ts new file mode 100644 index 00000000000..d5d628421d9 --- /dev/null +++ b/src/auto-reply/reply.stage-sandbox-media.scp-remote-path.test.ts @@ -0,0 +1,75 @@ +import fs from "node:fs/promises"; +import { basename, join } from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + createSandboxMediaContexts, + createSandboxMediaStageConfig, + withSandboxMediaTempHome, +} from "./stage-sandbox-media.test-harness.js"; + +const sandboxMocks = vi.hoisted(() => ({ + ensureSandboxWorkspaceForSession: vi.fn(), +})); +const childProcessMocks = vi.hoisted(() => ({ + spawn: vi.fn(), +})); + +vi.mock("../agents/sandbox.js", () => sandboxMocks); +vi.mock("node:child_process", () => childProcessMocks); + +import { stageSandboxMedia } from "./reply/stage-sandbox-media.js"; + +afterEach(() => { + vi.restoreAllMocks(); + childProcessMocks.spawn.mockClear(); +}); + +function createRemoteStageParams(home: string): { + cfg: ReturnType; + workspaceDir: string; + 
sessionKey: string; + remoteCacheDir: string; +} { + const sessionKey = "agent:main:main"; + vi.mocked(sandboxMocks.ensureSandboxWorkspaceForSession).mockResolvedValue(null); + return { + cfg: createSandboxMediaStageConfig(home), + workspaceDir: join(home, "openclaw"), + sessionKey, + remoteCacheDir: join(home, ".openclaw", "media", "remote-cache", sessionKey), + }; +} + +function createRemoteContexts(remotePath: string) { + const { ctx, sessionCtx } = createSandboxMediaContexts(remotePath); + ctx.Provider = "imessage"; + ctx.MediaRemoteHost = "user@gateway-host"; + sessionCtx.Provider = "imessage"; + sessionCtx.MediaRemoteHost = "user@gateway-host"; + return { ctx, sessionCtx }; +} + +describe("stageSandboxMedia scp remote paths", () => { + it("rejects remote attachment filenames with shell metacharacters before spawning scp", async () => { + await withSandboxMediaTempHome("openclaw-triggers-", async (home) => { + const { cfg, workspaceDir, sessionKey, remoteCacheDir } = createRemoteStageParams(home); + const remotePath = "/Users/demo/Library/Messages/Attachments/ab/cd/evil$(touch pwned).jpg"; + const { ctx, sessionCtx } = createRemoteContexts(remotePath); + + await stageSandboxMedia({ + ctx, + sessionCtx, + cfg, + sessionKey, + workspaceDir, + }); + + expect(childProcessMocks.spawn).not.toHaveBeenCalled(); + await expect(fs.stat(join(remoteCacheDir, basename(remotePath)))).rejects.toThrow(); + expect(ctx.MediaPath).toBe(remotePath); + expect(sessionCtx.MediaPath).toBe(remotePath); + expect(ctx.MediaUrl).toBe(remotePath); + expect(sessionCtx.MediaUrl).toBe(remotePath); + }); + }); +}); diff --git a/src/auto-reply/reply/agent-runner-execution.ts b/src/auto-reply/reply/agent-runner-execution.ts index ff3838a1936..27a31c2387a 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -40,8 +40,7 @@ import { } from "../tokens.js"; import type { GetReplyOptions, ReplyPayload } from "../types.js"; import { - 
buildEmbeddedRunBaseParams, - buildEmbeddedRunContexts, + buildEmbeddedRunExecutionParams, resolveModelFallbackOptions, } from "./agent-runner-utils.js"; import { type BlockReplyPipeline } from "./block-reply-pipeline.js"; @@ -308,20 +307,17 @@ export async function runAgentTurnWithFallback(params: { } })(); } - const { authProfile, embeddedContext, senderContext } = buildEmbeddedRunContexts({ - run: params.followupRun.run, - sessionCtx: params.sessionCtx, - hasRepliedRef: params.opts?.hasRepliedRef, - provider, - }); - const runBaseParams = buildEmbeddedRunBaseParams({ - run: params.followupRun.run, - provider, - model, - runId, - authProfile, - allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe, - }); + const { embeddedContext, senderContext, runBaseParams } = buildEmbeddedRunExecutionParams( + { + run: params.followupRun.run, + sessionCtx: params.sessionCtx, + hasRepliedRef: params.opts?.hasRepliedRef, + provider, + runId, + allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe, + model, + }, + ); return (async () => { const result = await runEmbeddedPiAgent({ ...embeddedContext, diff --git a/src/auto-reply/reply/agent-runner-memory.ts b/src/auto-reply/reply/agent-runner-memory.ts index 623bb9c1490..d52c6d05761 100644 --- a/src/auto-reply/reply/agent-runner-memory.ts +++ b/src/auto-reply/reply/agent-runner-memory.ts @@ -27,8 +27,7 @@ import type { TemplateContext } from "../templating.js"; import type { VerboseLevel } from "../thinking.js"; import type { GetReplyOptions } from "../types.js"; import { - buildEmbeddedRunBaseParams, - buildEmbeddedRunContexts, + buildEmbeddedRunExecutionParams, resolveModelFallbackOptions, } from "./agent-runner-utils.js"; import { @@ -482,18 +481,13 @@ export async function runMemoryFlushIfNeeded(params: { ...resolveModelFallbackOptions(params.followupRun.run), runId: flushRunId, run: async (provider, model, runOptions) => { - const { authProfile, embeddedContext, senderContext } = 
buildEmbeddedRunContexts({ + const { embeddedContext, senderContext, runBaseParams } = buildEmbeddedRunExecutionParams({ run: params.followupRun.run, sessionCtx: params.sessionCtx, hasRepliedRef: params.opts?.hasRepliedRef, provider, - }); - const runBaseParams = buildEmbeddedRunBaseParams({ - run: params.followupRun.run, - provider, model, runId: flushRunId, - authProfile, allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe, }); const result = await runEmbeddedPiAgent({ diff --git a/src/auto-reply/reply/agent-runner-payloads.test.ts b/src/auto-reply/reply/agent-runner-payloads.test.ts index 26f23d7a42c..db237848e3c 100644 --- a/src/auto-reply/reply/agent-runner-payloads.test.ts +++ b/src/auto-reply/reply/agent-runner-payloads.test.ts @@ -9,6 +9,20 @@ const baseParams = { replyToMode: "off" as const, }; +async function expectSameTargetRepliesSuppressed(params: { provider: string; to: string }) { + const { replyPayloads } = await buildReplyPayloads({ + ...baseParams, + payloads: [{ text: "hello world!" }], + messageProvider: "heartbeat", + originatingChannel: "feishu", + originatingTo: "ou_abc123", + messagingToolSentTexts: ["different message"], + messagingToolSentTargets: [{ tool: "message", provider: params.provider, to: params.to }], + }); + + expect(replyPayloads).toHaveLength(0); +} + describe("buildReplyPayloads media filter integration", () => { it("strips media URL from payload when in messagingToolSentMediaUrls", async () => { const { replyPayloads } = await buildReplyPayloads({ @@ -142,31 +156,11 @@ describe("buildReplyPayloads media filter integration", () => { }); it("suppresses same-target replies when message tool target provider is generic", async () => { - const { replyPayloads } = await buildReplyPayloads({ - ...baseParams, - payloads: [{ text: "hello world!" 
}], - messageProvider: "heartbeat", - originatingChannel: "feishu", - originatingTo: "ou_abc123", - messagingToolSentTexts: ["different message"], - messagingToolSentTargets: [{ tool: "message", provider: "message", to: "ou_abc123" }], - }); - - expect(replyPayloads).toHaveLength(0); + await expectSameTargetRepliesSuppressed({ provider: "message", to: "ou_abc123" }); }); it("suppresses same-target replies when target provider is channel alias", async () => { - const { replyPayloads } = await buildReplyPayloads({ - ...baseParams, - payloads: [{ text: "hello world!" }], - messageProvider: "heartbeat", - originatingChannel: "feishu", - originatingTo: "ou_abc123", - messagingToolSentTexts: ["different message"], - messagingToolSentTargets: [{ tool: "message", provider: "lark", to: "ou_abc123" }], - }); - - expect(replyPayloads).toHaveLength(0); + await expectSameTargetRepliesSuppressed({ provider: "lark", to: "ou_abc123" }); }); it("drops all final payloads when block pipeline streamed successfully", async () => { diff --git a/src/auto-reply/reply/agent-runner-utils.ts b/src/auto-reply/reply/agent-runner-utils.ts index 99b2b6392f6..c6e71a9bab0 100644 --- a/src/auto-reply/reply/agent-runner-utils.ts +++ b/src/auto-reply/reply/agent-runner-utils.ts @@ -263,6 +263,31 @@ export function buildEmbeddedRunContexts(params: { }; } +export function buildEmbeddedRunExecutionParams(params: { + run: FollowupRun["run"]; + sessionCtx: TemplateContext; + hasRepliedRef: { value: boolean } | undefined; + provider: string; + model: string; + runId: string; + allowTransientCooldownProbe?: boolean; +}) { + const { authProfile, embeddedContext, senderContext } = buildEmbeddedRunContexts(params); + const runBaseParams = buildEmbeddedRunBaseParams({ + run: params.run, + provider: params.provider, + model: params.model, + runId: params.runId, + authProfile, + allowTransientCooldownProbe: params.allowTransientCooldownProbe, + }); + return { + embeddedContext, + senderContext, + runBaseParams, + 
}; +} + export function resolveProviderScopedAuthProfile(params: { provider: string; primaryProvider: string; diff --git a/src/auto-reply/reply/agent-runner.media-paths.test.ts b/src/auto-reply/reply/agent-runner.media-paths.test.ts index f5658287aff..a759c539bdc 100644 --- a/src/auto-reply/reply/agent-runner.media-paths.test.ts +++ b/src/auto-reply/reply/agent-runner.media-paths.test.ts @@ -2,7 +2,7 @@ import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; import type { TemplateContext } from "../templating.js"; import type { FollowupRun, QueueSettings } from "./queue.js"; -import { createMockTypingController } from "./test-helpers.js"; +import { createMockFollowupRun, createMockTypingController } from "./test-helpers.js"; const runEmbeddedPiAgentMock = vi.fn(); const runWithModelFallbackMock = vi.fn(); @@ -72,32 +72,15 @@ describe("runReplyAgent media path normalization", () => { const result = await runReplyAgent({ commandBody: "generate", - followupRun: { + followupRun: createMockFollowupRun({ prompt: "generate", - enqueuedAt: Date.now(), run: { agentId: "main", agentDir: "/tmp/agent", - sessionId: "session", - sessionKey: "main", messageProvider: "telegram", - sessionFile: "/tmp/session.jsonl", workspaceDir: "/tmp/workspace", - config: {}, - provider: "anthropic", - model: "claude", - thinkLevel: "low", - verboseLevel: "off", - elevatedLevel: "off", - bashElevated: { - enabled: false, - allowed: false, - defaultLevel: "off", - }, - timeoutMs: 1_000, - blockReplyBreak: "message_end", }, - } as unknown as FollowupRun, + }) as unknown as FollowupRun, queueKey: "main", resolvedQueue: { mode: "interrupt" } as QueueSettings, shouldSteer: false, diff --git a/src/auto-reply/reply/block-streaming.ts b/src/auto-reply/reply/block-streaming.ts index 6d306b166c1..b24ee8cac1a 100644 --- a/src/auto-reply/reply/block-streaming.ts +++ b/src/auto-reply/reply/block-streaming.ts @@ -26,6 +26,22 @@ function normalizeChunkProvider(provider?: 
string): TextChunkProvider | undefine : undefined; } +function resolveProviderChunkContext( + cfg: OpenClawConfig | undefined, + provider?: string, + accountId?: string | null, +) { + const providerKey = normalizeChunkProvider(provider); + const providerId = providerKey ? normalizeChannelId(providerKey) : null; + const providerChunkLimit = providerId + ? getChannelDock(providerId)?.outbound?.textChunkLimit + : undefined; + const textLimit = resolveTextChunkLimit(cfg, providerKey, accountId, { + fallbackLimit: providerChunkLimit, + }); + return { providerKey, providerId, textLimit }; +} + type ProviderBlockStreamingConfig = { blockStreamingCoalesce?: BlockStreamingCoalesceConfig; accounts?: Record; @@ -97,14 +113,7 @@ export function resolveEffectiveBlockStreamingConfig(params: { chunking: BlockStreamingChunking; coalescing: BlockStreamingCoalescing; } { - const providerKey = normalizeChunkProvider(params.provider); - const providerId = providerKey ? normalizeChannelId(providerKey) : null; - const providerChunkLimit = providerId - ? getChannelDock(providerId)?.outbound?.textChunkLimit - : undefined; - const textLimit = resolveTextChunkLimit(params.cfg, providerKey, params.accountId, { - fallbackLimit: providerChunkLimit, - }); + const { textLimit } = resolveProviderChunkContext(params.cfg, params.provider, params.accountId); const chunkingDefaults = params.chunking ?? resolveBlockStreamingChunking(params.cfg, params.provider, params.accountId); const chunkingMax = clampPositiveInteger(params.maxChunkChars, chunkingDefaults.maxChars, { @@ -154,21 +163,13 @@ export function resolveBlockStreamingChunking( provider?: string, accountId?: string | null, ): BlockStreamingChunking { - const providerKey = normalizeChunkProvider(provider); - const providerConfigKey = providerKey; - const providerId = providerKey ? normalizeChannelId(providerKey) : null; - const providerChunkLimit = providerId - ? 
getChannelDock(providerId)?.outbound?.textChunkLimit - : undefined; - const textLimit = resolveTextChunkLimit(cfg, providerConfigKey, accountId, { - fallbackLimit: providerChunkLimit, - }); + const { providerKey, textLimit } = resolveProviderChunkContext(cfg, provider, accountId); const chunkCfg = cfg?.agents?.defaults?.blockStreamingChunk; // When chunkMode="newline", the outbound delivery splits on paragraph boundaries. // The block chunker should flush eagerly on \n\n boundaries during streaming, // regardless of minChars, so each paragraph is sent as its own message. - const chunkMode = resolveChunkMode(cfg, providerConfigKey, accountId); + const chunkMode = resolveChunkMode(cfg, providerKey, accountId); const maxRequested = Math.max(1, Math.floor(chunkCfg?.maxChars ?? DEFAULT_BLOCK_STREAM_MAX)); const maxChars = Math.max(1, Math.min(maxRequested, textLimit)); @@ -198,20 +199,15 @@ export function resolveBlockStreamingCoalescing( }, opts?: { chunkMode?: "length" | "newline" }, ): BlockStreamingCoalescing | undefined { - const providerKey = normalizeChunkProvider(provider); - const providerConfigKey = providerKey; + const { providerKey, providerId, textLimit } = resolveProviderChunkContext( + cfg, + provider, + accountId, + ); // Resolve the outbound chunkMode so the coalescer can flush on paragraph boundaries // when chunkMode="newline", matching the delivery-time splitting behavior. - const chunkMode = opts?.chunkMode ?? resolveChunkMode(cfg, providerConfigKey, accountId); - - const providerId = providerKey ? normalizeChannelId(providerKey) : null; - const providerChunkLimit = providerId - ? getChannelDock(providerId)?.outbound?.textChunkLimit - : undefined; - const textLimit = resolveTextChunkLimit(cfg, providerConfigKey, accountId, { - fallbackLimit: providerChunkLimit, - }); + const chunkMode = opts?.chunkMode ?? resolveChunkMode(cfg, providerKey, accountId); const providerDefaults = providerId ? 
getChannelDock(providerId)?.streaming?.blockStreamingCoalesceDefaults : undefined; diff --git a/src/auto-reply/reply/commands-acp/context.ts b/src/auto-reply/reply/commands-acp/context.ts index 16291713fda..84acb828015 100644 --- a/src/auto-reply/reply/commands-acp/context.ts +++ b/src/auto-reply/reply/commands-acp/context.ts @@ -1,40 +1,33 @@ import { buildTelegramTopicConversationId, + normalizeConversationText, parseTelegramChatIdFromTarget, } from "../../../acp/conversation-id.js"; import { DISCORD_THREAD_BINDING_CHANNEL } from "../../../channels/thread-bindings-policy.js"; import { resolveConversationIdFromTargets } from "../../../infra/outbound/conversation-id.js"; -import { parseAgentSessionKey } from "../../../routing/session-key.js"; import type { HandleCommandsParams } from "../commands-types.js"; +import { parseDiscordParentChannelFromSessionKey } from "../discord-parent-channel.js"; import { resolveTelegramConversationId } from "../telegram-context.js"; -function normalizeString(value: unknown): string { - if (typeof value === "string") { - return value.trim(); - } - if (typeof value === "number" || typeof value === "bigint" || typeof value === "boolean") { - return `${value}`.trim(); - } - return ""; -} - export function resolveAcpCommandChannel(params: HandleCommandsParams): string { const raw = params.ctx.OriginatingChannel ?? params.command.channel ?? params.ctx.Surface ?? params.ctx.Provider; - return normalizeString(raw).toLowerCase(); + return normalizeConversationText(raw).toLowerCase(); } export function resolveAcpCommandAccountId(params: HandleCommandsParams): string { - const accountId = normalizeString(params.ctx.AccountId); + const accountId = normalizeConversationText(params.ctx.AccountId); return accountId || "default"; } export function resolveAcpCommandThreadId(params: HandleCommandsParams): string | undefined { const threadId = - params.ctx.MessageThreadId != null ? 
normalizeString(String(params.ctx.MessageThreadId)) : ""; + params.ctx.MessageThreadId != null + ? normalizeConversationText(String(params.ctx.MessageThreadId)) + : ""; return threadId || undefined; } @@ -71,21 +64,8 @@ export function resolveAcpCommandConversationId(params: HandleCommandsParams): s }); } -function parseDiscordParentChannelFromSessionKey(raw: unknown): string | undefined { - const sessionKey = normalizeString(raw); - if (!sessionKey) { - return undefined; - } - const scoped = parseAgentSessionKey(sessionKey)?.rest ?? sessionKey.toLowerCase(); - const match = scoped.match(/(?:^|:)channel:([^:]+)$/); - if (!match?.[1]) { - return undefined; - } - return match[1]; -} - function parseDiscordParentChannelFromContext(raw: unknown): string | undefined { - const parentId = normalizeString(raw); + const parentId = normalizeConversationText(raw); if (!parentId) { return undefined; } diff --git a/src/auto-reply/reply/commands-allowlist.ts b/src/auto-reply/reply/commands-allowlist.ts index ffba3bf2505..fcecb0b31f3 100644 --- a/src/auto-reply/reply/commands-allowlist.ts +++ b/src/auto-reply/reply/commands-allowlist.ts @@ -1,10 +1,5 @@ import { getChannelDock } from "../../channels/dock.js"; -import { - authorizeConfigWrite, - canBypassConfigWritePolicy, - formatConfigWriteDeniedMessage, - resolveExplicitConfigWriteTarget, -} from "../../channels/plugins/config-writes.js"; +import { resolveExplicitConfigWriteTarget } from "../../channels/plugins/config-writes.js"; import { listPairingChannels } from "../../channels/plugins/pairing.js"; import type { ChannelId } from "../../channels/plugins/types.js"; import { normalizeChannelId } from "../../channels/registry.js"; @@ -36,6 +31,7 @@ import { resolveTelegramAccount } from "../../telegram/accounts.js"; import { resolveWhatsAppAccount } from "../../web/accounts.js"; import { rejectUnauthorizedCommand, requireCommandFlagEnabled } from "./command-gates.js"; import type { CommandHandler } from "./commands-types.js"; 
+import { resolveConfigWriteDeniedText } from "./config-write-authorization.js"; type AllowlistScope = "dm" | "group" | "all"; type AllowlistAction = "list" | "add" | "remove"; @@ -628,20 +624,19 @@ export const handleAllowlistCommand: CommandHandler = async (params, allowTextCo accountId: normalizedAccountId, writeTarget, } = resolveAccountTarget(parsedConfig, channelId, accountId); - const writeAuth = authorizeConfigWrite({ + const deniedText = resolveConfigWriteDeniedText({ cfg: params.cfg, - origin: { channelId, accountId: params.ctx.AccountId }, + channel: params.command.channel, + channelId, + accountId: params.ctx.AccountId, + gatewayClientScopes: params.ctx.GatewayClientScopes, target: writeTarget, - allowBypass: canBypassConfigWritePolicy({ - channel: params.command.channel, - gatewayClientScopes: params.ctx.GatewayClientScopes, - }), }); - if (!writeAuth.allowed) { + if (deniedText) { return { shouldContinue: false, reply: { - text: formatConfigWriteDeniedMessage({ result: writeAuth, fallbackChannelId: channelId }), + text: deniedText, }, }; } diff --git a/src/auto-reply/reply/commands-config.ts b/src/auto-reply/reply/commands-config.ts index 96b5a5d9be5..b40032758d3 100644 --- a/src/auto-reply/reply/commands-config.ts +++ b/src/auto-reply/reply/commands-config.ts @@ -1,9 +1,4 @@ -import { - authorizeConfigWrite, - canBypassConfigWritePolicy, - formatConfigWriteDeniedMessage, - resolveConfigWriteTargetFromPath, -} from "../../channels/plugins/config-writes.js"; +import { resolveConfigWriteTargetFromPath } from "../../channels/plugins/config-writes.js"; import { normalizeChannelId } from "../../channels/registry.js"; import { getConfigValueAtPath, @@ -31,6 +26,7 @@ import { } from "./command-gates.js"; import type { CommandHandler } from "./commands-types.js"; import { parseConfigCommand } from "./config-commands.js"; +import { resolveConfigWriteDeniedText } from "./config-write-authorization.js"; import { parseDebugCommand } from "./debug-commands.js"; 
export const handleConfigCommand: CommandHandler = async (params, allowTextCommands) => { @@ -84,20 +80,19 @@ export const handleConfigCommand: CommandHandler = async (params, allowTextComma } parsedWritePath = parsedPath.path; const channelId = params.command.channelId ?? normalizeChannelId(params.command.channel); - const writeAuth = authorizeConfigWrite({ + const deniedText = resolveConfigWriteDeniedText({ cfg: params.cfg, - origin: { channelId, accountId: params.ctx.AccountId }, + channel: params.command.channel, + channelId, + accountId: params.ctx.AccountId, + gatewayClientScopes: params.ctx.GatewayClientScopes, target: resolveConfigWriteTargetFromPath(parsedWritePath), - allowBypass: canBypassConfigWritePolicy({ - channel: params.command.channel, - gatewayClientScopes: params.ctx.GatewayClientScopes, - }), }); - if (!writeAuth.allowed) { + if (deniedText) { return { shouldContinue: false, reply: { - text: formatConfigWriteDeniedMessage({ result: writeAuth, fallbackChannelId: channelId }), + text: deniedText, }, }; } diff --git a/src/auto-reply/reply/commands-session-abort.ts b/src/auto-reply/reply/commands-session-abort.ts index e8abdb845d6..2991ede75cd 100644 --- a/src/auto-reply/reply/commands-session-abort.ts +++ b/src/auto-reply/reply/commands-session-abort.ts @@ -86,6 +86,23 @@ async function applyAbortTarget(params: { } } +function buildAbortTargetApplyParams( + params: Parameters[0], + abortTarget: AbortTarget, +) { + return { + abortTarget, + sessionStore: params.sessionStore, + storePath: params.storePath, + abortKey: params.command.abortKey, + abortCutoff: resolveAbortCutoffForTarget({ + ctx: params.ctx, + commandSessionKey: params.sessionKey, + targetSessionKey: abortTarget.key, + }), + }; +} + export const handleStopCommand: CommandHandler = async (params, allowTextCommands) => { if (!allowTextCommands) { return null; @@ -109,17 +126,7 @@ export const handleStopCommand: CommandHandler = async (params, allowTextCommand `stop: cleared 
followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, ); } - await applyAbortTarget({ - abortTarget, - sessionStore: params.sessionStore, - storePath: params.storePath, - abortKey: params.command.abortKey, - abortCutoff: resolveAbortCutoffForTarget({ - ctx: params.ctx, - commandSessionKey: params.sessionKey, - targetSessionKey: abortTarget.key, - }), - }); + await applyAbortTarget(buildAbortTargetApplyParams(params, abortTarget)); // Trigger internal hook for stop command const hookEvent = createInternalHookEvent( @@ -160,16 +167,6 @@ export const handleAbortTrigger: CommandHandler = async (params, allowTextComman sessionEntry: params.sessionEntry, sessionStore: params.sessionStore, }); - await applyAbortTarget({ - abortTarget, - sessionStore: params.sessionStore, - storePath: params.storePath, - abortKey: params.command.abortKey, - abortCutoff: resolveAbortCutoffForTarget({ - ctx: params.ctx, - commandSessionKey: params.sessionKey, - targetSessionKey: abortTarget.key, - }), - }); + await applyAbortTarget(buildAbortTargetApplyParams(params, abortTarget)); return { shouldContinue: false, reply: { text: "⚙️ Agent was aborted." 
} }; }; diff --git a/src/auto-reply/reply/commands-session-lifecycle.test.ts b/src/auto-reply/reply/commands-session-lifecycle.test.ts index 79882f13921..baf5addc60e 100644 --- a/src/auto-reply/reply/commands-session-lifecycle.test.ts +++ b/src/auto-reply/reply/commands-session-lifecycle.test.ts @@ -139,6 +139,21 @@ function createTelegramBinding(overrides?: Partial): Sessi }; } +function expectIdleTimeoutSetReply( + mock: ReturnType, + text: string, + idleTimeoutMs: number, + idleTimeoutLabel: string, +) { + expect(mock).toHaveBeenCalledWith({ + targetSessionKey: "agent:main:subagent:child", + accountId: "default", + idleTimeoutMs, + }); + expect(text).toContain(`Idle timeout set to ${idleTimeoutLabel}`); + expect(text).toContain("2026-02-20T02:00:00.000Z"); +} + function createFakeThreadBindingManager(binding: FakeBinding | null) { return { getByThreadId: vi.fn((_threadId: string) => binding), @@ -175,13 +190,12 @@ describe("/session idle and /session max-age", () => { const result = await handleSessionCommand(createDiscordCommandParams("/session idle 2h"), true); const text = result?.reply?.text ?? ""; - expect(hoisted.setThreadBindingIdleTimeoutBySessionKeyMock).toHaveBeenCalledWith({ - targetSessionKey: "agent:main:subagent:child", - accountId: "default", - idleTimeoutMs: 2 * 60 * 60 * 1000, - }); - expect(text).toContain("Idle timeout set to 2h"); - expect(text).toContain("2026-02-20T02:00:00.000Z"); + expectIdleTimeoutSetReply( + hoisted.setThreadBindingIdleTimeoutBySessionKeyMock, + text, + 2 * 60 * 60 * 1000, + "2h", + ); }); it("shows active idle timeout when no value is provided", async () => { @@ -248,13 +262,12 @@ describe("/session idle and /session max-age", () => { ); const text = result?.reply?.text ?? 
""; - expect(hoisted.setTelegramThreadBindingIdleTimeoutBySessionKeyMock).toHaveBeenCalledWith({ - targetSessionKey: "agent:main:subagent:child", - accountId: "default", - idleTimeoutMs: 2 * 60 * 60 * 1000, - }); - expect(text).toContain("Idle timeout set to 2h"); - expect(text).toContain("2026-02-20T02:00:00.000Z"); + expectIdleTimeoutSetReply( + hoisted.setTelegramThreadBindingIdleTimeoutBySessionKeyMock, + text, + 2 * 60 * 60 * 1000, + "2h", + ); }); it("reports Telegram max-age expiry from the original bind time", async () => { diff --git a/src/auto-reply/reply/config-write-authorization.ts b/src/auto-reply/reply/config-write-authorization.ts new file mode 100644 index 00000000000..a2c2142709f --- /dev/null +++ b/src/auto-reply/reply/config-write-authorization.ts @@ -0,0 +1,33 @@ +import { + authorizeConfigWrite, + canBypassConfigWritePolicy, + formatConfigWriteDeniedMessage, +} from "../../channels/plugins/config-writes.js"; +import type { ChannelId } from "../../channels/plugins/types.js"; +import type { OpenClawConfig } from "../../config/config.js"; + +export function resolveConfigWriteDeniedText(params: { + cfg: OpenClawConfig; + channel?: string | null; + channelId: ChannelId | null; + accountId?: string; + gatewayClientScopes?: string[]; + target: Parameters[0]["target"]; +}): string | null { + const writeAuth = authorizeConfigWrite({ + cfg: params.cfg, + origin: { channelId: params.channelId, accountId: params.accountId }, + target: params.target, + allowBypass: canBypassConfigWritePolicy({ + channel: params.channel ?? 
"", + gatewayClientScopes: params.gatewayClientScopes, + }), + }); + if (writeAuth.allowed) { + return null; + } + return formatConfigWriteDeniedMessage({ + result: writeAuth, + fallbackChannelId: params.channelId, + }); +} diff --git a/src/auto-reply/reply/directive-handling.auth.test.ts b/src/auto-reply/reply/directive-handling.auth.test.ts index 4faad0c3ee6..5e1248c8a61 100644 --- a/src/auto-reply/reply/directive-handling.auth.test.ts +++ b/src/auto-reply/reply/directive-handling.auth.test.ts @@ -4,6 +4,11 @@ import type { OpenClawConfig } from "../../config/config.js"; let mockStore: AuthProfileStore; let mockOrder: string[]; +const githubCopilotTokenRefProfile: AuthProfileStore["profiles"][string] = { + type: "token", + provider: "github-copilot", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, +}; vi.mock("../../agents/auth-health.js", () => ({ formatRemainingShort: () => "1h", @@ -39,6 +44,28 @@ vi.mock("../../agents/model-auth.js", () => ({ const { resolveAuthLabel } = await import("./directive-handling.auth.js"); +async function resolveRefOnlyAuthLabel(params: { + provider: string; + profileId: string; + profile: + | (AuthProfileStore["profiles"][string] & { type: "api_key" }) + | (AuthProfileStore["profiles"][string] & { type: "token" }); + mode: "compact" | "verbose"; +}) { + mockStore.profiles = { + [params.profileId]: params.profile, + }; + mockOrder = [params.profileId]; + + return resolveAuthLabel( + params.provider, + {} as OpenClawConfig, + "/tmp/models.json", + undefined, + params.mode, + ); +} + describe("resolveAuthLabel ref-aware labels", () => { beforeEach(() => { mockStore = { @@ -49,64 +76,38 @@ describe("resolveAuthLabel ref-aware labels", () => { }); it("shows api-key (ref) for keyRef-only profiles in compact mode", async () => { - mockStore.profiles = { - "openai:default": { + const result = await resolveRefOnlyAuthLabel({ + provider: "openai", + profileId: "openai:default", + profile: { type: "api_key", provider: 
"openai", keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, }, - }; - mockOrder = ["openai:default"]; - - const result = await resolveAuthLabel( - "openai", - {} as OpenClawConfig, - "/tmp/models.json", - undefined, - "compact", - ); + mode: "compact", + }); expect(result.label).toBe("openai:default api-key (ref)"); }); it("shows token (ref) for tokenRef-only profiles in compact mode", async () => { - mockStore.profiles = { - "github-copilot:default": { - type: "token", - provider: "github-copilot", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }; - mockOrder = ["github-copilot:default"]; - - const result = await resolveAuthLabel( - "github-copilot", - {} as OpenClawConfig, - "/tmp/models.json", - undefined, - "compact", - ); + const result = await resolveRefOnlyAuthLabel({ + provider: "github-copilot", + profileId: "github-copilot:default", + profile: githubCopilotTokenRefProfile, + mode: "compact", + }); expect(result.label).toBe("github-copilot:default token (ref)"); }); it("uses token:ref instead of token:missing in verbose mode", async () => { - mockStore.profiles = { - "github-copilot:default": { - type: "token", - provider: "github-copilot", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }; - mockOrder = ["github-copilot:default"]; - - const result = await resolveAuthLabel( - "github-copilot", - {} as OpenClawConfig, - "/tmp/models.json", - undefined, - "verbose", - ); + const result = await resolveRefOnlyAuthLabel({ + provider: "github-copilot", + profileId: "github-copilot:default", + profile: githubCopilotTokenRefProfile, + mode: "verbose", + }); expect(result.label).toContain("github-copilot:default=token:ref"); expect(result.label).not.toContain("token:missing"); diff --git a/src/auto-reply/reply/directive-handling.auth.ts b/src/auto-reply/reply/directive-handling.auth.ts index 26647d77c68..604e7473ae8 100644 --- a/src/auto-reply/reply/directive-handling.auth.ts +++ 
b/src/auto-reply/reply/directive-handling.auth.ts @@ -33,6 +33,22 @@ function resolveStoredCredentialLabel(params: { return "missing"; } +function formatExpirationLabel( + expires: unknown, + now: number, + formatUntil: (timestampMs: number) => string, + compactExpiredPrefix = " expired", +) { + if (typeof expires !== "number" || !Number.isFinite(expires) || expires <= 0) { + return ""; + } + return expires <= now ? compactExpiredPrefix : ` exp ${formatUntil(expires)}`; +} + +function formatFlagsSuffix(flags: string[]) { + return flags.length > 0 ? ` (${flags.join(", ")})` : ""; +} + export const resolveAuthLabel = async ( provider: string, cfg: OpenClawConfig, @@ -89,14 +105,7 @@ export const resolveAuthLabel = async ( refValue: profile.tokenRef, mode, }); - const exp = - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ? profile.expires <= now - ? " expired" - : ` exp ${formatUntil(profile.expires)}` - : ""; + const exp = formatExpirationLabel(profile.expires, now, formatUntil); return { label: `${profileId} token ${tokenLabel}${exp}${more}`, source: "", @@ -104,14 +113,7 @@ export const resolveAuthLabel = async ( } const display = resolveAuthProfileDisplayLabel({ cfg, store, profileId }); const label = display === profileId ? profileId : display; - const exp = - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ? profile.expires <= now - ? " expired" - : ` exp ${formatUntil(profile.expires)}` - : ""; + const exp = formatExpirationLabel(profile.expires, now, formatUntil); return { label: `${label} oauth${exp}${more}`, source: "" }; } @@ -140,7 +142,7 @@ export const resolveAuthLabel = async ( configProfile.mode !== profile.type && !(configProfile.mode === "oauth" && profile.type === "token")) ) { - const suffix = flags.length > 0 ? 
` (${flags.join(", ")})` : ""; + const suffix = formatFlagsSuffix(flags); return `${profileId}=missing${suffix}`; } if (profile.type === "api_key") { @@ -149,7 +151,7 @@ export const resolveAuthLabel = async ( refValue: profile.keyRef, mode, }); - const suffix = flags.length > 0 ? ` (${flags.join(", ")})` : ""; + const suffix = formatFlagsSuffix(flags); return `${profileId}=${keyLabel}${suffix}`; } if (profile.type === "token") { @@ -158,14 +160,11 @@ export const resolveAuthLabel = async ( refValue: profile.tokenRef, mode, }); - if ( - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ) { - flags.push(profile.expires <= now ? "expired" : `exp ${formatUntil(profile.expires)}`); + const expirationFlag = formatExpirationLabel(profile.expires, now, formatUntil, "expired"); + if (expirationFlag) { + flags.push(expirationFlag); } - const suffix = flags.length > 0 ? ` (${flags.join(", ")})` : ""; + const suffix = formatFlagsSuffix(flags); return `${profileId}=token:${tokenLabel}${suffix}`; } const display = resolveAuthProfileDisplayLabel({ @@ -179,15 +178,12 @@ export const resolveAuthLabel = async ( : display.startsWith(profileId) ? display.slice(profileId.length).trim() : `(${display})`; - if ( - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ) { - flags.push(profile.expires <= now ? "expired" : `exp ${formatUntil(profile.expires)}`); + const expirationFlag = formatExpirationLabel(profile.expires, now, formatUntil, "expired"); + if (expirationFlag) { + flags.push(expirationFlag); } const suffixLabel = suffix ? ` ${suffix}` : ""; - const suffixFlags = flags.length > 0 ? 
` (${flags.join(", ")})` : ""; + const suffixFlags = formatFlagsSuffix(flags); return `${profileId}=OAuth${suffixLabel}${suffixFlags}`; }); return { diff --git a/src/auto-reply/reply/directive-handling.model.test.ts b/src/auto-reply/reply/directive-handling.model.test.ts index 5d4a23f3efb..b815ecfc9b9 100644 --- a/src/auto-reply/reply/directive-handling.model.test.ts +++ b/src/auto-reply/reply/directive-handling.model.test.ts @@ -57,24 +57,28 @@ function resolveModelSelectionForCommand(params: { }); } +async function resolveModelInfoReply( + overrides: Partial[0]> = {}, +) { + return maybeHandleModelDirectiveInfo({ + directives: parseInlineDirectives("/model"), + cfg: baseConfig(), + agentDir: "/tmp/agent", + activeAgentId: "main", + provider: "anthropic", + model: "claude-opus-4-5", + defaultProvider: "anthropic", + defaultModel: "claude-opus-4-5", + aliasIndex: baseAliasIndex(), + allowedModelCatalog: [], + resetModelOverride: false, + ...overrides, + }); +} + describe("/model chat UX", () => { it("shows summary for /model with no args", async () => { - const directives = parseInlineDirectives("/model"); - const cfg = { commands: { text: true } } as unknown as OpenClawConfig; - - const reply = await maybeHandleModelDirectiveInfo({ - directives, - cfg, - agentDir: "/tmp/agent", - activeAgentId: "main", - provider: "anthropic", - model: "claude-opus-4-5", - defaultProvider: "anthropic", - defaultModel: "claude-opus-4-5", - aliasIndex: baseAliasIndex(), - allowedModelCatalog: [], - resetModelOverride: false, - }); + const reply = await resolveModelInfoReply(); expect(reply?.text).toContain("Current:"); expect(reply?.text).toContain("Browse: /models"); @@ -82,21 +86,11 @@ describe("/model chat UX", () => { }); it("shows active runtime model when different from selected model", async () => { - const directives = parseInlineDirectives("/model"); - const cfg = { commands: { text: true } } as unknown as OpenClawConfig; - - const reply = await 
maybeHandleModelDirectiveInfo({ - directives, - cfg, - agentDir: "/tmp/agent", - activeAgentId: "main", + const reply = await resolveModelInfoReply({ provider: "fireworks", model: "fireworks/minimax-m2p5", defaultProvider: "fireworks", defaultModel: "fireworks/minimax-m2p5", - aliasIndex: baseAliasIndex(), - allowedModelCatalog: [], - resetModelOverride: false, sessionEntry: { modelProvider: "deepinfra", model: "moonshotai/Kimi-K2.5", diff --git a/src/auto-reply/reply/discord-parent-channel.ts b/src/auto-reply/reply/discord-parent-channel.ts new file mode 100644 index 00000000000..877c4593ea7 --- /dev/null +++ b/src/auto-reply/reply/discord-parent-channel.ts @@ -0,0 +1,15 @@ +import { normalizeConversationText } from "../../acp/conversation-id.js"; +import { parseAgentSessionKey } from "../../routing/session-key.js"; + +export function parseDiscordParentChannelFromSessionKey(raw: unknown): string | undefined { + const sessionKey = normalizeConversationText(raw); + if (!sessionKey) { + return undefined; + } + const scoped = parseAgentSessionKey(sessionKey)?.rest ?? 
sessionKey.toLowerCase(); + const match = scoped.match(/(?:^|:)channel:([^:]+)$/); + if (!match?.[1]) { + return undefined; + } + return match[1]; +} diff --git a/src/auto-reply/reply/followup-runner.test.ts b/src/auto-reply/reply/followup-runner.test.ts index a02ce0b2038..8d12e815685 100644 --- a/src/auto-reply/reply/followup-runner.test.ts +++ b/src/auto-reply/reply/followup-runner.test.ts @@ -4,7 +4,7 @@ import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { loadSessionStore, saveSessionStore, type SessionEntry } from "../../config/sessions.js"; import type { FollowupRun } from "./queue.js"; -import { createMockTypingController } from "./test-helpers.js"; +import { createMockFollowupRun, createMockTypingController } from "./test-helpers.js"; const runEmbeddedPiAgentMock = vi.fn(); const routeReplyMock = vi.fn(); @@ -50,47 +50,12 @@ beforeEach(() => { }); const baseQueuedRun = (messageProvider = "whatsapp"): FollowupRun => - ({ - prompt: "hello", - summaryLine: "hello", - enqueuedAt: Date.now(), - originatingTo: "channel:C1", - run: { - sessionId: "session", - sessionKey: "main", - messageProvider, - agentAccountId: "primary", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - config: {}, - skillsSnapshot: {}, - provider: "anthropic", - model: "claude", - thinkLevel: "low", - verboseLevel: "off", - elevatedLevel: "off", - bashElevated: { - enabled: false, - allowed: false, - defaultLevel: "off", - }, - timeoutMs: 1_000, - blockReplyBreak: "message_end", - }, - }) as FollowupRun; + createMockFollowupRun({ run: { messageProvider } }); function createQueuedRun( overrides: Partial> & { run?: Partial } = {}, ): FollowupRun { - const base = baseQueuedRun(); - return { - ...base, - ...overrides, - run: { - ...base.run, - ...overrides.run, - }, - }; + return createMockFollowupRun(overrides); } function mockCompactionRun(params: { diff --git 
a/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts b/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts index 51351f05de8..36b5910ecae 100644 --- a/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts +++ b/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts @@ -84,6 +84,19 @@ const createHandleInlineActionsInput = (params: { }; }; +async function expectInlineActionSkipped(params: { + ctx: ReturnType; + typing: TypingController; + cleanedBody: string; + command?: Partial; + overrides?: Partial>; +}) { + const result = await handleInlineActions(createHandleInlineActionsInput(params)); + expect(result).toEqual({ kind: "reply", reply: undefined }); + expect(params.typing.cleanup).toHaveBeenCalled(); + expect(handleCommandsMock).not.toHaveBeenCalled(); +} + describe("handleInlineActions", () => { beforeEach(() => { handleCommandsMock.mockReset(); @@ -97,18 +110,12 @@ describe("handleInlineActions", () => { To: "whatsapp:+123", Body: "hi", }); - const result = await handleInlineActions( - createHandleInlineActionsInput({ - ctx, - typing, - cleanedBody: "hi", - command: { to: "whatsapp:+123" }, - }), - ); - - expect(result).toEqual({ kind: "reply", reply: undefined }); - expect(typing.cleanup).toHaveBeenCalled(); - expect(handleCommandsMock).not.toHaveBeenCalled(); + await expectInlineActionSkipped({ + ctx, + typing, + cleanedBody: "hi", + command: { to: "whatsapp:+123" }, + }); }); it("forwards agentDir into handleCommands", async () => { @@ -163,25 +170,19 @@ describe("handleInlineActions", () => { MessageSid: "41", }); - const result = await handleInlineActions( - createHandleInlineActionsInput({ - ctx, - typing, - cleanedBody: "old queued message", - command: { - rawBodyNormalized: "old queued message", - commandBodyNormalized: "old queued message", - }, - overrides: { - sessionEntry, - sessionStore, - }, - }), - ); - - expect(result).toEqual({ kind: "reply", reply: 
undefined }); - expect(typing.cleanup).toHaveBeenCalled(); - expect(handleCommandsMock).not.toHaveBeenCalled(); + await expectInlineActionSkipped({ + ctx, + typing, + cleanedBody: "old queued message", + command: { + rawBodyNormalized: "old queued message", + commandBodyNormalized: "old queued message", + }, + overrides: { + sessionEntry, + sessionStore, + }, + }); }); it("clears /stop cutoff when a newer message arrives", async () => { diff --git a/src/auto-reply/reply/normalize-reply.ts b/src/auto-reply/reply/normalize-reply.ts index 9aafb66bd34..793cbcc326f 100644 --- a/src/auto-reply/reply/normalize-reply.ts +++ b/src/auto-reply/reply/normalize-reply.ts @@ -12,11 +12,13 @@ import { resolveResponsePrefixTemplate, type ResponsePrefixContext, } from "./response-prefix-template.js"; +import { hasSlackDirectives, parseSlackDirectives } from "./slack-directives.js"; export type NormalizeReplySkipReason = "empty" | "silent" | "heartbeat"; export type NormalizeReplyOptions = { responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; /** Context for template variable interpolation in responsePrefix */ responsePrefixContext?: ResponsePrefixContext; onHeartbeatStrip?: () => void; @@ -105,5 +107,10 @@ export function normalizeReplyPayload( text = `${effectivePrefix} ${text}`; } - return { ...enrichedPayload, text }; + enrichedPayload = { ...enrichedPayload, text }; + if (opts.enableSlackInteractiveReplies && text && hasSlackDirectives(text)) { + enrichedPayload = parseSlackDirectives(enrichedPayload); + } + + return enrichedPayload; } diff --git a/src/auto-reply/reply/post-compaction-context.test.ts b/src/auto-reply/reply/post-compaction-context.test.ts index 02a4a27e6de..3af8bceab00 100644 --- a/src/auto-reply/reply/post-compaction-context.test.ts +++ b/src/auto-reply/reply/post-compaction-context.test.ts @@ -15,6 +15,28 @@ describe("readPostCompactionContext", () => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); + async function 
expectLegacySectionFallback( + postCompactionSections: string[], + expectDefaultProse = false, + ) { + const content = `## Every Session\n\nDo startup things.\n\n## Safety\n\nBe safe.\n`; + fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); + const cfg = { + agents: { + defaults: { + compaction: { postCompactionSections }, + }, + }, + } as OpenClawConfig; + const result = await readPostCompactionContext(tmpDir, cfg); + expect(result).not.toBeNull(); + expect(result).toContain("Do startup things"); + expect(result).toContain("Be safe"); + if (expectDefaultProse) { + expect(result).toContain("Run your Session Startup sequence"); + } + } + it("returns null when no AGENTS.md exists", async () => { const result = await readPostCompactionContext(tmpDir); expect(result).toBeNull(); @@ -339,36 +361,11 @@ Read WORKFLOW.md on startup. // Older AGENTS.md templates use "Every Session" / "Safety" instead of // "Session Startup" / "Red Lines". Explicitly setting the defaults should // still trigger the legacy fallback — same behavior as leaving the field unset. 
- const content = `## Every Session\n\nDo startup things.\n\n## Safety\n\nBe safe.\n`; - fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); - const cfg = { - agents: { - defaults: { - compaction: { postCompactionSections: ["Session Startup", "Red Lines"] }, - }, - }, - } as OpenClawConfig; - const result = await readPostCompactionContext(tmpDir, cfg); - expect(result).not.toBeNull(); - expect(result).toContain("Do startup things"); - expect(result).toContain("Be safe"); + await expectLegacySectionFallback(["Session Startup", "Red Lines"]); }); it("falls back to legacy sections when default sections are configured in a different order", async () => { - const content = `## Every Session\n\nDo startup things.\n\n## Safety\n\nBe safe.\n`; - fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); - const cfg = { - agents: { - defaults: { - compaction: { postCompactionSections: ["Red Lines", "Session Startup"] }, - }, - }, - } as OpenClawConfig; - const result = await readPostCompactionContext(tmpDir, cfg); - expect(result).not.toBeNull(); - expect(result).toContain("Do startup things"); - expect(result).toContain("Be safe"); - expect(result).toContain("Run your Session Startup sequence"); + await expectLegacySectionFallback(["Red Lines", "Session Startup"], true); }); it("custom section names are matched case-insensitively", async () => { diff --git a/src/auto-reply/reply/reply-dispatcher.ts b/src/auto-reply/reply/reply-dispatcher.ts index 7272a3081a2..d212245ef59 100644 --- a/src/auto-reply/reply/reply-dispatcher.ts +++ b/src/auto-reply/reply/reply-dispatcher.ts @@ -43,6 +43,7 @@ function getHumanDelay(config: HumanDelayConfig | undefined): number { export type ReplyDispatcherOptions = { deliver: ReplyDispatchDeliverer; responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; /** Static context for response prefix template interpolation. 
*/ responsePrefixContext?: ResponsePrefixContext; /** Dynamic context provider for response prefix template interpolation. @@ -84,7 +85,11 @@ export type ReplyDispatcher = { type NormalizeReplyPayloadInternalOptions = Pick< ReplyDispatcherOptions, - "responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip" + | "responsePrefix" + | "enableSlackInteractiveReplies" + | "responsePrefixContext" + | "responsePrefixContextProvider" + | "onHeartbeatStrip" > & { onSkip?: (reason: NormalizeReplySkipReason) => void; }; @@ -98,6 +103,7 @@ function normalizeReplyPayloadInternal( return normalizeReplyPayload(payload, { responsePrefix: opts.responsePrefix, + enableSlackInteractiveReplies: opts.enableSlackInteractiveReplies, responsePrefixContext: prefixContext, onHeartbeatStrip: opts.onHeartbeatStrip, onSkip: opts.onSkip, @@ -129,6 +135,7 @@ export function createReplyDispatcher(options: ReplyDispatcherOptions): ReplyDis const enqueue = (kind: ReplyDispatchKind, payload: ReplyPayload) => { const normalized = normalizeReplyPayloadInternal(payload, { responsePrefix: options.responsePrefix, + enableSlackInteractiveReplies: options.enableSlackInteractiveReplies, responsePrefixContext: options.responsePrefixContext, responsePrefixContextProvider: options.responsePrefixContextProvider, onHeartbeatStrip: options.onHeartbeatStrip, diff --git a/src/auto-reply/reply/reply-elevated.test.ts b/src/auto-reply/reply/reply-elevated.test.ts index 74fba60acf7..28259c34638 100644 --- a/src/auto-reply/reply/reply-elevated.test.ts +++ b/src/auto-reply/reply/reply-elevated.test.ts @@ -27,68 +27,65 @@ function buildContext(overrides?: Partial): MsgContext { } as MsgContext; } +function expectAllowFromDecision(params: { + allowFrom: string[]; + ctx?: Partial; + allowed: boolean; +}) { + const result = resolveElevatedPermissions({ + cfg: buildConfig(params.allowFrom), + agentId: "main", + provider: "whatsapp", + ctx: buildContext(params.ctx), + }); + + 
expect(result.enabled).toBe(true); + expect(result.allowed).toBe(params.allowed); + if (params.allowed) { + expect(result.failures).toHaveLength(0); + return; + } + + expect(result.failures).toContainEqual({ + gate: "allowFrom", + key: "tools.elevated.allowFrom.whatsapp", + }); +} + describe("resolveElevatedPermissions", () => { it("authorizes when sender matches allowFrom", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["+15550001111"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext(), + expectAllowFromDecision({ + allowFrom: ["+15550001111"], + allowed: true, }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(true); - expect(result.failures).toHaveLength(0); }); it("does not authorize when only recipient matches allowFrom", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["+15559990000"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext(), - }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(false); - expect(result.failures).toContainEqual({ - gate: "allowFrom", - key: "tools.elevated.allowFrom.whatsapp", + expectAllowFromDecision({ + allowFrom: ["+15559990000"], + allowed: false, }); }); it("does not authorize untyped mutable sender fields", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["owner-display-name"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext({ + expectAllowFromDecision({ + allowFrom: ["owner-display-name"], + allowed: false, + ctx: { SenderName: "owner-display-name", SenderUsername: "owner-display-name", SenderTag: "owner-display-name", - }), - }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(false); - expect(result.failures).toContainEqual({ - gate: "allowFrom", - key: "tools.elevated.allowFrom.whatsapp", + }, }); }); it("authorizes mutable sender fields only with explicit prefix", () => { - const result = resolveElevatedPermissions({ - cfg: 
buildConfig(["username:owner_username"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext({ + expectAllowFromDecision({ + allowFrom: ["username:owner_username"], + allowed: true, + ctx: { SenderUsername: "owner_username", - }), + }, }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(true); - expect(result.failures).toHaveLength(0); }); }); diff --git a/src/auto-reply/reply/reply-flow.test.ts b/src/auto-reply/reply/reply-flow.test.ts index d0fd692c2e1..d7efa640b1c 100644 --- a/src/auto-reply/reply/reply-flow.test.ts +++ b/src/auto-reply/reply/reply-flow.test.ts @@ -16,6 +16,7 @@ import { } from "./queue.js"; import { createReplyDispatcher } from "./reply-dispatcher.js"; import { createReplyToModeFilter, resolveReplyToMode } from "./reply-threading.js"; +import { parseSlackDirectives, hasSlackDirectives } from "./slack-directives.js"; describe("normalizeInboundTextNewlines", () => { it("normalizes real newlines and preserves literal backslash-n sequences", () => { @@ -196,6 +197,8 @@ describe("inbound context contract (providers + extensions)", () => { const getLineData = (result: ReturnType) => (result.channelData?.line as Record | undefined) ?? {}; +const getSlackData = (result: ReturnType) => + (result.channelData?.slack as Record | undefined) ?? 
{}; describe("hasLineDirectives", () => { it("matches expected detection across directive patterns", () => { @@ -219,6 +222,24 @@ describe("hasLineDirectives", () => { }); }); +describe("hasSlackDirectives", () => { + it("matches expected detection across Slack directive patterns", () => { + const cases: Array<{ text: string; expected: boolean }> = [ + { text: "Pick one [[slack_buttons: Approve:approve, Reject:reject]]", expected: true }, + { + text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]", + expected: true, + }, + { text: "Just regular text", expected: false }, + { text: "[[buttons: Menu | Choose | A:a]]", expected: false }, + ]; + + for (const testCase of cases) { + expect(hasSlackDirectives(testCase.text)).toBe(testCase.expected); + } + }); +}); + describe("parseLineDirectives", () => { describe("quick_replies", () => { it("parses quick replies variants", () => { @@ -579,6 +600,279 @@ describe("parseLineDirectives", () => { }); }); +describe("parseSlackDirectives", () => { + it("builds section and button blocks from slack_buttons directives", () => { + const result = parseSlackDirectives({ + text: "Choose an action [[slack_buttons: Approve:approve, Reject:reject]]", + }); + + expect(result.text).toBe("Choose an action"); + expect(getSlackData(result).blocks).toEqual([ + { + type: "section", + text: { + type: "mrkdwn", + text: "Choose an action", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Approve", + emoji: true, + }, + value: "reply_1_approve", + }, + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Reject", + emoji: true, + }, + value: "reply_2_reject", + }, + ], + }, + ]); + }); + + it("builds static select blocks from slack_select directives", () => { + const result = parseSlackDirectives({ + text: "[[slack_select: Choose a project | Alpha:alpha, 
Beta:beta]]", + }); + + expect(result.text).toBeUndefined(); + expect(getSlackData(result).blocks).toEqual([ + { + type: "actions", + block_id: "openclaw_reply_select_1", + elements: [ + { + type: "static_select", + action_id: "openclaw:reply_select", + placeholder: { + type: "plain_text", + text: "Choose a project", + emoji: true, + }, + options: [ + { + text: { + type: "plain_text", + text: "Alpha", + emoji: true, + }, + value: "reply_1_alpha", + }, + { + text: { + type: "plain_text", + text: "Beta", + emoji: true, + }, + value: "reply_2_beta", + }, + ], + }, + ], + }, + ]); + }); + + it("appends Slack interactive blocks to existing slack blocks", () => { + const result = parseSlackDirectives({ + text: "Act now [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: [{ type: "divider" }], + }, + }, + }); + + expect(result.text).toBe("Act now"); + expect(getSlackData(result).blocks).toEqual([ + { type: "divider" }, + { + type: "section", + text: { + type: "mrkdwn", + text: "Act now", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + ], + }, + ]); + }); + + it("preserves authored order for mixed Slack directives", () => { + const result = parseSlackDirectives({ + text: "[[slack_select: Pick one | Alpha:alpha]] then [[slack_buttons: Retry:retry]]", + }); + + expect(getSlackData(result).blocks).toEqual([ + { + type: "actions", + block_id: "openclaw_reply_select_1", + elements: [ + { + type: "static_select", + action_id: "openclaw:reply_select", + placeholder: { + type: "plain_text", + text: "Pick one", + emoji: true, + }, + options: [ + { + text: { + type: "plain_text", + text: "Alpha", + emoji: true, + }, + value: "reply_1_alpha", + }, + ], + }, + ], + }, + { + type: "section", + text: { + type: "mrkdwn", + text: "then", + }, + }, + { + type: "actions", + 
block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + ], + }, + ]); + }); + + it("truncates Slack interactive reply strings to safe Block Kit limits", () => { + const long = "x".repeat(120); + const result = parseSlackDirectives({ + text: `${"y".repeat(3100)} [[slack_select: ${long} | ${long}:${long}]] [[slack_buttons: ${long}:${long}]]`, + }); + + const blocks = getSlackData(result).blocks as Array>; + expect(blocks).toHaveLength(3); + expect(((blocks[0]?.text as { text?: string })?.text ?? "").length).toBeLessThanOrEqual(3000); + expect( + ( + ( + (blocks[1]?.elements as Array>)?.[0]?.placeholder as { + text?: string; + } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ( + ( + (blocks[1]?.elements as Array>)?.[0]?.options as Array< + Record + > + )?.[0]?.text as { text?: string } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + (( + (blocks[1]?.elements as Array>)?.[0]?.options as Array< + Record + > + )?.[0]?.value as string | undefined) ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ( + (blocks[2]?.elements as Array>)?.[0]?.text as { + text?: string; + } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ((blocks[2]?.elements as Array>)?.[0]?.value as + | string + | undefined) ?? 
"" + ).length, + ).toBeLessThanOrEqual(75); + }); + + it("falls back to the original payload when generated blocks would exceed Slack limits", () => { + const result = parseSlackDirectives({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: Array.from({ length: 49 }, () => ({ type: "divider" })), + }, + }, + }); + + expect(result).toEqual({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: Array.from({ length: 49 }, () => ({ type: "divider" })), + }, + }, + }); + }); + + it("ignores malformed existing Slack blocks during directive compilation", () => { + expect(() => + parseSlackDirectives({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: "{not json}", + }, + }, + }), + ).not.toThrow(); + }); +}); + function createDeferred() { let resolve!: (value: T) => void; let reject!: (reason?: unknown) => void; @@ -1485,6 +1779,43 @@ describe("createReplyDispatcher", () => { expect(onHeartbeatStrip).toHaveBeenCalledTimes(2); }); + it("compiles Slack directives in dispatcher flows when enabled", async () => { + const deliver = vi.fn().mockResolvedValue(undefined); + const dispatcher = createReplyDispatcher({ + deliver, + enableSlackInteractiveReplies: true, + }); + + expect( + dispatcher.sendFinalReply({ + text: "Choose [[slack_buttons: Retry:retry]]", + }), + ).toBe(true); + await dispatcher.waitForIdle(); + + expect(deliver).toHaveBeenCalledTimes(1); + expect(deliver.mock.calls[0]?.[0]).toMatchObject({ + text: "Choose", + channelData: { + slack: { + blocks: [ + { + type: "section", + text: { + type: "mrkdwn", + text: "Choose", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + }, + ], + }, + }, + }); + }); + it("avoids double-prefixing and keeps media when heartbeat is the only text", async () => { const deliver = vi.fn().mockResolvedValue(undefined); const dispatcher = createReplyDispatcher({ diff --git 
a/src/auto-reply/reply/reply-utils.test.ts b/src/auto-reply/reply/reply-utils.test.ts index c1e76e50403..88f092bf1e5 100644 --- a/src/auto-reply/reply/reply-utils.test.ts +++ b/src/auto-reply/reply/reply-utils.test.ts @@ -150,6 +150,67 @@ describe("normalizeReplyPayload", () => { expect(result!.text).toBe(""); expect(result!.mediaUrl).toBe("https://example.com/img.png"); }); + + it("does not compile Slack directives unless interactive replies are enabled", () => { + const result = normalizeReplyPayload({ + text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]", + }); + + expect(result).not.toBeNull(); + expect(result!.text).toBe("hello [[slack_buttons: Retry:retry, Ignore:ignore]]"); + expect(result!.channelData).toBeUndefined(); + }); + + it("applies responsePrefix before compiling Slack directives into blocks", () => { + const result = normalizeReplyPayload( + { + text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]", + }, + { responsePrefix: "[bot]", enableSlackInteractiveReplies: true }, + ); + + expect(result).not.toBeNull(); + expect(result!.text).toBe("[bot] hello"); + expect(result!.channelData).toEqual({ + slack: { + blocks: [ + { + type: "section", + text: { + type: "mrkdwn", + text: "[bot] hello", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Ignore", + emoji: true, + }, + value: "reply_2_ignore", + }, + ], + }, + ], + }, + }); + }); }); describe("typing controller", () => { diff --git a/src/auto-reply/reply/route-reply.test.ts b/src/auto-reply/reply/route-reply.test.ts index 9b5d432149a..62f91097223 100644 --- a/src/auto-reply/reply/route-reply.test.ts +++ b/src/auto-reply/reply/route-reply.test.ts @@ -105,6 +105,23 @@ const createMSTeamsPlugin = 
(params: { outbound: ChannelOutboundAdapter }): Chan outbound: params.outbound, }); +async function expectSlackNoSend( + payload: Parameters[0]["payload"], + overrides: Partial[0]> = {}, +) { + mocks.sendMessageSlack.mockClear(); + const res = await routeReply({ + payload, + channel: "slack", + to: "channel:C123", + cfg: {} as never, + ...overrides, + }); + expect(res.ok).toBe(true); + expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + return res; +} + describe("routeReply", () => { beforeEach(() => { setActivePluginRegistry(defaultRegistry); @@ -132,39 +149,15 @@ describe("routeReply", () => { }); it("no-ops on empty payload", async () => { - mocks.sendMessageSlack.mockClear(); - const res = await routeReply({ - payload: {}, - channel: "slack", - to: "channel:C123", - cfg: {} as never, - }); - expect(res.ok).toBe(true); - expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + await expectSlackNoSend({}); }); it("suppresses reasoning payloads", async () => { - mocks.sendMessageSlack.mockClear(); - const res = await routeReply({ - payload: { text: "Reasoning:\n_step_", isReasoning: true }, - channel: "slack", - to: "channel:C123", - cfg: {} as never, - }); - expect(res.ok).toBe(true); - expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + await expectSlackNoSend({ text: "Reasoning:\n_step_", isReasoning: true }); }); it("drops silent token payloads", async () => { - mocks.sendMessageSlack.mockClear(); - const res = await routeReply({ - payload: { text: SILENT_REPLY_TOKEN }, - channel: "slack", - to: "channel:C123", - cfg: {} as never, - }); - expect(res.ok).toBe(true); - expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + await expectSlackNoSend({ text: SILENT_REPLY_TOKEN }); }); it("does not drop payloads that merely start with the silent token", async () => { @@ -201,6 +194,46 @@ describe("routeReply", () => { ); }); + it("routes directive-only Slack replies when interactive replies are enabled", async () => { + mocks.sendMessageSlack.mockClear(); 
+ const cfg = { + channels: { + slack: { + capabilities: { interactiveReplies: true }, + }, + }, + } as unknown as OpenClawConfig; + await routeReply({ + payload: { text: "[[slack_select: Choose one | Alpha:alpha]]" }, + channel: "slack", + to: "channel:C123", + cfg, + }); + expect(mocks.sendMessageSlack).toHaveBeenCalledWith( + "channel:C123", + "", + expect.objectContaining({ + blocks: [ + expect.objectContaining({ + type: "actions", + block_id: "openclaw_reply_select_1", + }), + ], + }), + ); + }); + + it("does not bypass the empty-reply guard for invalid Slack blocks", async () => { + await expectSlackNoSend({ + text: " ", + channelData: { + slack: { + blocks: " ", + }, + }, + }); + }); + it("does not derive responsePrefix from agent identity when routing", async () => { mocks.sendMessageSlack.mockClear(); const cfg = { diff --git a/src/auto-reply/reply/route-reply.ts b/src/auto-reply/reply/route-reply.ts index a489bedcbbf..8b3319698b2 100644 --- a/src/auto-reply/reply/route-reply.ts +++ b/src/auto-reply/reply/route-reply.ts @@ -12,6 +12,8 @@ import { resolveEffectiveMessagesConfig } from "../../agents/identity.js"; import { normalizeChannelId } from "../../channels/plugins/index.js"; import type { OpenClawConfig } from "../../config/config.js"; import { buildOutboundSessionContext } from "../../infra/outbound/session-context.js"; +import { parseSlackBlocksInput } from "../../slack/blocks-input.js"; +import { isSlackInteractiveRepliesEnabled } from "../../slack/interactive-replies.js"; import { INTERNAL_MESSAGE_CHANNEL, normalizeMessageChannel } from "../../utils/message-channel.js"; import type { OriginatingChannelType } from "../templating.js"; import type { ReplyPayload } from "../types.js"; @@ -94,6 +96,8 @@ export async function routeReply(params: RouteReplyParams): Promise; + sessionKey?: string; + storePath?: string; + nextEntry: SessionEntry; +}) { + if (!params.sessionStore || !params.sessionKey) { + return; + } + params.sessionStore[params.sessionKey] 
= { + ...params.sessionStore[params.sessionKey], + ...params.nextEntry, + }; + if (!params.storePath) { + return; + } + await updateSessionStore(params.storePath, (store) => { + store[params.sessionKey!] = { ...store[params.sessionKey!], ...params.nextEntry }; + }); +} + export async function ensureSkillSnapshot(params: { sessionEntry?: SessionEntry; sessionStore?: Record; @@ -185,12 +206,7 @@ export async function ensureSkillSnapshot(params: { systemSent: true, skillsSnapshot: skillSnapshot, }; - sessionStore[sessionKey] = { ...sessionStore[sessionKey], ...nextEntry }; - if (storePath) { - await updateSessionStore(storePath, (store) => { - store[sessionKey] = { ...store[sessionKey], ...nextEntry }; - }); - } + await persistSessionEntryUpdate({ sessionStore, sessionKey, storePath, nextEntry }); systemSent = true; } @@ -227,12 +243,7 @@ export async function ensureSkillSnapshot(params: { updatedAt: Date.now(), skillsSnapshot, }; - sessionStore[sessionKey] = { ...sessionStore[sessionKey], ...nextEntry }; - if (storePath) { - await updateSessionStore(storePath, (store) => { - store[sessionKey] = { ...store[sessionKey], ...nextEntry }; - }); - } + await persistSessionEntryUpdate({ sessionStore, sessionKey, storePath, nextEntry }); } return { sessionEntry: nextEntry, skillsSnapshot, systemSent }; diff --git a/src/auto-reply/reply/session.ts b/src/auto-reply/reply/session.ts index 6db6b1708cb..a2c0b1c7cf4 100644 --- a/src/auto-reply/reply/session.ts +++ b/src/auto-reply/reply/session.ts @@ -2,6 +2,7 @@ import crypto from "node:crypto"; import path from "node:path"; import { buildTelegramTopicConversationId, + normalizeConversationText, parseTelegramChatIdFromTarget, } from "../../acp/conversation-id.js"; import { resolveSessionAgentId } from "../../agents/agent-scope.js"; @@ -33,11 +34,12 @@ import { resolveConversationIdFromTargets } from "../../infra/outbound/conversat import { deliverSessionMaintenanceWarning } from "../../infra/session-maintenance-warning.js"; import 
{ createSubsystemLogger } from "../../logging/subsystem.js"; import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js"; -import { normalizeMainKey, parseAgentSessionKey } from "../../routing/session-key.js"; +import { normalizeMainKey } from "../../routing/session-key.js"; import { normalizeSessionDeliveryFields } from "../../utils/delivery-context.js"; import { resolveCommandAuthorization } from "../command-auth.js"; import type { MsgContext, TemplateContext } from "../templating.js"; import { resolveEffectiveResetTargetSessionKey } from "./acp-reset-target.js"; +import { parseDiscordParentChannelFromSessionKey } from "./discord-parent-channel.js"; import { normalizeInboundTextNewlines } from "./inbound-text.js"; import { stripMentions, stripStructuralPrefixes } from "./mentions.js"; import { @@ -69,44 +71,21 @@ export type SessionInitResult = { triggerBodyNormalized: string; }; -function normalizeSessionText(value: unknown): string { - if (typeof value === "string") { - return value.trim(); - } - if (typeof value === "number" || typeof value === "bigint" || typeof value === "boolean") { - return `${value}`.trim(); - } - return ""; -} - -function parseDiscordParentChannelFromSessionKey(raw: unknown): string | undefined { - const sessionKey = normalizeSessionText(raw); - if (!sessionKey) { - return undefined; - } - const scoped = parseAgentSessionKey(sessionKey)?.rest ?? sessionKey.toLowerCase(); - const match = scoped.match(/(?:^|:)channel:([^:]+)$/); - if (!match?.[1]) { - return undefined; - } - return match[1]; -} - function resolveAcpResetBindingContext(ctx: MsgContext): { channel: string; accountId: string; conversationId: string; parentConversationId?: string; } | null { - const channelRaw = normalizeSessionText( + const channelRaw = normalizeConversationText( ctx.OriginatingChannel ?? ctx.Surface ?? ctx.Provider ?? 
"", ).toLowerCase(); if (!channelRaw) { return null; } - const accountId = normalizeSessionText(ctx.AccountId) || "default"; + const accountId = normalizeConversationText(ctx.AccountId) || "default"; const normalizedThreadId = - ctx.MessageThreadId != null ? normalizeSessionText(String(ctx.MessageThreadId)) : ""; + ctx.MessageThreadId != null ? normalizeConversationText(String(ctx.MessageThreadId)) : ""; if (channelRaw === "telegram") { const parentConversationId = @@ -143,7 +122,7 @@ function resolveAcpResetBindingContext(ctx: MsgContext): { } let parentConversationId: string | undefined; if (channelRaw === "discord" && normalizedThreadId) { - const fromContext = normalizeSessionText(ctx.ThreadParentId); + const fromContext = normalizeConversationText(ctx.ThreadParentId); if (fromContext && fromContext !== conversationId) { parentConversationId = fromContext; } else { @@ -172,7 +151,7 @@ function resolveBoundAcpSessionForReset(params: { cfg: OpenClawConfig; ctx: MsgContext; }): string | undefined { - const activeSessionKey = normalizeSessionText(params.ctx.SessionKey); + const activeSessionKey = normalizeConversationText(params.ctx.SessionKey); const bindingContext = resolveAcpResetBindingContext(params.ctx); return resolveEffectiveResetTargetSessionKey({ cfg: params.cfg, diff --git a/src/auto-reply/reply/slack-directives.ts b/src/auto-reply/reply/slack-directives.ts new file mode 100644 index 00000000000..fe58f0c5961 --- /dev/null +++ b/src/auto-reply/reply/slack-directives.ts @@ -0,0 +1,218 @@ +import { parseSlackBlocksInput } from "../../slack/blocks-input.js"; +import { truncateSlackText } from "../../slack/truncate.js"; +import type { ReplyPayload } from "../types.js"; + +const SLACK_REPLY_BUTTON_ACTION_ID = "openclaw:reply_button"; +const SLACK_REPLY_SELECT_ACTION_ID = "openclaw:reply_select"; +const SLACK_MAX_BLOCKS = 50; +const SLACK_BUTTON_MAX_ITEMS = 5; +const SLACK_SELECT_MAX_ITEMS = 100; +const SLACK_SECTION_TEXT_MAX = 3000; +const SLACK_PLAIN_TEXT_MAX 
= 75; +const SLACK_OPTION_VALUE_MAX = 75; +const SLACK_DIRECTIVE_RE = /\[\[(slack_buttons|slack_select):\s*([^\]]+)\]\]/gi; + +type SlackBlock = Record; +type SlackChannelData = { + blocks?: unknown; +}; + +type SlackChoice = { + label: string; + value: string; +}; + +function parseChoice(raw: string): SlackChoice | null { + const trimmed = raw.trim(); + if (!trimmed) { + return null; + } + const delimiter = trimmed.indexOf(":"); + if (delimiter === -1) { + return { + label: trimmed, + value: trimmed, + }; + } + const label = trimmed.slice(0, delimiter).trim(); + const value = trimmed.slice(delimiter + 1).trim(); + if (!label || !value) { + return null; + } + return { label, value }; +} + +function parseChoices(raw: string, maxItems: number): SlackChoice[] { + return raw + .split(",") + .map((entry) => parseChoice(entry)) + .filter((entry): entry is SlackChoice => Boolean(entry)) + .slice(0, maxItems); +} + +function buildSlackReplyChoiceToken(value: string, index: number): string { + const slug = value + .trim() + .toLowerCase() + .replace(/[^a-z0-9]+/g, "_") + .replace(/^_+|_+$/g, ""); + return truncateSlackText(`reply_${index}_${slug || "choice"}`, SLACK_OPTION_VALUE_MAX); +} + +function buildSectionBlock(text: string): SlackBlock | null { + const trimmed = text.trim(); + if (!trimmed) { + return null; + } + return { + type: "section", + text: { + type: "mrkdwn", + text: truncateSlackText(trimmed, SLACK_SECTION_TEXT_MAX), + }, + }; +} + +function buildButtonsBlock(raw: string, index: number): SlackBlock | null { + const choices = parseChoices(raw, SLACK_BUTTON_MAX_ITEMS); + if (choices.length === 0) { + return null; + } + return { + type: "actions", + block_id: `openclaw_reply_buttons_${index}`, + elements: choices.map((choice, choiceIndex) => ({ + type: "button", + action_id: SLACK_REPLY_BUTTON_ACTION_ID, + text: { + type: "plain_text", + text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + value: 
buildSlackReplyChoiceToken(choice.value, choiceIndex + 1), + })), + }; +} + +function buildSelectBlock(raw: string, index: number): SlackBlock | null { + const parts = raw + .split("|") + .map((entry) => entry.trim()) + .filter(Boolean); + if (parts.length === 0) { + return null; + } + const [first, second] = parts; + const placeholder = parts.length >= 2 ? first : "Choose an option"; + const choices = parseChoices(parts.length >= 2 ? second : first, SLACK_SELECT_MAX_ITEMS); + if (choices.length === 0) { + return null; + } + return { + type: "actions", + block_id: `openclaw_reply_select_${index}`, + elements: [ + { + type: "static_select", + action_id: SLACK_REPLY_SELECT_ACTION_ID, + placeholder: { + type: "plain_text", + text: truncateSlackText(placeholder, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + options: choices.map((choice, choiceIndex) => ({ + text: { + type: "plain_text", + text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + value: buildSlackReplyChoiceToken(choice.value, choiceIndex + 1), + })), + }, + ], + }; +} + +function readExistingSlackBlocks(payload: ReplyPayload): SlackBlock[] { + const slackData = payload.channelData?.slack as SlackChannelData | undefined; + try { + const blocks = parseSlackBlocksInput(slackData?.blocks) as SlackBlock[] | undefined; + return blocks ?? 
[]; + } catch { + return []; + } +} + +export function hasSlackDirectives(text: string): boolean { + SLACK_DIRECTIVE_RE.lastIndex = 0; + return SLACK_DIRECTIVE_RE.test(text); +} + +export function parseSlackDirectives(payload: ReplyPayload): ReplyPayload { + const text = payload.text; + if (!text) { + return payload; + } + + const generatedBlocks: SlackBlock[] = []; + const visibleTextParts: string[] = []; + let buttonIndex = 0; + let selectIndex = 0; + let cursor = 0; + let matchedDirective = false; + let generatedInteractiveBlock = false; + SLACK_DIRECTIVE_RE.lastIndex = 0; + + for (const match of text.matchAll(SLACK_DIRECTIVE_RE)) { + matchedDirective = true; + const matchText = match[0]; + const directiveType = match[1]; + const body = match[2]; + const index = match.index ?? 0; + const precedingText = text.slice(cursor, index); + visibleTextParts.push(precedingText); + const section = buildSectionBlock(precedingText); + if (section) { + generatedBlocks.push(section); + } + const block = + directiveType.toLowerCase() === "slack_buttons" + ? 
buildButtonsBlock(body, ++buttonIndex) + : buildSelectBlock(body, ++selectIndex); + if (block) { + generatedInteractiveBlock = true; + generatedBlocks.push(block); + } + cursor = index + matchText.length; + } + + const trailingText = text.slice(cursor); + visibleTextParts.push(trailingText); + const trailingSection = buildSectionBlock(trailingText); + if (trailingSection) { + generatedBlocks.push(trailingSection); + } + const cleanedText = visibleTextParts.join(""); + + if (!matchedDirective || !generatedInteractiveBlock) { + return payload; + } + + const existingBlocks = readExistingSlackBlocks(payload); + if (existingBlocks.length + generatedBlocks.length > SLACK_MAX_BLOCKS) { + return payload; + } + const nextBlocks = [...existingBlocks, ...generatedBlocks]; + + return { + ...payload, + text: cleanedText.trim() || undefined, + channelData: { + ...payload.channelData, + slack: { + ...(payload.channelData?.slack as Record | undefined), + blocks: nextBlocks, + }, + }, + }; +} diff --git a/src/auto-reply/reply/stage-sandbox-media.ts b/src/auto-reply/reply/stage-sandbox-media.ts index d364fa6a554..3d3dec1738f 100644 --- a/src/auto-reply/reply/stage-sandbox-media.ts +++ b/src/auto-reply/reply/stage-sandbox-media.ts @@ -7,7 +7,7 @@ import { ensureSandboxWorkspaceForSession } from "../../agents/sandbox.js"; import type { OpenClawConfig } from "../../config/config.js"; import { logVerbose } from "../../globals.js"; import { copyFileWithinRoot, SafeOpenError } from "../../infra/fs-safe.js"; -import { normalizeScpRemoteHost } from "../../infra/scp-host.js"; +import { normalizeScpRemoteHost, normalizeScpRemotePath } from "../../infra/scp-host.js"; import { resolvePreferredOpenClawTmpDir } from "../../infra/tmp-openclaw-dir.js"; import { isInboundPathAllowed, @@ -293,6 +293,10 @@ async function scpFile(remoteHost: string, remotePath: string, localPath: string if (!safeRemoteHost) { throw new Error("invalid remote host for SCP"); } + const safeRemotePath = 
normalizeScpRemotePath(remotePath); + if (!safeRemotePath) { + throw new Error("invalid remote path for SCP"); + } return new Promise((resolve, reject) => { const child = spawn( "/usr/bin/scp", @@ -302,7 +306,7 @@ async function scpFile(remoteHost: string, remotePath: string, localPath: string "-o", "StrictHostKeyChecking=yes", "--", - `${safeRemoteHost}:${remotePath}`, + `${safeRemoteHost}:${safeRemotePath}`, localPath, ], { stdio: ["ignore", "ignore", "pipe"] }, diff --git a/src/auto-reply/reply/test-helpers.ts b/src/auto-reply/reply/test-helpers.ts index 4c30ae6756a..fe1913e723d 100644 --- a/src/auto-reply/reply/test-helpers.ts +++ b/src/auto-reply/reply/test-helpers.ts @@ -1,4 +1,5 @@ import { vi } from "vitest"; +import type { FollowupRun } from "./queue.js"; import type { TypingController } from "./typing.js"; export function createMockTypingController( @@ -16,3 +17,49 @@ export function createMockTypingController( ...overrides, }; } + +export function createMockFollowupRun( + overrides: Partial> & { run?: Partial } = {}, +): FollowupRun { + const base: FollowupRun = { + prompt: "hello", + summaryLine: "hello", + enqueuedAt: Date.now(), + originatingTo: "channel:C1", + run: { + agentId: "agent", + agentDir: "/tmp/agent", + sessionId: "session", + sessionKey: "main", + messageProvider: "whatsapp", + agentAccountId: "primary", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + config: {}, + skillsSnapshot: { + prompt: "", + skills: [], + }, + provider: "anthropic", + model: "claude", + thinkLevel: "low", + verboseLevel: "off", + elevatedLevel: "off", + bashElevated: { + enabled: false, + allowed: false, + defaultLevel: "off", + }, + timeoutMs: 1_000, + blockReplyBreak: "message_end", + }, + }; + return { + ...base, + ...overrides, + run: { + ...base.run, + ...overrides.run, + }, + }; +} diff --git a/src/browser/chrome-mcp.snapshot.test.ts b/src/browser/chrome-mcp.snapshot.test.ts new file mode 100644 index 00000000000..3fe3288848f --- /dev/null +++ 
b/src/browser/chrome-mcp.snapshot.test.ts @@ -0,0 +1,68 @@ +import { describe, expect, it } from "vitest"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "./chrome-mcp.snapshot.js"; + +const snapshot = { + id: "root", + role: "document", + name: "Example", + children: [ + { + id: "btn-1", + role: "button", + name: "Continue", + }, + { + id: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + }, + ], +}; + +describe("chrome MCP snapshot conversion", () => { + it("flattens structured snapshots into aria-style nodes", () => { + const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10); + expect(nodes).toEqual([ + { + ref: "root", + role: "document", + name: "Example", + value: undefined, + description: undefined, + depth: 0, + }, + { + ref: "btn-1", + role: "button", + name: "Continue", + value: undefined, + description: undefined, + depth: 1, + }, + { + ref: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + description: undefined, + depth: 1, + }, + ]); + }); + + it("builds AI snapshots that preserve Chrome MCP uids as refs", () => { + const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot }); + + expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]'); + expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"'); + expect(result.refs).toEqual({ + "btn-1": { role: "button", name: "Continue" }, + "txt-1": { role: "textbox", name: "Email" }, + }); + expect(result.stats.refs).toBe(2); + }); +}); diff --git a/src/browser/chrome-mcp.snapshot.ts b/src/browser/chrome-mcp.snapshot.ts new file mode 100644 index 00000000000..f0a1413736a --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.ts @@ -0,0 +1,193 @@ +import type { SnapshotAriaNode } from "./client.js"; +import { + getRoleSnapshotStats, + type RoleRefMap, + type RoleSnapshotOptions, +} from "./pw-role-snapshot.js"; +import { CONTENT_ROLES, 
INTERACTIVE_ROLES, STRUCTURAL_ROLES } from "./snapshot-roles.js"; + +export type ChromeMcpSnapshotNode = { + id?: string; + role?: string; + name?: string; + value?: string | number | boolean; + description?: string; + children?: ChromeMcpSnapshotNode[]; +}; + +function normalizeRole(node: ChromeMcpSnapshotNode): string { + const role = typeof node.role === "string" ? node.role.trim().toLowerCase() : ""; + return role || "generic"; +} + +function normalizeString(value: unknown): string | undefined { + if (typeof value === "string") { + const trimmed = value.trim(); + return trimmed || undefined; + } + if (typeof value === "number" || typeof value === "boolean") { + return String(value); + } + return undefined; +} + +function escapeQuoted(value: string): string { + return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); +} + +function shouldIncludeNode(params: { + role: string; + name?: string; + options?: RoleSnapshotOptions; +}): boolean { + if (params.options?.interactive && !INTERACTIVE_ROLES.has(params.role)) { + return false; + } + if (params.options?.compact && STRUCTURAL_ROLES.has(params.role) && !params.name) { + return false; + } + return true; +} + +function shouldCreateRef(role: string, name?: string): boolean { + return INTERACTIVE_ROLES.has(role) || (CONTENT_ROLES.has(role) && Boolean(name)); +} + +type DuplicateTracker = { + counts: Map; + keysByRef: Map; + duplicates: Set; +}; + +function createDuplicateTracker(): DuplicateTracker { + return { + counts: new Map(), + keysByRef: new Map(), + duplicates: new Set(), + }; +} + +function registerRef( + tracker: DuplicateTracker, + ref: string, + role: string, + name?: string, +): number | undefined { + const key = `${role}:${name ?? ""}`; + const count = tracker.counts.get(key) ?? 
0; + tracker.counts.set(key, count + 1); + tracker.keysByRef.set(ref, key); + if (count > 0) { + tracker.duplicates.add(key); + return count; + } + return undefined; +} + +export function flattenChromeMcpSnapshotToAriaNodes( + root: ChromeMcpSnapshotNode, + limit = 500, +): SnapshotAriaNode[] { + const boundedLimit = Math.max(1, Math.min(2000, Math.floor(limit))); + const out: SnapshotAriaNode[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + if (out.length >= boundedLimit) { + return; + } + const ref = normalizeString(node.id); + if (ref) { + out.push({ + ref, + role: normalizeRole(node), + name: normalizeString(node.name) ?? "", + value: normalizeString(node.value), + description: normalizeString(node.description), + depth, + }); + } + for (const child of node.children ?? []) { + visit(child, depth + 1); + if (out.length >= boundedLimit) { + return; + } + } + }; + + visit(root, 0); + return out; +} + +export function buildAiSnapshotFromChromeMcpSnapshot(params: { + root: ChromeMcpSnapshotNode; + options?: RoleSnapshotOptions; + maxChars?: number; +}): { + snapshot: string; + truncated?: boolean; + refs: RoleRefMap; + stats: { lines: number; chars: number; refs: number; interactive: number }; +} { + const refs: RoleRefMap = {}; + const tracker = createDuplicateTracker(); + const lines: string[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + const role = normalizeRole(node); + const name = normalizeString(node.name); + const value = normalizeString(node.value); + const description = normalizeString(node.description); + const maxDepth = params.options?.maxDepth; + if (maxDepth !== undefined && depth > maxDepth) { + return; + } + + const includeNode = shouldIncludeNode({ role, name, options: params.options }); + if (includeNode) { + let line = `${" ".repeat(depth)}- ${role}`; + if (name) { + line += ` "${escapeQuoted(name)}"`; + } + const ref = normalizeString(node.id); + if (ref && shouldCreateRef(role, name)) 
{ + const nth = registerRef(tracker, ref, role, name); + refs[ref] = nth === undefined ? { role, name } : { role, name, nth }; + line += ` [ref=${ref}]`; + } + if (value) { + line += ` value="${escapeQuoted(value)}"`; + } + if (description) { + line += ` description="${escapeQuoted(description)}"`; + } + lines.push(line); + } + + for (const child of node.children ?? []) { + visit(child, depth + 1); + } + }; + + visit(params.root, 0); + + for (const [ref, data] of Object.entries(refs)) { + const key = tracker.keysByRef.get(ref); + if (key && !tracker.duplicates.has(key)) { + delete data.nth; + } + } + + let snapshot = lines.join("\n"); + let truncated = false; + const maxChars = + typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0 + ? Math.floor(params.maxChars) + : undefined; + if (maxChars && snapshot.length > maxChars) { + snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`; + truncated = true; + } + + const stats = getRoleSnapshotStats(snapshot, refs); + return truncated ? 
{ snapshot, truncated, refs, stats } : { snapshot, refs, stats }; +} diff --git a/src/browser/chrome-mcp.test.ts b/src/browser/chrome-mcp.test.ts new file mode 100644 index 00000000000..a77149d7a72 --- /dev/null +++ b/src/browser/chrome-mcp.test.ts @@ -0,0 +1,270 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + evaluateChromeMcpScript, + listChromeMcpTabs, + openChromeMcpTab, + resetChromeMcpSessionsForTest, + setChromeMcpSessionFactoryForTest, +} from "./chrome-mcp.js"; + +type ToolCall = { + name: string; + arguments?: Record; +}; + +type ChromeMcpSessionFactory = Exclude< + Parameters[0], + null +>; +type ChromeMcpSession = Awaited>; + +function createFakeSession(): ChromeMcpSession { + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "list_pages") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]", + "2: https://github.com/openclaw/openclaw/pull/45318", + ].join("\n"), + }, + ], + }; + } + if (name === "new_page") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + "2: https://github.com/openclaw/openclaw/pull/45318", + "3: https://example.com/ [selected]", + ].join("\n"), + }, + ], + }; + } + if (name === "evaluate_script") { + return { + content: [ + { + type: "text", + text: "```json\n123\n```", + }, + ], + }; + } + throw new Error(`unexpected tool ${name}`); + }); + + return { + client: { + callTool, + listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }), + close: vi.fn().mockResolvedValue(undefined), + connect: vi.fn().mockResolvedValue(undefined), + }, + transport: { + pid: 123, + }, + ready: Promise.resolve(), + } as unknown as ChromeMcpSession; +} + +describe("chrome MCP page parsing", () => { + beforeEach(async () => { + await resetChromeMcpSessionsForTest(); 
+ }); + + it("parses list_pages text responses when structuredContent is missing", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const tabs = await listChromeMcpTabs("chrome-live"); + + expect(tabs).toEqual([ + { + targetId: "1", + title: "", + url: "https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + type: "page", + }, + { + targetId: "2", + title: "", + url: "https://github.com/openclaw/openclaw/pull/45318", + type: "page", + }, + ]); + }); + + it("parses new_page text responses and returns the created tab", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const tab = await openChromeMcpTab("chrome-live", "https://example.com/"); + + expect(tab).toEqual({ + targetId: "3", + title: "", + url: "https://example.com/", + type: "page", + }); + }); + + it("parses evaluate_script text responses when structuredContent is missing", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const result = await evaluateChromeMcpScript({ + profileName: "chrome-live", + targetId: "1", + fn: "() => 123", + }); + + expect(result).toBe(123); + }); + + it("surfaces MCP tool errors instead of JSON parse noise", async () => { + const factory: ChromeMcpSessionFactory = async () => { + const session = createFakeSession(); + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "evaluate_script") { + return { + content: [ + { + type: "text", + text: "Cannot read properties of null (reading 'value')", + }, + ], + isError: true, + }; + } + throw new Error(`unexpected tool ${name}`); + }); + session.client.callTool = callTool as typeof session.client.callTool; + return session; + }; + setChromeMcpSessionFactoryForTest(factory); + + await expect( + evaluateChromeMcpScript({ + 
profileName: "chrome-live", + targetId: "1", + fn: "() => document.getElementById('missing').value", + }), + ).rejects.toThrow(/Cannot read properties of null/); + }); + + it("reuses a single pending session for concurrent requests", async () => { + let factoryCalls = 0; + let releaseFactory!: () => void; + const factoryGate = new Promise((resolve) => { + releaseFactory = resolve; + }); + + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + await factoryGate; + return createFakeSession(); + }; + setChromeMcpSessionFactoryForTest(factory); + + const tabsPromise = listChromeMcpTabs("chrome-live"); + const evalPromise = evaluateChromeMcpScript({ + profileName: "chrome-live", + targetId: "1", + fn: "() => 123", + }); + + releaseFactory(); + const [tabs, result] = await Promise.all([tabsPromise, evalPromise]); + + expect(factoryCalls).toBe(1); + expect(tabs).toHaveLength(2); + expect(result).toBe(123); + }); + + it("preserves session after tool-level errors (isError)", async () => { + let factoryCalls = 0; + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + const session = createFakeSession(); + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "evaluate_script") { + return { + content: [{ type: "text", text: "element not found" }], + isError: true, + }; + } + if (name === "list_pages") { + return { + content: [{ type: "text", text: "## Pages\n1: https://example.com [selected]" }], + }; + } + throw new Error(`unexpected tool ${name}`); + }); + session.client.callTool = callTool as typeof session.client.callTool; + return session; + }; + setChromeMcpSessionFactoryForTest(factory); + + // First call: tool error (isError: true) — should NOT destroy session + await expect( + evaluateChromeMcpScript({ profileName: "chrome-live", targetId: "1", fn: "() => null" }), + ).rejects.toThrow(/element not found/); + + // Second call: should reuse the same session (factory called only once) + const tabs = await 
listChromeMcpTabs("chrome-live"); + expect(factoryCalls).toBe(1); + expect(tabs).toHaveLength(1); + }); + + it("destroys session on transport errors so next call reconnects", async () => { + let factoryCalls = 0; + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + const session = createFakeSession(); + if (factoryCalls === 1) { + // First session: transport error (callTool throws) + const callTool = vi.fn(async () => { + throw new Error("connection reset"); + }); + session.client.callTool = callTool as typeof session.client.callTool; + } + return session; + }; + setChromeMcpSessionFactoryForTest(factory); + + // First call: transport error — should destroy session + await expect(listChromeMcpTabs("chrome-live")).rejects.toThrow(/connection reset/); + + // Second call: should create a new session (factory called twice) + const tabs = await listChromeMcpTabs("chrome-live"); + expect(factoryCalls).toBe(2); + expect(tabs).toHaveLength(2); + }); + + it("clears failed pending sessions so the next call can retry", async () => { + let factoryCalls = 0; + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + if (factoryCalls === 1) { + throw new Error("attach failed"); + } + return createFakeSession(); + }; + setChromeMcpSessionFactoryForTest(factory); + + await expect(listChromeMcpTabs("chrome-live")).rejects.toThrow(/attach failed/); + + const tabs = await listChromeMcpTabs("chrome-live"); + expect(factoryCalls).toBe(2); + expect(tabs).toHaveLength(2); + }); +}); diff --git a/src/browser/chrome-mcp.ts b/src/browser/chrome-mcp.ts new file mode 100644 index 00000000000..25ae39b2293 --- /dev/null +++ b/src/browser/chrome-mcp.ts @@ -0,0 +1,542 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { Client } from "@modelcontextprotocol/sdk/client/index.js"; +import { StdioClientTransport } from 
"@modelcontextprotocol/sdk/client/stdio.js"; +import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js"; +import type { BrowserTab } from "./client.js"; +import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js"; + +type ChromeMcpStructuredPage = { + id: number; + url?: string; + selected?: boolean; +}; + +type ChromeMcpToolResult = { + structuredContent?: Record; + content?: Array>; + isError?: boolean; +}; + +type ChromeMcpSession = { + client: Client; + transport: StdioClientTransport; + ready: Promise; +}; + +type ChromeMcpSessionFactory = (profileName: string) => Promise; + +const DEFAULT_CHROME_MCP_COMMAND = "npx"; +const DEFAULT_CHROME_MCP_ARGS = [ + "-y", + "chrome-devtools-mcp@latest", + "--autoConnect", + // Direct chrome-devtools-mcp launches do not enable structuredContent by default. + "--experimentalStructuredContent", + "--experimental-page-id-routing", +]; + +const sessions = new Map(); +const pendingSessions = new Map>(); +let sessionFactory: ChromeMcpSessionFactory | null = null; + +function asRecord(value: unknown): Record | null { + return value && typeof value === "object" && !Array.isArray(value) + ? (value as Record) + : null; +} + +function asPages(value: unknown): ChromeMcpStructuredPage[] { + if (!Array.isArray(value)) { + return []; + } + const out: ChromeMcpStructuredPage[] = []; + for (const entry of value) { + const record = asRecord(entry); + if (!record || typeof record.id !== "number") { + continue; + } + out.push({ + id: record.id, + url: typeof record.url === "string" ? 
record.url : undefined, + selected: record.selected === true, + }); + } + return out; +} + +function parsePageId(targetId: string): number { + const parsed = Number.parseInt(targetId.trim(), 10); + if (!Number.isFinite(parsed)) { + throw new BrowserTabNotFoundError(); + } + return parsed; +} + +function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] { + return pages.map((page) => ({ + targetId: String(page.id), + title: "", + url: page.url ?? "", + type: "page", + })); +} + +function extractStructuredContent(result: ChromeMcpToolResult): Record { + return asRecord(result.structuredContent) ?? {}; +} + +function extractTextContent(result: ChromeMcpToolResult): string[] { + const content = Array.isArray(result.content) ? result.content : []; + return content + .map((entry) => { + const record = asRecord(entry); + return record && typeof record.text === "string" ? record.text : ""; + }) + .filter(Boolean); +} + +function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const pages: ChromeMcpStructuredPage[] = []; + for (const block of extractTextContent(result)) { + for (const line of block.split(/\r?\n/)) { + const match = line.match(/^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i); + if (!match) { + continue; + } + pages.push({ + id: Number.parseInt(match[1] ?? "", 10), + url: match[2]?.trim() || undefined, + selected: Boolean(match[3]), + }); + } + } + return pages; +} + +function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const structured = asPages(extractStructuredContent(result).pages); + return structured.length > 0 ? 
structured : extractTextPages(result); +} + +function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode { + const structured = extractStructuredContent(result); + const snapshot = asRecord(structured.snapshot); + if (!snapshot) { + throw new Error("Chrome MCP snapshot response was missing structured snapshot data."); + } + return snapshot as unknown as ChromeMcpSnapshotNode; +} + +function extractJsonBlock(text: string): unknown { + const match = text.match(/```json\s*([\s\S]*?)\s*```/i); + const raw = match?.[1]?.trim() || text.trim(); + return raw ? JSON.parse(raw) : null; +} + +function extractMessageText(result: ChromeMcpToolResult): string { + const message = extractStructuredContent(result).message; + if (typeof message === "string" && message.trim()) { + return message; + } + const blocks = extractTextContent(result); + return blocks.find((block) => block.trim()) ?? ""; +} + +function extractToolErrorMessage(result: ChromeMcpToolResult, name: string): string { + const message = extractMessageText(result).trim(); + return message || `Chrome MCP tool "${name}" failed.`; +} + +function extractJsonMessage(result: ChromeMcpToolResult): unknown { + const candidates = [extractMessageText(result), ...extractTextContent(result)].filter((text) => + text.trim(), + ); + let lastError: unknown; + for (const candidate of candidates) { + try { + return extractJsonBlock(candidate); + } catch (err) { + lastError = err; + } + } + if (lastError) { + throw lastError; + } + return null; +} + +async function createRealSession(profileName: string): Promise { + const transport = new StdioClientTransport({ + command: DEFAULT_CHROME_MCP_COMMAND, + args: DEFAULT_CHROME_MCP_ARGS, + stderr: "pipe", + }); + const client = new Client( + { + name: "openclaw-browser", + version: "0.0.0", + }, + {}, + ); + + const ready = (async () => { + try { + await client.connect(transport); + const tools = await client.listTools(); + if (!tools.tools.some((tool) => tool.name === 
"list_pages")) { + throw new Error("Chrome MCP server did not expose the expected navigation tools."); + } + } catch (err) { + await client.close().catch(() => {}); + throw new BrowserProfileUnavailableError( + `Chrome MCP existing-session attach failed for profile "${profileName}". ` + + `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. ` + + `Details: ${String(err)}`, + ); + } + })(); + + return { + client, + transport, + ready, + }; +} + +async function getSession(profileName: string): Promise { + let session = sessions.get(profileName); + if (session && session.transport.pid === null) { + sessions.delete(profileName); + session = undefined; + } + if (!session) { + let pending = pendingSessions.get(profileName); + if (!pending) { + pending = (async () => { + const created = await (sessionFactory ?? createRealSession)(profileName); + sessions.set(profileName, created); + return created; + })(); + pendingSessions.set(profileName, pending); + } + try { + session = await pending; + } finally { + if (pendingSessions.get(profileName) === pending) { + pendingSessions.delete(profileName); + } + } + } + try { + await session.ready; + return session; + } catch (err) { + const current = sessions.get(profileName); + if (current?.transport === session.transport) { + sessions.delete(profileName); + } + throw err; + } +} + +async function callTool( + profileName: string, + name: string, + args: Record = {}, +): Promise { + const session = await getSession(profileName); + let result: ChromeMcpToolResult; + try { + result = (await session.client.callTool({ + name, + arguments: args, + })) as ChromeMcpToolResult; + } catch (err) { + // Transport/connection error — tear down session so it reconnects on next call + sessions.delete(profileName); + await session.client.close().catch(() => {}); + throw err; + } + // Tool-level errors (element not found, script error, etc.) 
don't indicate a + // broken connection — don't tear down the session for these. + if (result.isError) { + throw new Error(extractToolErrorMessage(result, name)); + } + return result; +} + +async function withTempFile(fn: (filePath: string) => Promise): Promise { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-")); + const filePath = path.join(dir, randomUUID()); + try { + return await fn(filePath); + } finally { + await fs.rm(dir, { recursive: true, force: true }).catch(() => {}); + } +} + +async function findPageById(profileName: string, pageId: number): Promise { + const pages = await listChromeMcpPages(profileName); + const page = pages.find((entry) => entry.id === pageId); + if (!page) { + throw new BrowserTabNotFoundError(); + } + return page; +} + +export async function ensureChromeMcpAvailable(profileName: string): Promise { + await getSession(profileName); +} + +export function getChromeMcpPid(profileName: string): number | null { + return sessions.get(profileName)?.transport.pid ?? 
null; +} + +export async function closeChromeMcpSession(profileName: string): Promise { + pendingSessions.delete(profileName); + const session = sessions.get(profileName); + if (!session) { + return false; + } + sessions.delete(profileName); + await session.client.close().catch(() => {}); + return true; +} + +export async function stopAllChromeMcpSessions(): Promise { + const names = [...sessions.keys()]; + for (const name of names) { + await closeChromeMcpSession(name).catch(() => {}); + } +} + +export async function listChromeMcpPages(profileName: string): Promise { + const result = await callTool(profileName, "list_pages"); + return extractStructuredPages(result); +} + +export async function listChromeMcpTabs(profileName: string): Promise { + return toBrowserTabs(await listChromeMcpPages(profileName)); +} + +export async function openChromeMcpTab(profileName: string, url: string): Promise { + const result = await callTool(profileName, "new_page", { url }); + const pages = extractStructuredPages(result); + const chosen = pages.find((page) => page.selected) ?? pages.at(-1); + if (!chosen) { + throw new Error("Chrome MCP did not return the created page."); + } + return { + targetId: String(chosen.id), + title: "", + url: chosen.url ?? 
url, + type: "page", + }; +} + +export async function focusChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "select_page", { + pageId: parsePageId(targetId), + bringToFront: true, + }); +} + +export async function closeChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "close_page", { pageId: parsePageId(targetId) }); +} + +export async function navigateChromeMcpPage(params: { + profileName: string; + targetId: string; + url: string; + timeoutMs?: number; +}): Promise<{ url: string }> { + await callTool(params.profileName, "navigate_page", { + pageId: parsePageId(params.targetId), + type: "url", + url: params.url, + ...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}), + }); + const page = await findPageById(params.profileName, parsePageId(params.targetId)); + return { url: page.url ?? params.url }; +} + +export async function takeChromeMcpSnapshot(params: { + profileName: string; + targetId: string; +}): Promise { + const result = await callTool(params.profileName, "take_snapshot", { + pageId: parsePageId(params.targetId), + }); + return extractSnapshot(result); +} + +export async function takeChromeMcpScreenshot(params: { + profileName: string; + targetId: string; + uid?: string; + fullPage?: boolean; + format?: "png" | "jpeg"; +}): Promise { + return await withTempFile(async (filePath) => { + await callTool(params.profileName, "take_screenshot", { + pageId: parsePageId(params.targetId), + filePath, + format: params.format ?? "png", + ...(params.uid ? { uid: params.uid } : {}), + ...(params.fullPage ? 
{ fullPage: true } : {}), + }); + return await fs.readFile(filePath); + }); +} + +export async function clickChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + doubleClick?: boolean; +}): Promise { + await callTool(params.profileName, "click", { + pageId: parsePageId(params.targetId), + uid: params.uid, + ...(params.doubleClick ? { dblClick: true } : {}), + }); +} + +export async function fillChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + value: string; +}): Promise { + await callTool(params.profileName, "fill", { + pageId: parsePageId(params.targetId), + uid: params.uid, + value: params.value, + }); +} + +export async function fillChromeMcpForm(params: { + profileName: string; + targetId: string; + elements: Array<{ uid: string; value: string }>; +}): Promise { + await callTool(params.profileName, "fill_form", { + pageId: parsePageId(params.targetId), + elements: params.elements, + }); +} + +export async function hoverChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; +}): Promise { + await callTool(params.profileName, "hover", { + pageId: parsePageId(params.targetId), + uid: params.uid, + }); +} + +export async function dragChromeMcpElement(params: { + profileName: string; + targetId: string; + fromUid: string; + toUid: string; +}): Promise { + await callTool(params.profileName, "drag", { + pageId: parsePageId(params.targetId), + from_uid: params.fromUid, + to_uid: params.toUid, + }); +} + +export async function uploadChromeMcpFile(params: { + profileName: string; + targetId: string; + uid: string; + filePath: string; +}): Promise { + await callTool(params.profileName, "upload_file", { + pageId: parsePageId(params.targetId), + uid: params.uid, + filePath: params.filePath, + }); +} + +export async function pressChromeMcpKey(params: { + profileName: string; + targetId: string; + key: string; +}): Promise { + await callTool(params.profileName, "press_key", { 
+ pageId: parsePageId(params.targetId), + key: params.key, + }); +} + +export async function resizeChromeMcpPage(params: { + profileName: string; + targetId: string; + width: number; + height: number; +}): Promise { + await callTool(params.profileName, "resize_page", { + pageId: parsePageId(params.targetId), + width: params.width, + height: params.height, + }); +} + +export async function handleChromeMcpDialog(params: { + profileName: string; + targetId: string; + action: "accept" | "dismiss"; + promptText?: string; +}): Promise { + await callTool(params.profileName, "handle_dialog", { + pageId: parsePageId(params.targetId), + action: params.action, + ...(params.promptText ? { promptText: params.promptText } : {}), + }); +} + +export async function evaluateChromeMcpScript(params: { + profileName: string; + targetId: string; + fn: string; + args?: string[]; +}): Promise { + const result = await callTool(params.profileName, "evaluate_script", { + pageId: parsePageId(params.targetId), + function: params.fn, + ...(params.args?.length ? { args: params.args } : {}), + }); + return extractJsonMessage(result); +} + +export async function waitForChromeMcpText(params: { + profileName: string; + targetId: string; + text: string[]; + timeoutMs?: number; +}): Promise { + await callTool(params.profileName, "wait_for", { + pageId: parsePageId(params.targetId), + text: params.text, + ...(typeof params.timeoutMs === "number" ? 
{ timeout: params.timeoutMs } : {}), + }); +} + +export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void { + sessionFactory = factory; +} + +export async function resetChromeMcpSessionsForTest(): Promise { + sessionFactory = null; + pendingSessions.clear(); + await stopAllChromeMcpSessions(); +} diff --git a/src/browser/client-actions-core.ts b/src/browser/client-actions-core.ts index 72e27cd9afa..149ca54fadf 100644 --- a/src/browser/client-actions-core.ts +++ b/src/browser/client-actions-core.ts @@ -15,16 +15,19 @@ export type BrowserFormField = { export type BrowserActRequest = | { kind: "click"; - ref: string; + ref?: string; + selector?: string; targetId?: string; doubleClick?: boolean; button?: string; modifiers?: string[]; + delayMs?: number; timeoutMs?: number; } | { kind: "type"; - ref: string; + ref?: string; + selector?: string; text: string; targetId?: string; submit?: boolean; @@ -32,23 +35,33 @@ export type BrowserActRequest = timeoutMs?: number; } | { kind: "press"; key: string; targetId?: string; delayMs?: number } - | { kind: "hover"; ref: string; targetId?: string; timeoutMs?: number } + | { + kind: "hover"; + ref?: string; + selector?: string; + targetId?: string; + timeoutMs?: number; + } | { kind: "scrollIntoView"; - ref: string; + ref?: string; + selector?: string; targetId?: string; timeoutMs?: number; } | { kind: "drag"; - startRef: string; - endRef: string; + startRef?: string; + startSelector?: string; + endRef?: string; + endSelector?: string; targetId?: string; timeoutMs?: number; } | { kind: "select"; - ref: string; + ref?: string; + selector?: string; values: string[]; targetId?: string; timeoutMs?: number; @@ -73,13 +86,20 @@ export type BrowserActRequest = timeoutMs?: number; } | { kind: "evaluate"; fn: string; ref?: string; targetId?: string; timeoutMs?: number } - | { kind: "close"; targetId?: string }; + | { kind: "close"; targetId?: string } + | { + kind: "batch"; + actions: 
BrowserActRequest[]; + targetId?: string; + stopOnError?: boolean; + }; export type BrowserActResponse = { ok: true; targetId: string; url?: string; result?: unknown; + results?: Array<{ ok: boolean; error?: string }>; }; export type BrowserDownloadPayload = { diff --git a/src/browser/client-fetch.loopback-auth.test.ts b/src/browser/client-fetch.loopback-auth.test.ts index 7967d11c76e..bf982322027 100644 --- a/src/browser/client-fetch.loopback-auth.test.ts +++ b/src/browser/client-fetch.loopback-auth.test.ts @@ -50,6 +50,27 @@ function stubJsonFetchOk() { return fetchMock; } +async function expectThrownBrowserFetchError( + request: () => Promise, + params: { + contains: string[]; + omits?: string[]; + }, +) { + const thrown = await request().catch((err: unknown) => err); + expect(thrown).toBeInstanceOf(Error); + if (!(thrown instanceof Error)) { + throw new Error(`Expected Error, got ${String(thrown)}`); + } + for (const snippet of params.contains) { + expect(thrown.message).toContain(snippet); + } + for (const snippet of params.omits ?? 
[]) { + expect(thrown.message).not.toContain(snippet); + } + return thrown; +} + describe("fetchBrowserJson loopback auth", () => { beforeEach(() => { vi.restoreAllMocks(); @@ -127,15 +148,10 @@ describe("fetchBrowserJson loopback auth", () => { it("preserves dispatcher error context while keeping no-retry hint", async () => { mocks.dispatch.mockRejectedValueOnce(new Error("Chrome CDP handshake timeout")); - const thrown = await fetchBrowserJson<{ ok: boolean }>("/tabs").catch((err: unknown) => err); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Chrome CDP handshake timeout"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); - expect(thrown.message).not.toContain("Can't reach the OpenClaw browser control service"); + await expectThrownBrowserFetchError(() => fetchBrowserJson<{ ok: boolean }>("/tabs"), { + contains: ["Chrome CDP handshake timeout", "Do NOT retry the browser tool"], + omits: ["Can't reach the OpenClaw browser control service"], + }); }); it("surfaces 429 from HTTP URL as rate-limit error with no-retry hint", async () => { @@ -147,17 +163,13 @@ describe("fetchBrowserJson loopback auth", () => { vi.fn(async () => response), ); - const thrown = await fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/").catch( - (err: unknown) => err, + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/"), + { + contains: ["Browser service rate limit reached", "Do NOT retry the browser tool"], + omits: ["max concurrent sessions exceeded"], + }, ); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Browser service rate limit reached"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); - 
expect(thrown.message).not.toContain("max concurrent sessions exceeded"); expect(text).not.toHaveBeenCalled(); expect(cancel).toHaveBeenCalledOnce(); }); @@ -168,16 +180,12 @@ describe("fetchBrowserJson loopback auth", () => { vi.fn(async () => new Response("", { status: 429 })), ); - const thrown = await fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/").catch( - (err: unknown) => err, + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/"), + { + contains: ["rate limit reached", "Do NOT retry the browser tool"], + }, ); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("rate limit reached"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); }); it("keeps Browserbase-specific wording for Browserbase 429 responses", async () => { @@ -186,17 +194,13 @@ describe("fetchBrowserJson loopback auth", () => { vi.fn(async () => new Response("max concurrent sessions exceeded", { status: 429 })), ); - const thrown = await fetchBrowserJson<{ ok: boolean }>( - "https://connect.browserbase.com/session", - ).catch((err: unknown) => err); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Browserbase rate limit reached"); - expect(thrown.message).toContain("upgrade your plan"); - expect(thrown.message).not.toContain("max concurrent sessions exceeded"); + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("https://connect.browserbase.com/session"), + { + contains: ["Browserbase rate limit reached", "upgrade your plan"], + omits: ["max concurrent sessions exceeded"], + }, + ); }); it("non-429 errors still produce generic messages", async () => { @@ -205,16 +209,13 @@ describe("fetchBrowserJson loopback auth", () => { 
vi.fn(async () => new Response("internal error", { status: 500 })), ); - const thrown = await fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/").catch( - (err: unknown) => err, + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/"), + { + contains: ["internal error"], + omits: ["rate limit"], + }, ); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("internal error"); - expect(thrown.message).not.toContain("rate limit"); }); it("surfaces 429 from dispatcher path as rate-limit error", async () => { @@ -223,15 +224,10 @@ describe("fetchBrowserJson loopback auth", () => { body: { error: "too many sessions" }, }); - const thrown = await fetchBrowserJson<{ ok: boolean }>("/tabs").catch((err: unknown) => err); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Browser service rate limit reached"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); - expect(thrown.message).not.toContain("too many sessions"); + await expectThrownBrowserFetchError(() => fetchBrowserJson<{ ok: boolean }>("/tabs"), { + contains: ["Browser service rate limit reached", "Do NOT retry the browser tool"], + omits: ["too many sessions"], + }); }); it("keeps absolute URL failures wrapped as reachability errors", async () => { @@ -242,15 +238,14 @@ describe("fetchBrowserJson loopback auth", () => { }), ); - const thrown = await fetchBrowserJson<{ ok: boolean }>("http://example.com/").catch( - (err: unknown) => err, + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://example.com/"), + { + contains: [ + "Can't reach the OpenClaw browser control service", + "Do NOT retry the browser tool", + ], + }, ); - - 
expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Can't reach the OpenClaw browser control service"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); }); }); diff --git a/src/browser/client.test.ts b/src/browser/client.test.ts index a4f95c23007..64d37580e35 100644 --- a/src/browser/client.test.ts +++ b/src/browser/client.test.ts @@ -160,6 +160,7 @@ describe("browser client", () => { targetId: "t1", url: "https://x", result: 1, + results: [{ ok: true }], }), } as unknown as Response; } @@ -258,7 +259,7 @@ describe("browser client", () => { ).resolves.toMatchObject({ ok: true, targetId: "t1" }); await expect( browserAct("http://127.0.0.1:18791", { kind: "click", ref: "1" }), - ).resolves.toMatchObject({ ok: true, targetId: "t1" }); + ).resolves.toMatchObject({ ok: true, targetId: "t1", results: [{ ok: true }] }); await expect( browserArmFileChooser("http://127.0.0.1:18791", { paths: ["/tmp/a.txt"], diff --git a/src/browser/client.ts b/src/browser/client.ts index 953c9efcd11..8e30762bfb1 100644 --- a/src/browser/client.ts +++ b/src/browser/client.ts @@ -1,14 +1,18 @@ import { fetchBrowserJson } from "./client-fetch.js"; +export type BrowserTransport = "cdp" | "chrome-mcp"; + export type BrowserStatus = { enabled: boolean; profile?: string; + driver?: "openclaw" | "extension" | "existing-session"; + transport?: BrowserTransport; running: boolean; cdpReady?: boolean; cdpHttp?: boolean; pid: number | null; - cdpPort: number; - cdpUrl?: string; + cdpPort: number | null; + cdpUrl?: string | null; chosenBrowser: string | null; detectedBrowser?: string | null; detectedExecutablePath?: string | null; @@ -23,9 +27,11 @@ export type BrowserStatus = { export type ProfileStatus = { name: string; - cdpPort: number; - cdpUrl: string; + transport?: BrowserTransport; + cdpPort: number | null; + cdpUrl: string | null; color: string; + driver: 
"openclaw" | "extension" | "existing-session"; running: boolean; tabCount: number; isDefault: boolean; @@ -153,8 +159,9 @@ export async function browserResetProfile( export type BrowserCreateProfileResult = { ok: true; profile: string; - cdpPort: number; - cdpUrl: string; + transport?: BrowserTransport; + cdpPort: number | null; + cdpUrl: string | null; color: string; isRemote: boolean; }; @@ -165,7 +172,7 @@ export async function browserCreateProfile( name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | "extension"; + driver?: "openclaw" | "extension" | "existing-session"; }, ): Promise { return await fetchBrowserJson( diff --git a/src/browser/config.test.ts b/src/browser/config.test.ts index d2643a6784b..5c16dd54dc6 100644 --- a/src/browser/config.test.ts +++ b/src/browser/config.test.ts @@ -1,6 +1,7 @@ import { describe, expect, it } from "vitest"; import { withEnv } from "../test-utils/env.js"; import { resolveBrowserConfig, resolveProfile, shouldStartLocalBrowserServer } from "./config.js"; +import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; describe("browser config", () => { it("defaults to enabled with loopback defaults and lobster-orange color", () => { @@ -21,10 +22,14 @@ describe("browser config", () => { expect(openclaw?.driver).toBe("openclaw"); expect(openclaw?.cdpPort).toBe(18800); expect(openclaw?.cdpUrl).toBe("http://127.0.0.1:18800"); - const chrome = resolveProfile(resolved, "chrome"); - expect(chrome?.driver).toBe("extension"); - expect(chrome?.cdpPort).toBe(18792); - expect(chrome?.cdpUrl).toBe("http://127.0.0.1:18792"); + const user = resolveProfile(resolved, "user"); + expect(user?.driver).toBe("existing-session"); + expect(user?.cdpPort).toBe(0); + expect(user?.cdpUrl).toBe(""); + const chromeRelay = resolveProfile(resolved, "chrome-relay"); + expect(chromeRelay?.driver).toBe("extension"); + expect(chromeRelay?.cdpPort).toBe(18792); + expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:18792"); 
expect(resolved.remoteCdpTimeoutMs).toBe(1500); expect(resolved.remoteCdpHandshakeTimeoutMs).toBe(3000); }); @@ -33,10 +38,10 @@ describe("browser config", () => { withEnv({ OPENCLAW_GATEWAY_PORT: "19001" }, () => { const resolved = resolveBrowserConfig(undefined); expect(resolved.controlPort).toBe(19003); - const chrome = resolveProfile(resolved, "chrome"); - expect(chrome?.driver).toBe("extension"); - expect(chrome?.cdpPort).toBe(19004); - expect(chrome?.cdpUrl).toBe("http://127.0.0.1:19004"); + const chromeRelay = resolveProfile(resolved, "chrome-relay"); + expect(chromeRelay?.driver).toBe("extension"); + expect(chromeRelay?.cdpPort).toBe(19004); + expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:19004"); const openclaw = resolveProfile(resolved, "openclaw"); expect(openclaw?.cdpPort).toBe(19012); @@ -48,10 +53,10 @@ describe("browser config", () => { withEnv({ OPENCLAW_GATEWAY_PORT: undefined }, () => { const resolved = resolveBrowserConfig(undefined, { gateway: { port: 19011 } }); expect(resolved.controlPort).toBe(19013); - const chrome = resolveProfile(resolved, "chrome"); - expect(chrome?.driver).toBe("extension"); - expect(chrome?.cdpPort).toBe(19014); - expect(chrome?.cdpUrl).toBe("http://127.0.0.1:19014"); + const chromeRelay = resolveProfile(resolved, "chrome-relay"); + expect(chromeRelay?.driver).toBe("extension"); + expect(chromeRelay?.cdpPort).toBe(19014); + expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:19014"); const openclaw = resolveProfile(resolved, "openclaw"); expect(openclaw?.cdpPort).toBe(19022); @@ -204,13 +209,13 @@ describe("browser config", () => { ); }); - it("does not add the built-in chrome extension profile if the derived relay port is already used", () => { + it("does not add the built-in chrome-relay profile if the derived relay port is already used", () => { const resolved = resolveBrowserConfig({ profiles: { openclaw: { cdpPort: 18792, color: "#FF4500" }, }, }); - expect(resolveProfile(resolved, "chrome")).toBe(null); + 
expect(resolveProfile(resolved, "chrome-relay")).toBe(null); expect(resolved.defaultProfile).toBe("openclaw"); }); @@ -278,6 +283,47 @@ describe("browser config", () => { expect(resolved.ssrfPolicy).toEqual({}); }); + it("resolves existing-session profiles without cdpPort or cdpUrl", () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }, + }, + }); + const profile = resolveProfile(resolved, "chrome-live"); + expect(profile).not.toBeNull(); + expect(profile?.driver).toBe("existing-session"); + expect(profile?.attachOnly).toBe(true); + expect(profile?.cdpPort).toBe(0); + expect(profile?.cdpUrl).toBe(""); + expect(profile?.cdpIsLoopback).toBe(true); + expect(profile?.color).toBe("#00AA00"); + }); + + it("sets usesChromeMcp only for existing-session profiles", () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + work: { cdpPort: 18801, color: "#0066CC" }, + }, + }); + + const existingSession = resolveProfile(resolved, "chrome-live")!; + expect(getBrowserProfileCapabilities(existingSession).usesChromeMcp).toBe(true); + + const managed = resolveProfile(resolved, "openclaw")!; + expect(getBrowserProfileCapabilities(managed).usesChromeMcp).toBe(false); + + const extension = resolveProfile(resolved, "chrome-relay")!; + expect(getBrowserProfileCapabilities(extension).usesChromeMcp).toBe(false); + + const work = resolveProfile(resolved, "work")!; + expect(getBrowserProfileCapabilities(work).usesChromeMcp).toBe(false); + }); + describe("default profile preference", () => { it("defaults to openclaw profile when defaultProfile is not configured", () => { const resolved = resolveBrowserConfig({ @@ -312,17 +358,17 @@ describe("browser config", () => { it("explicit defaultProfile config overrides defaults in headless mode", () => { const resolved = resolveBrowserConfig({ headless: 
true, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", }); - expect(resolved.defaultProfile).toBe("chrome"); + expect(resolved.defaultProfile).toBe("chrome-relay"); }); it("explicit defaultProfile config overrides defaults in noSandbox mode", () => { const resolved = resolveBrowserConfig({ noSandbox: true, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", }); - expect(resolved.defaultProfile).toBe("chrome"); + expect(resolved.defaultProfile).toBe("chrome-relay"); }); it("allows custom profile as default even in headless mode", () => { diff --git a/src/browser/config.ts b/src/browser/config.ts index 6d24a07a287..8bcd51d0a68 100644 --- a/src/browser/config.ts +++ b/src/browser/config.ts @@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = { cdpHost: string; cdpIsLoopback: boolean; color: string; - driver: "openclaw" | "extension"; + driver: "openclaw" | "extension" | "existing-session"; attachOnly: boolean; }; @@ -180,17 +180,35 @@ function ensureDefaultProfile( } /** - * Ensure a built-in "chrome" profile exists for the Chrome extension relay. + * Ensure a built-in "user" profile exists for Chrome's existing-session attach flow. + */ +function ensureDefaultUserBrowserProfile( + profiles: Record, +): Record { + const result = { ...profiles }; + if (result.user) { + return result; + } + result.user = { + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }; + return result; +} + +/** + * Ensure a built-in "chrome-relay" profile exists for the Chrome extension relay. * * Note: this is an OpenClaw browser profile (routing config), not a Chrome user profile. * It points at the local relay CDP endpoint (controlPort + 1). 
*/ -function ensureDefaultChromeExtensionProfile( +function ensureDefaultChromeRelayProfile( profiles: Record, controlPort: number, ): Record { const result = { ...profiles }; - if (result.chrome) { + if (result["chrome-relay"]) { return result; } const relayPort = controlPort + 1; @@ -202,7 +220,7 @@ function ensureDefaultChromeExtensionProfile( if (getUsedPorts(result).has(relayPort)) { return result; } - result.chrome = { + result["chrome-relay"] = { driver: "extension", cdpUrl: `http://127.0.0.1:${relayPort}`, color: "#00AA00", @@ -268,13 +286,15 @@ export function resolveBrowserConfig( const legacyCdpPort = rawCdpUrl ? cdpInfo.port : undefined; const isWsUrl = cdpInfo.parsed.protocol === "ws:" || cdpInfo.parsed.protocol === "wss:"; const legacyCdpUrl = rawCdpUrl && isWsUrl ? cdpInfo.normalized : undefined; - const profiles = ensureDefaultChromeExtensionProfile( - ensureDefaultProfile( - cfg?.profiles, - defaultColor, - legacyCdpPort, - cdpPortRangeStart, - legacyCdpUrl, + const profiles = ensureDefaultChromeRelayProfile( + ensureDefaultUserBrowserProfile( + ensureDefaultProfile( + cfg?.profiles, + defaultColor, + legacyCdpPort, + cdpPortRangeStart, + legacyCdpUrl, + ), ), controlPort, ); @@ -286,7 +306,7 @@ export function resolveBrowserConfig( ? DEFAULT_BROWSER_DEFAULT_PROFILE_NAME : profiles[DEFAULT_OPENCLAW_BROWSER_PROFILE_NAME] ? DEFAULT_OPENCLAW_BROWSER_PROFILE_NAME - : "chrome"); + : "user"); const extraArgs = Array.isArray(cfg?.extraArgs) ? cfg.extraArgs.filter((a): a is string => typeof a === "string" && a.trim().length > 0) @@ -335,7 +355,26 @@ export function resolveProfile( let cdpHost = resolved.cdpHost; let cdpPort = profile.cdpPort ?? 0; let cdpUrl = ""; - const driver = profile.driver === "extension" ? "extension" : "openclaw"; + const driver = + profile.driver === "extension" + ? "extension" + : profile.driver === "existing-session" + ? 
"existing-session" + : "openclaw"; + + if (driver === "existing-session") { + // existing-session uses Chrome MCP auto-connect; no CDP port/URL needed + return { + name: profileName, + cdpPort: 0, + cdpUrl: "", + cdpHost: "", + cdpIsLoopback: true, + color: profile.color, + driver, + attachOnly: true, + }; + } if (rawProfileUrl) { const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`); diff --git a/src/browser/profile-capabilities.ts b/src/browser/profile-capabilities.ts index 07a70ba00c4..b736a77d943 100644 --- a/src/browser/profile-capabilities.ts +++ b/src/browser/profile-capabilities.ts @@ -1,10 +1,16 @@ import type { ResolvedBrowserProfile } from "./config.js"; -export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp"; +export type BrowserProfileMode = + | "local-managed" + | "local-extension-relay" + | "local-existing-session" + | "remote-cdp"; export type BrowserProfileCapabilities = { mode: BrowserProfileMode; isRemote: boolean; + /** Profile uses the Chrome DevTools MCP server (existing-session driver). 
*/ + usesChromeMcp: boolean; requiresRelay: boolean; requiresAttachedTab: boolean; usesPersistentPlaywright: boolean; @@ -21,6 +27,7 @@ export function getBrowserProfileCapabilities( return { mode: "local-extension-relay", isRemote: false, + usesChromeMcp: false, requiresRelay: true, requiresAttachedTab: true, usesPersistentPlaywright: false, @@ -31,10 +38,26 @@ export function getBrowserProfileCapabilities( }; } + if (profile.driver === "existing-session") { + return { + mode: "local-existing-session", + isRemote: false, + usesChromeMcp: true, + requiresRelay: false, + requiresAttachedTab: false, + usesPersistentPlaywright: false, + supportsPerTabWs: false, + supportsJsonTabEndpoints: false, + supportsReset: false, + supportsManagedTabLimit: false, + }; + } + if (!profile.cdpIsLoopback) { return { mode: "remote-cdp", isRemote: true, + usesChromeMcp: false, requiresRelay: false, requiresAttachedTab: false, usesPersistentPlaywright: true, @@ -48,6 +71,7 @@ export function getBrowserProfileCapabilities( return { mode: "local-managed", isRemote: false, + usesChromeMcp: false, requiresRelay: false, requiresAttachedTab: false, usesPersistentPlaywright: false, @@ -75,6 +99,9 @@ export function resolveDefaultSnapshotFormat(params: { if (capabilities.mode === "local-extension-relay") { return "aria"; } + if (capabilities.mode === "local-existing-session") { + return "ai"; + } return params.hasPlaywright ? 
"ai" : "aria"; } diff --git a/src/browser/profiles-service.test.ts b/src/browser/profiles-service.test.ts index 3dc714d33f3..13bbdf27c49 100644 --- a/src/browser/profiles-service.test.ts +++ b/src/browser/profiles-service.test.ts @@ -1,6 +1,6 @@ import fs from "node:fs"; import path from "node:path"; -import { describe, expect, it, vi } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; import { resolveBrowserConfig } from "./config.js"; import { createBrowserProfilesService } from "./profiles-service.js"; import type { BrowserRouteContext, BrowserServerState } from "./server-context.js"; @@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: { } describe("BrowserProfilesService", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + it("allocates next local port for new profiles", async () => { const { result, state } = await createWorkProfileWithConfig({ resolved: resolveBrowserConfig({}), @@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => { ).rejects.toThrow(/requires an explicit loopback cdpUrl/i); }); + it("creates existing-session profiles as attach-only local entries", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx, state } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + const result = await service.createProfile({ + name: "chrome-live", + driver: "existing-session", + }); + + expect(result.transport).toBe("chrome-mcp"); + expect(result.cdpPort).toBeNull(); + expect(result.cdpUrl).toBeNull(); + expect(result.isRemote).toBe(false); + expect(state.resolved.profiles["chrome-live"]).toEqual({ + driver: "existing-session", + attachOnly: true, + color: expect.any(String), + }); + expect(writeConfigFile).toHaveBeenCalledWith( + expect.objectContaining({ + browser: expect.objectContaining({ + profiles: expect.objectContaining({ + "chrome-live": expect.objectContaining({ + driver: 
"existing-session", + attachOnly: true, + }), + }), + }), + }), + ); + }); + + it("rejects driver=existing-session when cdpUrl is provided", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + + await expect( + service.createProfile({ + name: "chrome-live", + driver: "existing-session", + cdpUrl: "http://127.0.0.1:9222", + }), + ).rejects.toThrow(/does not accept cdpUrl/i); + }); + it("deletes remote profiles without stopping or removing local data", async () => { const resolved = resolveBrowserConfig({ profiles: { @@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => { expect(result.deleted).toBe(true); expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir)); }); + + it("deletes existing-session profiles without touching local browser data", async () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }); + const { ctx } = createCtx(resolved); + + vi.mocked(loadConfig).mockReturnValue({ + browser: { + defaultProfile: "openclaw", + profiles: { + openclaw: { cdpPort: 18800, color: "#FF4500" }, + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }, + }); + + const service = createBrowserProfilesService(ctx); + const result = await service.deleteProfile("chrome-live"); + + expect(result.deleted).toBe(false); + expect(ctx.forProfile).not.toHaveBeenCalled(); + expect(movePathToTrash).not.toHaveBeenCalled(); + }); }); diff --git a/src/browser/profiles-service.ts b/src/browser/profiles-service.ts index 962c6408522..86321006e98 100644 --- a/src/browser/profiles-service.ts +++ b/src/browser/profiles-service.ts @@ -6,13 +6,13 @@ import { deriveDefaultBrowserCdpPortRange } from 
"../config/port-defaults.js"; import { isLoopbackHost } from "../gateway/net.js"; import { resolveOpenClawUserDataDir } from "./chrome.js"; import { parseHttpUrl, resolveProfile } from "./config.js"; -import { DEFAULT_BROWSER_DEFAULT_PROFILE_NAME } from "./constants.js"; import { BrowserConflictError, BrowserProfileNotFoundError, BrowserResourceExhaustedError, BrowserValidationError, } from "./errors.js"; +import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; import { allocateCdpPort, allocateColor, @@ -27,14 +27,15 @@ export type CreateProfileParams = { name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | "extension"; + driver?: "openclaw" | "extension" | "existing-session"; }; export type CreateProfileResult = { ok: true; profile: string; - cdpPort: number; - cdpUrl: string; + transport: "cdp" | "chrome-mcp"; + cdpPort: number | null; + cdpUrl: string | null; color: string; isRemote: boolean; }; @@ -79,7 +80,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { const createProfile = async (params: CreateProfileParams): Promise => { const name = params.name.trim(); const rawCdpUrl = params.cdpUrl?.trim() || undefined; - const driver = params.driver === "extension" ? "extension" : undefined; + const driver = + params.driver === "extension" + ? "extension" + : params.driver === "existing-session" + ? 
"existing-session" + : undefined; if (!isValidProfileName(name)) { throw new BrowserValidationError( @@ -105,7 +111,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { let profileConfig: BrowserProfileConfig; if (rawCdpUrl) { - const parsed = parseHttpUrl(rawCdpUrl, "browser.profiles.cdpUrl"); + let parsed: ReturnType; + try { + parsed = parseHttpUrl(rawCdpUrl, "browser.profiles.cdpUrl"); + } catch (err) { + throw new BrowserValidationError(String(err)); + } if (driver === "extension") { if (!isLoopbackHost(parsed.parsed.hostname)) { throw new BrowserValidationError( @@ -118,6 +129,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { ); } } + if (driver === "existing-session") { + throw new BrowserValidationError( + "driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow", + ); + } profileConfig = { cdpUrl: parsed.normalized, ...(driver ? { driver } : {}), @@ -127,17 +143,26 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { if (driver === "extension") { throw new BrowserValidationError("driver=extension requires an explicit loopback cdpUrl"); } - const usedPorts = getUsedPorts(resolvedProfiles); - const range = cdpPortRange(state.resolved); - const cdpPort = allocateCdpPort(usedPorts, range); - if (cdpPort === null) { - throw new BrowserResourceExhaustedError("no available CDP ports in range"); + if (driver === "existing-session") { + // existing-session uses Chrome MCP auto-connect; no CDP port needed + profileConfig = { + driver, + attachOnly: true, + color: profileColor, + }; + } else { + const usedPorts = getUsedPorts(resolvedProfiles); + const range = cdpPortRange(state.resolved); + const cdpPort = allocateCdpPort(usedPorts, range); + if (cdpPort === null) { + throw new BrowserResourceExhaustedError("no available CDP ports in range"); + } + profileConfig = { + cdpPort, + ...(driver ? 
{ driver } : {}), + color: profileColor, + }; } - profileConfig = { - cdpPort, - ...(driver ? { driver } : {}), - color: profileColor, - }; } const nextConfig: OpenClawConfig = { @@ -158,12 +183,14 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { if (!resolved) { throw new BrowserProfileNotFoundError(`profile "${name}" not found after creation`); } + const capabilities = getBrowserProfileCapabilities(resolved); return { ok: true, profile: name, - cdpPort: resolved.cdpPort, - cdpUrl: resolved.cdpUrl, + transport: capabilities.usesChromeMcp ? "chrome-mcp" : "cdp", + cdpPort: capabilities.usesChromeMcp ? null : resolved.cdpPort, + cdpUrl: capabilities.usesChromeMcp ? null : resolved.cdpUrl, color: resolved.color, isRemote: !resolved.cdpIsLoopback, }; @@ -178,24 +205,23 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { throw new BrowserValidationError("invalid profile name"); } + const state = ctx.state(); const cfg = loadConfig(); const profiles = cfg.browser?.profiles ?? {}; - if (!(name in profiles)) { - throw new BrowserProfileNotFoundError(`profile "${name}" not found`); - } - - const defaultProfile = cfg.browser?.defaultProfile ?? DEFAULT_BROWSER_DEFAULT_PROFILE_NAME; + const defaultProfile = cfg.browser?.defaultProfile ?? 
state.resolved.defaultProfile; if (name === defaultProfile) { throw new BrowserValidationError( `cannot delete the default profile "${name}"; change browser.defaultProfile first`, ); } + if (!(name in profiles)) { + throw new BrowserProfileNotFoundError(`profile "${name}" not found`); + } let deleted = false; - const state = ctx.state(); const resolved = resolveProfile(state.resolved, name); - if (resolved?.cdpIsLoopback) { + if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") { try { await ctx.forProfile(name).stopRunningBrowser(); } catch { diff --git a/src/browser/pw-ai.ts b/src/browser/pw-ai.ts index 6da8b410c83..f8d538b5394 100644 --- a/src/browser/pw-ai.ts +++ b/src/browser/pw-ai.ts @@ -19,6 +19,7 @@ export { export { armDialogViaPlaywright, armFileUploadViaPlaywright, + batchViaPlaywright, clickViaPlaywright, closePageViaPlaywright, cookiesClearViaPlaywright, diff --git a/src/browser/pw-role-snapshot.ts b/src/browser/pw-role-snapshot.ts index 7a0b0ae70fe..312abcf872f 100644 --- a/src/browser/pw-role-snapshot.ts +++ b/src/browser/pw-role-snapshot.ts @@ -1,3 +1,5 @@ +import { CONTENT_ROLES, INTERACTIVE_ROLES, STRUCTURAL_ROLES } from "./snapshot-roles.js"; + export type RoleRef = { role: string; name?: string; @@ -23,60 +25,6 @@ export type RoleSnapshotOptions = { compact?: boolean; }; -const INTERACTIVE_ROLES = new Set([ - "button", - "link", - "textbox", - "checkbox", - "radio", - "combobox", - "listbox", - "menuitem", - "menuitemcheckbox", - "menuitemradio", - "option", - "searchbox", - "slider", - "spinbutton", - "switch", - "tab", - "treeitem", -]); - -const CONTENT_ROLES = new Set([ - "heading", - "cell", - "gridcell", - "columnheader", - "rowheader", - "listitem", - "article", - "region", - "main", - "navigation", -]); - -const STRUCTURAL_ROLES = new Set([ - "generic", - "group", - "list", - "table", - "row", - "rowgroup", - "grid", - "treegrid", - "menu", - "menubar", - "toolbar", - "tablist", - "tree", - "directory", - "document", - 
"application", - "presentation", - "none", -]); - export function getRoleSnapshotStats(snapshot: string, refs: RoleRefMap): RoleSnapshotStats { const interactive = Object.values(refs).filter((r) => INTERACTIVE_ROLES.has(r.role)).length; return { diff --git a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts index 43f1a6c7e09..8f64b2bf575 100644 --- a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts +++ b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts @@ -12,40 +12,49 @@ afterEach(async () => { await closePlaywrightBrowserConnection().catch(() => {}); }); +function createExtensionFallbackBrowserHarness(options?: { + urls?: string[]; + newCDPSessionError?: string; +}) { + const pageOn = vi.fn(); + const contextOn = vi.fn(); + const browserOn = vi.fn(); + const browserClose = vi.fn(async () => {}); + const newCDPSession = vi.fn(async () => { + throw new Error(options?.newCDPSessionError ?? "Not allowed"); + }); + + const context = { + pages: () => [], + on: contextOn, + newCDPSession, + } as unknown as import("playwright-core").BrowserContext; + + const pages = (options?.urls ?? [undefined]).map( + (url) => + ({ + on: pageOn, + context: () => context, + ...(url ? 
{ url: () => url } : {}), + }) as unknown as import("playwright-core").Page, + ); + (context as unknown as { pages: () => unknown[] }).pages = () => pages; + + const browser = { + contexts: () => [context], + on: browserOn, + close: browserClose, + } as unknown as import("playwright-core").Browser; + + connectOverCdpSpy.mockResolvedValue(browser); + getChromeWebSocketUrlSpy.mockResolvedValue(null); + return { browserClose, newCDPSession, pages }; +} + describe("pw-session getPageForTargetId", () => { it("falls back to the only page when CDP session attachment is blocked (extension relays)", async () => { - connectOverCdpSpy.mockClear(); - getChromeWebSocketUrlSpy.mockClear(); - - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const page = { - on: pageOn, - context: () => context, - } as unknown as import("playwright-core").Page; - - // Fill pages() after page exists. 
- (context as unknown as { pages: () => unknown[] }).pages = () => [page]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const { browserClose, pages } = createExtensionFallbackBrowserHarness(); + const [page] = pages; const resolved = await getPageForTargetId({ cdpUrl: "http://127.0.0.1:18792", @@ -58,40 +67,9 @@ describe("pw-session getPageForTargetId", () => { }); it("uses the shared HTTP-base normalization when falling back to /json/list for direct WebSocket CDP URLs", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + }).pages; const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue({ ok: true, @@ -117,41 +95,11 @@ describe("pw-session getPageForTargetId", () => { }); it("resolves extension-relay pages from /json/list without probing page 
CDP sessions first", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - const newCDPSession = vi.fn(async () => { - throw new Error("Target.attachToBrowserTarget: Not allowed"); + const { newCDPSession, pages } = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + newCDPSessionError: "Target.attachToBrowserTarget: Not allowed", }); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession, - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = pages; const fetchSpy = vi.spyOn(globalThis, "fetch"); fetchSpy diff --git a/src/browser/pw-tools-core.responses.ts b/src/browser/pw-tools-core.responses.ts index 5a6ddc1818c..4b153692a20 100644 --- a/src/browser/pw-tools-core.responses.ts +++ b/src/browser/pw-tools-core.responses.ts @@ -1,22 +1,7 @@ import { formatCliCommand } from "../cli/command-format.js"; import { ensurePageState, getPageForTargetId } from "./pw-session.js"; import { normalizeTimeoutMs } from "./pw-tools-core.shared.js"; - -function matchUrlPattern(pattern: string, url: string): boolean { - const p = pattern.trim(); - if (!p) { - return false; - } - if (p === url) { - return true; - } - if (p.includes("*")) { - const escaped = p.replace(/[|\\{}()[\]^$+?.]/g, 
"\\$&"); - const regex = new RegExp(`^${escaped.replace(/\*\*/g, ".*").replace(/\*/g, ".*")}$`); - return regex.test(url); - } - return url.includes(p); -} +import { matchBrowserUrlPattern } from "./url-pattern.js"; export async function responseBodyViaPlaywright(opts: { cdpUrl: string; @@ -65,7 +50,7 @@ export async function responseBodyViaPlaywright(opts: { } const r = resp as { url?: () => string }; const u = r.url?.() || ""; - if (!matchUrlPattern(pattern, u)) { + if (!matchBrowserUrlPattern(pattern, u)) { return; } done = true; diff --git a/src/browser/pw-tools-core.shared.ts b/src/browser/pw-tools-core.shared.ts index d5ad74477d4..b6132de92bf 100644 --- a/src/browser/pw-tools-core.shared.ts +++ b/src/browser/pw-tools-core.shared.ts @@ -29,6 +29,21 @@ export function requireRef(value: unknown): string { return ref; } +export function requireRefOrSelector( + ref: string | undefined, + selector: string | undefined, +): { ref?: string; selector?: string } { + const trimmedRef = typeof ref === "string" ? ref.trim() : ""; + const trimmedSelector = typeof selector === "string" ? selector.trim() : ""; + if (!trimmedRef && !trimmedSelector) { + throw new Error("ref or selector is required"); + } + return { + ref: trimmedRef || undefined, + selector: trimmedSelector || undefined, + }; +} + export function normalizeTimeoutMs(timeoutMs: number | undefined, fallback: number) { return Math.max(500, Math.min(120_000, timeoutMs ?? 
fallback)); } diff --git a/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts b/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts index d976f7d7fb8..e5aa5bac2e0 100644 --- a/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts +++ b/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts @@ -291,6 +291,6 @@ describe("pw-tools-core", () => { targetId: "T1", ref: " ", }), - ).rejects.toThrow(/ref is required/i); + ).rejects.toThrow(/ref or selector is required/i); }); }); diff --git a/src/browser/resolved-config-refresh.ts b/src/browser/resolved-config-refresh.ts index fe934069a80..999a7ca1229 100644 --- a/src/browser/resolved-config-refresh.ts +++ b/src/browser/resolved-config-refresh.ts @@ -1,4 +1,4 @@ -import { createConfigIO, loadConfig } from "../config/config.js"; +import { createConfigIO, getRuntimeConfigSnapshot } from "../config/config.js"; import { resolveBrowserConfig, resolveProfile, type ResolvedBrowserProfile } from "./config.js"; import type { BrowserServerState } from "./server-context.types.js"; @@ -29,7 +29,13 @@ function applyResolvedConfig( current: BrowserServerState, freshResolved: BrowserServerState["resolved"], ) { - current.resolved = freshResolved; + current.resolved = { + ...freshResolved, + // Keep the runtime evaluate gate stable across request-time profile refreshes. + // Security-sensitive behavior should only change via full runtime config reload, + // not as a side effect of resolving profiles/tabs during a request. + evaluateEnabled: current.resolved.evaluateEnabled, + }; for (const [name, runtime] of current.profiles) { const nextProfile = resolveProfile(freshResolved, name); if (nextProfile) { @@ -63,7 +69,11 @@ export function refreshResolvedBrowserConfigFromDisk(params: { if (!params.refreshConfigFromDisk) { return; } - const cfg = params.mode === "fresh" ? 
createConfigIO().loadConfig() : loadConfig(); + + // Route-level browser config hot reload should observe on-disk changes immediately. + // The shared loadConfig() helper may return a cached snapshot for the configured TTL, + // which can leave request-time browser guards stale (for example evaluateEnabled). + const cfg = getRuntimeConfigSnapshot() ?? createConfigIO().loadConfig(); const freshResolved = resolveBrowserConfig(cfg.browser, cfg); applyResolvedConfig(params.current, freshResolved); } diff --git a/src/browser/routes/agent.act.download.ts b/src/browser/routes/agent.act.download.ts index d08287fea59..cfdf1362797 100644 --- a/src/browser/routes/agent.act.download.ts +++ b/src/browser/routes/agent.act.download.ts @@ -1,5 +1,11 @@ +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { ensureOutputRootDir, resolveWritableOutputPathOrRespond } from "./output-paths.js"; import { DEFAULT_DOWNLOAD_DIR } from "./path-output.js"; import type { BrowserRouteRegistrar } from "./types.js"; @@ -23,13 +29,23 @@ export function registerBrowserAgentActDownloadRoutes( const out = toStringOrEmpty(body.path) || ""; const timeoutMs = toNumber(body.timeoutMs); - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "wait for download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "download waiting is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "wait for download"); + if (!pw) { + return; + } await 
ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); let downloadPath: string | undefined; if (out.trim()) { @@ -67,13 +83,23 @@ export function registerBrowserAgentActDownloadRoutes( return jsonError(res, 400, "path is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "downloads are not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); const downloadPath = await resolveWritableOutputPathOrRespond({ res, diff --git a/src/browser/routes/agent.act.hooks.ts b/src/browser/routes/agent.act.hooks.ts index 56d97bb03d3..a141a9cbe5a 100644 --- a/src/browser/routes/agent.act.hooks.ts +++ b/src/browser/routes/agent.act.hooks.ts @@ -1,5 +1,12 @@ +import { evaluateChromeMcpScript, uploadChromeMcpFile } from "../chrome-mcp.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "./path-output.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; @@ -20,13 +27,12 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "paths are required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "file chooser hook", - run: async ({ cdpUrl, tab, pw }) 
=> { + run: async ({ profileCtx, cdpUrl, tab }) => { const uploadPathsResult = await resolveExistingPathsWithinRoot({ rootDir: DEFAULT_UPLOAD_DIR, requestedPaths: paths, @@ -38,6 +44,39 @@ export function registerBrowserAgentActHookRoutes( } const resolvedPaths = uploadPathsResult.paths; + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (element) { + return jsonError( + res, + 501, + "existing-session file uploads do not support element selectors; use ref/inputRef.", + ); + } + if (resolvedPaths.length !== 1) { + return jsonError( + res, + 501, + "existing-session file uploads currently support one file at a time.", + ); + } + const uid = inputRef || ref; + if (!uid) { + return jsonError(res, 501, "existing-session file uploads require ref or inputRef."); + } + await uploadChromeMcpFile({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid, + filePath: resolvedPaths[0] ?? "", + }); + return res.json({ ok: true }); + } + + const pw = await requirePwAi(res, "file chooser hook"); + if (!pw) { + return; + } + if (inputRef || element) { if (ref) { return jsonError(res, 400, "ref cannot be combined with inputRef/element"); @@ -79,13 +118,69 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "accept is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "dialog hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session dialog handling does not support timeoutMs.", + ); + } + await evaluateChromeMcpScript({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + fn: `() => { + const state = (window.__openclawDialogHook ??= {}); + if (!state.originals) { + state.originals = { + alert: window.alert.bind(window), + confirm: 
window.confirm.bind(window), + prompt: window.prompt.bind(window), + }; + } + const originals = state.originals; + const restore = () => { + window.alert = originals.alert; + window.confirm = originals.confirm; + window.prompt = originals.prompt; + delete window.__openclawDialogHook; + }; + window.alert = (...args) => { + try { + return undefined; + } finally { + restore(); + } + }; + window.confirm = (...args) => { + try { + return ${accept ? "true" : "false"}; + } finally { + restore(); + } + }; + window.prompt = (...args) => { + try { + return ${accept ? JSON.stringify(promptText ?? "") : "null"}; + } finally { + restore(); + } + }; + return true; + }`, + }); + return res.json({ ok: true }); + } + const pw = await requirePwAi(res, "dialog hook"); + if (!pw) { + return; + } await pw.armDialogViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.act.shared.ts b/src/browser/routes/agent.act.shared.ts index 81ca8caab71..b22f35e7ef2 100644 --- a/src/browser/routes/agent.act.shared.ts +++ b/src/browser/routes/agent.act.shared.ts @@ -1,4 +1,5 @@ export const ACT_KINDS = [ + "batch", "click", "close", "drag", diff --git a/src/browser/routes/agent.act.ts b/src/browser/routes/agent.act.ts index 2ae6073c7cf..1b444d1b963 100644 --- a/src/browser/routes/agent.act.ts +++ b/src/browser/routes/agent.act.ts @@ -1,6 +1,19 @@ -import type { BrowserFormField } from "../client-actions-core.js"; +import { + clickChromeMcpElement, + closeChromeMcpTab, + dragChromeMcpElement, + evaluateChromeMcpScript, + fillChromeMcpElement, + fillChromeMcpForm, + hoverChromeMcpElement, + pressChromeMcpKey, + resizeChromeMcpPage, +} from "../chrome-mcp.js"; +import type { BrowserActRequest, BrowserFormField } from "../client-actions-core.js"; import { normalizeBrowserFormField } from "../form-fields.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import type { BrowserRouteContext } from "../server-context.js"; +import { 
matchBrowserUrlPattern } from "../url-pattern.js"; import { registerBrowserAgentActDownloadRoutes } from "./agent.act.download.js"; import { registerBrowserAgentActHookRoutes } from "./agent.act.hooks.js"; import { @@ -11,13 +24,426 @@ import { } from "./agent.act.shared.js"; import { readBody, + requirePwAi, resolveTargetIdFromBody, - withPlaywrightRouteContext, + withRouteTabContext, SELECTOR_UNSUPPORTED_MESSAGE, } from "./agent.shared.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +function browserEvaluateDisabledMessage(action: "wait" | "evaluate"): string { + return [ + action === "wait" + ? "wait --fn is disabled by config (browser.evaluateEnabled=false)." + : "act:evaluate is disabled by config (browser.evaluateEnabled=false).", + "Docs: /gateway/configuration#browser-openclaw-managed-browser", + ].join("\n"); +} + +function buildExistingSessionWaitPredicate(params: { + text?: string; + textGone?: string; + selector?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; +}): string | null { + const checks: string[] = []; + if (params.text) { + checks.push(`Boolean(document.body?.innerText?.includes(${JSON.stringify(params.text)}))`); + } + if (params.textGone) { + checks.push(`!document.body?.innerText?.includes(${JSON.stringify(params.textGone)})`); + } + if (params.selector) { + checks.push(`Boolean(document.querySelector(${JSON.stringify(params.selector)}))`); + } + if (params.loadState === "domcontentloaded") { + checks.push(`document.readyState === "interactive" || document.readyState === "complete"`); + } else if (params.loadState === "load") { + checks.push(`document.readyState === "complete"`); + } + if (params.fn) { + checks.push(`Boolean(await (${params.fn})())`); + } + if (checks.length === 0) { + return null; 
+ } + return checks.length === 1 ? checks[0] : checks.map((check) => `(${check})`).join(" && "); +} + +async function waitForExistingSessionCondition(params: { + profileName: string; + targetId: string; + timeMs?: number; + text?: string; + textGone?: string; + selector?: string; + url?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; + timeoutMs?: number; +}): Promise { + if (params.timeMs && params.timeMs > 0) { + await sleep(params.timeMs); + } + const predicate = buildExistingSessionWaitPredicate(params); + if (!predicate && !params.url) { + return; + } + const timeoutMs = Math.max(250, params.timeoutMs ?? 10_000); + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + let ready = true; + if (predicate) { + ready = Boolean( + await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `async () => ${predicate}`, + }), + ); + } + if (ready && params.url) { + const currentUrl = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: "() => window.location.href", + }); + ready = typeof currentUrl === "string" && matchBrowserUrlPattern(params.url, currentUrl); + } + if (ready) { + return; + } + await sleep(250); + } + throw new Error("Timed out waiting for condition"); +} + +const SELECTOR_ALLOWED_KINDS: ReadonlySet = new Set([ + "batch", + "click", + "drag", + "hover", + "scrollIntoView", + "select", + "type", + "wait", +]); +const MAX_BATCH_ACTIONS = 100; +const MAX_BATCH_CLICK_DELAY_MS = 5_000; +const MAX_BATCH_WAIT_TIME_MS = 30_000; + +function normalizeBoundedNonNegativeMs( + value: unknown, + fieldName: string, + maxMs: number, +): number | undefined { + const ms = toNumber(value); + if (ms === undefined) { + return undefined; + } + if (ms < 0) { + throw new Error(`${fieldName} must be >= 0`); + } + const normalized = Math.floor(ms); + if (normalized > maxMs) { + throw new Error(`${fieldName} exceeds maximum of 
${maxMs}ms`); + } + return normalized; +} + +function countBatchActions(actions: BrowserActRequest[]): number { + let count = 0; + for (const action of actions) { + count += 1; + if (action.kind === "batch") { + count += countBatchActions(action.actions); + } + } + return count; +} + +function validateBatchTargetIds(actions: BrowserActRequest[], targetId: string): string | null { + for (const action of actions) { + if (action.targetId && action.targetId !== targetId) { + return "batched action targetId must match request targetId"; + } + if (action.kind === "batch") { + const nestedError = validateBatchTargetIds(action.actions, targetId); + if (nestedError) { + return nestedError; + } + } + } + return null; +} + +function normalizeBatchAction(value: unknown): BrowserActRequest { + if (!value || typeof value !== "object" || Array.isArray(value)) { + throw new Error("batch actions must be objects"); + } + const raw = value as Record; + const kind = toStringOrEmpty(raw.kind); + if (!isActKind(kind)) { + throw new Error("batch actions must use a supported kind"); + } + + switch (kind) { + case "click": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + if (!ref && !selector) { + throw new Error("click requires ref or selector"); + } + const buttonRaw = toStringOrEmpty(raw.button); + const button = buttonRaw ? parseClickButton(buttonRaw) : undefined; + if (buttonRaw && !button) { + throw new Error("click button must be left|right|middle"); + } + const modifiersRaw = toStringArray(raw.modifiers) ?? 
[]; + const parsedModifiers = parseClickModifiers(modifiersRaw); + if (parsedModifiers.error) { + throw new Error(parsedModifiers.error); + } + const doubleClick = toBoolean(raw.doubleClick); + const delayMs = normalizeBoundedNonNegativeMs( + raw.delayMs, + "click delayMs", + MAX_BATCH_CLICK_DELAY_MS, + ); + const timeoutMs = toNumber(raw.timeoutMs); + const targetId = toStringOrEmpty(raw.targetId) || undefined; + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + ...(targetId ? { targetId } : {}), + ...(doubleClick !== undefined ? { doubleClick } : {}), + ...(button ? { button } : {}), + ...(parsedModifiers.modifiers ? { modifiers: parsedModifiers.modifiers } : {}), + ...(delayMs !== undefined ? { delayMs } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "type": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + const text = raw.text; + if (!ref && !selector) { + throw new Error("type requires ref or selector"); + } + if (typeof text !== "string") { + throw new Error("type requires text"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const submit = toBoolean(raw.submit); + const slowly = toBoolean(raw.slowly); + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + text, + ...(targetId ? { targetId } : {}), + ...(submit !== undefined ? { submit } : {}), + ...(slowly !== undefined ? { slowly } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "press": { + const key = toStringOrEmpty(raw.key); + if (!key) { + throw new Error("press requires key"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const delayMs = toNumber(raw.delayMs); + return { + kind, + key, + ...(targetId ? { targetId } : {}), + ...(delayMs !== undefined ? 
{ delayMs } : {}), + }; + } + case "hover": + case "scrollIntoView": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + if (!ref && !selector) { + throw new Error(`${kind} requires ref or selector`); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "drag": { + const startRef = toStringOrEmpty(raw.startRef) || undefined; + const startSelector = toStringOrEmpty(raw.startSelector) || undefined; + const endRef = toStringOrEmpty(raw.endRef) || undefined; + const endSelector = toStringOrEmpty(raw.endSelector) || undefined; + if (!startRef && !startSelector) { + throw new Error("drag requires startRef or startSelector"); + } + if (!endRef && !endSelector) { + throw new Error("drag requires endRef or endSelector"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(startRef ? { startRef } : {}), + ...(startSelector ? { startSelector } : {}), + ...(endRef ? { endRef } : {}), + ...(endSelector ? { endSelector } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "select": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + const values = toStringArray(raw.values); + if ((!ref && !selector) || !values?.length) { + throw new Error("select requires ref/selector and values"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + values, + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? 
{ timeoutMs } : {}), + }; + } + case "fill": { + const rawFields = Array.isArray(raw.fields) ? raw.fields : []; + const fields = rawFields + .map((field) => { + if (!field || typeof field !== "object") { + return null; + } + return normalizeBrowserFormField(field as Record); + }) + .filter((field): field is BrowserFormField => field !== null); + if (!fields.length) { + throw new Error("fill requires fields"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + fields, + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "resize": { + const width = toNumber(raw.width); + const height = toNumber(raw.height); + if (width === undefined || height === undefined) { + throw new Error("resize requires width and height"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + return { + kind, + width, + height, + ...(targetId ? { targetId } : {}), + }; + } + case "wait": { + const loadStateRaw = toStringOrEmpty(raw.loadState); + const loadState = + loadStateRaw === "load" || + loadStateRaw === "domcontentloaded" || + loadStateRaw === "networkidle" + ? loadStateRaw + : undefined; + const timeMs = normalizeBoundedNonNegativeMs( + raw.timeMs, + "wait timeMs", + MAX_BATCH_WAIT_TIME_MS, + ); + const text = toStringOrEmpty(raw.text) || undefined; + const textGone = toStringOrEmpty(raw.textGone) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + const url = toStringOrEmpty(raw.url) || undefined; + const fn = toStringOrEmpty(raw.fn) || undefined; + if (timeMs === undefined && !text && !textGone && !selector && !url && !loadState && !fn) { + throw new Error( + "wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn", + ); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(timeMs !== undefined ? 
{ timeMs } : {}), + ...(text ? { text } : {}), + ...(textGone ? { textGone } : {}), + ...(selector ? { selector } : {}), + ...(url ? { url } : {}), + ...(loadState ? { loadState } : {}), + ...(fn ? { fn } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "evaluate": { + const fn = toStringOrEmpty(raw.fn); + if (!fn) { + throw new Error("evaluate requires fn"); + } + const ref = toStringOrEmpty(raw.ref) || undefined; + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + fn, + ...(ref ? { ref } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "close": { + const targetId = toStringOrEmpty(raw.targetId) || undefined; + return { + kind, + ...(targetId ? { targetId } : {}), + }; + } + case "batch": { + const actions = Array.isArray(raw.actions) ? raw.actions.map(normalizeBatchAction) : []; + if (!actions.length) { + throw new Error("batch requires actions"); + } + if (countBatchActions(actions) > MAX_BATCH_ACTIONS) { + throw new Error(`batch exceeds maximum of ${MAX_BATCH_ACTIONS} actions`); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const stopOnError = toBoolean(raw.stopOnError); + return { + kind, + actions, + ...(targetId ? { targetId } : {}), + ...(stopOnError !== undefined ? { stopOnError } : {}), + }; + } + } +} + export function registerBrowserAgentActRoutes( app: BrowserRouteRegistrar, ctx: BrowserRouteContext, @@ -30,27 +456,41 @@ export function registerBrowserAgentActRoutes( } const kind: ActKind = kindRaw; const targetId = resolveTargetIdFromBody(body); - if (Object.hasOwn(body, "selector") && kind !== "wait") { + if (Object.hasOwn(body, "selector") && !SELECTOR_ALLOWED_KINDS.has(kind)) { return jsonError(res, 400, SELECTOR_UNSUPPORTED_MESSAGE); } + const earlyFn = kind === "wait" || kind === "evaluate" ? 
toStringOrEmpty(body.fn) : ""; + if ( + (kind === "evaluate" || (kind === "wait" && earlyFn)) && + !ctx.state().resolved.evaluateEnabled + ) { + return jsonError( + res, + 403, + browserEvaluateDisabledMessage(kind === "evaluate" ? "evaluate" : "wait"), + ); + } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: `act:${kind}`, - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const evaluateEnabled = ctx.state().resolved.evaluateEnabled; + const isExistingSession = getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp; + const profileName = profileCtx.profile.name; switch (kind) { case "click": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } const doubleClick = toBoolean(body.doubleClick) ?? false; const timeoutMs = toNumber(body.timeoutMs); + const delayMs = toNumber(body.delayMs); const buttonRaw = toStringOrEmpty(body.button) || ""; const button = buttonRaw ? 
parseClickButton(buttonRaw) : undefined; if (buttonRaw && !button) { @@ -63,18 +503,53 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, parsedModifiers.error); } const modifiers = parsedModifiers.modifiers; + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session click does not support selector targeting yet; use ref.", + ); + } + if ((button && button !== "left") || (modifiers && modifiers.length > 0)) { + return jsonError( + res, + 501, + "existing-session click currently supports left-click only (no button overrides/modifiers).", + ); + } + await clickChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref!, + doubleClick, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const clickRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, - ref, doubleClick, }; + if (ref) { + clickRequest.ref = ref; + } + if (selector) { + clickRequest.selector = selector; + } if (button) { clickRequest.button = button; } if (modifiers) { clickRequest.modifiers = modifiers; } + if (delayMs) { + clickRequest.delayMs = delayMs; + } if (timeoutMs) { clickRequest.timeoutMs = timeoutMs; } @@ -82,9 +557,10 @@ export function registerBrowserAgentActRoutes( return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); } case "type": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } if (typeof body.text !== "string") { return jsonError(res, 400, "text is required"); @@ -93,14 +569,53 @@ export function registerBrowserAgentActRoutes( const submit = toBoolean(body.submit) ?? false; const slowly = toBoolean(body.slowly) ?? 
false; const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session type does not support selector targeting yet; use ref.", + ); + } + if (slowly) { + return jsonError( + res, + 501, + "existing-session type does not support slowly=true; use fill/press instead.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref!, + value: text, + }); + if (submit) { + await pressChromeMcpKey({ + profileName, + targetId: tab.targetId, + key: "Enter", + }); + } + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const typeRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, - ref, text, submit, slowly, }; + if (ref) { + typeRequest.ref = ref; + } + if (selector) { + typeRequest.selector = selector; + } if (timeoutMs) { typeRequest.timeoutMs = timeoutMs; } @@ -113,6 +628,17 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "key is required"); } const delayMs = toNumber(body.delayMs); + if (isExistingSession) { + if (delayMs) { + return jsonError(res, 501, "existing-session press does not support delayMs."); + } + await pressChromeMcpKey({ profileName, targetId: tab.targetId, key }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.pressKeyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -122,30 +648,87 @@ export function registerBrowserAgentActRoutes( return res.json({ ok: true, targetId: tab.targetId }); } case "hover": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } const 
timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session hover does not support selector targeting yet; use ref.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session hover does not support timeoutMs overrides.", + ); + } + await hoverChromeMcpElement({ profileName, targetId: tab.targetId, uid: ref! }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.hoverViaPlaywright({ cdpUrl, targetId: tab.targetId, ref, + selector, timeoutMs: timeoutMs ?? undefined, }); return res.json({ ok: true, targetId: tab.targetId }); } case "scrollIntoView": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session scrollIntoView does not support selector targeting yet; use ref.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session scrollIntoView does not support timeoutMs overrides.", + ); + } + await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn: `(el) => { el.scrollIntoView({ block: "center", inline: "center" }); return true; }`, + args: [ref!], + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const scrollRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, - ref, }; + if (ref) { + scrollRequest.ref = ref; + } + if (selector) { + scrollRequest.selector = selector; + } if (timeoutMs) { scrollRequest.timeoutMs = timeoutMs; } @@ 
-153,32 +736,102 @@ export function registerBrowserAgentActRoutes( return res.json({ ok: true, targetId: tab.targetId }); } case "drag": { - const startRef = toStringOrEmpty(body.startRef); - const endRef = toStringOrEmpty(body.endRef); - if (!startRef || !endRef) { - return jsonError(res, 400, "startRef and endRef are required"); + const startRef = toStringOrEmpty(body.startRef) || undefined; + const startSelector = toStringOrEmpty(body.startSelector) || undefined; + const endRef = toStringOrEmpty(body.endRef) || undefined; + const endSelector = toStringOrEmpty(body.endSelector) || undefined; + if (!startRef && !startSelector) { + return jsonError(res, 400, "startRef or startSelector is required"); + } + if (!endRef && !endSelector) { + return jsonError(res, 400, "endRef or endSelector is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (startSelector || endSelector) { + return jsonError( + res, + 501, + "existing-session drag does not support selector targeting yet; use startRef/endRef.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session drag does not support timeoutMs overrides.", + ); + } + await dragChromeMcpElement({ + profileName, + targetId: tab.targetId, + fromUid: startRef!, + toUid: endRef!, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.dragViaPlaywright({ cdpUrl, targetId: tab.targetId, startRef, + startSelector, endRef, + endSelector, timeoutMs: timeoutMs ?? 
undefined, }); return res.json({ ok: true, targetId: tab.targetId }); } case "select": { - const ref = toStringOrEmpty(body.ref); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; const values = toStringArray(body.values); - if (!ref || !values?.length) { - return jsonError(res, 400, "ref and values are required"); + if ((!ref && !selector) || !values?.length) { + return jsonError(res, 400, "ref/selector and values are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session select does not support selector targeting yet; use ref.", + ); + } + if (values.length !== 1) { + return jsonError( + res, + 501, + "existing-session select currently supports a single value only.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session select does not support timeoutMs overrides.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref!, + value: values[0] ?? "", + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.selectOptionViaPlaywright({ cdpUrl, targetId: tab.targetId, ref, + selector, values, timeoutMs: timeoutMs ?? undefined, }); @@ -198,6 +851,28 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "fields are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session fill does not support timeoutMs overrides.", + ); + } + await fillChromeMcpForm({ + profileName, + targetId: tab.targetId, + elements: fields.map((field) => ({ + uid: field.ref, + value: String(field.value ?? 
""), + })), + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.fillFormViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -212,6 +887,19 @@ export function registerBrowserAgentActRoutes( if (!width || !height) { return jsonError(res, 400, "width and height are required"); } + if (isExistingSession) { + await resizeChromeMcpPage({ + profileName, + targetId: tab.targetId, + width, + height, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.resizeViewportViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -236,14 +924,7 @@ export function registerBrowserAgentActRoutes( const fn = toStringOrEmpty(body.fn) || undefined; const timeoutMs = toNumber(body.timeoutMs) ?? undefined; if (fn && !evaluateEnabled) { - return jsonError( - res, - 403, - [ - "wait --fn is disabled by config (browser.evaluateEnabled=false).", - "Docs: /gateway/configuration#browser-openclaw-managed-browser", - ].join("\n"), - ); + return jsonError(res, 403, browserEvaluateDisabledMessage("wait")); } if ( timeMs === undefined && @@ -260,6 +941,32 @@ export function registerBrowserAgentActRoutes( "wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn", ); } + if (isExistingSession) { + if (loadState === "networkidle") { + return jsonError( + res, + 501, + "existing-session wait does not support loadState=networkidle yet.", + ); + } + await waitForExistingSessionCondition({ + profileName, + targetId: tab.targetId, + timeMs, + text, + textGone, + selector, + url, + loadState, + fn, + timeoutMs, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.waitForViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -276,14 +983,7 @@ export function 
registerBrowserAgentActRoutes( } case "evaluate": { if (!evaluateEnabled) { - return jsonError( - res, - 403, - [ - "act:evaluate is disabled by config (browser.evaluateEnabled=false).", - "Docs: /gateway/configuration#browser-openclaw-managed-browser", - ].join("\n"), - ); + return jsonError(res, 403, browserEvaluateDisabledMessage("evaluate")); } const fn = toStringOrEmpty(body.fn); if (!fn) { @@ -291,6 +991,31 @@ export function registerBrowserAgentActRoutes( } const ref = toStringOrEmpty(body.ref) || undefined; const evalTimeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (evalTimeoutMs !== undefined) { + return jsonError( + res, + 501, + "existing-session evaluate does not support timeoutMs overrides.", + ); + } + const result = await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn, + args: ref ? [ref] : undefined, + }); + return res.json({ + ok: true, + targetId: tab.targetId, + url: tab.url, + result, + }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const evalRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -310,9 +1035,55 @@ export function registerBrowserAgentActRoutes( }); } case "close": { + if (isExistingSession) { + await closeChromeMcpTab(profileName, tab.targetId); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.closePageViaPlaywright({ cdpUrl, targetId: tab.targetId }); return res.json({ ok: true, targetId: tab.targetId }); } + case "batch": { + if (isExistingSession) { + return jsonError( + res, + 501, + "existing-session batch is not supported yet; send actions individually.", + ); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } + let actions: BrowserActRequest[]; + try { + actions = Array.isArray(body.actions) ? 
body.actions.map(normalizeBatchAction) : []; + } catch (err) { + return jsonError(res, 400, err instanceof Error ? err.message : String(err)); + } + if (!actions.length) { + return jsonError(res, 400, "actions are required"); + } + if (countBatchActions(actions) > MAX_BATCH_ACTIONS) { + return jsonError(res, 400, `batch exceeds maximum of ${MAX_BATCH_ACTIONS} actions`); + } + const targetIdError = validateBatchTargetIds(actions, tab.targetId); + if (targetIdError) { + return jsonError(res, 403, targetIdError); + } + const stopOnError = toBoolean(body.stopOnError) ?? true; + const result = await pw.batchViaPlaywright({ + cdpUrl, + targetId: tab.targetId, + actions, + stopOnError, + evaluateEnabled, + }); + return res.json({ ok: true, targetId: tab.targetId, results: result.results }); + } default: { return jsonError(res, 400, "unsupported kind"); } @@ -334,13 +1105,23 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "response body", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "response body is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "response body"); + if (!pw) { + return; + } const result = await pw.responseBodyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -361,13 +1142,39 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "highlight", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + await evaluateChromeMcpScript({ + profileName: 
profileCtx.profile.name, + targetId: tab.targetId, + args: [ref], + fn: `(el) => { + if (!(el instanceof Element)) { + return false; + } + el.scrollIntoView({ block: "center", inline: "center" }); + const previousOutline = el.style.outline; + const previousOffset = el.style.outlineOffset; + el.style.outline = "3px solid #FF4500"; + el.style.outlineOffset = "2px"; + setTimeout(() => { + el.style.outline = previousOutline; + el.style.outlineOffset = previousOffset; + }, 2000); + return true; + }`, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, "highlight"); + if (!pw) { + return; + } await pw.highlightViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.existing-session.test.ts b/src/browser/routes/agent.existing-session.test.ts new file mode 100644 index 00000000000..4f8211114ea --- /dev/null +++ b/src/browser/routes/agent.existing-session.test.ts @@ -0,0 +1,252 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { registerBrowserAgentActRoutes } from "./agent.act.js"; +import { registerBrowserAgentSnapshotRoutes } from "./agent.snapshot.js"; +import { createBrowserRouteApp, createBrowserRouteResponse } from "./test-helpers.js"; +import type { BrowserRequest } from "./types.js"; + +const routeState = vi.hoisted(() => ({ + profileCtx: { + profile: { + driver: "existing-session" as const, + name: "chrome-live", + }, + ensureTabAvailable: vi.fn(async () => ({ + targetId: "7", + url: "https://example.com", + })), + }, + tab: { + targetId: "7", + url: "https://example.com", + }, +})); + +const chromeMcpMocks = vi.hoisted(() => ({ + evaluateChromeMcpScript: vi.fn( + async (_params: { profileName: string; targetId: string; fn: string }) => true, + ), + navigateChromeMcpPage: vi.fn(async ({ url }: { url: string }) => ({ url })), + takeChromeMcpScreenshot: vi.fn(async () => Buffer.from("png")), + takeChromeMcpSnapshot: vi.fn(async () => ({ + id: "root", + role: 
"document", + name: "Example", + children: [{ id: "btn-1", role: "button", name: "Continue" }], + })), +})); + +vi.mock("../chrome-mcp.js", () => ({ + clickChromeMcpElement: vi.fn(async () => {}), + closeChromeMcpTab: vi.fn(async () => {}), + dragChromeMcpElement: vi.fn(async () => {}), + evaluateChromeMcpScript: chromeMcpMocks.evaluateChromeMcpScript, + fillChromeMcpElement: vi.fn(async () => {}), + fillChromeMcpForm: vi.fn(async () => {}), + hoverChromeMcpElement: vi.fn(async () => {}), + navigateChromeMcpPage: chromeMcpMocks.navigateChromeMcpPage, + pressChromeMcpKey: vi.fn(async () => {}), + resizeChromeMcpPage: vi.fn(async () => {}), + takeChromeMcpScreenshot: chromeMcpMocks.takeChromeMcpScreenshot, + takeChromeMcpSnapshot: chromeMcpMocks.takeChromeMcpSnapshot, +})); + +vi.mock("../cdp.js", () => ({ + captureScreenshot: vi.fn(), + snapshotAria: vi.fn(), +})); + +vi.mock("../navigation-guard.js", () => ({ + assertBrowserNavigationAllowed: vi.fn(async () => {}), + assertBrowserNavigationResultAllowed: vi.fn(async () => {}), + withBrowserNavigationPolicy: vi.fn(() => ({})), +})); + +vi.mock("../screenshot.js", () => ({ + DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES: 128, + DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE: 64, + normalizeBrowserScreenshot: vi.fn(async (buffer: Buffer) => ({ + buffer, + contentType: "image/png", + })), +})); + +vi.mock("../../media/store.js", () => ({ + ensureMediaDir: vi.fn(async () => {}), + saveMediaBuffer: vi.fn(async () => ({ path: "/tmp/fake.png" })), +})); + +vi.mock("./agent.shared.js", () => ({ + getPwAiModule: vi.fn(async () => null), + handleRouteError: vi.fn(), + readBody: vi.fn((req: BrowserRequest) => req.body ?? {}), + requirePwAi: vi.fn(async () => { + throw new Error("Playwright should not be used for existing-session tests"); + }), + resolveProfileContext: vi.fn(() => routeState.profileCtx), + resolveTargetIdFromBody: vi.fn((body: Record) => + typeof body.targetId === "string" ? 
body.targetId : undefined, + ), + withPlaywrightRouteContext: vi.fn(), + withRouteTabContext: vi.fn(async ({ run }: { run: (args: unknown) => Promise }) => { + await run({ + profileCtx: routeState.profileCtx, + cdpUrl: "http://127.0.0.1:18800", + tab: routeState.tab, + }); + }), +})); + +function getSnapshotGetHandler() { + const { app, getHandlers } = createBrowserRouteApp(); + registerBrowserAgentSnapshotRoutes(app, { + state: () => ({ resolved: { ssrfPolicy: undefined } }), + } as never); + const handler = getHandlers.get("/snapshot"); + expect(handler).toBeTypeOf("function"); + return handler; +} + +function getSnapshotPostHandler() { + const { app, postHandlers } = createBrowserRouteApp(); + registerBrowserAgentSnapshotRoutes(app, { + state: () => ({ resolved: { ssrfPolicy: undefined } }), + } as never); + const handler = postHandlers.get("/screenshot"); + expect(handler).toBeTypeOf("function"); + return handler; +} + +function getActPostHandler() { + const { app, postHandlers } = createBrowserRouteApp(); + registerBrowserAgentActRoutes(app, { + state: () => ({ resolved: { evaluateEnabled: true } }), + } as never); + const handler = postHandlers.get("/act"); + expect(handler).toBeTypeOf("function"); + return handler; +} + +describe("existing-session browser routes", () => { + beforeEach(() => { + routeState.profileCtx.ensureTabAvailable.mockClear(); + chromeMcpMocks.evaluateChromeMcpScript.mockReset(); + chromeMcpMocks.navigateChromeMcpPage.mockClear(); + chromeMcpMocks.takeChromeMcpScreenshot.mockClear(); + chromeMcpMocks.takeChromeMcpSnapshot.mockClear(); + chromeMcpMocks.evaluateChromeMcpScript + .mockResolvedValueOnce({ labels: 1, skipped: 0 } as never) + .mockResolvedValueOnce(true); + }); + + it("allows labeled AI snapshots for existing-session profiles", async () => { + const handler = getSnapshotGetHandler(); + const response = createBrowserRouteResponse(); + await handler?.({ params: {}, query: { format: "ai", labels: "1" } }, response.res); + + 
expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ + ok: true, + format: "ai", + labels: true, + labelsCount: 1, + labelsSkipped: 0, + }); + expect(chromeMcpMocks.takeChromeMcpSnapshot).toHaveBeenCalledWith({ + profileName: "chrome-live", + targetId: "7", + }); + expect(chromeMcpMocks.takeChromeMcpScreenshot).toHaveBeenCalled(); + }); + + it("allows ref screenshots for existing-session profiles", async () => { + const handler = getSnapshotPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { ref: "btn-1", type: "jpeg" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ + ok: true, + path: "/tmp/fake.png", + targetId: "7", + }); + expect(chromeMcpMocks.takeChromeMcpScreenshot).toHaveBeenCalledWith({ + profileName: "chrome-live", + targetId: "7", + uid: "btn-1", + fullPage: false, + format: "jpeg", + }); + }); + + it("rejects selector-based element screenshots for existing-session profiles", async () => { + const handler = getSnapshotPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { element: "#submit" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(400); + expect(response.body).toMatchObject({ + error: expect.stringContaining("element screenshots are not supported"), + }); + expect(chromeMcpMocks.takeChromeMcpScreenshot).not.toHaveBeenCalled(); + }); + + it("fails closed for existing-session networkidle waits", async () => { + const handler = getActPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { kind: "wait", loadState: "networkidle" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(501); + expect(response.body).toMatchObject({ + error: expect.stringContaining("loadState=networkidle"), + }); + 
expect(chromeMcpMocks.evaluateChromeMcpScript).not.toHaveBeenCalled(); + }); + + it("supports glob URL waits for existing-session profiles", async () => { + chromeMcpMocks.evaluateChromeMcpScript.mockReset(); + chromeMcpMocks.evaluateChromeMcpScript.mockImplementation( + async ({ fn }: { fn: string }) => + (fn === "() => window.location.href" ? "https://example.com/" : true) as never, + ); + + const handler = getActPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { kind: "wait", url: "**/example.com/" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ ok: true, targetId: "7" }); + expect(chromeMcpMocks.evaluateChromeMcpScript).toHaveBeenCalledWith({ + profileName: "chrome-live", + targetId: "7", + fn: "() => window.location.href", + }); + }); +}); diff --git a/src/browser/routes/agent.snapshot.plan.test.ts b/src/browser/routes/agent.snapshot.plan.test.ts index 493fbcdfbad..71870aa1a6d 100644 --- a/src/browser/routes/agent.snapshot.plan.test.ts +++ b/src/browser/routes/agent.snapshot.plan.test.ts @@ -3,9 +3,9 @@ import { resolveBrowserConfig, resolveProfile } from "../config.js"; import { resolveSnapshotPlan } from "./agent.snapshot.plan.js"; describe("resolveSnapshotPlan", () => { - it("defaults chrome extension relay snapshots to aria when format is omitted", () => { + it("defaults chrome-relay snapshots to aria when format is omitted", () => { const resolved = resolveBrowserConfig({}); - const profile = resolveProfile(resolved, "chrome"); + const profile = resolveProfile(resolved, "chrome-relay"); expect(profile).toBeTruthy(); const plan = resolveSnapshotPlan({ diff --git a/src/browser/routes/agent.snapshot.ts b/src/browser/routes/agent.snapshot.ts index c750cafe723..80c11693a11 100644 --- a/src/browser/routes/agent.snapshot.ts +++ b/src/browser/routes/agent.snapshot.ts @@ -1,7 +1,22 @@ import path from "node:path"; import { 
ensureMediaDir, saveMediaBuffer } from "../../media/store.js"; import { captureScreenshot, snapshotAria } from "../cdp.js"; +import { + evaluateChromeMcpScript, + navigateChromeMcpPage, + takeChromeMcpScreenshot, + takeChromeMcpSnapshot, +} from "../chrome-mcp.js"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "../chrome-mcp.snapshot.js"; +import { + assertBrowserNavigationAllowed, + assertBrowserNavigationResultAllowed, +} from "../navigation-guard.js"; import { withBrowserNavigationPolicy } from "../navigation-guard.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import { DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, @@ -25,6 +40,110 @@ import { import type { BrowserResponse, BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toStringOrEmpty } from "./utils.js"; +const CHROME_MCP_OVERLAY_ATTR = "data-openclaw-mcp-overlay"; + +async function clearChromeMcpOverlay(params: { + profileName: string; + targetId: string; +}): Promise { + await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `() => { + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + return true; + }`, + }).catch(() => {}); +} + +async function renderChromeMcpLabels(params: { + profileName: string; + targetId: string; + refs: string[]; +}): Promise<{ labels: number; skipped: number }> { + const refList = JSON.stringify(params.refs); + const result = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + args: params.refs, + fn: `(...elements) => { + const refs = ${refList}; + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + const root = document.createElement("div"); + root.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "labels"); + root.style.position = "fixed"; + root.style.inset = "0"; + 
root.style.pointerEvents = "none"; + root.style.zIndex = "2147483647"; + let labels = 0; + let skipped = 0; + elements.forEach((el, index) => { + if (!(el instanceof Element)) { + skipped += 1; + return; + } + const rect = el.getBoundingClientRect(); + if (rect.width <= 0 && rect.height <= 0) { + skipped += 1; + return; + } + labels += 1; + const badge = document.createElement("div"); + badge.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "label"); + badge.textContent = refs[index] || String(labels); + badge.style.position = "fixed"; + badge.style.left = \`\${Math.max(0, rect.left)}px\`; + badge.style.top = \`\${Math.max(0, rect.top)}px\`; + badge.style.transform = "translateY(-100%)"; + badge.style.padding = "2px 6px"; + badge.style.borderRadius = "999px"; + badge.style.background = "#FF4500"; + badge.style.color = "#fff"; + badge.style.font = "600 12px ui-monospace, SFMono-Regular, Menlo, monospace"; + badge.style.boxShadow = "0 2px 6px rgba(0,0,0,0.35)"; + badge.style.whiteSpace = "nowrap"; + root.appendChild(badge); + }); + document.documentElement.appendChild(root); + return { labels, skipped }; + }`, + }); + const labels = + result && + typeof result === "object" && + typeof (result as { labels?: unknown }).labels === "number" + ? (result as { labels: number }).labels + : 0; + const skipped = + result && + typeof result === "object" && + typeof (result as { skipped?: unknown }).skipped === "number" + ? (result as { skipped: number }).skipped + : 0; + return { labels, skipped }; +} + +async function saveNormalizedScreenshotResponse(params: { + res: BrowserResponse; + buffer: Buffer; + type: "png" | "jpeg"; + targetId: string; + url: string; +}) { + const normalized = await normalizeBrowserScreenshot(params.buffer, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await saveBrowserMediaResponse({ + res: params.res, + buffer: normalized.buffer, + contentType: normalized.contentType ?? 
`image/${params.type}`, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + targetId: params.targetId, + url: params.url, + }); +} + async function saveBrowserMediaResponse(params: { res: BrowserResponse; buffer: Buffer; @@ -56,7 +175,10 @@ export async function resolveTargetIdAfterNavigate(opts: { }): Promise { let currentTargetId = opts.oldTargetId; try { - const pickReplacement = (tabs: Array<{ targetId: string; url: string }>) => { + const pickReplacement = ( + tabs: Array<{ targetId: string; url: string }>, + options?: { allowSingleTabFallback?: boolean }, + ) => { if (tabs.some((tab) => tab.targetId === opts.oldTargetId)) { return opts.oldTargetId; } @@ -68,7 +190,7 @@ export async function resolveTargetIdAfterNavigate(opts: { if (uniqueReplacement.length === 1) { return uniqueReplacement[0]?.targetId ?? opts.oldTargetId; } - if (tabs.length === 1) { + if (options?.allowSingleTabFallback && tabs.length === 1) { return tabs[0]?.targetId ?? opts.oldTargetId; } return opts.oldTargetId; @@ -77,7 +199,9 @@ export async function resolveTargetIdAfterNavigate(opts: { currentTargetId = pickReplacement(await opts.listTabs()); if (currentTargetId === opts.oldTargetId) { await new Promise((r) => setTimeout(r, 800)); - currentTargetId = pickReplacement(await opts.listTabs()); + currentTargetId = pickReplacement(await opts.listTabs(), { + allowSingleTabFallback: true, + }); } } catch { // Best-effort: fall back to pre-navigation targetId @@ -96,13 +220,27 @@ export function registerBrowserAgentSnapshotRoutes( if (!url) { return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "navigate", - run: async ({ cdpUrl, tab, pw, profileCtx }) => { + run: async ({ profileCtx, tab, cdpUrl }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + const ssrfPolicyOpts = withBrowserNavigationPolicy(ctx.state().resolved.ssrfPolicy); + await 
assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const result = await navigateChromeMcpPage({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + url, + }); + await assertBrowserNavigationResultAllowed({ url: result.url, ...ssrfPolicyOpts }); + return res.json({ ok: true, targetId: tab.targetId, ...result }); + } + const pw = await requirePwAi(res, "navigate"); + if (!pw) { + return; + } const result = await pw.navigateViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -122,6 +260,17 @@ export function registerBrowserAgentSnapshotRoutes( app.post("/pdf", async (req, res) => { const body = readBody(req); const targetId = toStringOrEmpty(body.targetId) || undefined; + const profileCtx = resolveProfileContext(req, res, ctx); + if (!profileCtx) { + return; + } + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "pdf is not supported for existing-session profiles yet; use screenshot/snapshot instead.", + ); + } await withPlaywrightRouteContext({ req, res, @@ -163,6 +312,31 @@ export function registerBrowserAgentSnapshotRoutes( ctx, targetId, run: async ({ profileCtx, tab, cdpUrl }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (element) { + return jsonError( + res, + 400, + "element screenshots are not supported for existing-session profiles; use ref from snapshot.", + ); + } + const buffer = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid: ref, + fullPage, + format: type, + }); + await saveNormalizedScreenshotResponse({ + res, + buffer, + type, + targetId: tab.targetId, + url: tab.url, + }); + return; + } + let buffer: Buffer; const shouldUsePlaywright = shouldUsePlaywrightForScreenshot({ profile: profileCtx.profile, @@ -193,15 +367,10 @@ export function registerBrowserAgentSnapshotRoutes( }); } - const normalized = await normalizeBrowserScreenshot(buffer, { - maxSide: 
DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, - maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, - }); - await saveBrowserMediaResponse({ + await saveNormalizedScreenshotResponse({ res, - buffer: normalized.buffer, - contentType: normalized.contentType ?? `image/${type}`, - maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + buffer, + type, targetId: tab.targetId, url: tab.url, }); @@ -227,6 +396,87 @@ export function registerBrowserAgentSnapshotRoutes( if ((plan.labels || plan.mode === "efficient") && plan.format === "aria") { return jsonError(res, 400, "labels/mode=efficient require format=ai"); } + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (plan.selectorValue || plan.frameSelectorValue) { + return jsonError( + res, + 400, + "selector/frame snapshots are not supported for existing-session profiles; snapshot the whole page and use refs.", + ); + } + const snapshot = await takeChromeMcpSnapshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + if (plan.format === "aria") { + return res.json({ + ok: true, + format: "aria", + targetId: tab.targetId, + url: tab.url, + nodes: flattenChromeMcpSnapshotToAriaNodes(snapshot, plan.limit), + }); + } + const built = buildAiSnapshotFromChromeMcpSnapshot({ + root: snapshot, + options: { + interactive: plan.interactive ?? undefined, + compact: plan.compact ?? undefined, + maxDepth: plan.depth ?? 
undefined, + }, + maxChars: plan.resolvedMaxChars, + }); + if (plan.labels) { + const refs = Object.keys(built.refs); + const labelResult = await renderChromeMcpLabels({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + refs, + }); + try { + const labeled = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + format: "png", + }); + const normalized = await normalizeBrowserScreenshot(labeled, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await ensureMediaDir(); + const saved = await saveMediaBuffer( + normalized.buffer, + normalized.contentType ?? "image/png", + "browser", + DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + ); + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + labels: true, + labelsCount: labelResult.labels, + labelsSkipped: labelResult.skipped, + imagePath: path.resolve(saved.path), + imageType: normalized.contentType?.includes("jpeg") ? 
"jpeg" : "png", + ...built, + }); + } finally { + await clearChromeMcpOverlay({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + } + } + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + ...built, + }); + } if (plan.format === "ai") { const pw = await requirePwAi(res, "ai snapshot"); if (!pw) { diff --git a/src/browser/routes/basic.existing-session.test.ts b/src/browser/routes/basic.existing-session.test.ts new file mode 100644 index 00000000000..34bcd9ee00b --- /dev/null +++ b/src/browser/routes/basic.existing-session.test.ts @@ -0,0 +1,94 @@ +import { describe, expect, it, vi } from "vitest"; +import { BrowserProfileUnavailableError } from "../errors.js"; +import { registerBrowserBasicRoutes } from "./basic.js"; +import { createBrowserRouteApp, createBrowserRouteResponse } from "./test-helpers.js"; + +vi.mock("../chrome-mcp.js", () => ({ + getChromeMcpPid: vi.fn(() => 4321), +})); + +describe("basic browser routes", () => { + it("maps existing-session status failures to JSON browser errors", async () => { + const { app, getHandlers } = createBrowserRouteApp(); + registerBrowserBasicRoutes(app, { + state: () => ({ + resolved: { + enabled: true, + headless: false, + noSandbox: false, + executablePath: undefined, + }, + profiles: new Map(), + }), + forProfile: () => + ({ + profile: { + name: "chrome-live", + driver: "existing-session", + cdpPort: 0, + cdpUrl: "", + color: "#00AA00", + attachOnly: true, + }, + isHttpReachable: async () => { + throw new BrowserProfileUnavailableError("attach failed"); + }, + isReachable: async () => true, + }) as never, + } as never); + + const handler = getHandlers.get("/"); + expect(handler).toBeTypeOf("function"); + + const response = createBrowserRouteResponse(); + await handler?.({ params: {}, query: { profile: "chrome-live" } }, response.res); + + expect(response.statusCode).toBe(409); + expect(response.body).toMatchObject({ error: "attach failed" }); + }); + + 
it("reports Chrome MCP transport without fake CDP fields", async () => { + const { app, getHandlers } = createBrowserRouteApp(); + registerBrowserBasicRoutes(app, { + state: () => ({ + resolved: { + enabled: true, + headless: false, + noSandbox: false, + executablePath: undefined, + }, + profiles: new Map(), + }), + forProfile: () => + ({ + profile: { + name: "chrome-live", + driver: "existing-session", + cdpPort: 0, + cdpUrl: "", + color: "#00AA00", + attachOnly: true, + }, + isHttpReachable: async () => true, + isReachable: async () => true, + }) as never, + } as never); + + const handler = getHandlers.get("/"); + expect(handler).toBeTypeOf("function"); + + const response = createBrowserRouteResponse(); + await handler?.({ params: {}, query: { profile: "chrome-live" } }, response.res); + + expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ + profile: "chrome-live", + driver: "existing-session", + transport: "chrome-mcp", + running: true, + cdpPort: null, + cdpUrl: null, + pid: 4321, + }); + }); +}); diff --git a/src/browser/routes/basic.ts b/src/browser/routes/basic.ts index 5f32c86729b..f6123ac4cf0 100644 --- a/src/browser/routes/basic.ts +++ b/src/browser/routes/basic.ts @@ -1,11 +1,21 @@ +import { getChromeMcpPid } from "../chrome-mcp.js"; import { resolveBrowserExecutableForPlatform } from "../chrome.executables.js"; import { toBrowserErrorResponse } from "../errors.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import { createBrowserProfilesService } from "../profiles-service.js"; import type { BrowserRouteContext, ProfileContext } from "../server-context.js"; import { resolveProfileContext } from "./agent.shared.js"; import type { BrowserRequest, BrowserResponse, BrowserRouteRegistrar } from "./types.js"; import { getProfileContext, jsonError, toStringOrEmpty } from "./utils.js"; +function handleBrowserRouteError(res: BrowserResponse, err: unknown) { + const mapped = toBrowserErrorResponse(err); 
+ if (mapped) { + return jsonError(res, mapped.status, mapped.message); + } + jsonError(res, 500, String(err)); +} + async function withBasicProfileRoute(params: { req: BrowserRequest; res: BrowserResponse; @@ -19,11 +29,21 @@ async function withBasicProfileRoute(params: { try { await params.run(profileCtx); } catch (err) { - const mapped = toBrowserErrorResponse(err); - if (mapped) { - return jsonError(params.res, mapped.status, mapped.message); - } - jsonError(params.res, 500, String(err)); + return handleBrowserRouteError(params.res, err); + } +} + +async function withProfilesServiceMutation(params: { + res: BrowserResponse; + ctx: BrowserRouteContext; + run: (service: ReturnType) => Promise; +}) { + try { + const service = createBrowserProfilesService(params.ctx); + const result = await params.run(service); + params.res.json(result); + } catch (err) { + return handleBrowserRouteError(params.res, err); } } @@ -53,46 +73,59 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow return jsonError(res, profileCtx.status, profileCtx.error); } - const [cdpHttp, cdpReady] = await Promise.all([ - profileCtx.isHttpReachable(300), - profileCtx.isReachable(600), - ]); - - const profileState = current.profiles.get(profileCtx.profile.name); - let detectedBrowser: string | null = null; - let detectedExecutablePath: string | null = null; - let detectError: string | null = null; - try { - const detected = resolveBrowserExecutableForPlatform(current.resolved, process.platform); - if (detected) { - detectedBrowser = detected.kind; - detectedExecutablePath = detected.path; - } - } catch (err) { - detectError = String(err); - } + const [cdpHttp, cdpReady] = await Promise.all([ + profileCtx.isHttpReachable(300), + profileCtx.isReachable(600), + ]); - res.json({ - enabled: current.resolved.enabled, - profile: profileCtx.profile.name, - running: cdpReady, - cdpReady, - cdpHttp, - pid: profileState?.running?.pid ?? 
null, - cdpPort: profileCtx.profile.cdpPort, - cdpUrl: profileCtx.profile.cdpUrl, - chosenBrowser: profileState?.running?.exe.kind ?? null, - detectedBrowser, - detectedExecutablePath, - detectError, - userDataDir: profileState?.running?.userDataDir ?? null, - color: profileCtx.profile.color, - headless: current.resolved.headless, - noSandbox: current.resolved.noSandbox, - executablePath: current.resolved.executablePath ?? null, - attachOnly: profileCtx.profile.attachOnly, - }); + const profileState = current.profiles.get(profileCtx.profile.name); + const capabilities = getBrowserProfileCapabilities(profileCtx.profile); + let detectedBrowser: string | null = null; + let detectedExecutablePath: string | null = null; + let detectError: string | null = null; + + try { + const detected = resolveBrowserExecutableForPlatform(current.resolved, process.platform); + if (detected) { + detectedBrowser = detected.kind; + detectedExecutablePath = detected.path; + } + } catch (err) { + detectError = String(err); + } + + res.json({ + enabled: current.resolved.enabled, + profile: profileCtx.profile.name, + driver: profileCtx.profile.driver, + transport: capabilities.usesChromeMcp ? "chrome-mcp" : "cdp", + running: cdpReady, + cdpReady, + cdpHttp, + pid: capabilities.usesChromeMcp + ? getChromeMcpPid(profileCtx.profile.name) + : (profileState?.running?.pid ?? null), + cdpPort: capabilities.usesChromeMcp ? null : profileCtx.profile.cdpPort, + cdpUrl: capabilities.usesChromeMcp ? null : profileCtx.profile.cdpUrl, + chosenBrowser: profileState?.running?.exe.kind ?? null, + detectedBrowser, + detectedExecutablePath, + detectError, + userDataDir: profileState?.running?.userDataDir ?? null, + color: profileCtx.profile.color, + headless: current.resolved.headless, + noSandbox: current.resolved.noSandbox, + executablePath: current.resolved.executablePath ?? 
null, + attachOnly: profileCtx.profile.attachOnly, + }); + } catch (err) { + const mapped = toBrowserErrorResponse(err); + if (mapped) { + return jsonError(res, mapped.status, mapped.message); + } + jsonError(res, 500, String(err)); + } }); // Start browser (profile-aware) @@ -146,28 +179,29 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow const driver = toStringOrEmpty((req.body as { driver?: unknown })?.driver) as | "openclaw" | "extension" + | "existing-session" | ""; if (!name) { return jsonError(res, 400, "name is required"); } - try { - const service = createBrowserProfilesService(ctx); - const result = await service.createProfile({ - name, - color: color || undefined, - cdpUrl: cdpUrl || undefined, - driver: driver === "extension" ? "extension" : undefined, - }); - res.json(result); - } catch (err) { - const mapped = toBrowserErrorResponse(err); - if (mapped) { - return jsonError(res, mapped.status, mapped.message); - } - jsonError(res, 500, String(err)); - } + await withProfilesServiceMutation({ + res, + ctx, + run: async (service) => + await service.createProfile({ + name, + color: color || undefined, + cdpUrl: cdpUrl || undefined, + driver: + driver === "extension" + ? "extension" + : driver === "existing-session" + ? 
"existing-session" + : undefined, + }), + }); }); // Delete a profile @@ -177,16 +211,10 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow return jsonError(res, 400, "profile name is required"); } - try { - const service = createBrowserProfilesService(ctx); - const result = await service.deleteProfile(name); - res.json(result); - } catch (err) { - const mapped = toBrowserErrorResponse(err); - if (mapped) { - return jsonError(res, mapped.status, mapped.message); - } - jsonError(res, 500, String(err)); - } + await withProfilesServiceMutation({ + res, + ctx, + run: async (service) => await service.deleteProfile(name), + }); }); } diff --git a/src/browser/routes/test-helpers.ts b/src/browser/routes/test-helpers.ts new file mode 100644 index 00000000000..e6b046a9878 --- /dev/null +++ b/src/browser/routes/test-helpers.ts @@ -0,0 +1,36 @@ +import type { BrowserResponse, BrowserRouteHandler, BrowserRouteRegistrar } from "./types.js"; + +export function createBrowserRouteApp() { + const getHandlers = new Map(); + const postHandlers = new Map(); + const deleteHandlers = new Map(); + const app: BrowserRouteRegistrar = { + get: (path, handler) => void getHandlers.set(path, handler), + post: (path, handler) => void postHandlers.set(path, handler), + delete: (path, handler) => void deleteHandlers.set(path, handler), + }; + return { app, getHandlers, postHandlers, deleteHandlers }; +} + +export function createBrowserRouteResponse() { + let statusCode = 200; + let jsonBody: unknown; + const res: BrowserResponse = { + status(code) { + statusCode = code; + return res; + }, + json(body) { + jsonBody = body; + }, + }; + return { + res, + get statusCode() { + return statusCode; + }, + get body() { + return jsonBody; + }, + }; +} diff --git a/src/browser/server-context.availability.ts b/src/browser/server-context.availability.ts index 3b00ff99dff..3b991bbbdfe 100644 --- a/src/browser/server-context.availability.ts +++ 
b/src/browser/server-context.availability.ts @@ -3,6 +3,11 @@ import { PROFILE_POST_RESTART_WS_TIMEOUT_MS, resolveCdpReachabilityTimeouts, } from "./cdp-timeouts.js"; +import { + closeChromeMcpSession, + ensureChromeMcpAvailable, + listChromeMcpTabs, +} from "./chrome-mcp.js"; import { isChromeCdpReady, isChromeReachable, @@ -60,11 +65,19 @@ export function createProfileAvailability({ }); const isReachable = async (timeoutMs?: number) => { + if (capabilities.usesChromeMcp) { + // listChromeMcpTabs creates the session if needed — no separate ensureChromeMcpAvailable call required + await listChromeMcpTabs(profile.name); + return true; + } const { httpTimeoutMs, wsTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeCdpReady(profile.cdpUrl, httpTimeoutMs, wsTimeoutMs); }; const isHttpReachable = async (timeoutMs?: number) => { + if (capabilities.usesChromeMcp) { + return await isReachable(timeoutMs); + } const { httpTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeReachable(profile.cdpUrl, httpTimeoutMs); }; @@ -109,6 +122,9 @@ export function createProfileAvailability({ if (previousProfile.driver === "extension") { await stopChromeExtensionRelayServer({ cdpUrl: previousProfile.cdpUrl }).catch(() => false); } + if (getBrowserProfileCapabilities(previousProfile).usesChromeMcp) { + await closeChromeMcpSession(previousProfile.name).catch(() => false); + } await closePlaywrightBrowserConnectionForProfile(previousProfile.cdpUrl); if (previousProfile.cdpUrl !== profile.cdpUrl) { await closePlaywrightBrowserConnectionForProfile(profile.cdpUrl); @@ -138,6 +154,10 @@ export function createProfileAvailability({ const ensureBrowserAvailable = async (): Promise => { await reconcileProfileRuntime(); + if (capabilities.usesChromeMcp) { + await ensureChromeMcpAvailable(profile.name); + return; + } const current = state(); const remoteCdp = capabilities.isRemote; const attachOnly = profile.attachOnly; @@ -238,6 +258,10 @@ export function 
createProfileAvailability({ const stopRunningBrowser = async (): Promise<{ stopped: boolean }> => { await reconcileProfileRuntime(); + if (capabilities.usesChromeMcp) { + const stopped = await closeChromeMcpSession(profile.name); + return { stopped }; + } if (capabilities.requiresRelay) { const stopped = await stopChromeExtensionRelayServer({ cdpUrl: profile.cdpUrl, diff --git a/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts b/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts index 13c5f82e31d..ceaafc46d41 100644 --- a/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts +++ b/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts @@ -25,9 +25,9 @@ function makeBrowserState(): BrowserServerState { headless: true, noSandbox: false, attachOnly: false, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", profiles: { - chrome: { + "chrome-relay": { driver: "extension", cdpUrl: "http://127.0.0.1:18792", cdpPort: 18792, diff --git a/src/browser/server-context.existing-session.test.ts b/src/browser/server-context.existing-session.test.ts new file mode 100644 index 00000000000..abbd222342e --- /dev/null +++ b/src/browser/server-context.existing-session.test.ts @@ -0,0 +1,102 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createBrowserRouteContext } from "./server-context.js"; +import type { BrowserServerState } from "./server-context.js"; + +vi.mock("./chrome-mcp.js", () => ({ + closeChromeMcpSession: vi.fn(async () => true), + ensureChromeMcpAvailable: vi.fn(async () => {}), + focusChromeMcpTab: vi.fn(async () => {}), + listChromeMcpTabs: vi.fn(async () => [ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]), + openChromeMcpTab: vi.fn(async () => ({ + targetId: "8", + title: "", + url: "https://openclaw.ai", + type: "page", + })), + closeChromeMcpTab: vi.fn(async () => {}), + getChromeMcpPid: vi.fn(() => 
4321), +})); + +import * as chromeMcp from "./chrome-mcp.js"; + +function makeState(): BrowserServerState { + return { + server: null, + port: 0, + resolved: { + enabled: true, + evaluateEnabled: true, + controlPort: 18791, + cdpPortRangeStart: 18800, + cdpPortRangeEnd: 18899, + cdpProtocol: "http", + cdpHost: "127.0.0.1", + cdpIsLoopback: true, + remoteCdpTimeoutMs: 1500, + remoteCdpHandshakeTimeoutMs: 3000, + color: "#FF4500", + headless: false, + noSandbox: false, + attachOnly: false, + defaultProfile: "chrome-live", + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + extraArgs: [], + ssrfPolicy: { dangerouslyAllowPrivateNetwork: true }, + }, + profiles: new Map(), + }; +} + +afterEach(() => { + vi.clearAllMocks(); +}); + +describe("browser server-context existing-session profile", () => { + it("routes tab operations through the Chrome MCP backend", async () => { + const state = makeState(); + const ctx = createBrowserRouteContext({ getState: () => state }); + const live = ctx.forProfile("chrome-live"); + + vi.mocked(chromeMcp.listChromeMcpTabs) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]); + + await live.ensureBrowserAvailable(); + const tabs = await live.listTabs(); + expect(tabs.map((tab) => tab.targetId)).toEqual(["7"]); + + const opened = await live.openTab("https://openclaw.ai"); + expect(opened.targetId).toBe("8"); + + const selected = await live.ensureTabAvailable(); + expect(selected.targetId).toBe("8"); + + await live.focusTab("7"); + await live.stopRunningBrowser(); + + 
expect(chromeMcp.ensureChromeMcpAvailable).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.listChromeMcpTabs).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.openChromeMcpTab).toHaveBeenCalledWith("chrome-live", "https://openclaw.ai"); + expect(chromeMcp.focusChromeMcpTab).toHaveBeenCalledWith("chrome-live", "7"); + expect(chromeMcp.closeChromeMcpSession).toHaveBeenCalledWith("chrome-live"); + }); +}); diff --git a/src/browser/server-context.hot-reload-profiles.test.ts b/src/browser/server-context.hot-reload-profiles.test.ts index ec0c7e072aa..f9eb2452ce2 100644 --- a/src/browser/server-context.hot-reload-profiles.test.ts +++ b/src/browser/server-context.hot-reload-profiles.test.ts @@ -30,6 +30,7 @@ vi.mock("../config/config.js", () => ({ return buildConfig(); }, }), + getRuntimeConfigSnapshot: () => null, loadConfig: () => { // simulate stale loadConfig that doesn't see updates unless cache cleared if (!cachedConfig) { diff --git a/src/browser/server-context.selection.ts b/src/browser/server-context.selection.ts index 8a9cfa19c42..f0ce3e25e06 100644 --- a/src/browser/server-context.selection.ts +++ b/src/browser/server-context.selection.ts @@ -1,5 +1,6 @@ import { fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath } from "./cdp.js"; +import { closeChromeMcpTab, focusChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { BrowserTabNotFoundError, BrowserTargetAmbiguousError } from "./errors.js"; import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; @@ -111,6 +112,13 @@ export function createProfileSelectionOps({ const focusTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (capabilities.usesChromeMcp) { + await focusChromeMcpTab(profile.name, resolvedTargetId); + const profileState = getProfileState(); + profileState.lastTargetId = resolvedTargetId; + return; + } + if 
(capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const focusPageByTargetIdViaPlaywright = (mod as Partial | null) @@ -134,6 +142,11 @@ export function createProfileSelectionOps({ const closeTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (capabilities.usesChromeMcp) { + await closeChromeMcpTab(profile.name, resolvedTargetId); + return; + } + // For remote profiles, use Playwright's persistent connection to close tabs if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); diff --git a/src/browser/server-context.tab-ops.ts b/src/browser/server-context.tab-ops.ts index 24985430bdc..66a134564c6 100644 --- a/src/browser/server-context.tab-ops.ts +++ b/src/browser/server-context.tab-ops.ts @@ -1,6 +1,7 @@ import { CDP_JSON_NEW_TIMEOUT_MS } from "./cdp-timeouts.js"; import { fetchJson, fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath, createTargetViaCdp, normalizeCdpWsUrl } from "./cdp.js"; +import { listChromeMcpTabs, openChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { assertBrowserNavigationAllowed, @@ -65,6 +66,10 @@ export function createProfileTabOps({ const capabilities = getBrowserProfileCapabilities(profile); const listTabs = async (): Promise => { + if (capabilities.usesChromeMcp) { + return await listChromeMcpTabs(profile.name); + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const listPagesViaPlaywright = (mod as Partial | null)?.listPagesViaPlaywright; @@ -134,6 +139,15 @@ export function createProfileTabOps({ const openTab = async (url: string): Promise => { const ssrfPolicyOpts = withBrowserNavigationPolicy(state().resolved.ssrfPolicy); + if (capabilities.usesChromeMcp) { + await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const 
page = await openChromeMcpTab(profile.name, url); + const profileState = getProfileState(); + profileState.lastTargetId = page.targetId; + await assertBrowserNavigationResultAllowed({ url: page.url, ...ssrfPolicyOpts }); + return page; + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const createPageViaPlaywright = (mod as Partial | null)?.createPageViaPlaywright; diff --git a/src/browser/server-context.ts b/src/browser/server-context.ts index d75b14c2471..0ba29ad38cf 100644 --- a/src/browser/server-context.ts +++ b/src/browser/server-context.ts @@ -4,6 +4,7 @@ import type { ResolvedBrowserProfile } from "./config.js"; import { resolveProfile } from "./config.js"; import { BrowserProfileNotFoundError, toBrowserErrorResponse } from "./errors.js"; import { InvalidBrowserNavigationUrlError } from "./navigation-guard.js"; +import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; import { refreshResolvedBrowserConfigFromDisk, resolveBrowserProfileWithHotReload, @@ -159,15 +160,26 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon if (!profile) { continue; } + const capabilities = getBrowserProfileCapabilities(profile); let tabCount = 0; let running = false; + const profileCtx = createProfileContext(opts, profile); - if (profileState?.running) { + if (capabilities.usesChromeMcp) { + try { + running = await profileCtx.isReachable(300); + if (running) { + const tabs = await profileCtx.listTabs(); + tabCount = tabs.filter((t) => t.type === "page").length; + } + } catch { + // Chrome MCP not available + } + } else if (profileState?.running) { running = true; try { - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs(); + const tabs = await profileCtx.listTabs(); tabCount = tabs.filter((t) => t.type === "page").length; } catch { // Browser might not be responsive @@ -178,8 +190,7 @@ export function createBrowserRouteContext(opts: 
ContextOptions): BrowserRouteCon const reachable = await isChromeReachable(profile.cdpUrl, 200); if (reachable) { running = true; - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs().catch(() => []); + const tabs = await profileCtx.listTabs().catch(() => []); tabCount = tabs.filter((t) => t.type === "page").length; } } catch { @@ -189,9 +200,11 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon result.push({ name, - cdpPort: profile.cdpPort, - cdpUrl: profile.cdpUrl, + transport: capabilities.usesChromeMcp ? "chrome-mcp" : "cdp", + cdpPort: capabilities.usesChromeMcp ? null : profile.cdpPort, + cdpUrl: capabilities.usesChromeMcp ? null : profile.cdpUrl, color: profile.color, + driver: profile.driver, running, tabCount, isDefault: name === current.resolved.defaultProfile, diff --git a/src/browser/server-context.types.ts b/src/browser/server-context.types.ts index f05e90e9e77..b8ad7aa329d 100644 --- a/src/browser/server-context.types.ts +++ b/src/browser/server-context.types.ts @@ -1,5 +1,6 @@ import type { Server } from "node:http"; import type { RunningChrome } from "./chrome.js"; +import type { BrowserTransport } from "./client.js"; import type { BrowserTab } from "./client.js"; import type { ResolvedBrowserConfig, ResolvedBrowserProfile } from "./config.js"; @@ -53,9 +54,11 @@ export type ProfileContext = { export type ProfileStatus = { name: string; - cdpPort: number; - cdpUrl: string; + transport: BrowserTransport; + cdpPort: number | null; + cdpUrl: string | null; color: string; + driver: ResolvedBrowserProfile["driver"]; running: boolean; tabCount: number; isDefault: boolean; diff --git a/src/browser/server-lifecycle.test.ts b/src/browser/server-lifecycle.test.ts index e2395f99f04..5ef331f1784 100644 --- a/src/browser/server-lifecycle.test.ts +++ b/src/browser/server-lifecycle.test.ts @@ -43,7 +43,7 @@ describe("ensureExtensionRelayForProfiles", () => { it("starts relay only for extension 
profiles", async () => { resolveProfileMock.mockImplementation((_resolved: unknown, name: string) => { - if (name === "chrome") { + if (name === "chrome-relay") { return { driver: "extension", cdpUrl: "http://127.0.0.1:18888" }; } return { driver: "openclaw", cdpUrl: "http://127.0.0.1:18889" }; @@ -53,7 +53,7 @@ describe("ensureExtensionRelayForProfiles", () => { await ensureExtensionRelayForProfiles({ resolved: { profiles: { - chrome: {}, + "chrome-relay": {}, openclaw: {}, }, } as never, @@ -72,12 +72,12 @@ describe("ensureExtensionRelayForProfiles", () => { const onWarn = vi.fn(); await ensureExtensionRelayForProfiles({ - resolved: { profiles: { chrome: {} } } as never, + resolved: { profiles: { "chrome-relay": {} } } as never, onWarn, }); expect(onWarn).toHaveBeenCalledWith( - 'Chrome extension relay init failed for profile "chrome": Error: boom', + 'Chrome extension relay init failed for profile "chrome-relay": Error: boom', ); }); }); @@ -91,10 +91,10 @@ describe("stopKnownBrowserProfiles", () => { }); it("stops all known profiles and ignores per-profile failures", async () => { - listKnownProfileNamesMock.mockReturnValue(["openclaw", "chrome"]); + listKnownProfileNamesMock.mockReturnValue(["openclaw", "chrome-relay"]); const stopMap: Record> = { openclaw: vi.fn(async () => {}), - chrome: vi.fn(async () => { + "chrome-relay": vi.fn(async () => { throw new Error("profile stop failed"); }), }; @@ -112,7 +112,7 @@ describe("stopKnownBrowserProfiles", () => { }); expect(stopMap.openclaw).toHaveBeenCalledTimes(1); - expect(stopMap.chrome).toHaveBeenCalledTimes(1); + expect(stopMap["chrome-relay"]).toHaveBeenCalledTimes(1); expect(onWarn).not.toHaveBeenCalled(); }); diff --git a/src/browser/server.agent-contract-form-layout-act-commands.test.ts b/src/browser/server.agent-contract-form-layout-act-commands.test.ts index 738bf8b7e2d..c8b76c4b886 100644 --- a/src/browser/server.agent-contract-form-layout-act-commands.test.ts +++ 
b/src/browser/server.agent-contract-form-layout-act-commands.test.ts @@ -51,12 +51,14 @@ describe("browser control server", () => { values: ["a", "b"], }); expect(select.ok).toBe(true); - expect(pwMocks.selectOptionViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - ref: "5", - values: ["a", "b"], - }); + expect(pwMocks.selectOptionViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + ref: "5", + values: ["a", "b"], + }), + ); const fillCases: Array<{ input: Record; @@ -81,11 +83,13 @@ describe("browser control server", () => { fields: [input], }); expect(fill.ok).toBe(true); - expect(pwMocks.fillFormViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - fields: [expected], - }); + expect(pwMocks.fillFormViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + fields: [expected], + }), + ); } const resize = await postJson<{ ok: boolean }>(`${base}/act`, { @@ -94,12 +98,14 @@ describe("browser control server", () => { height: 600, }); expect(resize.ok).toBe(true); - expect(pwMocks.resizeViewportViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - width: 800, - height: 600, - }); + expect(pwMocks.resizeViewportViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + width: 800, + height: 600, + }), + ); const wait = await postJson<{ ok: boolean }>(`${base}/act`, { kind: "wait", @@ -150,13 +156,152 @@ describe("browser control server", () => { kind: "evaluate", fn: "() => 1", }); - expect(res.error).toContain("browser.evaluateEnabled=false"); expect(pwMocks.evaluateViaPlaywright).not.toHaveBeenCalled(); }, slowTimeoutMs, ); + it( + "normalizes batch actions and threads evaluateEnabled into the batch executor", + async () => { + const base = await 
startServerAndBase(); + + const batchRes = await postJson<{ ok: boolean; results?: Array<{ ok: boolean }> }>( + `${base}/act`, + { + kind: "batch", + stopOnError: "false", + actions: [ + { kind: "click", selector: "button.save", doubleClick: "true", delayMs: "25" }, + { kind: "wait", fn: " () => window.ready === true " }, + ], + }, + ); + + expect(batchRes.ok).toBe(true); + expect(pwMocks.batchViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + stopOnError: false, + evaluateEnabled: true, + actions: [ + { + kind: "click", + selector: "button.save", + doubleClick: true, + delayMs: 25, + }, + { + kind: "wait", + fn: "() => window.ready === true", + }, + ], + }), + ); + }, + slowTimeoutMs, + ); + + it( + "preserves exact type text in batch normalization", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ ok: boolean }>(`${base}/act`, { + kind: "batch", + actions: [ + { kind: "type", selector: "input.name", text: " padded " }, + { kind: "type", selector: "input.clearable", text: "" }, + ], + }); + + expect(batchRes.ok).toBe(true); + expect(pwMocks.batchViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + actions: [ + { + kind: "type", + selector: "input.name", + text: " padded ", + }, + { + kind: "type", + selector: "input.clearable", + text: "", + }, + ], + }), + ); + }, + slowTimeoutMs, + ); + + it( + "rejects malformed batch actions before dispatch", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: [{ kind: "click", ref: {} }], + }); + + expect(batchRes.error).toContain("click requires ref or selector"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + + it( + "rejects batched action targetId overrides before dispatch", + async () => { + const base = await startServerAndBase(); + + const 
batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: [{ kind: "click", ref: "5", targetId: "other-tab" }], + }); + + expect(batchRes.error).toContain("batched action targetId must match request targetId"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + + it( + "rejects oversized batch delays before dispatch", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: [{ kind: "click", selector: "button.save", delayMs: 5001 }], + }); + + expect(batchRes.error).toContain("click delayMs exceeds maximum of 5000ms"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + + it( + "rejects oversized top-level batches before dispatch", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: Array.from({ length: 101 }, () => ({ kind: "press", key: "Enter" })), + }); + + expect(batchRes.error).toContain("batch exceeds maximum of 100 actions"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + it("agent contract: hooks + response + downloads + screenshot", async () => { const base = await startServerAndBase(); @@ -165,13 +310,15 @@ describe("browser control server", () => { timeoutMs: 1234, }); expect(upload).toMatchObject({ ok: true }); - expect(pwMocks.armFileUploadViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - // The server resolves paths (which adds a drive letter on Windows for `\\tmp\\...` style roots). 
- paths: [path.resolve(DEFAULT_UPLOAD_DIR, "a.txt")], - timeoutMs: 1234, - }); + expect(pwMocks.armFileUploadViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + // The server resolves paths (which adds a drive letter on Windows for `\\tmp\\...` style roots). + paths: [path.resolve(DEFAULT_UPLOAD_DIR, "a.txt")], + timeoutMs: 1234, + }), + ); const uploadWithRef = await postJson(`${base}/hooks/file-chooser`, { paths: ["b.txt"], @@ -280,7 +427,7 @@ describe("browser control server", () => { expect(res.path).toContain("safe-trace.zip"); expect(pwMocks.traceStopViaPlaywright).toHaveBeenCalledWith( expect.objectContaining({ - cdpUrl: state.cdpBaseUrl, + cdpUrl: expect.any(String), targetId: "abcd1234", path: expect.stringContaining("safe-trace.zip"), }), @@ -369,7 +516,7 @@ describe("browser control server", () => { expect(res.ok).toBe(true); expect(pwMocks.waitForDownloadViaPlaywright).toHaveBeenCalledWith( expect.objectContaining({ - cdpUrl: state.cdpBaseUrl, + cdpUrl: expect.any(String), targetId: "abcd1234", path: expect.stringContaining("safe-wait.pdf"), }), @@ -385,7 +532,7 @@ describe("browser control server", () => { expect(res.ok).toBe(true); expect(pwMocks.downloadViaPlaywright).toHaveBeenCalledWith( expect.objectContaining({ - cdpUrl: state.cdpBaseUrl, + cdpUrl: expect.any(String), targetId: "abcd1234", ref: "e12", path: expect.stringContaining("safe-download.pdf"), diff --git a/src/browser/server.agent-contract-snapshot-endpoints.test.ts b/src/browser/server.agent-contract-snapshot-endpoints.test.ts index 7e300fe5aee..837a122becd 100644 --- a/src/browser/server.agent-contract-snapshot-endpoints.test.ts +++ b/src/browser/server.agent-contract-snapshot-endpoints.test.ts @@ -96,10 +96,14 @@ describe("browser control server", () => { headers: { "Content-Type": "application/json" }, body: JSON.stringify({ kind: "click", selector: "button.save" }), }); - expect(clickSelector.status).toBe(400); - 
expect(((await clickSelector.json()) as { error?: string }).error).toMatch( - /'selector' is not supported/i, - ); + expect(clickSelector.status).toBe(200); + expect(((await clickSelector.json()) as { ok?: boolean }).ok).toBe(true); + expect(pwMocks.clickViaPlaywright).toHaveBeenNthCalledWith(2, { + cdpUrl: state.cdpBaseUrl, + targetId: "abcd1234", + selector: "button.save", + doubleClick: false, + }); const type = await postJson<{ ok: boolean }>(`${base}/act`, { kind: "type", diff --git a/src/browser/server.control-server.test-harness.ts b/src/browser/server.control-server.test-harness.ts index 5721d9eb17b..118c83dbb73 100644 --- a/src/browser/server.control-server.test-harness.ts +++ b/src/browser/server.control-server.test-harness.ts @@ -11,6 +11,17 @@ type HarnessState = { reachable: boolean; cfgAttachOnly: boolean; cfgEvaluateEnabled: boolean; + cfgDefaultProfile: string; + cfgProfiles: Record< + string, + { + cdpPort?: number; + cdpUrl?: string; + color: string; + driver?: "openclaw" | "extension" | "existing-session"; + attachOnly?: boolean; + } + >; createTargetId: string | null; prevGatewayPort: string | undefined; prevGatewayToken: string | undefined; @@ -23,6 +34,8 @@ const state: HarnessState = { reachable: false, cfgAttachOnly: false, cfgEvaluateEnabled: true, + cfgDefaultProfile: "openclaw", + cfgProfiles: {}, createTargetId: null, prevGatewayPort: undefined, prevGatewayToken: undefined, @@ -61,6 +74,14 @@ export function setBrowserControlServerReachable(reachable: boolean): void { state.reachable = reachable; } +export function setBrowserControlServerProfiles( + profiles: HarnessState["cfgProfiles"], + defaultProfile = Object.keys(profiles)[0] ?? 
"openclaw", +): void { + state.cfgProfiles = profiles; + state.cfgDefaultProfile = defaultProfile; +} + const cdpMocks = vi.hoisted(() => ({ createTargetViaCdp: vi.fn<() => Promise<{ targetId: string }>>(async () => { throw new Error("cdp disabled"); @@ -77,6 +98,7 @@ export function getCdpMocks(): { createTargetViaCdp: MockFn; snapshotAria: MockF const pwMocks = vi.hoisted(() => ({ armDialogViaPlaywright: vi.fn(async () => {}), armFileUploadViaPlaywright: vi.fn(async () => {}), + batchViaPlaywright: vi.fn(async () => ({ results: [] })), clickViaPlaywright: vi.fn(async () => {}), closePageViaPlaywright: vi.fn(async () => {}), closePlaywrightBrowserConnection: vi.fn(async () => {}), @@ -121,6 +143,44 @@ export function getPwMocks(): Record { return pwMocks as unknown as Record; } +const chromeMcpMocks = vi.hoisted(() => ({ + clickChromeMcpElement: vi.fn(async () => {}), + closeChromeMcpSession: vi.fn(async () => true), + closeChromeMcpTab: vi.fn(async () => {}), + dragChromeMcpElement: vi.fn(async () => {}), + ensureChromeMcpAvailable: vi.fn(async () => {}), + evaluateChromeMcpScript: vi.fn(async () => true), + fillChromeMcpElement: vi.fn(async () => {}), + fillChromeMcpForm: vi.fn(async () => {}), + focusChromeMcpTab: vi.fn(async () => {}), + getChromeMcpPid: vi.fn(() => 4321), + hoverChromeMcpElement: vi.fn(async () => {}), + listChromeMcpTabs: vi.fn(async () => [ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]), + navigateChromeMcpPage: vi.fn(async ({ url }: { url: string }) => ({ url })), + openChromeMcpTab: vi.fn(async (_profile: string, url: string) => ({ + targetId: "8", + title: "", + url, + type: "page", + })), + pressChromeMcpKey: vi.fn(async () => {}), + resizeChromeMcpPage: vi.fn(async () => {}), + takeChromeMcpScreenshot: vi.fn(async () => Buffer.from("png")), + takeChromeMcpSnapshot: vi.fn(async () => ({ + id: "root", + role: "document", + name: "Example", + children: [{ id: "btn-1", role: "button", name: "Continue" }], + 
})), + uploadChromeMcpFile: vi.fn(async () => {}), +})); + +export function getChromeMcpMocks(): Record { + return chromeMcpMocks as unknown as Record; +} + const chromeUserDataDir = vi.hoisted(() => ({ dir: "/tmp/openclaw" })); installChromeUserDataDirHooks(chromeUserDataDir); @@ -147,24 +207,40 @@ function makeProc(pid = 123) { const proc = makeProc(); +function defaultProfilesForState(testPort: number): HarnessState["cfgProfiles"] { + return { + openclaw: { cdpPort: testPort + 9, color: "#FF4500" }, + }; +} + vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); - return { - ...actual, - loadConfig: () => ({ + const loadConfig = () => { + return { browser: { enabled: true, evaluateEnabled: state.cfgEvaluateEnabled, color: "#FF4500", attachOnly: state.cfgAttachOnly, headless: true, - defaultProfile: "openclaw", - profiles: { - openclaw: { cdpPort: state.testPort + 1, color: "#FF4500" }, - }, + defaultProfile: state.cfgDefaultProfile, + profiles: + Object.keys(state.cfgProfiles).length > 0 + ? 
state.cfgProfiles + : defaultProfilesForState(state.testPort), }, - }), - writeConfigFile: vi.fn(async () => {}), + }; + }; + const writeConfigFile = vi.fn(async () => {}); + return { + ...actual, + createConfigIO: vi.fn(() => ({ + loadConfig, + writeConfigFile, + })), + getRuntimeConfigSnapshot: vi.fn(() => null), + loadConfig, + writeConfigFile, }; }); @@ -209,8 +285,12 @@ vi.mock("./cdp.js", () => ({ vi.mock("./pw-ai.js", () => pwMocks); +vi.mock("./chrome-mcp.js", () => chromeMcpMocks); + vi.mock("../media/store.js", () => ({ + MEDIA_MAX_BYTES: 5 * 1024 * 1024, ensureMediaDir: vi.fn(async () => {}), + getMediaDir: vi.fn(() => "/tmp"), saveMediaBuffer: vi.fn(async () => ({ path: "/tmp/fake.png" })), })); @@ -251,13 +331,18 @@ function mockClearAll(obj: Record unknown }>) { export async function resetBrowserControlServerTestContext(): Promise { state.reachable = false; state.cfgAttachOnly = false; + state.cfgEvaluateEnabled = true; + state.cfgDefaultProfile = "openclaw"; + state.cfgProfiles = defaultProfilesForState(state.testPort); state.createTargetId = null; mockClearAll(pwMocks); mockClearAll(cdpMocks); + mockClearAll(chromeMcpMocks); state.testPort = await getFreePort(); - state.cdpBaseUrl = `http://127.0.0.1:${state.testPort + 1}`; + state.cdpBaseUrl = `http://127.0.0.1:${state.testPort + 9}`; + state.cfgProfiles = defaultProfilesForState(state.testPort); state.prevGatewayPort = process.env.OPENCLAW_GATEWAY_PORT; process.env.OPENCLAW_GATEWAY_PORT = String(state.testPort - 2); // Avoid flaky auth coupling: some suites temporarily set gateway env auth diff --git a/src/browser/snapshot-roles.ts b/src/browser/snapshot-roles.ts new file mode 100644 index 00000000000..8e5d873e557 --- /dev/null +++ b/src/browser/snapshot-roles.ts @@ -0,0 +1,63 @@ +/** + * Shared ARIA role classification sets used by both the Playwright and Chrome MCP + * snapshot paths. 
Keep these in sync — divergence causes the two drivers to produce + * different snapshot output for the same page. + */ + +/** Roles that represent user-interactive elements and always get a ref. */ +export const INTERACTIVE_ROLES = new Set([ + "button", + "checkbox", + "combobox", + "link", + "listbox", + "menuitem", + "menuitemcheckbox", + "menuitemradio", + "option", + "radio", + "searchbox", + "slider", + "spinbutton", + "switch", + "tab", + "textbox", + "treeitem", +]); + +/** Roles that carry meaningful content and get a ref when named. */ +export const CONTENT_ROLES = new Set([ + "article", + "cell", + "columnheader", + "gridcell", + "heading", + "listitem", + "main", + "navigation", + "region", + "rowheader", +]); + +/** Structural/container roles — typically skipped in compact mode. */ +export const STRUCTURAL_ROLES = new Set([ + "application", + "directory", + "document", + "generic", + "grid", + "group", + "ignored", + "list", + "menu", + "menubar", + "none", + "presentation", + "row", + "rowgroup", + "table", + "tablist", + "toolbar", + "tree", + "treegrid", +]); diff --git a/src/browser/url-pattern.test.ts b/src/browser/url-pattern.test.ts new file mode 100644 index 00000000000..1cfdc06c36f --- /dev/null +++ b/src/browser/url-pattern.test.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from "vitest"; +import { matchBrowserUrlPattern } from "./url-pattern.js"; + +describe("browser url pattern matching", () => { + it("matches exact URLs", () => { + expect(matchBrowserUrlPattern("https://example.com/a", "https://example.com/a")).toBe(true); + expect(matchBrowserUrlPattern("https://example.com/a", "https://example.com/b")).toBe(false); + }); + + it("matches substring patterns without wildcards", () => { + expect(matchBrowserUrlPattern("example.com", "https://example.com/a")).toBe(true); + expect(matchBrowserUrlPattern("/dash", "https://example.com/app/dash")).toBe(true); + expect(matchBrowserUrlPattern("nope", "https://example.com/a")).toBe(false); + 
}); + + it("matches glob patterns", () => { + expect(matchBrowserUrlPattern("**/dash", "https://example.com/app/dash")).toBe(true); + expect(matchBrowserUrlPattern("https://example.com/*", "https://example.com/a")).toBe(true); + expect(matchBrowserUrlPattern("https://example.com/*", "https://other.com/a")).toBe(false); + }); + + it("rejects empty patterns", () => { + expect(matchBrowserUrlPattern("", "https://example.com")).toBe(false); + expect(matchBrowserUrlPattern(" ", "https://example.com")).toBe(false); + }); +}); diff --git a/src/browser/url-pattern.ts b/src/browser/url-pattern.ts new file mode 100644 index 00000000000..2ff99657d26 --- /dev/null +++ b/src/browser/url-pattern.ts @@ -0,0 +1,15 @@ +export function matchBrowserUrlPattern(pattern: string, url: string): boolean { + const trimmedPattern = pattern.trim(); + if (!trimmedPattern) { + return false; + } + if (trimmedPattern === url) { + return true; + } + if (trimmedPattern.includes("*")) { + const escaped = trimmedPattern.replace(/[|\\{}()[\]^$+?.]/g, "\\$&"); + const regex = new RegExp(`^${escaped.replace(/\*\*/g, ".*").replace(/\*/g, ".*")}$`); + return regex.test(url); + } + return url.includes(trimmedPattern); +} diff --git a/src/channels/allowlist-match.ts b/src/channels/allowlist-match.ts index f32d5a2487c..8c105f1e51b 100644 --- a/src/channels/allowlist-match.ts +++ b/src/channels/allowlist-match.ts @@ -60,11 +60,24 @@ export function resolveAllowlistCandidates(params: { return { allowed: false }; } +export function resolveCompiledAllowlistMatch(params: { + compiledAllowlist: CompiledAllowlist; + candidates: Array<{ value?: string; source: TSource }>; +}): AllowlistMatch { + if (params.compiledAllowlist.set.size === 0) { + return { allowed: false }; + } + if (params.compiledAllowlist.wildcard) { + return { allowed: true, matchKey: "*", matchSource: "wildcard" as TSource }; + } + return resolveAllowlistCandidates(params); +} + export function resolveAllowlistMatchByCandidates(params: { allowList: 
ReadonlyArray; candidates: Array<{ value?: string; source: TSource }>; }): AllowlistMatch { - return resolveAllowlistCandidates({ + return resolveCompiledAllowlistMatch({ compiledAllowlist: compileAllowlist(params.allowList), candidates: params.candidates, }); diff --git a/src/channels/command-gating.test.ts b/src/channels/command-gating.test.ts index 5ea0614e287..9b3f645e515 100644 --- a/src/channels/command-gating.test.ts +++ b/src/channels/command-gating.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it } from "vitest"; import { resolveCommandAuthorizedFromAuthorizers, resolveControlCommandGate, + resolveDualTextControlCommandGate, } from "./command-gating.js"; describe("resolveCommandAuthorizedFromAuthorizers", () => { @@ -94,4 +95,17 @@ describe("resolveControlCommandGate", () => { }); expect(result.shouldBlock).toBe(false); }); + + it("supports the dual-authorizer text gate helper", () => { + const result = resolveDualTextControlCommandGate({ + useAccessGroups: true, + primaryConfigured: true, + primaryAllowed: false, + secondaryConfigured: true, + secondaryAllowed: true, + hasControlCommand: true, + }); + expect(result.commandAuthorized).toBe(true); + expect(result.shouldBlock).toBe(false); + }); }); diff --git a/src/channels/command-gating.ts b/src/channels/command-gating.ts index 1492d4760a4..068db8328be 100644 --- a/src/channels/command-gating.ts +++ b/src/channels/command-gating.ts @@ -43,3 +43,24 @@ export function resolveControlCommandGate(params: { const shouldBlock = params.allowTextCommands && params.hasControlCommand && !commandAuthorized; return { commandAuthorized, shouldBlock }; } + +export function resolveDualTextControlCommandGate(params: { + useAccessGroups: boolean; + primaryConfigured: boolean; + primaryAllowed: boolean; + secondaryConfigured: boolean; + secondaryAllowed: boolean; + hasControlCommand: boolean; + modeWhenAccessGroupsOff?: CommandGatingModeWhenAccessGroupsOff; +}): { commandAuthorized: boolean; shouldBlock: boolean } { + 
return resolveControlCommandGate({ + useAccessGroups: params.useAccessGroups, + authorizers: [ + { configured: params.primaryConfigured, allowed: params.primaryAllowed }, + { configured: params.secondaryConfigured, allowed: params.secondaryAllowed }, + ], + allowTextCommands: true, + hasControlCommand: params.hasControlCommand, + modeWhenAccessGroupsOff: params.modeWhenAccessGroupsOff, + }); +} diff --git a/src/channels/plugins/directory-config-helpers.test.ts b/src/channels/plugins/directory-config-helpers.test.ts index c9ba1429791..15aa8f0d298 100644 --- a/src/channels/plugins/directory-config-helpers.test.ts +++ b/src/channels/plugins/directory-config-helpers.test.ts @@ -6,6 +6,13 @@ import { listDirectoryUserEntriesFromAllowFrom, } from "./directory-config-helpers.js"; +function expectUserDirectoryEntries(entries: unknown) { + expect(entries).toEqual([ + { kind: "user", id: "alice" }, + { kind: "user", id: "carla" }, + ]); +} + describe("listDirectoryUserEntriesFromAllowFrom", () => { it("normalizes, deduplicates, filters, and limits user ids", () => { const entries = listDirectoryUserEntriesFromAllowFrom({ @@ -15,10 +22,7 @@ describe("listDirectoryUserEntriesFromAllowFrom", () => { limit: 2, }); - expect(entries).toEqual([ - { kind: "user", id: "alice" }, - { kind: "user", id: "carla" }, - ]); + expectUserDirectoryEntries(entries); }); }); @@ -54,10 +58,7 @@ describe("listDirectoryUserEntriesFromAllowFromAndMapKeys", () => { limit: 2, }); - expect(entries).toEqual([ - { kind: "user", id: "alice" }, - { kind: "user", id: "carla" }, - ]); + expectUserDirectoryEntries(entries); }); }); diff --git a/src/channels/plugins/directory-config-helpers.ts b/src/channels/plugins/directory-config-helpers.ts index 72f589bc0a7..edfab553677 100644 --- a/src/channels/plugins/directory-config-helpers.ts +++ b/src/channels/plugins/directory-config-helpers.ts @@ -22,12 +22,12 @@ export function toDirectoryEntries(kind: "user" | "group", ids: string[]): Chann return ids.map((id) => 
({ kind, id }) as const); } -function collectDirectoryIdsFromEntries(params: { - entries?: readonly unknown[]; +function normalizeDirectoryIds(params: { + rawIds: readonly string[]; normalizeId?: (entry: string) => string | null | undefined; }): string[] { - return (params.entries ?? []) - .map((entry) => String(entry).trim()) + return params.rawIds + .map((entry) => entry.trim()) .filter((entry) => Boolean(entry) && entry !== "*") .map((entry) => { const normalized = params.normalizeId ? params.normalizeId(entry) : entry; @@ -36,18 +36,24 @@ function collectDirectoryIdsFromEntries(params: { .filter(Boolean); } +function collectDirectoryIdsFromEntries(params: { + entries?: readonly unknown[]; + normalizeId?: (entry: string) => string | null | undefined; +}): string[] { + return normalizeDirectoryIds({ + rawIds: (params.entries ?? []).map((entry) => String(entry)), + normalizeId: params.normalizeId, + }); +} + function collectDirectoryIdsFromMapKeys(params: { groups?: Record; normalizeId?: (entry: string) => string | null | undefined; }): string[] { - return Object.keys(params.groups ?? {}) - .map((entry) => entry.trim()) - .filter((entry) => Boolean(entry) && entry !== "*") - .map((entry) => { - const normalized = params.normalizeId ? params.normalizeId(entry) : entry; - return typeof normalized === "string" ? normalized.trim() : ""; - }) - .filter(Boolean); + return normalizeDirectoryIds({ + rawIds: Object.keys(params.groups ?? 
{}), + normalizeId: params.normalizeId, + }); } function dedupeDirectoryIds(ids: string[]): string[] { diff --git a/src/channels/plugins/helpers.test.ts b/src/channels/plugins/helpers.test.ts index 2b85d7fea06..6b5f56c2ca3 100644 --- a/src/channels/plugins/helpers.test.ts +++ b/src/channels/plugins/helpers.test.ts @@ -1,6 +1,10 @@ import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../../config/config.js"; -import { buildAccountScopedDmSecurityPolicy, formatPairingApproveHint } from "./helpers.js"; +import { + buildAccountScopedDmSecurityPolicy, + formatPairingApproveHint, + parseOptionalDelimitedEntries, +} from "./helpers.js"; function cfgWithChannel(channelKey: string, accounts?: Record): OpenClawConfig { return { @@ -93,3 +97,18 @@ describe("buildAccountScopedDmSecurityPolicy", () => { }); }); }); + +describe("parseOptionalDelimitedEntries", () => { + it("returns undefined for empty input", () => { + expect(parseOptionalDelimitedEntries(" ")).toBeUndefined(); + }); + + it("splits comma, newline, and semicolon separated entries", () => { + expect(parseOptionalDelimitedEntries("alpha, beta\ngamma; delta")).toEqual([ + "alpha", + "beta", + "gamma", + "delta", + ]); + }); +}); diff --git a/src/channels/plugins/helpers.ts b/src/channels/plugins/helpers.ts index 135547d6e9a..40b01beb4d8 100644 --- a/src/channels/plugins/helpers.ts +++ b/src/channels/plugins/helpers.ts @@ -20,6 +20,17 @@ export function formatPairingApproveHint(channelId: string): string { return `Approve via: ${listCmd} / ${approveCmd}`; } +export function parseOptionalDelimitedEntries(value?: string): string[] | undefined { + if (!value?.trim()) { + return undefined; + } + const parsed = value + .split(/[\n,;]+/g) + .map((entry) => entry.trim()) + .filter(Boolean); + return parsed.length > 0 ? 
parsed : undefined; +} + export function buildAccountScopedDmSecurityPolicy(params: { cfg: OpenClawConfig; channelKey: string; diff --git a/src/channels/plugins/outbound/whatsapp.poll.test.ts b/src/channels/plugins/outbound/whatsapp.poll.test.ts index 7164a6b152e..6474322264a 100644 --- a/src/channels/plugins/outbound/whatsapp.poll.test.ts +++ b/src/channels/plugins/outbound/whatsapp.poll.test.ts @@ -1,5 +1,8 @@ import { describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../../../config/config.js"; +import { + createWhatsAppPollFixture, + expectWhatsAppPollSent, +} from "../../../test-helpers/whatsapp-outbound.js"; const hoisted = vi.hoisted(() => ({ sendPollWhatsApp: vi.fn(async () => ({ messageId: "poll-1", toJid: "1555@s.whatsapp.net" })), @@ -17,25 +20,16 @@ import { whatsappOutbound } from "./whatsapp.js"; describe("whatsappOutbound sendPoll", () => { it("threads cfg through poll send options", async () => { - const cfg = { marker: "resolved-cfg" } as OpenClawConfig; - const poll = { - question: "Lunch?", - options: ["Pizza", "Sushi"], - maxSelections: 1, - }; + const { cfg, poll, to, accountId } = createWhatsAppPollFixture(); const result = await whatsappOutbound.sendPoll!({ cfg, - to: "+1555", + to, poll, - accountId: "work", + accountId, }); - expect(hoisted.sendPollWhatsApp).toHaveBeenCalledWith("+1555", poll, { - verbose: false, - accountId: "work", - cfg, - }); + expectWhatsAppPollSent(hoisted.sendPollWhatsApp, { cfg, poll, to, accountId }); expect(result).toEqual({ messageId: "poll-1", toJid: "1555@s.whatsapp.net" }); }); }); diff --git a/src/channels/plugins/outbound/whatsapp.ts b/src/channels/plugins/outbound/whatsapp.ts index 58004676e6e..0cd797c6c10 100644 --- a/src/channels/plugins/outbound/whatsapp.ts +++ b/src/channels/plugins/outbound/whatsapp.ts @@ -1,8 +1,8 @@ import { chunkText } from "../../../auto-reply/chunk.js"; import { shouldLogVerbose } from "../../../globals.js"; import { sendPollWhatsApp } from 
"../../../web/outbound.js"; -import { resolveWhatsAppOutboundTarget } from "../../../whatsapp/resolve-outbound-target.js"; import type { ChannelOutboundAdapter } from "../types.js"; +import { createWhatsAppOutboundBase } from "../whatsapp-shared.js"; import { sendTextMediaPayload } from "./direct-text-media.js"; function trimLeadingWhitespace(text: string | undefined): string { @@ -10,13 +10,15 @@ function trimLeadingWhitespace(text: string | undefined): string { } export const whatsappOutbound: ChannelOutboundAdapter = { - deliveryMode: "gateway", - chunker: chunkText, - chunkerMode: "text", - textChunkLimit: 4000, - pollMaxOptions: 12, - resolveTarget: ({ to, allowFrom, mode }) => - resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), + ...createWhatsAppOutboundBase({ + chunker: chunkText, + sendMessageWhatsApp: async (...args) => + (await import("../../../web/outbound.js")).sendMessageWhatsApp(...args), + sendPollWhatsApp, + shouldLogVerbose, + normalizeText: trimLeadingWhitespace, + skipEmptyText: true, + }), sendPayload: async (ctx) => { const text = trimLeadingWhitespace(ctx.payload.text); const hasMedia = Boolean(ctx.payload.mediaUrl) || (ctx.payload.mediaUrls?.length ?? 0) > 0; @@ -35,39 +37,4 @@ export const whatsappOutbound: ChannelOutboundAdapter = { adapter: whatsappOutbound, }); }, - sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { - const normalizedText = trimLeadingWhitespace(text); - if (!normalizedText) { - return { channel: "whatsapp", messageId: "" }; - } - const send = - deps?.sendWhatsApp ?? (await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, normalizedText, { - verbose: false, - cfg, - accountId: accountId ?? 
undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendMedia: async ({ cfg, to, text, mediaUrl, mediaLocalRoots, accountId, deps, gifPlayback }) => { - const normalizedText = trimLeadingWhitespace(text); - const send = - deps?.sendWhatsApp ?? (await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, normalizedText, { - verbose: false, - cfg, - mediaUrl, - mediaLocalRoots, - accountId: accountId ?? undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendPoll: async ({ cfg, to, poll, accountId }) => - await sendPollWhatsApp(to, poll, { - verbose: shouldLogVerbose(), - accountId: accountId ?? undefined, - cfg, - }), }; diff --git a/src/channels/plugins/plugins-core.test.ts b/src/channels/plugins/plugins-core.test.ts index 9ccbaac8946..30ed835873d 100644 --- a/src/channels/plugins/plugins-core.test.ts +++ b/src/channels/plugins/plugins-core.test.ts @@ -410,33 +410,43 @@ describe("resolveChannelConfigWrites", () => { }); describe("authorizeConfigWrite", () => { - it("blocks when a target account disables writes", () => { - const cfg = makeSlackConfigWritesCfg("work"); + function expectConfigWriteBlocked(params: { + disabledAccountId: string; + reason: "target-disabled" | "origin-disabled"; + blockedScope: "target" | "origin"; + }) { expect( authorizeConfigWrite({ - cfg, + cfg: makeSlackConfigWritesCfg(params.disabledAccountId), origin: { channelId: "slack", accountId: "default" }, target: resolveExplicitConfigWriteTarget({ channelId: "slack", accountId: "work" }), }), ).toEqual({ allowed: false, + reason: params.reason, + blockedScope: { + kind: params.blockedScope, + scope: { + channelId: "slack", + accountId: params.blockedScope === "target" ? 
"work" : "default", + }, + }, + }); + } + + it("blocks when a target account disables writes", () => { + expectConfigWriteBlocked({ + disabledAccountId: "work", reason: "target-disabled", - blockedScope: { kind: "target", scope: { channelId: "slack", accountId: "work" } }, + blockedScope: "target", }); }); it("blocks when the origin account disables writes", () => { - const cfg = makeSlackConfigWritesCfg("default"); - expect( - authorizeConfigWrite({ - cfg, - origin: { channelId: "slack", accountId: "default" }, - target: resolveExplicitConfigWriteTarget({ channelId: "slack", accountId: "work" }), - }), - ).toEqual({ - allowed: false, + expectConfigWriteBlocked({ + disabledAccountId: "default", reason: "origin-disabled", - blockedScope: { kind: "origin", scope: { channelId: "slack", accountId: "default" } }, + blockedScope: "origin", }); }); diff --git a/src/channels/plugins/status.ts b/src/channels/plugins/status.ts index cc7de671a3a..689c50c6710 100644 --- a/src/channels/plugins/status.ts +++ b/src/channels/plugins/status.ts @@ -41,6 +41,19 @@ async function buildSnapshotFromAccount(params: { }; } +function inspectChannelAccount(params: { + plugin: ChannelPlugin; + cfg: OpenClawConfig; + accountId: string; +}): ResolvedAccount | null { + return (params.plugin.config.inspectAccount?.(params.cfg, params.accountId) ?? + inspectReadOnlyChannelAccount({ + channelId: params.plugin.id, + cfg: params.cfg, + accountId: params.accountId, + })) as ResolvedAccount | null; +} + export async function buildReadOnlySourceChannelAccountSnapshot(params: { plugin: ChannelPlugin; cfg: OpenClawConfig; @@ -49,13 +62,7 @@ export async function buildReadOnlySourceChannelAccountSnapshot probe?: unknown; audit?: unknown; }): Promise { - const inspectedAccount = - params.plugin.config.inspectAccount?.(params.cfg, params.accountId) ?? 
- inspectReadOnlyChannelAccount({ - channelId: params.plugin.id, - cfg: params.cfg, - accountId: params.accountId, - }); + const inspectedAccount = inspectChannelAccount(params); if (!inspectedAccount) { return null; } @@ -73,15 +80,9 @@ export async function buildChannelAccountSnapshot(params: { probe?: unknown; audit?: unknown; }): Promise { - const inspectedAccount = - params.plugin.config.inspectAccount?.(params.cfg, params.accountId) ?? - inspectReadOnlyChannelAccount({ - channelId: params.plugin.id, - cfg: params.cfg, - accountId: params.accountId, - }); - const account = (inspectedAccount ?? - params.plugin.config.resolveAccount(params.cfg, params.accountId)) as ResolvedAccount; + const inspectedAccount = inspectChannelAccount(params); + const account = + inspectedAccount ?? params.plugin.config.resolveAccount(params.cfg, params.accountId); return await buildSnapshotFromAccount({ ...params, account, diff --git a/src/channels/plugins/whatsapp-shared.ts b/src/channels/plugins/whatsapp-shared.ts index 368b58454fb..1174dff7c73 100644 --- a/src/channels/plugins/whatsapp-shared.ts +++ b/src/channels/plugins/whatsapp-shared.ts @@ -1,4 +1,7 @@ +import type { PluginRuntimeChannel } from "../../plugins/runtime/types-channel.js"; import { escapeRegExp } from "../../utils.js"; +import { resolveWhatsAppOutboundTarget } from "../../whatsapp/resolve-outbound-target.js"; +import type { ChannelOutboundAdapter } from "./types.js"; export const WHATSAPP_GROUP_INTRO_HINT = "WhatsApp IDs: SenderId is the participant JID (group participant id)."; @@ -15,3 +18,89 @@ export function resolveWhatsAppMentionStripPatterns(ctx: { To?: string | null }) const escaped = escapeRegExp(selfE164); return [escaped, `@${escaped}`]; } + +type WhatsAppChunker = NonNullable; +type WhatsAppSendMessage = PluginRuntimeChannel["whatsapp"]["sendMessageWhatsApp"]; +type WhatsAppSendPoll = PluginRuntimeChannel["whatsapp"]["sendPollWhatsApp"]; + +type CreateWhatsAppOutboundBaseParams = { + chunker: 
WhatsAppChunker; + sendMessageWhatsApp: WhatsAppSendMessage; + sendPollWhatsApp: WhatsAppSendPoll; + shouldLogVerbose: () => boolean; + resolveTarget?: ChannelOutboundAdapter["resolveTarget"]; + normalizeText?: (text: string | undefined) => string; + skipEmptyText?: boolean; +}; + +export function createWhatsAppOutboundBase({ + chunker, + sendMessageWhatsApp, + sendPollWhatsApp, + shouldLogVerbose, + resolveTarget = ({ to, allowFrom, mode }) => + resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), + normalizeText = (text) => text ?? "", + skipEmptyText = false, +}: CreateWhatsAppOutboundBaseParams): Pick< + ChannelOutboundAdapter, + | "deliveryMode" + | "chunker" + | "chunkerMode" + | "textChunkLimit" + | "pollMaxOptions" + | "resolveTarget" + | "sendText" + | "sendMedia" + | "sendPoll" +> { + return { + deliveryMode: "gateway", + chunker, + chunkerMode: "text", + textChunkLimit: 4000, + pollMaxOptions: 12, + resolveTarget, + sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { + const normalizedText = normalizeText(text); + if (skipEmptyText && !normalizedText) { + return { channel: "whatsapp", messageId: "" }; + } + const send = deps?.sendWhatsApp ?? sendMessageWhatsApp; + const result = await send(to, normalizedText, { + verbose: false, + cfg, + accountId: accountId ?? undefined, + gifPlayback, + }); + return { channel: "whatsapp", ...result }; + }, + sendMedia: async ({ + cfg, + to, + text, + mediaUrl, + mediaLocalRoots, + accountId, + deps, + gifPlayback, + }) => { + const send = deps?.sendWhatsApp ?? sendMessageWhatsApp; + const result = await send(to, normalizeText(text), { + verbose: false, + cfg, + mediaUrl, + mediaLocalRoots, + accountId: accountId ?? undefined, + gifPlayback, + }); + return { channel: "whatsapp", ...result }; + }, + sendPoll: async ({ cfg, to, poll, accountId }) => + await sendPollWhatsApp(to, poll, { + verbose: shouldLogVerbose(), + accountId: accountId ?? 
undefined, + cfg, + }), + }; +} diff --git a/src/channels/reply-prefix.ts b/src/channels/reply-prefix.ts index 2ae6f3d221a..59f0a29381d 100644 --- a/src/channels/reply-prefix.ts +++ b/src/channels/reply-prefix.ts @@ -5,19 +5,24 @@ import { } from "../auto-reply/reply/response-prefix-template.js"; import type { GetReplyOptions } from "../auto-reply/types.js"; import type { OpenClawConfig } from "../config/config.js"; +import { isSlackInteractiveRepliesEnabled } from "../slack/interactive-replies.js"; type ModelSelectionContext = Parameters>[0]; export type ReplyPrefixContextBundle = { prefixContext: ResponsePrefixContext; responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; responsePrefixContextProvider: () => ResponsePrefixContext; onModelSelected: (ctx: ModelSelectionContext) => void; }; export type ReplyPrefixOptions = Pick< ReplyPrefixContextBundle, - "responsePrefix" | "responsePrefixContextProvider" | "onModelSelected" + | "responsePrefix" + | "enableSlackInteractiveReplies" + | "responsePrefixContextProvider" + | "onModelSelected" >; export function createReplyPrefixContext(params: { @@ -45,6 +50,10 @@ export function createReplyPrefixContext(params: { channel: params.channel, accountId: params.accountId, }).responsePrefix, + enableSlackInteractiveReplies: + params.channel === "slack" + ? 
isSlackInteractiveRepliesEnabled({ cfg, accountId: params.accountId }) + : undefined, responsePrefixContextProvider: () => prefixContext, onModelSelected, }; @@ -56,7 +65,16 @@ export function createReplyPrefixOptions(params: { channel?: string; accountId?: string; }): ReplyPrefixOptions { - const { responsePrefix, responsePrefixContextProvider, onModelSelected } = - createReplyPrefixContext(params); - return { responsePrefix, responsePrefixContextProvider, onModelSelected }; + const { + responsePrefix, + enableSlackInteractiveReplies, + responsePrefixContextProvider, + onModelSelected, + } = createReplyPrefixContext(params); + return { + responsePrefix, + enableSlackInteractiveReplies, + responsePrefixContextProvider, + onModelSelected, + }; } diff --git a/src/cli/browser-cli-manage.test.ts b/src/cli/browser-cli-manage.test.ts new file mode 100644 index 00000000000..e1d01132be3 --- /dev/null +++ b/src/cli/browser-cli-manage.test.ts @@ -0,0 +1,151 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { registerBrowserManageCommands } from "./browser-cli-manage.js"; +import { createBrowserProgram } from "./browser-cli-test-helpers.js"; + +const mocks = vi.hoisted(() => { + const runtimeLog = vi.fn(); + const runtimeError = vi.fn(); + const runtimeExit = vi.fn(); + return { + callBrowserRequest: vi.fn< + ( + opts: unknown, + req: { path?: string }, + runtimeOpts?: { timeoutMs?: number }, + ) => Promise> + >(async () => ({})), + runtimeLog, + runtimeError, + runtimeExit, + runtime: { + log: runtimeLog, + error: runtimeError, + exit: runtimeExit, + }, + }; +}); + +vi.mock("./browser-cli-shared.js", () => ({ + callBrowserRequest: mocks.callBrowserRequest, +})); + +vi.mock("./cli-utils.js", () => ({ + runCommandWithRuntime: async ( + _runtime: unknown, + action: () => Promise, + onError: (err: unknown) => void, + ) => await action().catch(onError), +})); + +vi.mock("../runtime.js", () => ({ + defaultRuntime: mocks.runtime, +})); + +function 
createProgram() { + const { program, browser, parentOpts } = createBrowserProgram(); + registerBrowserManageCommands(browser, parentOpts); + return program; +} + +describe("browser manage output", () => { + beforeEach(() => { + mocks.callBrowserRequest.mockClear(); + mocks.runtimeLog.mockClear(); + mocks.runtimeError.mockClear(); + mocks.runtimeExit.mockClear(); + }); + + it("shows chrome-mcp transport for existing-session status without fake CDP fields", async () => { + mocks.callBrowserRequest.mockImplementation(async (_opts: unknown, req: { path?: string }) => + req.path === "/" + ? { + enabled: true, + profile: "chrome-live", + driver: "existing-session", + transport: "chrome-mcp", + running: true, + cdpReady: true, + cdpHttp: true, + pid: 4321, + cdpPort: null, + cdpUrl: null, + chosenBrowser: null, + userDataDir: null, + color: "#00AA00", + headless: false, + noSandbox: false, + executablePath: null, + attachOnly: true, + } + : {}, + ); + + const program = createProgram(); + await program.parseAsync(["browser", "--browser-profile", "chrome-live", "status"], { + from: "user", + }); + + const output = mocks.runtimeLog.mock.calls.at(-1)?.[0] as string; + expect(output).toContain("transport: chrome-mcp"); + expect(output).not.toContain("cdpPort:"); + expect(output).not.toContain("cdpUrl:"); + }); + + it("shows chrome-mcp transport in browser profiles output", async () => { + mocks.callBrowserRequest.mockImplementation(async (_opts: unknown, req: { path?: string }) => + req.path === "/profiles" + ? 
{ + profiles: [ + { + name: "chrome-live", + driver: "existing-session", + transport: "chrome-mcp", + running: true, + tabCount: 2, + isDefault: false, + isRemote: false, + cdpPort: null, + cdpUrl: null, + color: "#00AA00", + }, + ], + } + : {}, + ); + + const program = createProgram(); + await program.parseAsync(["browser", "profiles"], { from: "user" }); + + const output = mocks.runtimeLog.mock.calls.at(-1)?.[0] as string; + expect(output).toContain("chrome-live: running (2 tabs) [existing-session]"); + expect(output).toContain("transport: chrome-mcp"); + expect(output).not.toContain("port: 0"); + }); + + it("shows chrome-mcp transport after creating an existing-session profile", async () => { + mocks.callBrowserRequest.mockImplementation(async (_opts: unknown, req: { path?: string }) => + req.path === "/profiles/create" + ? { + ok: true, + profile: "chrome-live", + transport: "chrome-mcp", + cdpPort: null, + cdpUrl: null, + color: "#00AA00", + isRemote: false, + } + : {}, + ); + + const program = createProgram(); + await program.parseAsync( + ["browser", "create-profile", "--name", "chrome-live", "--driver", "existing-session"], + { from: "user" }, + ); + + const output = mocks.runtimeLog.mock.calls.at(-1)?.[0] as string; + expect(output).toContain('Created profile "chrome-live"'); + expect(output).toContain("transport: chrome-mcp"); + expect(output).not.toContain("port: 0"); + }); +}); diff --git a/src/cli/browser-cli-manage.timeout-option.test.ts b/src/cli/browser-cli-manage.timeout-option.test.ts index 134f13bc3c3..7338d97701e 100644 --- a/src/cli/browser-cli-manage.timeout-option.test.ts +++ b/src/cli/browser-cli-manage.timeout-option.test.ts @@ -76,4 +76,48 @@ describe("browser manage start timeout option", () => { expect(startCall?.[0]).toMatchObject({ timeout: "60000" }); expect(startCall?.[2]).toBeUndefined(); }); + + it("uses a longer built-in timeout for browser status", async () => { + const program = createProgram(); + await 
program.parseAsync(["browser", "status"], { from: "user" }); + + const statusCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? {}) as { path?: string }).path === "/", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(statusCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); + + it("uses a longer built-in timeout for browser tabs", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "tabs"], { from: "user" }); + + const tabsCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? {}) as { path?: string }).path === "/tabs", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(tabsCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); + + it("uses a longer built-in timeout for browser profiles", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "profiles"], { from: "user" }); + + const profilesCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? {}) as { path?: string }).path === "/profiles", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(profilesCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); + + it("uses a longer built-in timeout for browser open", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "open", "https://example.com"], { from: "user" }); + + const openCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? 
{}) as { path?: string }).path === "/tabs/open", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(openCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); }); diff --git a/src/cli/browser-cli-manage.ts b/src/cli/browser-cli-manage.ts index 53b83ca3f97..5bac9b621bf 100644 --- a/src/cli/browser-cli-manage.ts +++ b/src/cli/browser-cli-manage.ts @@ -1,5 +1,6 @@ import type { Command } from "commander"; import type { + BrowserTransport, BrowserCreateProfileResult, BrowserDeleteProfileResult, BrowserResetProfileResult, @@ -13,6 +14,8 @@ import { shortenHomePath } from "../utils.js"; import { callBrowserRequest, type BrowserParentOpts } from "./browser-cli-shared.js"; import { runCommandWithRuntime } from "./cli-utils.js"; +const BROWSER_MANAGE_REQUEST_TIMEOUT_MS = 45_000; + function resolveProfileQuery(profile?: string) { return profile ? { profile } : undefined; } @@ -38,7 +41,7 @@ async function callTabAction( query: resolveProfileQuery(profile), body, }, - { timeoutMs: 10_000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); } @@ -54,7 +57,7 @@ async function fetchBrowserStatus( query: resolveProfileQuery(profile), }, { - timeoutMs: 1500, + timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS, }, ); } @@ -99,6 +102,29 @@ function logBrowserTabs(tabs: BrowserTab[], json?: boolean) { ); } +function usesChromeMcpTransport(params: { + transport?: BrowserTransport; + driver?: "openclaw" | "extension" | "existing-session"; +}): boolean { + return params.transport === "chrome-mcp" || params.driver === "existing-session"; +} + +function formatBrowserConnectionSummary(params: { + transport?: BrowserTransport; + driver?: "openclaw" | "extension" | "existing-session"; + isRemote?: boolean; + cdpPort?: number | null; + cdpUrl?: string | null; +}): string { + if (usesChromeMcpTransport(params)) { + return "transport: chrome-mcp"; + } + if (params.isRemote) { + return `cdpUrl: ${params.cdpUrl ?? "(unset)"}`; + } + return `port: ${params.cdpPort ?? 
"(unset)"}`; +} + export function registerBrowserManageCommands( browser: Command, parentOpts: (cmd: Command) => BrowserParentOpts, @@ -120,8 +146,15 @@ export function registerBrowserManageCommands( `profile: ${status.profile ?? "openclaw"}`, `enabled: ${status.enabled}`, `running: ${status.running}`, - `cdpPort: ${status.cdpPort}`, - `cdpUrl: ${status.cdpUrl ?? `http://127.0.0.1:${status.cdpPort}`}`, + `transport: ${ + usesChromeMcpTransport(status) ? "chrome-mcp" : (status.transport ?? "cdp") + }`, + ...(!usesChromeMcpTransport(status) + ? [ + `cdpPort: ${status.cdpPort ?? "(unset)"}`, + `cdpUrl: ${status.cdpUrl ?? `http://127.0.0.1:${status.cdpPort}`}`, + ] + : []), `browser: ${status.chosenBrowser ?? "unknown"}`, `detectedBrowser: ${status.detectedBrowser ?? "unknown"}`, `detectedPath: ${detectedDisplay}`, @@ -196,7 +229,7 @@ export function registerBrowserManageCommands( path: "/tabs", query: resolveProfileQuery(profile), }, - { timeoutMs: 3000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); const tabs = result.tabs ?? []; logBrowserTabs(tabs, parent?.json); @@ -220,7 +253,7 @@ export function registerBrowserManageCommands( action: "list", }, }, - { timeoutMs: 10_000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); const tabs = result.tabs ?? 
[]; logBrowserTabs(tabs, parent?.json); @@ -305,7 +338,7 @@ export function registerBrowserManageCommands( query: resolveProfileQuery(profile), body: { url }, }, - { timeoutMs: 15000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); if (printJsonResult(parent, tab)) { return; @@ -330,7 +363,7 @@ export function registerBrowserManageCommands( query: resolveProfileQuery(profile), body: { targetId }, }, - { timeoutMs: 5000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); if (printJsonResult(parent, { ok: true })) { return; @@ -355,7 +388,7 @@ export function registerBrowserManageCommands( path: `/tabs/${encodeURIComponent(targetId.trim())}`, query: resolveProfileQuery(profile), }, - { timeoutMs: 5000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); } else { await callBrowserRequest( @@ -366,7 +399,7 @@ export function registerBrowserManageCommands( query: resolveProfileQuery(profile), body: { kind: "close" }, }, - { timeoutMs: 20000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); } if (printJsonResult(parent, { ok: true })) { @@ -389,7 +422,7 @@ export function registerBrowserManageCommands( method: "GET", path: "/profiles", }, - { timeoutMs: 3000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); const profiles = result.profiles ?? []; if (printJsonResult(parent, { profiles })) { @@ -405,9 +438,10 @@ export function registerBrowserManageCommands( const status = p.running ? "running" : "stopped"; const tabs = p.running ? ` (${p.tabCount} tabs)` : ""; const def = p.isDefault ? " [default]" : ""; - const loc = p.isRemote ? `cdpUrl: ${p.cdpUrl}` : `port: ${p.cdpPort}`; + const loc = formatBrowserConnectionSummary(p); const remote = p.isRemote ? " [remote]" : ""; - return `${p.name}: ${status}${tabs}${def}${remote}\n ${loc}, color: ${p.color}`; + const driver = p.driver !== "openclaw" ? 
` [${p.driver}]` : ""; + return `${p.name}: ${status}${tabs}${def}${remote}${driver}\n ${loc}, color: ${p.color}`; }) .join("\n"), ); @@ -420,7 +454,10 @@ export function registerBrowserManageCommands( .requiredOption("--name ", "Profile name (lowercase, numbers, hyphens)") .option("--color ", "Profile color (hex format, e.g. #0066CC)") .option("--cdp-url ", "CDP URL for remote Chrome (http/https)") - .option("--driver ", "Profile driver (openclaw|extension). Default: openclaw") + .option( + "--driver ", + "Profile driver (openclaw|extension|existing-session). Default: openclaw", + ) .action( async (opts: { name: string; color?: string; cdpUrl?: string; driver?: string }, cmd) => { const parent = parentOpts(cmd); @@ -434,7 +471,12 @@ export function registerBrowserManageCommands( name: opts.name, color: opts.color, cdpUrl: opts.cdpUrl, - driver: opts.driver === "extension" ? "extension" : undefined, + driver: + opts.driver === "extension" + ? "extension" + : opts.driver === "existing-session" + ? "existing-session" + : undefined, }, }, { timeoutMs: 10_000 }, @@ -442,11 +484,15 @@ export function registerBrowserManageCommands( if (printJsonResult(parent, result)) { return; } - const loc = result.isRemote ? ` cdpUrl: ${result.cdpUrl}` : ` port: ${result.cdpPort}`; + const loc = ` ${formatBrowserConnectionSummary(result)}`; defaultRuntime.log( info( `🦞 Created profile "${result.profile}"\n${loc}\n color: ${result.color}${ - opts.driver === "extension" ? "\n driver: extension" : "" + opts.driver === "extension" + ? "\n driver: extension" + : opts.driver === "existing-session" + ? 
"\n driver: existing-session" + : "" }`, ), ); diff --git a/src/cli/command-secret-gateway.test.ts b/src/cli/command-secret-gateway.test.ts index 6d0f89f6349..74c47f637e9 100644 --- a/src/cli/command-secret-gateway.test.ts +++ b/src/cli/command-secret-gateway.test.ts @@ -64,6 +64,17 @@ describe("resolveCommandSecretRefsViaGateway", () => { }); } + function expectGatewayUnavailableLocalFallbackDiagnostics( + result: Awaited>, + ) { + expect( + result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), + ).toBe(true); + expect( + result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), + ).toBe(true); + } + it("returns config unchanged when no target SecretRefs are configured", async () => { const config = { talk: { @@ -208,11 +219,8 @@ describe("resolveCommandSecretRefsViaGateway", () => { it("falls back to local resolution for web search SecretRefs when gateway is unavailable", async () => { const envKey = "WEB_SEARCH_GEMINI_API_KEY_LOCAL_FALLBACK"; - const priorValue = process.env[envKey]; - process.env[envKey] = "gemini-local-fallback-key"; - callGateway.mockRejectedValueOnce(new Error("gateway closed")); - - try { + await withEnvValue(envKey, "gemini-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); const result = await resolveCommandSecretRefsViaGateway({ config: { tools: { @@ -234,28 +242,14 @@ describe("resolveCommandSecretRefsViaGateway", () => { "gemini-local-fallback-key", ); expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("resolved_local"); - expect( - result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), - ).toBe(true); - expect( - result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), - ).toBe(true); - } finally { - if (priorValue === undefined) { - delete process.env[envKey]; - } else { - process.env[envKey] = priorValue; - } - } + 
expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); }); it("falls back to local resolution for Firecrawl SecretRefs when gateway is unavailable", async () => { const envKey = "WEB_FETCH_FIRECRAWL_API_KEY_LOCAL_FALLBACK"; - const priorValue = process.env[envKey]; - process.env[envKey] = "firecrawl-local-fallback-key"; - callGateway.mockRejectedValueOnce(new Error("gateway closed")); - - try { + await withEnvValue(envKey, "firecrawl-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); const result = await resolveCommandSecretRefsViaGateway({ config: { tools: { @@ -276,19 +270,8 @@ describe("resolveCommandSecretRefsViaGateway", () => { "firecrawl-local-fallback-key", ); expect(result.targetStatesByPath["tools.web.fetch.firecrawl.apiKey"]).toBe("resolved_local"); - expect( - result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), - ).toBe(true); - expect( - result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), - ).toBe(true); - } finally { - if (priorValue === undefined) { - delete process.env[envKey]; - } else { - process.env[envKey] = priorValue; - } - } + expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); }); it("marks web SecretRefs inactive when the web surface is disabled during local fallback", async () => { diff --git a/src/cli/daemon-cli/install.test.ts b/src/cli/daemon-cli/install.test.ts index 7401dc3b1a2..6d7b618a17a 100644 --- a/src/cli/daemon-cli/install.test.ts +++ b/src/cli/daemon-cli/install.test.ts @@ -84,8 +84,28 @@ vi.mock("../../commands/daemon-install-helpers.js", () => ({ vi.mock("./shared.js", () => ({ parsePort: parsePortMock, + createDaemonInstallActionContext: (jsonFlag: unknown) => { + const json = Boolean(jsonFlag); + return { + json, + stdout: process.stdout, + warnings: actionState.warnings, + emit: (payload: DaemonActionResponse) => { + actionState.emitted.push(payload); + }, + fail: (message: 
string, hints?: string[]) => { + actionState.failed.push({ message, hints }); + }, + }; + }, + failIfNixDaemonInstallMode: (fail: (message: string, hints?: string[]) => void) => { + if (!resolveIsNixModeMock()) { + return false; + } + fail("Nix mode detected; service install is disabled."); + return true; + }, })); - vi.mock("../../commands/daemon-runtime.js", () => ({ DEFAULT_GATEWAY_DAEMON_RUNTIME: "node", isGatewayDaemonRuntime: isGatewayDaemonRuntimeMock, @@ -97,16 +117,6 @@ vi.mock("../../daemon/service.js", () => ({ vi.mock("./response.js", () => ({ buildDaemonServiceSnapshot: vi.fn(), - createDaemonActionContext: vi.fn(() => ({ - stdout: process.stdout, - warnings: actionState.warnings, - emit: (payload: DaemonActionResponse) => { - actionState.emitted.push(payload); - }, - fail: (message: string, hints?: string[]) => { - actionState.failed.push({ message, hints }); - }, - })), installDaemonServiceAndEmit: installDaemonServiceAndEmitMock, })); @@ -126,6 +136,15 @@ function expectFirstInstallPlanCallOmitsToken() { expect(firstArg && "token" in firstArg).toBe(false); } +function mockResolvedGatewayTokenSecretRef() { + resolveSecretInputRefMock.mockReturnValue({ + ref: { source: "env", provider: "default", id: "OPENCLAW_GATEWAY_TOKEN" }, + }); + resolveSecretRefValuesMock.mockResolvedValue( + new Map([["env:default:OPENCLAW_GATEWAY_TOKEN", "resolved-from-secretref"]]), + ); +} + const { runDaemonInstall } = await import("./install.js"); const envSnapshot = captureFullEnv(); @@ -195,12 +214,7 @@ describe("runDaemonInstall", () => { }); it("validates token SecretRef but does not serialize resolved token into service env", async () => { - resolveSecretInputRefMock.mockReturnValue({ - ref: { source: "env", provider: "default", id: "OPENCLAW_GATEWAY_TOKEN" }, - }); - resolveSecretRefValuesMock.mockResolvedValue( - new Map([["env:default:OPENCLAW_GATEWAY_TOKEN", "resolved-from-secretref"]]), - ); + mockResolvedGatewayTokenSecretRef(); await runDaemonInstall({ json: 
true }); @@ -219,12 +233,7 @@ describe("runDaemonInstall", () => { loadConfigMock.mockReturnValue({ gateway: { auth: { mode: "token", token: "${OPENCLAW_GATEWAY_TOKEN}" } }, }); - resolveSecretInputRefMock.mockReturnValue({ - ref: { source: "env", provider: "default", id: "OPENCLAW_GATEWAY_TOKEN" }, - }); - resolveSecretRefValuesMock.mockResolvedValue( - new Map([["env:default:OPENCLAW_GATEWAY_TOKEN", "resolved-from-secretref"]]), - ); + mockResolvedGatewayTokenSecretRef(); await runDaemonInstall({ json: true }); diff --git a/src/cli/daemon-cli/install.ts b/src/cli/daemon-cli/install.ts index 96a74bdc748..023ea5e520e 100644 --- a/src/cli/daemon-cli/install.ts +++ b/src/cli/daemon-cli/install.ts @@ -5,25 +5,21 @@ import { } from "../../commands/daemon-runtime.js"; import { resolveGatewayInstallToken } from "../../commands/gateway-install-token.js"; import { readBestEffortConfig, resolveGatewayPort } from "../../config/config.js"; -import { resolveIsNixMode } from "../../config/paths.js"; import { resolveGatewayService } from "../../daemon/service.js"; import { isNonFatalSystemdInstallProbeError } from "../../daemon/systemd.js"; import { defaultRuntime } from "../../runtime.js"; import { formatCliCommand } from "../command-format.js"; +import { buildDaemonServiceSnapshot, installDaemonServiceAndEmit } from "./response.js"; import { - buildDaemonServiceSnapshot, - createDaemonActionContext, - installDaemonServiceAndEmit, -} from "./response.js"; -import { parsePort } from "./shared.js"; + createDaemonInstallActionContext, + failIfNixDaemonInstallMode, + parsePort, +} from "./shared.js"; import type { DaemonInstallOptions } from "./types.js"; export async function runDaemonInstall(opts: DaemonInstallOptions) { - const json = Boolean(opts.json); - const { stdout, warnings, emit, fail } = createDaemonActionContext({ action: "install", json }); - - if (resolveIsNixMode(process.env)) { - fail("Nix mode detected; service install is disabled."); + const { json, stdout, 
warnings, emit, fail } = createDaemonInstallActionContext(opts.json); + if (failIfNixDaemonInstallMode(fail)) { return; } diff --git a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts index 188e7090915..59a2926e993 100644 --- a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts @@ -1,30 +1,15 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const readConfigFileSnapshotMock = vi.fn(); const loadConfig = vi.fn(() => ({})); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readConfigFileSnapshot: () => readConfigFileSnapshotMock(), @@ -42,6 +27,28 @@ vi.mock("../../runtime.js", () => ({ defaultRuntime, })); +function setConfigSnapshot(params: { + exists: boolean; + valid: boolean; + issues?: Array<{ path: string; message: string }>; +}) { + readConfigFileSnapshotMock.mockResolvedValue({ + exists: params.exists, + valid: params.valid, + config: {}, + issues: params.issues ?? 
[], + }); +} + +function createServiceRunArgs() { + return { + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + }; +} + describe("runServiceRestart config pre-flight (#35862)", () => { let runServiceRestart: typeof import("./lifecycle-core.js").runServiceRestart; @@ -50,80 +57,40 @@ describe("runServiceRestart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: true, valid: true }); loadConfig.mockReset(); loadConfig.mockReturnValue({}); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - service.readCommand.mockResolvedValue({ environment: {} }); - service.restart.mockResolvedValue({ outcome: "completed" }); - vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + resetLifecycleServiceMocks(); + stubEmptyGatewayEnv(); }); it("aborts restart when config is invalid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ + setConfigSnapshot({ exists: true, valid: false, - config: {}, issues: [{ path: "agents.defaults.pdfModel", message: "Unrecognized key" }], }); - await expect( - runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }), - ).rejects.toThrow("__exit__:1"); + await expect(runServiceRestart(createServiceRunArgs())).rejects.toThrow("__exit__:1"); expect(service.restart).not.toHaveBeenCalled(); }); it("proceeds with restart when config is valid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: true, valid: true }); - const result = await 
runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + const result = await runServiceRestart(createServiceRunArgs()); expect(result).toBe(true); expect(service.restart).toHaveBeenCalledTimes(1); }); it("proceeds with restart when config file does not exist", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ - exists: false, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: false, valid: true }); - const result = await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + const result = await runServiceRestart(createServiceRunArgs()); expect(result).toBe(true); expect(service.restart).toHaveBeenCalledTimes(1); @@ -132,12 +99,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => { it("proceeds with restart when snapshot read throws", async () => { readConfigFileSnapshotMock.mockRejectedValue(new Error("read failed")); - const result = await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + const result = await runServiceRestart(createServiceRunArgs()); expect(result).toBe(true); expect(service.restart).toHaveBeenCalledTimes(1); @@ -152,54 +114,28 @@ describe("runServiceStart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); - service.isLoaded.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - service.restart.mockResolvedValue({ outcome: "completed" }); + setConfigSnapshot({ exists: true, valid: true }); + resetLifecycleServiceMocks(); }); it("aborts start when config is invalid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ + 
setConfigSnapshot({ exists: true, valid: false, - config: {}, issues: [{ path: "agents.defaults.pdfModel", message: "Unrecognized key" }], }); - await expect( - runServiceStart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }), - ).rejects.toThrow("__exit__:1"); + await expect(runServiceStart(createServiceRunArgs())).rejects.toThrow("__exit__:1"); expect(service.restart).not.toHaveBeenCalled(); }); it("proceeds with start when config is valid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: true, valid: true }); - await runServiceStart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + await runServiceStart(createServiceRunArgs()); expect(service.restart).toHaveBeenCalledTimes(1); }); diff --git a/src/cli/daemon-cli/lifecycle-core.test.ts b/src/cli/daemon-cli/lifecycle-core.test.ts index ff66bd17653..2f17269eb6c 100644 --- a/src/cli/daemon-cli/lifecycle-core.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.test.ts @@ -1,4 +1,12 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + runtimeLogs, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const loadConfig = vi.fn(() => ({ gateway: { @@ -8,28 +16,6 @@ const loadConfig = vi.fn(() => ({ }, })); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - 
vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), @@ -43,13 +29,28 @@ let runServiceRestart: typeof import("./lifecycle-core.js").runServiceRestart; let runServiceStart: typeof import("./lifecycle-core.js").runServiceStart; let runServiceStop: typeof import("./lifecycle-core.js").runServiceStop; +function readJsonLog() { + const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); + return JSON.parse(jsonLine ?? "{}") as T; +} + +function createServiceRunArgs(checkTokenDrift?: boolean) { + return { + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true as const }, + ...(checkTokenDrift ? { checkTokenDrift } : {}), + }; +} + describe("runServiceRestart token drift", () => { beforeAll(async () => { ({ runServiceRestart, runServiceStart, runServiceStop } = await import("./lifecycle-core.js")); }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); loadConfig.mockReset(); loadConfig.mockReturnValue({ gateway: { @@ -58,33 +59,19 @@ describe("runServiceRestart token drift", () => { }, }, }); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); + resetLifecycleServiceMocks(); service.readCommand.mockResolvedValue({ + programArguments: [], environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" }, }); - service.restart.mockResolvedValue({ outcome: "completed" }); - vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); - vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); + stubEmptyGatewayEnv(); }); it("emits drift warning when enabled", async () => { - await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - checkTokenDrift: true, - }); + await runServiceRestart(createServiceRunArgs(true)); 
expect(loadConfig).toHaveBeenCalledTimes(1); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { warnings?: string[] }; + const payload = readJsonLog<{ warnings?: string[] }>(); expect(payload.warnings).toEqual( expect.arrayContaining([expect.stringContaining("gateway install --force")]), ); @@ -99,20 +86,14 @@ describe("runServiceRestart token drift", () => { }, }); service.readCommand.mockResolvedValue({ + programArguments: [], environment: { OPENCLAW_GATEWAY_TOKEN: "env-token" }, }); vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "env-token"); - await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - checkTokenDrift: true, - }); + await runServiceRestart(createServiceRunArgs(true)); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { warnings?: string[] }; + const payload = readJsonLog<{ warnings?: string[] }>(); expect(payload.warnings).toEqual( expect.arrayContaining([expect.stringContaining("gateway install --force")]), ); @@ -128,8 +109,7 @@ describe("runServiceRestart token drift", () => { expect(loadConfig).not.toHaveBeenCalled(); expect(service.readCommand).not.toHaveBeenCalled(); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { warnings?: string[] }; + const payload = readJsonLog<{ warnings?: string[] }>(); expect(payload.warnings).toBeUndefined(); }); @@ -146,8 +126,7 @@ describe("runServiceRestart token drift", () => { }), }); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? 
"{}") as { result?: string; message?: string }; + const payload = readJsonLog<{ result?: string; message?: string }>(); expect(payload.result).toBe("stopped"); expect(payload.message).toContain("unmanaged process"); expect(service.stop).not.toHaveBeenCalled(); @@ -172,8 +151,7 @@ describe("runServiceRestart token drift", () => { expect(postRestartCheck).toHaveBeenCalledTimes(1); expect(service.restart).not.toHaveBeenCalled(); expect(service.readCommand).not.toHaveBeenCalled(); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { result?: string; message?: string }; + const payload = readJsonLog<{ result?: string; message?: string }>(); expect(payload.result).toBe("restarted"); expect(payload.message).toContain("unmanaged process"); }); @@ -192,8 +170,7 @@ describe("runServiceRestart token drift", () => { expect(result).toBe(true); expect(postRestartCheck).not.toHaveBeenCalled(); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { result?: string; message?: string }; + const payload = readJsonLog<{ result?: string; message?: string }>(); expect(payload.result).toBe("scheduled"); expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); }); @@ -209,8 +186,7 @@ describe("runServiceRestart token drift", () => { }); expect(service.isLoaded).toHaveBeenCalledTimes(1); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? 
"{}") as { result?: string; message?: string }; + const payload = readJsonLog<{ result?: string; message?: string }>(); expect(payload.result).toBe("scheduled"); expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); }); diff --git a/src/cli/daemon-cli/lifecycle-core.ts b/src/cli/daemon-cli/lifecycle-core.ts index a1ad4073584..8def6aeefe6 100644 --- a/src/cli/daemon-cli/lifecycle-core.ts +++ b/src/cli/daemon-cli/lifecycle-core.ts @@ -339,6 +339,22 @@ export async function runServiceRestart(params: { const { stdout, emit, fail } = createActionIO({ action: "restart", json }); const warnings: string[] = []; let handledNotLoaded: NotLoadedActionResult | null = null; + const emitScheduledRestart = ( + restartStatus: ReturnType, + serviceLoaded: boolean, + ) => { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, serviceLoaded), + warnings: warnings.length ? warnings : undefined, + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return true; + }; const loaded = await resolveServiceLoadedOrFail({ serviceNoun: params.serviceNoun, @@ -423,34 +439,14 @@ export async function runServiceRestart(params: { } let restartStatus = describeGatewayServiceRestart(params.serviceNoun, restartResult); if (restartStatus.scheduled) { - emit({ - ok: true, - result: restartStatus.daemonActionResult, - message: restartStatus.message, - service: buildDaemonServiceSnapshot(params.service, loaded), - warnings: warnings.length ? 
warnings : undefined, - }); - if (!json) { - defaultRuntime.log(restartStatus.message); - } - return true; + return emitScheduledRestart(restartStatus, loaded); } if (params.postRestartCheck) { const postRestartResult = await params.postRestartCheck({ json, stdout, warnings, fail }); if (postRestartResult) { restartStatus = describeGatewayServiceRestart(params.serviceNoun, postRestartResult); if (restartStatus.scheduled) { - emit({ - ok: true, - result: restartStatus.daemonActionResult, - message: restartStatus.message, - service: buildDaemonServiceSnapshot(params.service, loaded), - warnings: warnings.length ? warnings : undefined, - }); - if (!json) { - defaultRuntime.log(restartStatus.message); - } - return true; + return emitScheduledRestart(restartStatus, loaded); } } } diff --git a/src/cli/daemon-cli/lifecycle.test.ts b/src/cli/daemon-cli/lifecycle.test.ts index 61899e4e78c..f026f81399f 100644 --- a/src/cli/daemon-cli/lifecycle.test.ts +++ b/src/cli/daemon-cli/lifecycle.test.ts @@ -1,8 +1,5 @@ import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; -const mockReadFileSync = vi.hoisted(() => vi.fn()); -const mockSpawnSync = vi.hoisted(() => vi.fn()); - type RestartHealthSnapshot = { healthy: boolean; staleGatewayPids: number[]; @@ -35,7 +32,9 @@ const terminateStaleGatewayPids = vi.fn(); const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"]); const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]); const resolveGatewayPort = vi.fn(() => 18789); -const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); +const findVerifiedGatewayListenerPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); +const signalVerifiedGatewayPidSync = vi.fn<(pid: number, signal: "SIGTERM" | "SIGUSR1") => void>(); +const formatGatewayPidList = vi.fn<(pids: number[]) => string>((pids) => pids.join(", ")); const probeGateway = vi.fn< (opts: { url: string; @@ -49,24 +48,18 @@ const 
probeGateway = vi.fn< const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true); const loadConfig = vi.fn(() => ({})); -vi.mock("node:fs", () => ({ - default: { - readFileSync: (...args: unknown[]) => mockReadFileSync(...args), - }, -})); - -vi.mock("node:child_process", () => ({ - spawnSync: (...args: unknown[]) => mockSpawnSync(...args), -})); - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), resolveGatewayPort, })); -vi.mock("../../infra/restart.js", () => ({ - findGatewayPidsOnPortSync: (port: number) => findGatewayPidsOnPortSync(port), +vi.mock("../../infra/gateway-processes.js", () => ({ + findVerifiedGatewayListenerPidsOnPortSync: (port: number) => + findVerifiedGatewayListenerPidsOnPortSync(port), + signalVerifiedGatewayPidSync: (pid: number, signal: "SIGTERM" | "SIGUSR1") => + signalVerifiedGatewayPidSync(pid, signal), + formatGatewayPidList: (pids: number[]) => formatGatewayPidList(pids), })); vi.mock("../../gateway/probe.js", () => ({ @@ -106,6 +99,29 @@ describe("runDaemonRestart health checks", () => { let runDaemonRestart: (opts?: { json?: boolean }) => Promise; let runDaemonStop: (opts?: { json?: boolean }) => Promise; + function mockUnmanagedRestart({ + runPostRestartCheck = false, + }: { + runPostRestartCheck?: boolean; + } = {}) { + runServiceRestart.mockImplementation( + async (params: RestartParams & { onNotLoaded?: () => Promise }) => { + await params.onNotLoaded?.(); + if (runPostRestartCheck) { + await params.postRestartCheck?.({ + json: Boolean(params.opts?.json), + stdout: process.stdout, + warnings: [], + fail: (message: string) => { + throw new Error(message); + }, + }); + } + return true; + }, + ); + } + beforeAll(async () => { ({ runDaemonRestart, runDaemonStop } = await import("./lifecycle.js")); }); @@ -121,12 +137,12 @@ describe("runDaemonRestart health checks", () => { renderGatewayPortHealthDiagnostics.mockReset(); 
renderRestartDiagnostics.mockReset(); resolveGatewayPort.mockReset(); - findGatewayPidsOnPortSync.mockReset(); + findVerifiedGatewayListenerPidsOnPortSync.mockReset(); + signalVerifiedGatewayPidSync.mockReset(); + formatGatewayPidList.mockReset(); probeGateway.mockReset(); isRestartEnabled.mockReset(); loadConfig.mockReset(); - mockReadFileSync.mockReset(); - mockSpawnSync.mockReset(); service.readCommand.mockResolvedValue({ programArguments: ["openclaw", "gateway", "--port", "18789"], @@ -158,23 +174,8 @@ describe("runDaemonRestart health checks", () => { configSnapshot: { commands: { restart: true } }, }); isRestartEnabled.mockReturnValue(true); - mockReadFileSync.mockImplementation((path: string) => { - const match = path.match(/\/proc\/(\d+)\/cmdline$/); - if (!match) { - throw new Error(`unexpected path ${path}`); - } - const pid = Number.parseInt(match[1] ?? "", 10); - if ([4200, 4300].includes(pid)) { - return ["openclaw", "gateway", "--port", "18789", ""].join("\0"); - } - throw new Error(`unknown pid ${pid}`); - }); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: "openclaw gateway --port 18789", - stderr: "", - }); + signalVerifiedGatewayPidSync.mockImplementation(() => {}); + formatGatewayPidList.mockImplementation((pids) => pids.join(", ")); }); afterEach(() => { @@ -242,57 +243,26 @@ describe("runDaemonRestart health checks", () => { }); it("signals an unmanaged gateway process on stop", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200, 4200, 4300]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4200, 4300]); runServiceStop.mockImplementation(async (params: { onNotLoaded?: () 
=> Promise }) => { await params.onNotLoaded?.(); }); await runDaemonStop({ json: true }); - expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789); - expect(killSpy).toHaveBeenCalledWith(4200, "SIGTERM"); - expect(killSpy).toHaveBeenCalledWith(4300, "SIGTERM"); + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGTERM"); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4300, "SIGTERM"); }); it("signals a single unmanaged gateway process on restart", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); - runServiceRestart.mockImplementation( - async (params: RestartParams & { onNotLoaded?: () => Promise }) => { - await params.onNotLoaded?.(); - await params.postRestartCheck?.({ - json: Boolean(params.opts?.json), - stdout: process.stdout, - warnings: [], - fail: (message: string) => { - throw new Error(message); - }, - }); - return true; - }, - ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]); + mockUnmanagedRestart({ runPostRestartCheck: true }); await runDaemonRestart({ json: true }); - expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789); - expect(killSpy).toHaveBeenCalledWith(4200, "SIGUSR1"); + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGUSR1"); expect(probeGateway).toHaveBeenCalledTimes(1); expect(waitForGatewayHealthyListener).toHaveBeenCalledTimes(1); expect(waitForGatewayHealthyRestart).not.toHaveBeenCalled(); @@ -301,21 +271,8 @@ describe("runDaemonRestart health checks", () 
=> { }); it("fails unmanaged restart when multiple gateway listeners are present", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - findGatewayPidsOnPortSync.mockReturnValue([4200, 4300]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); - runServiceRestart.mockImplementation( - async (params: RestartParams & { onNotLoaded?: () => Promise }) => { - await params.onNotLoaded?.(); - return true; - }, - ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4300]); + mockUnmanagedRestart(); await expect(runDaemonRestart({ json: true })).rejects.toThrow( "multiple gateway processes are listening on port 18789", @@ -323,18 +280,13 @@ describe("runDaemonRestart health checks", () => { }); it("fails unmanaged restart when the running gateway has commands.restart disabled", async () => { - findGatewayPidsOnPortSync.mockReturnValue([4200]); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]); probeGateway.mockResolvedValue({ ok: true, configSnapshot: { commands: { restart: false } }, }); isRestartEnabled.mockReturnValue(false); - runServiceRestart.mockImplementation( - async (params: RestartParams & { onNotLoaded?: () => Promise }) => { - await params.onNotLoaded?.(); - return true; - }, - ); + mockUnmanagedRestart(); await expect(runDaemonRestart({ json: true })).rejects.toThrow( "Gateway restart is disabled in the running gateway config", @@ -342,21 +294,13 @@ describe("runDaemonRestart health checks", () => { }); it("skips unmanaged signaling for pids that are not live gateway processes", async () => { - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200]); - mockReadFileSync.mockReturnValue(["python", "-m", "http.server", ""].join("\0")); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - 
stdout: "python -m http.server", - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); }); await runDaemonStop({ json: true }); - expect(killSpy).not.toHaveBeenCalled(); + expect(signalVerifiedGatewayPidSync).not.toHaveBeenCalled(); }); }); diff --git a/src/cli/daemon-cli/lifecycle.ts b/src/cli/daemon-cli/lifecycle.ts index 2b0775b0c48..53efaff9495 100644 --- a/src/cli/daemon-cli/lifecycle.ts +++ b/src/cli/daemon-cli/lifecycle.ts @@ -1,12 +1,12 @@ -import { spawnSync } from "node:child_process"; -import fsSync from "node:fs"; import { isRestartEnabled } from "../../config/commands.js"; import { readBestEffortConfig, resolveGatewayPort } from "../../config/config.js"; -import { parseCmdScriptCommandLine } from "../../daemon/cmd-argv.js"; import { resolveGatewayService } from "../../daemon/service.js"; import { probeGateway } from "../../gateway/probe.js"; -import { isGatewayArgv, parseProcCmdline } from "../../infra/gateway-process-argv.js"; -import { findGatewayPidsOnPortSync } from "../../infra/restart.js"; +import { + findVerifiedGatewayListenerPidsOnPortSync, + formatGatewayPidList, + signalVerifiedGatewayPidSync, +} from "../../infra/gateway-processes.js"; import { defaultRuntime } from "../../runtime.js"; import { theme } from "../../terminal/theme.js"; import { formatCliCommand } from "../command-format.js"; @@ -43,85 +43,12 @@ async function resolveGatewayLifecyclePort(service = resolveGatewayService()) { return portFromArgs ?? 
resolveGatewayPort(await readBestEffortConfig(), mergedEnv); } -function extractWindowsCommandLine(raw: string): string | null { - const lines = raw - .split(/\r?\n/) - .map((line) => line.trim()) - .filter(Boolean); - for (const line of lines) { - if (!line.toLowerCase().startsWith("commandline=")) { - continue; - } - const value = line.slice("commandline=".length).trim(); - return value || null; - } - return lines.find((line) => line.toLowerCase() !== "commandline") ?? null; -} - -function readGatewayProcessArgsSync(pid: number): string[] | null { - if (process.platform === "linux") { - try { - return parseProcCmdline(fsSync.readFileSync(`/proc/${pid}/cmdline`, "utf8")); - } catch { - return null; - } - } - if (process.platform === "darwin") { - const ps = spawnSync("ps", ["-o", "command=", "-p", String(pid)], { - encoding: "utf8", - timeout: 1000, - }); - if (ps.error || ps.status !== 0) { - return null; - } - const command = ps.stdout.trim(); - return command ? command.split(/\s+/) : null; - } - if (process.platform === "win32") { - const wmic = spawnSync( - "wmic", - ["process", "where", `ProcessId=${pid}`, "get", "CommandLine", "/value"], - { - encoding: "utf8", - timeout: 1000, - }, - ); - if (wmic.error || wmic.status !== 0) { - return null; - } - const command = extractWindowsCommandLine(wmic.stdout); - return command ? 
parseCmdScriptCommandLine(command) : null; - } - return null; -} - -function resolveGatewayListenerPids(port: number): number[] { - return Array.from(new Set(findGatewayPidsOnPortSync(port))) - .filter((pid): pid is number => Number.isFinite(pid) && pid > 0) - .filter((pid) => { - const args = readGatewayProcessArgsSync(pid); - return args != null && isGatewayArgv(args, { allowGatewayBinary: true }); - }); -} - function resolveGatewayPortFallback(): Promise { return readBestEffortConfig() .then((cfg) => resolveGatewayPort(cfg, process.env)) .catch(() => resolveGatewayPort(undefined, process.env)); } -function signalGatewayPid(pid: number, signal: "SIGTERM" | "SIGUSR1") { - const args = readGatewayProcessArgsSync(pid); - if (!args || !isGatewayArgv(args, { allowGatewayBinary: true })) { - throw new Error(`refusing to signal non-gateway process pid ${pid}`); - } - process.kill(pid, signal); -} - -function formatGatewayPidList(pids: number[]): string { - return pids.join(", "); -} - async function assertUnmanagedGatewayRestartEnabled(port: number): Promise { const probe = await probeGateway({ url: `ws://127.0.0.1:${port}`, @@ -143,7 +70,7 @@ async function assertUnmanagedGatewayRestartEnabled(port: number): Promise } function resolveVerifiedGatewayListenerPids(port: number): number[] { - return resolveGatewayListenerPids(port).filter( + return findVerifiedGatewayListenerPidsOnPortSync(port).filter( (pid): pid is number => Number.isFinite(pid) && pid > 0, ); } @@ -154,7 +81,7 @@ async function stopGatewayWithoutServiceManager(port: number) { return null; } for (const pid of pids) { - signalGatewayPid(pid, "SIGTERM"); + signalVerifiedGatewayPidSync(pid, "SIGTERM"); } return { result: "stopped" as const, @@ -173,7 +100,7 @@ async function restartGatewayWithoutServiceManager(port: number) { `multiple gateway processes are listening on port ${port}: ${formatGatewayPidList(pids)}; use "openclaw gateway status --deep" before retrying restart`, ); } - 
signalGatewayPid(pids[0], "SIGUSR1"); + signalVerifiedGatewayPidSync(pids[0], "SIGUSR1"); return { result: "restarted" as const, message: `Gateway restart signal sent to unmanaged process on port ${port}: ${pids[0]}.`, diff --git a/src/cli/daemon-cli/register-service-commands.test.ts b/src/cli/daemon-cli/register-service-commands.test.ts index e249b00c835..64a1e24589b 100644 --- a/src/cli/daemon-cli/register-service-commands.test.ts +++ b/src/cli/daemon-cli/register-service-commands.test.ts @@ -67,6 +67,17 @@ describe("addGatewayServiceCommands", () => { ); }, }, + { + name: "forwards require-rpc for status", + argv: ["status", "--require-rpc"], + assert: () => { + expect(runDaemonStatus).toHaveBeenCalledWith( + expect.objectContaining({ + requireRpc: true, + }), + ); + }, + }, ])("$name", async ({ argv, assert }) => { const gateway = createGatewayParentLikeCommand(); await gateway.parseAsync(argv, { from: "user" }); diff --git a/src/cli/daemon-cli/register-service-commands.ts b/src/cli/daemon-cli/register-service-commands.ts index 5d4ce0a9c28..2690eb91d7f 100644 --- a/src/cli/daemon-cli/register-service-commands.ts +++ b/src/cli/daemon-cli/register-service-commands.ts @@ -44,12 +44,14 @@ export function addGatewayServiceCommands(parent: Command, opts?: { statusDescri .option("--password ", "Gateway password (password auth)") .option("--timeout ", "Timeout in ms", "10000") .option("--no-probe", "Skip RPC probe") + .option("--require-rpc", "Exit non-zero when the RPC probe fails", false) .option("--deep", "Scan system-level services", false) .option("--json", "Output JSON", false) .action(async (cmdOpts, command) => { await runDaemonStatus({ rpc: resolveRpcOptions(cmdOpts, command), probe: Boolean(cmdOpts.probe), + requireRpc: Boolean(cmdOpts.requireRpc), deep: Boolean(cmdOpts.deep), json: Boolean(cmdOpts.json), }); diff --git a/src/cli/daemon-cli/restart-health.test.ts b/src/cli/daemon-cli/restart-health.test.ts index 0202f591cc2..c4b8eb3b07c 100644 --- 
a/src/cli/daemon-cli/restart-health.test.ts +++ b/src/cli/daemon-cli/restart-health.test.ts @@ -20,28 +20,45 @@ vi.mock("../../gateway/probe.js", () => ({ const originalPlatform = process.platform; +function makeGatewayService( + runtime: { status: "running"; pid: number } | { status: "stopped" }, +): GatewayService { + return { + readRuntime: vi.fn(async () => runtime), + } as unknown as GatewayService; +} + +async function inspectGatewayRestartWithSnapshot(params: { + runtime: { status: "running"; pid: number } | { status: "stopped" }; + portUsage: PortUsage; + includeUnknownListenersAsStale?: boolean; +}) { + const service = makeGatewayService(params.runtime); + inspectPortUsage.mockResolvedValue(params.portUsage); + const { inspectGatewayRestart } = await import("./restart-health.js"); + return inspectGatewayRestart({ + service, + port: 18789, + ...(params.includeUnknownListenersAsStale === undefined + ? {} + : { includeUnknownListenersAsStale: params.includeUnknownListenersAsStale }), + }); +} + async function inspectUnknownListenerFallback(params: { runtime: { status: "running"; pid: number } | { status: "stopped" }; includeUnknownListenersAsStale: boolean; }) { Object.defineProperty(process, "platform", { value: "win32", configurable: true }); classifyPortListener.mockReturnValue("unknown"); - - const service = { - readRuntime: vi.fn(async () => params.runtime), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 10920, command: "unknown" }], - hints: [], - }); - - const { inspectGatewayRestart } = await import("./restart-health.js"); - return inspectGatewayRestart({ - service, - port: 18789, + return inspectGatewayRestartWithSnapshot({ + runtime: params.runtime, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 10920, command: "unknown" }], + hints: [], + }, includeUnknownListenersAsStale: params.includeUnknownListenersAsStale, }); } @@ -49,21 +66,17 @@ async 
function inspectUnknownListenerFallback(params: { async function inspectAmbiguousOwnershipWithProbe( probeResult: Awaited>, ) { - const service = { - readRuntime: vi.fn(async () => ({ status: "running", pid: 8000 })), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ commandLine: "" }], - hints: [], - }); classifyPortListener.mockReturnValue("unknown"); probeGateway.mockResolvedValue(probeResult); - - const { inspectGatewayRestart } = await import("./restart-health.js"); - return inspectGatewayRestart({ service, port: 18789 }); + return inspectGatewayRestartWithSnapshot({ + runtime: { status: "running", pid: 8000 }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ commandLine: "" }], + hints: [], + }, + }); } describe("inspectGatewayRestart", () => { @@ -89,39 +102,31 @@ describe("inspectGatewayRestart", () => { }); it("treats a gateway listener child pid as healthy ownership", async () => { - const service = { - readRuntime: vi.fn(async () => ({ status: "running", pid: 7000 })), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 7001, ppid: 7000, commandLine: "openclaw-gateway" }], - hints: [], + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "running", pid: 7000 }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 7001, ppid: 7000, commandLine: "openclaw-gateway" }], + hints: [], + }, }); - const { inspectGatewayRestart } = await import("./restart-health.js"); - const snapshot = await inspectGatewayRestart({ service, port: 18789 }); - expect(snapshot.healthy).toBe(true); expect(snapshot.staleGatewayPids).toEqual([]); }); it("marks non-owned gateway listener pids as stale while runtime is running", async () => { - const service = { - readRuntime: vi.fn(async () => ({ status: "running", pid: 8000 })), - } as unknown as GatewayService; - - 
inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 9000, ppid: 8999, commandLine: "openclaw-gateway" }], - hints: [], + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "running", pid: 8000 }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 9000, ppid: 8999, commandLine: "openclaw-gateway" }], + hints: [], + }, }); - const { inspectGatewayRestart } = await import("./restart-health.js"); - const snapshot = await inspectGatewayRestart({ service, port: 18789 }); - expect(snapshot.healthy).toBe(false); expect(snapshot.staleGatewayPids).toEqual([9000]); }); @@ -157,21 +162,14 @@ describe("inspectGatewayRestart", () => { Object.defineProperty(process, "platform", { value: "win32", configurable: true }); classifyPortListener.mockReturnValue("ssh"); - const service = { - readRuntime: vi.fn(async () => ({ status: "stopped" })), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 22001, command: "nginx.exe" }], - hints: [], - }); - - const { inspectGatewayRestart } = await import("./restart-health.js"); - const snapshot = await inspectGatewayRestart({ - service, - port: 18789, + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "stopped" }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 22001, command: "nginx.exe" }], + hints: [], + }, includeUnknownListenersAsStale: true, }); @@ -190,6 +188,28 @@ describe("inspectGatewayRestart", () => { ); }); + it("treats a busy port as healthy when runtime status lags but the probe succeeds", async () => { + Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + classifyPortListener.mockReturnValue("gateway"); + probeGateway.mockResolvedValue({ + ok: true, + close: null, + }); + + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "stopped" }, + portUsage: { + 
port: 18789, + status: "busy", + listeners: [{ pid: 9100, commandLine: "openclaw-gateway" }], + hints: [], + }, + }); + + expect(snapshot.healthy).toBe(true); + expect(snapshot.staleGatewayPids).toEqual([]); + }); + it("treats auth-closed probe as healthy gateway reachability", async () => { const snapshot = await inspectAmbiguousOwnershipWithProbe({ ok: false, diff --git a/src/cli/daemon-cli/restart-health.ts b/src/cli/daemon-cli/restart-health.ts index 13741d2e9c4..9bfe3476ee6 100644 --- a/src/cli/daemon-cli/restart-health.ts +++ b/src/cli/daemon-cli/restart-health.ts @@ -65,7 +65,8 @@ async function confirmGatewayReachable(port: number): Promise { const probe = await probeGateway({ url: `ws://127.0.0.1:${port}`, auth: token || password ? { token, password } : undefined, - timeoutMs: 1_000, + timeoutMs: 3_000, + includeDetails: false, }); return probe.ok || looksLikeAuthClose(probe.close?.code, probe.close?.reason); } @@ -123,6 +124,22 @@ export async function inspectGatewayRestart(params: { }; } + if (portUsage.status === "busy" && runtime.status !== "running") { + try { + const reachable = await confirmGatewayReachable(params.port); + if (reachable) { + return { + runtime, + portUsage, + healthy: true, + staleGatewayPids: [], + }; + } + } catch { + // Probe is best-effort; keep the ownership-based diagnostics. + } + } + const gatewayListeners = portUsage.status === "busy" ? 
portUsage.listeners.filter( diff --git a/src/cli/daemon-cli/shared.ts b/src/cli/daemon-cli/shared.ts index 525b04682b0..eb2760c2630 100644 --- a/src/cli/daemon-cli/shared.ts +++ b/src/cli/daemon-cli/shared.ts @@ -1,3 +1,4 @@ +import { resolveIsNixMode } from "../../config/paths.js"; import { resolveGatewayLaunchAgentLabel, resolveGatewaySystemdServiceName, @@ -12,10 +13,30 @@ import { getResolvedLoggerSettings } from "../../logging.js"; import { colorize, isRich, theme } from "../../terminal/theme.js"; import { formatCliCommand } from "../command-format.js"; import { parsePort } from "../shared/parse-port.js"; +import { createDaemonActionContext } from "./response.js"; export { formatRuntimeStatus }; export { parsePort }; +export function createDaemonInstallActionContext(jsonFlag: unknown) { + const json = Boolean(jsonFlag); + return { + json, + ...createDaemonActionContext({ action: "install", json }), + }; +} + +export function failIfNixDaemonInstallMode( + fail: (message: string, hints?: string[]) => void, + env: NodeJS.ProcessEnv = process.env, +): boolean { + if (!resolveIsNixMode(env)) { + return false; + } + fail("Nix mode detected; service install is disabled."); + return true; +} + export function createCliStatusTextStyles() { const rich = isRich(); return { diff --git a/src/cli/daemon-cli/status.gather.test.ts b/src/cli/daemon-cli/status.gather.test.ts index 9b4d6428d1e..b0c08715abe 100644 --- a/src/cli/daemon-cli/status.gather.test.ts +++ b/src/cli/daemon-cli/status.gather.test.ts @@ -18,7 +18,12 @@ const readLastGatewayErrorLine = vi.fn(async (_env?: NodeJS.ProcessEnv) => null) const auditGatewayServiceConfig = vi.fn(async (_opts?: unknown) => undefined); const serviceIsLoaded = vi.fn(async (_opts?: unknown) => true); const serviceReadRuntime = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ status: "running" })); -const serviceReadCommand = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ +const serviceReadCommand = vi.fn< + (env?: NodeJS.ProcessEnv) => 
Promise<{ + programArguments: string[]; + environment?: Record; + }> +>(async (_env?: NodeJS.ProcessEnv) => ({ programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"], environment: { OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon", @@ -190,6 +195,37 @@ describe("gatherDaemonStatus", () => { expect(status.rpc?.url).toBe("wss://override.example:18790"); }); + it("reuses command environment when reading runtime status", async () => { + serviceReadCommand.mockResolvedValueOnce({ + programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"], + environment: { + OPENCLAW_GATEWAY_PORT: "19001", + OPENCLAW_CONFIG_PATH: "/tmp/openclaw-daemon/openclaw.json", + OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon", + } as Record, + }); + serviceReadRuntime.mockImplementationOnce(async (env?: NodeJS.ProcessEnv) => ({ + status: env?.OPENCLAW_GATEWAY_PORT === "19001" ? "running" : "unknown", + detail: env?.OPENCLAW_GATEWAY_PORT ?? "missing-port", + })); + + const status = await gatherDaemonStatus({ + rpc: {}, + probe: false, + deep: false, + }); + + expect(serviceReadRuntime).toHaveBeenCalledWith( + expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "19001", + }), + ); + expect(status.service.runtime).toMatchObject({ + status: "running", + detail: "19001", + }); + }); + it("resolves daemon gateway auth password SecretRef values before probing", async () => { daemonLoadedConfig = { gateway: { diff --git a/src/cli/daemon-cli/status.gather.ts b/src/cli/daemon-cli/status.gather.ts index a44ef93c656..ef15a377438 100644 --- a/src/cli/daemon-cli/status.gather.ts +++ b/src/cli/daemon-cli/status.gather.ts @@ -258,17 +258,21 @@ export async function gatherDaemonStatus( } & FindExtraGatewayServicesOptions, ): Promise { const service = resolveGatewayService(); - const [loaded, command, runtime] = await Promise.all([ - service.isLoaded({ env: process.env }).catch(() => false), - service.readCommand(process.env).catch(() => null), - service.readRuntime(process.env).catch((err) => ({ 
status: "unknown", detail: String(err) })), + const command = await service.readCommand(process.env).catch(() => null); + const serviceEnv = command?.environment + ? ({ + ...process.env, + ...command.environment, + } satisfies NodeJS.ProcessEnv) + : process.env; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env: serviceEnv }).catch(() => false), + service.readRuntime(serviceEnv).catch((err) => ({ status: "unknown", detail: String(err) })), ]); const configAudit = await auditGatewayServiceConfig({ env: process.env, command, }); - - const serviceEnv = command?.environment ?? undefined; const { mergedDaemonEnv, cliCfg, @@ -276,7 +280,7 @@ export async function gatherDaemonStatus( cliConfigSummary, daemonConfigSummary, configMismatch, - } = await loadDaemonConfigContext(serviceEnv); + } = await loadDaemonConfigContext(command?.environment); const { gateway, daemonPort, cliPort, probeUrlOverride } = await resolveGatewayStatusSummary({ cliCfg, daemonCfg, diff --git a/src/cli/daemon-cli/status.test.ts b/src/cli/daemon-cli/status.test.ts new file mode 100644 index 00000000000..5cf0484120e --- /dev/null +++ b/src/cli/daemon-cli/status.test.ts @@ -0,0 +1,92 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { createCliRuntimeCapture } from "../test-runtime-capture.js"; +import type { DaemonStatus } from "./status.gather.js"; + +const gatherDaemonStatus = vi.fn( + async (_opts?: unknown): Promise => ({ + service: { + label: "LaunchAgent", + loaded: true, + loadedText: "loaded", + notLoadedText: "not loaded", + }, + rpc: { + ok: true, + url: "ws://127.0.0.1:18789", + }, + extraServices: [], + }), +); +const printDaemonStatus = vi.fn(); + +const { runtimeErrors, defaultRuntime, resetRuntimeCapture } = createCliRuntimeCapture(); + +vi.mock("../../runtime.js", () => ({ + defaultRuntime, +})); + +vi.mock("../../terminal/theme.js", () => ({ + colorize: (_rich: boolean, _color: unknown, text: string) => text, + isRich: () => false, + 
theme: { error: "error" }, +})); + +vi.mock("./status.gather.js", () => ({ + gatherDaemonStatus: (opts: unknown) => gatherDaemonStatus(opts), +})); + +vi.mock("./status.print.js", () => ({ + printDaemonStatus: (...args: unknown[]) => printDaemonStatus(...args), +})); + +const { runDaemonStatus } = await import("./status.js"); + +describe("runDaemonStatus", () => { + beforeEach(() => { + gatherDaemonStatus.mockClear(); + printDaemonStatus.mockClear(); + resetRuntimeCapture(); + }); + + it("exits when require-rpc is set and the probe fails", async () => { + gatherDaemonStatus.mockResolvedValueOnce({ + service: { + label: "LaunchAgent", + loaded: true, + loadedText: "loaded", + notLoadedText: "not loaded", + }, + rpc: { + ok: false, + url: "ws://127.0.0.1:18789", + error: "gateway closed", + }, + extraServices: [], + }); + + await expect( + runDaemonStatus({ + rpc: {}, + probe: true, + requireRpc: true, + json: false, + }), + ).rejects.toThrow("__exit__:1"); + + expect(printDaemonStatus).toHaveBeenCalledTimes(1); + }); + + it("rejects require-rpc when probing is disabled", async () => { + await expect( + runDaemonStatus({ + rpc: {}, + probe: false, + requireRpc: true, + json: false, + }), + ).rejects.toThrow("__exit__:1"); + + expect(gatherDaemonStatus).not.toHaveBeenCalled(); + expect(runtimeErrors.join("\n")).toContain("--require-rpc cannot be used with --no-probe"); + }); +}); diff --git a/src/cli/daemon-cli/status.ts b/src/cli/daemon-cli/status.ts index 2af5a1977ec..44ae4b0a686 100644 --- a/src/cli/daemon-cli/status.ts +++ b/src/cli/daemon-cli/status.ts @@ -6,12 +6,20 @@ import type { DaemonStatusOptions } from "./types.js"; export async function runDaemonStatus(opts: DaemonStatusOptions) { try { + if (opts.requireRpc && !opts.probe) { + defaultRuntime.error("Gateway status failed: --require-rpc cannot be used with --no-probe."); + defaultRuntime.exit(1); + return; + } const status = await gatherDaemonStatus({ rpc: opts.rpc, probe: Boolean(opts.probe), deep: 
Boolean(opts.deep), }); printDaemonStatus(status, { json: Boolean(opts.json) }); + if (opts.requireRpc && !status.rpc?.ok) { + defaultRuntime.exit(1); + } } catch (err) { const rich = isRich(); defaultRuntime.error(colorize(rich, theme.error, `Gateway status failed: ${String(err)}`)); diff --git a/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts new file mode 100644 index 00000000000..6e2a93d5633 --- /dev/null +++ b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts @@ -0,0 +1,65 @@ +import { vi } from "vitest"; +import type { GatewayService } from "../../../daemon/service.js"; +import type { RuntimeEnv } from "../../../runtime.js"; +import type { MockFn } from "../../../test-utils/vitest-mock-fn.js"; + +export const runtimeLogs: string[] = []; + +type LifecycleRuntimeHarness = RuntimeEnv & { + error: MockFn; + exit: MockFn; +}; + +type LifecycleServiceHarness = GatewayService & { + install: MockFn; + uninstall: MockFn; + stop: MockFn; + isLoaded: MockFn; + readCommand: MockFn; + readRuntime: MockFn; + restart: MockFn; +}; + +export const defaultRuntime: LifecycleRuntimeHarness = { + log: (...args: unknown[]) => { + runtimeLogs.push(args.map((arg) => String(arg)).join(" ")); + }, + error: vi.fn(), + exit: vi.fn((code: number) => { + throw new Error(`__exit__:${code}`); + }), +}; + +export const service: LifecycleServiceHarness = { + label: "TestService", + loadedText: "loaded", + notLoadedText: "not loaded", + install: vi.fn(), + uninstall: vi.fn(), + stop: vi.fn(), + isLoaded: vi.fn(), + readCommand: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), +}; + +export function resetLifecycleRuntimeLogs() { + runtimeLogs.length = 0; +} + +export function resetLifecycleServiceMocks() { + service.isLoaded.mockClear(); + service.readCommand.mockClear(); + service.restart.mockClear(); + service.isLoaded.mockResolvedValue(true); + service.readCommand.mockResolvedValue({ programArguments: [], 
environment: {} }); + service.restart.mockResolvedValue({ outcome: "completed" }); +} + +export function stubEmptyGatewayEnv() { + vi.unstubAllEnvs(); + vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); +} diff --git a/src/cli/daemon-cli/types.ts b/src/cli/daemon-cli/types.ts index 602d47e9fd1..08a6d407329 100644 --- a/src/cli/daemon-cli/types.ts +++ b/src/cli/daemon-cli/types.ts @@ -11,6 +11,7 @@ export type GatewayRpcOpts = { export type DaemonStatusOptions = { rpc: GatewayRpcOpts; probe: boolean; + requireRpc: boolean; json: boolean; } & FindExtraGatewayServicesOptions; diff --git a/src/cli/gateway-cli/discover.ts b/src/cli/gateway-cli/discover.ts index 8465cf449ca..51eac4feb76 100644 --- a/src/cli/gateway-cli/discover.ts +++ b/src/cli/gateway-cli/discover.ts @@ -1,5 +1,6 @@ import type { GatewayBonjourBeacon } from "../../infra/bonjour-discovery.js"; import { colorize, theme } from "../../terminal/theme.js"; +import { parseTimeoutMsWithFallback } from "../parse-timeout.js"; export type GatewayDiscoverOpts = { timeout?: string; @@ -7,26 +8,7 @@ export type GatewayDiscoverOpts = { }; export function parseDiscoverTimeoutMs(raw: unknown, fallbackMs: number): number { - if (raw === undefined || raw === null) { - return fallbackMs; - } - const value = - typeof raw === "string" - ? raw.trim() - : typeof raw === "number" || typeof raw === "bigint" - ? 
String(raw) - : null; - if (value === null) { - throw new Error("invalid --timeout"); - } - if (!value) { - return fallbackMs; - } - const parsed = Number.parseInt(value, 10); - if (!Number.isFinite(parsed) || parsed <= 0) { - throw new Error(`invalid --timeout: ${value}`); - } - return parsed; + return parseTimeoutMsWithFallback(raw, fallbackMs, { invalidType: "error" }); } export function pickBeaconHost(beacon: GatewayBonjourBeacon): string | null { diff --git a/src/cli/node-cli/daemon.ts b/src/cli/node-cli/daemon.ts index b293c88c15c..f56b8af3fff 100644 --- a/src/cli/node-cli/daemon.ts +++ b/src/cli/node-cli/daemon.ts @@ -3,7 +3,6 @@ import { DEFAULT_NODE_DAEMON_RUNTIME, isNodeDaemonRuntime, } from "../../commands/node-daemon-runtime.js"; -import { resolveIsNixMode } from "../../config/paths.js"; import { resolveNodeLaunchAgentLabel, resolveNodeSystemdServiceName, @@ -25,13 +24,11 @@ import { runServiceStop, runServiceUninstall, } from "../daemon-cli/lifecycle-core.js"; -import { - buildDaemonServiceSnapshot, - createDaemonActionContext, - installDaemonServiceAndEmit, -} from "../daemon-cli/response.js"; +import { buildDaemonServiceSnapshot, installDaemonServiceAndEmit } from "../daemon-cli/response.js"; import { createCliStatusTextStyles, + createDaemonInstallActionContext, + failIfNixDaemonInstallMode, formatRuntimeStatus, parsePort, resolveRuntimeStatusColor, @@ -89,11 +86,8 @@ function resolveNodeDefaults( } export async function runNodeDaemonInstall(opts: NodeDaemonInstallOptions) { - const json = Boolean(opts.json); - const { stdout, warnings, emit, fail } = createDaemonActionContext({ action: "install", json }); - - if (resolveIsNixMode(process.env)) { - fail("Nix mode detected; service install is disabled."); + const { json, stdout, warnings, emit, fail } = createDaemonInstallActionContext(opts.json); + if (failIfNixDaemonInstallMode(fail)) { return; } diff --git a/src/cli/nodes-cli/register.camera.ts b/src/cli/nodes-cli/register.camera.ts index 
82cde2a35f3..9c813cecc5f 100644 --- a/src/cli/nodes-cli/register.camera.ts +++ b/src/cli/nodes-cli/register.camera.ts @@ -31,6 +31,12 @@ const parseFacing = (value: string): CameraFacing => { throw new Error(`invalid facing: ${value} (expected front|back)`); }; +function getGatewayInvokePayload(raw: unknown): unknown { + return typeof raw === "object" && raw !== null + ? (raw as { payload?: unknown }).payload + : undefined; +} + export function registerNodesCameraCommands(nodes: Command) { const camera = nodes.command("camera").description("Capture camera media from a paired node"); @@ -157,9 +163,7 @@ export function registerNodesCameraCommands(nodes: Command) { }); const raw = await callGatewayCli("node.invoke", opts, invokeParams); - const res = - typeof raw === "object" && raw !== null ? (raw as { payload?: unknown }) : {}; - const payload = parseCameraSnapPayload(res.payload); + const payload = parseCameraSnapPayload(getGatewayInvokePayload(raw)); const filePath = cameraTempPath({ kind: "snap", facing, @@ -229,8 +233,7 @@ export function registerNodesCameraCommands(nodes: Command) { }); const raw = await callGatewayCli("node.invoke", opts, invokeParams); - const res = typeof raw === "object" && raw !== null ? 
(raw as { payload?: unknown }) : {}; - const payload = parseCameraClipPayload(res.payload); + const payload = parseCameraClipPayload(getGatewayInvokePayload(raw)); const filePath = await writeCameraClipPayloadToFile({ payload, facing, diff --git a/src/cli/parse-timeout.test.ts b/src/cli/parse-timeout.test.ts new file mode 100644 index 00000000000..9d05cf2d244 --- /dev/null +++ b/src/cli/parse-timeout.test.ts @@ -0,0 +1,43 @@ +import { describe, expect, it } from "vitest"; +import { parseTimeoutMs, parseTimeoutMsWithFallback } from "./parse-timeout.js"; + +describe("parseTimeoutMs", () => { + it("parses positive string values", () => { + expect(parseTimeoutMs("1500")).toBe(1500); + }); + + it("returns undefined for empty or invalid values", () => { + expect(parseTimeoutMs(undefined)).toBeUndefined(); + expect(parseTimeoutMs("")).toBeUndefined(); + expect(parseTimeoutMs("nope")).toBeUndefined(); + }); +}); + +describe("parseTimeoutMsWithFallback", () => { + it("returns the fallback for missing or empty values", () => { + expect(parseTimeoutMsWithFallback(undefined, 3000)).toBe(3000); + expect(parseTimeoutMsWithFallback(null, 3000)).toBe(3000); + expect(parseTimeoutMsWithFallback(" ", 3000)).toBe(3000); + }); + + it("parses positive numbers and strings", () => { + expect(parseTimeoutMsWithFallback(2500, 3000)).toBe(2500); + expect(parseTimeoutMsWithFallback(2500n, 3000)).toBe(2500); + expect(parseTimeoutMsWithFallback("2500", 3000)).toBe(2500); + }); + + it("falls back on unsupported types by default", () => { + expect(parseTimeoutMsWithFallback({}, 3000)).toBe(3000); + }); + + it("throws on unsupported types when requested", () => { + expect(() => parseTimeoutMsWithFallback({}, 3000, { invalidType: "error" })).toThrow( + "invalid --timeout", + ); + }); + + it("throws on non-positive parsed values", () => { + expect(() => parseTimeoutMsWithFallback("0", 3000)).toThrow("invalid --timeout: 0"); + expect(() => parseTimeoutMsWithFallback("-1", 3000)).toThrow("invalid 
--timeout: -1"); + }); +}); diff --git a/src/cli/parse-timeout.ts b/src/cli/parse-timeout.ts index 090559add6e..139393c0176 100644 --- a/src/cli/parse-timeout.ts +++ b/src/cli/parse-timeout.ts @@ -16,3 +16,39 @@ export function parseTimeoutMs(raw: unknown): number | undefined { } return Number.isFinite(value) ? value : undefined; } + +export function parseTimeoutMsWithFallback( + raw: unknown, + fallbackMs: number, + options: { + invalidType?: "fallback" | "error"; + } = {}, +): number { + if (raw === undefined || raw === null) { + return fallbackMs; + } + + const value = + typeof raw === "string" + ? raw.trim() + : typeof raw === "number" || typeof raw === "bigint" + ? String(raw) + : null; + + if (value === null) { + if (options.invalidType === "error") { + throw new Error("invalid --timeout"); + } + return fallbackMs; + } + + if (!value) { + return fallbackMs; + } + + const parsed = Number.parseInt(value, 10); + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`invalid --timeout: ${value}`); + } + return parsed; +} diff --git a/src/cli/program/help.test.ts b/src/cli/program/help.test.ts index 6acceb5cc41..07b6a8d8f90 100644 --- a/src/cli/program/help.test.ts +++ b/src/cli/program/help.test.ts @@ -90,6 +90,23 @@ describe("configureProgramHelp", () => { } } + function expectVersionExit(params: { expectedVersion: string }) { + const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { + throw new Error(`exit:${code ?? 
""}`); + }) as typeof process.exit); + + try { + const program = makeProgramWithCommands(); + expect(() => configureProgramHelp(program, testProgramContext)).toThrow("exit:0"); + expect(logSpy).toHaveBeenCalledWith(params.expectedVersion); + expect(exitSpy).toHaveBeenCalledWith(0); + } finally { + logSpy.mockRestore(); + exitSpy.mockRestore(); + } + } + it("adds root help hint and marks commands with subcommands", () => { process.argv = ["node", "openclaw", "--help"]; const program = makeProgramWithCommands(); @@ -115,35 +132,12 @@ describe("configureProgramHelp", () => { it("prints version and exits immediately when version flags are present", () => { process.argv = ["node", "openclaw", "--version"]; - const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); - const exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { - throw new Error(`exit:${code ?? ""}`); - }) as typeof process.exit); - - const program = makeProgramWithCommands(); - expect(() => configureProgramHelp(program, testProgramContext)).toThrow("exit:0"); - expect(logSpy).toHaveBeenCalledWith("OpenClaw 9.9.9-test (abc1234)"); - expect(exitSpy).toHaveBeenCalledWith(0); - - logSpy.mockRestore(); - exitSpy.mockRestore(); + expectVersionExit({ expectedVersion: "OpenClaw 9.9.9-test (abc1234)" }); }); it("prints version and exits immediately without commit metadata", () => { process.argv = ["node", "openclaw", "--version"]; resolveCommitHashMock.mockReturnValue(null); - - const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); - const exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { - throw new Error(`exit:${code ?? 
""}`); - }) as typeof process.exit); - - const program = makeProgramWithCommands(); - expect(() => configureProgramHelp(program, testProgramContext)).toThrow("exit:0"); - expect(logSpy).toHaveBeenCalledWith("OpenClaw 9.9.9-test"); - expect(exitSpy).toHaveBeenCalledWith(0); - - logSpy.mockRestore(); - exitSpy.mockRestore(); + expectVersionExit({ expectedVersion: "OpenClaw 9.9.9-test" }); }); }); diff --git a/src/cli/qr-cli.test.ts b/src/cli/qr-cli.test.ts index d77cd1406be..1bc8a645719 100644 --- a/src/cli/qr-cli.test.ts +++ b/src/cli/qr-cli.test.ts @@ -104,6 +104,12 @@ function createLocalGatewayPasswordRefAuth(secretId: string) { }; } +function createLocalGatewayEnvPasswordRefAuth(secretId: string) { + return { + password: { source: "env", provider: "default", id: secretId }, + }; +} + describe("registerQrCli", () => { function createProgram() { const program = new Command(); @@ -129,6 +135,18 @@ describe("registerQrCli", () => { }; } + function expectLoggedSetupCode(url: string) { + const expected = encodePairingSetupCode({ + url, + bootstrapToken: "bootstrap-123", + }); + expect(runtime.log).toHaveBeenCalledWith(expected); + } + + function expectLoggedLocalSetupCode() { + expectLoggedSetupCode("ws://gateway.local:18789"); + } + function mockTailscaleStatusLookup() { runCommandWithTimeout.mockResolvedValue({ code: 0, @@ -198,11 +216,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only", "--token", "override-token"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - bootstrapToken: "bootstrap-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); }); it("skips local password SecretRef resolution when --token override is provided", async () => { @@ -214,11 +228,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only", "--token", "override-token"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - bootstrapToken: 
"bootstrap-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); }); it("resolves local gateway auth password SecretRefs before setup code generation", async () => { @@ -231,11 +241,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - bootstrapToken: "bootstrap-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -249,11 +255,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - bootstrapToken: "bootstrap-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -262,17 +264,13 @@ describe("registerQrCli", () => { createLocalGatewayConfigWithAuth({ mode: "token", token: "token-123", - password: { source: "env", provider: "default", id: "MISSING_LOCAL_GATEWAY_PASSWORD" }, + ...createLocalGatewayEnvPasswordRefAuth("MISSING_LOCAL_GATEWAY_PASSWORD"), }), ); await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - bootstrapToken: "bootstrap-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -280,17 +278,13 @@ describe("registerQrCli", () => { vi.stubEnv("QR_INFERRED_GATEWAY_PASSWORD", "inferred-password"); loadConfig.mockReturnValue( createLocalGatewayConfigWithAuth({ - password: { source: "env", provider: "default", id: "QR_INFERRED_GATEWAY_PASSWORD" }, + ...createLocalGatewayEnvPasswordRefAuth("QR_INFERRED_GATEWAY_PASSWORD"), }), ); await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: 
"ws://gateway.local:18789", - bootstrapToken: "bootstrap-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); diff --git a/src/cli/update-cli.test.ts b/src/cli/update-cli.test.ts index d1713ee0e4c..f2138215327 100644 --- a/src/cli/update-cli.test.ts +++ b/src/cli/update-cli.test.ts @@ -624,12 +624,98 @@ describe("update-cli", () => { expect(runCommandWithTimeout).toHaveBeenCalledWith( [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], - expect.objectContaining({ timeoutMs: 60_000 }), + expect.objectContaining({ cwd: root, timeoutMs: 60_000 }), ); expect(runDaemonInstall).not.toHaveBeenCalled(); expect(runRestartScript).toHaveBeenCalled(); }); + it("updateCommand preserves invocation-relative service env overrides during refresh", async () => { + const root = createCaseDir("openclaw-updated-root"); + const entryPath = path.join(root, "dist", "entry.js"); + pathExists.mockImplementation(async (candidate: string) => candidate === entryPath); + + vi.mocked(runGatewayUpdate).mockResolvedValue({ + status: "ok", + mode: "npm", + root, + steps: [], + durationMs: 100, + }); + serviceLoaded.mockResolvedValue(true); + + await withEnvAsync( + { + OPENCLAW_STATE_DIR: "./state", + OPENCLAW_CONFIG_PATH: "./config/openclaw.json", + }, + async () => { + await updateCommand({}); + }, + ); + + expect(runCommandWithTimeout).toHaveBeenCalledWith( + [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], + expect.objectContaining({ + cwd: root, + env: expect.objectContaining({ + OPENCLAW_STATE_DIR: path.resolve("./state"), + OPENCLAW_CONFIG_PATH: path.resolve("./config/openclaw.json"), + }), + timeoutMs: 60_000, + }), + ); + expect(runDaemonInstall).not.toHaveBeenCalled(); + }); + + it("updateCommand reuses the captured invocation cwd when process.cwd later fails", async () => { + const root = 
createCaseDir("openclaw-updated-root"); + const entryPath = path.join(root, "dist", "entry.js"); + pathExists.mockImplementation(async (candidate: string) => candidate === entryPath); + + const originalCwd = process.cwd(); + let restoreCwd: (() => void) | undefined; + vi.mocked(runGatewayUpdate).mockImplementation(async () => { + const cwdSpy = vi.spyOn(process, "cwd").mockImplementation(() => { + throw new Error("ENOENT: current working directory is gone"); + }); + restoreCwd = () => cwdSpy.mockRestore(); + return { + status: "ok", + mode: "npm", + root, + steps: [], + durationMs: 100, + }; + }); + serviceLoaded.mockResolvedValue(true); + + try { + await withEnvAsync( + { + OPENCLAW_STATE_DIR: "./state", + }, + async () => { + await updateCommand({}); + }, + ); + } finally { + restoreCwd?.(); + } + + expect(runCommandWithTimeout).toHaveBeenCalledWith( + [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], + expect.objectContaining({ + cwd: root, + env: expect.objectContaining({ + OPENCLAW_STATE_DIR: path.resolve(originalCwd, "./state"), + }), + timeoutMs: 60_000, + }), + ); + expect(runDaemonInstall).not.toHaveBeenCalled(); + }); + it("updateCommand falls back to restart when env refresh install fails", async () => { await runRestartFallbackScenario({ daemonInstall: "fail" }); }); diff --git a/src/cli/update-cli/restart-helper.test.ts b/src/cli/update-cli/restart-helper.test.ts index c8b59d69afa..847893e9f23 100644 --- a/src/cli/update-cli/restart-helper.test.ts +++ b/src/cli/update-cli/restart-helper.test.ts @@ -287,6 +287,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("/bin/sh", [scriptPath], { detached: true, stdio: "ignore", + windowsHide: true, }); expect(mockChild.unref).toHaveBeenCalled(); }); @@ -302,6 +303,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", scriptPath], { detached: true, stdio: "ignore", + windowsHide: true, }); 
expect(mockChild.unref).toHaveBeenCalled(); }); @@ -317,6 +319,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", `"${scriptPath}"`], { detached: true, stdio: "ignore", + windowsHide: true, }); }); }); diff --git a/src/cli/update-cli/restart-helper.ts b/src/cli/update-cli/restart-helper.ts index c27f25cdc49..a68fab161fa 100644 --- a/src/cli/update-cli/restart-helper.ts +++ b/src/cli/update-cli/restart-helper.ts @@ -169,6 +169,7 @@ export async function runRestartScript(scriptPath: string): Promise { const child = spawn(file, args, { detached: true, stdio: "ignore", + windowsHide: true, }); child.unref(); } diff --git a/src/cli/update-cli/update-command.ts b/src/cli/update-cli/update-command.ts index 6063eb5f163..b94fbd4ffb9 100644 --- a/src/cli/update-cli/update-command.ts +++ b/src/cli/update-cli/update-command.ts @@ -69,6 +69,13 @@ import { suppressDeprecations } from "./suppress-deprecations.js"; const CLI_NAME = resolveCliName(); const SERVICE_REFRESH_TIMEOUT_MS = 60_000; +const SERVICE_REFRESH_PATH_ENV_KEYS = [ + "OPENCLAW_HOME", + "OPENCLAW_STATE_DIR", + "CLAWDBOT_STATE_DIR", + "OPENCLAW_CONFIG_PATH", + "CLAWDBOT_CONFIG_PATH", +] as const; const UPDATE_QUIPS = [ "Leveled up! New skills unlocked. 
You're welcome.", @@ -117,6 +124,37 @@ function formatCommandFailure(stdout: string, stderr: string): string { return detail.split("\n").slice(-3).join("\n"); } +function tryResolveInvocationCwd(): string | undefined { + try { + return process.cwd(); + } catch { + return undefined; + } +} + +function resolveServiceRefreshEnv( + env: NodeJS.ProcessEnv, + invocationCwd?: string, +): NodeJS.ProcessEnv { + const resolvedEnv: NodeJS.ProcessEnv = { ...env }; + for (const key of SERVICE_REFRESH_PATH_ENV_KEYS) { + const rawValue = resolvedEnv[key]?.trim(); + if (!rawValue) { + continue; + } + if (rawValue.startsWith("~") || path.isAbsolute(rawValue) || path.win32.isAbsolute(rawValue)) { + resolvedEnv[key] = rawValue; + continue; + } + if (!invocationCwd) { + resolvedEnv[key] = rawValue; + continue; + } + resolvedEnv[key] = path.resolve(invocationCwd, rawValue); + } + return resolvedEnv; +} + type UpdateDryRunPreview = { dryRun: true; root: string; @@ -179,6 +217,7 @@ function printDryRunPreview(preview: UpdateDryRunPreview, jsonMode: boolean): vo async function refreshGatewayServiceEnv(params: { result: UpdateRunResult; jsonMode: boolean; + invocationCwd?: string; }): Promise { const args = ["gateway", "install", "--force"]; if (params.jsonMode) { @@ -190,6 +229,8 @@ async function refreshGatewayServiceEnv(params: { continue; } const res = await runCommandWithTimeout([resolveNodeRunner(), candidate, ...args], { + cwd: params.result.root, + env: resolveServiceRefreshEnv(process.env, params.invocationCwd), timeoutMs: SERVICE_REFRESH_TIMEOUT_MS, }); if (res.code === 0) { @@ -519,6 +560,7 @@ async function maybeRestartService(params: { refreshServiceEnv: boolean; gatewayPort: number; restartScriptPath?: string | null; + invocationCwd?: string; }): Promise { if (params.shouldRestart) { if (!params.opts.json) { @@ -534,6 +576,7 @@ async function maybeRestartService(params: { await refreshGatewayServiceEnv({ result: params.result, jsonMode: Boolean(params.opts.json), + 
invocationCwd: params.invocationCwd, }); } catch (err) { if (!params.opts.json) { @@ -639,6 +682,7 @@ async function maybeRestartService(params: { export async function updateCommand(opts: UpdateCommandOptions): Promise { suppressDeprecations(); + const invocationCwd = tryResolveInvocationCwd(); const timeoutMs = parseTimeoutMsOrExit(opts.timeout); const shouldRestart = opts.restart !== false; @@ -921,6 +965,7 @@ export async function updateCommand(opts: UpdateCommandOptions): Promise { refreshServiceEnv: refreshGatewayServiceEnv, gatewayPort, restartScriptPath, + invocationCwd, }); if (!opts.json) { diff --git a/src/commands/agent.acp.test.ts b/src/commands/agent.acp.test.ts index ab8c9da8a6e..4ad423dcf18 100644 --- a/src/commands/agent.acp.test.ts +++ b/src/commands/agent.acp.test.ts @@ -171,6 +171,61 @@ function subscribeAssistantEvents() { return { assistantEvents, stop }; } +async function runAcpTurnWithAssistantEvents(chunks: string[]) { + const { assistantEvents, stop } = subscribeAssistantEvents(); + const runTurn = createRunTurnFromTextDeltas(chunks); + + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + }); + + try { + await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); + } finally { + stop(); + } + + const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); + return { assistantEvents, logLines }; +} + +async function runAcpTurnWithTextDeltas(params: { message?: string; chunks: string[] }) { + const runTurn = createRunTurnFromTextDeltas(params.chunks); + mockAcpManager({ + runTurn: (input: unknown) => runTurn(input), + }); + await agentCommand( + { + message: params.message ?? 
"ping", + sessionKey: "agent:codex:acp:test", + }, + runtime, + ); + return { runTurn }; +} + +function expectPersistedAcpTranscript(params: { + storePath: string; + userContent: string; + assistantText: string; +}) { + const persistedStore = JSON.parse(fs.readFileSync(params.storePath, "utf-8")) as Record< + string, + { sessionFile?: string } + >; + const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile; + const messages = readSessionMessages("acp-session-1", params.storePath, sessionFile); + expect(messages).toHaveLength(2); + expect(messages[0]).toMatchObject({ + role: "user", + content: params.userContent, + }); + expect(messages[1]).toMatchObject({ + role: "assistant", + content: [{ type: "text", text: params.assistantText }], + }); +} + async function runAcpSessionWithPolicyOverrides(params: { acpOverrides: Partial>; resolveSession?: Parameters[0]["resolveSession"]; @@ -209,13 +264,7 @@ describe("agentCommand ACP runtime routing", () => { it("routes ACP sessions through AcpSessionManager instead of embedded agent", async () => { await withAcpSessionEnv(async () => { - const runTurn = createRunTurnFromTextDeltas(["ACP_", "OK"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); + const { runTurn } = await runAcpTurnWithTextDeltas({ chunks: ["ACP_", "OK"] }); expect(runTurn).toHaveBeenCalledWith( expect.objectContaining({ @@ -234,64 +283,32 @@ describe("agentCommand ACP runtime routing", () => { it("persists ACP child session history to the transcript store", async () => { await withAcpSessionEnvInfo(async ({ storePath }) => { - const runTurn = createRunTurnFromTextDeltas(["ACP_", "OK"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - - const persistedStore = JSON.parse(fs.readFileSync(storePath, "utf-8")) as 
Record< - string, - { sessionFile?: string } - >; - const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile; - const messages = readSessionMessages("acp-session-1", storePath, sessionFile); - expect(messages).toHaveLength(2); - expect(messages[0]).toMatchObject({ - role: "user", - content: "ping", - }); - expect(messages[1]).toMatchObject({ - role: "assistant", - content: [{ type: "text", text: "ACP_OK" }], + await runAcpTurnWithTextDeltas({ chunks: ["ACP_", "OK"] }); + expectPersistedAcpTranscript({ + storePath, + userContent: "ping", + assistantText: "ACP_OK", }); }); }); it("preserves exact ACP transcript text without trimming whitespace", async () => { await withAcpSessionEnvInfo(async ({ storePath }) => { - const runTurn = createRunTurnFromTextDeltas([" ACP_OK\n"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), + await runAcpTurnWithTextDeltas({ + message: " ping\n", + chunks: [" ACP_OK\n"], }); - - await agentCommand({ message: " ping\n", sessionKey: "agent:codex:acp:test" }, runtime); - - const persistedStore = JSON.parse(fs.readFileSync(storePath, "utf-8")) as Record< - string, - { sessionFile?: string } - >; - const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile; - const messages = readSessionMessages("acp-session-1", storePath, sessionFile); - expect(messages).toHaveLength(2); - expect(messages[0]).toMatchObject({ - role: "user", - content: " ping\n", - }); - expect(messages[1]).toMatchObject({ - role: "assistant", - content: [{ type: "text", text: " ACP_OK\n" }], + expectPersistedAcpTranscript({ + storePath, + userContent: " ping\n", + assistantText: " ACP_OK\n", }); }); }); it("suppresses ACP NO_REPLY lead fragments before emitting assistant text", async () => { await withAcpSessionEnv(async () => { - const { assistantEvents, stop } = subscribeAssistantEvents(); - const runTurn = createRunTurnFromTextDeltas([ + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([ "NO", 
"NO_", "NO_RE", @@ -299,19 +316,7 @@ describe("agentCommand ACP runtime routing", () => { "Actual answer", ]); - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } - expect(assistantEvents).toEqual([{ text: "Actual answer", delta: "Actual answer" }]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); expect(logLines.some((line) => line.includes("NO_REPLY"))).toBe(false); expect(logLines.some((line) => line.includes("Actual answer"))).toBe(true); }); @@ -319,31 +324,13 @@ describe("agentCommand ACP runtime routing", () => { it("keeps silent-only ACP turns out of assistant output", async () => { await withAcpSessionEnv(async () => { - const assistantEvents: string[] = []; - const stop = onAgentEvent((evt) => { - if (evt.stream !== "assistant") { - return; - } - if (typeof evt.data?.text === "string") { - assistantEvents.push(evt.data.text); - } - }); - - const runTurn = createRunTurnFromTextDeltas(["NO", "NO_", "NO_RE", "NO_REPLY"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } - - expect(assistantEvents).toEqual([]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([ + "NO", + "NO_", + "NO_RE", + "NO_REPLY", + ]); + expect(assistantEvents.map((event) => event.text).filter(Boolean)).toEqual([]); expect(logLines.some((line) => line.includes("NO_REPLY"))).toBe(false); expect(logLines.some((line) => line.includes("No reply from agent."))).toBe(true); }); @@ -351,18 +338,12 @@ describe("agentCommand ACP runtime routing", () => { it("preserves repeated identical ACP delta chunks", async () => { await withAcpSessionEnv(async 
() => { - const { assistantEvents, stop } = subscribeAssistantEvents(); - const runTurn = createRunTurnFromTextDeltas(["b", "o", "o", "k"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([ + "b", + "o", + "o", + "k", + ]); expect(assistantEvents).toEqual([ { text: "b", delta: "b" }, @@ -370,30 +351,15 @@ describe("agentCommand ACP runtime routing", () => { { text: "boo", delta: "o" }, { text: "book", delta: "k" }, ]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); expect(logLines.some((line) => line.includes("book"))).toBe(true); }); }); it("re-emits buffered NO prefix when ACP text becomes visible content", async () => { await withAcpSessionEnv(async () => { - const { assistantEvents, stop } = subscribeAssistantEvents(); - const runTurn = createRunTurnFromTextDeltas(["NO", "W"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents(["NO", "W"]); expect(assistantEvents).toEqual([{ text: "NOW", delta: "NOW" }]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); expect(logLines.some((line) => line.includes("NOW"))).toBe(true); }); }); diff --git a/src/commands/backup-verify.test.ts b/src/commands/backup-verify.test.ts index 9288d2fb8c1..a5f0384e61b 100644 --- a/src/commands/backup-verify.test.ts +++ b/src/commands/backup-verify.test.ts @@ -8,6 +8,92 @@ import { buildBackupArchiveRoot } from "./backup-shared.js"; import { backupVerifyCommand } from "./backup-verify.js"; import { backupCreateCommand } from "./backup.js"; +const 
TEST_ARCHIVE_ROOT = "2026-03-09T00-00-00.000Z-openclaw-backup"; + +const createBackupVerifyRuntime = () => ({ + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), +}); + +function createBackupManifest(assetArchivePath: string) { + return { + schemaVersion: 1, + createdAt: "2026-03-09T00:00:00.000Z", + archiveRoot: TEST_ARCHIVE_ROOT, + runtimeVersion: "test", + platform: process.platform, + nodeVersion: process.version, + assets: [ + { + kind: "state", + sourcePath: "/tmp/.openclaw", + archivePath: assetArchivePath, + }, + ], + }; +} + +async function withBrokenArchiveFixture( + options: { + tempPrefix: string; + manifestAssetArchivePath: string; + payloads: Array<{ fileName: string; contents: string; archivePath?: string }>; + buildTarEntries?: (paths: { manifestPath: string; payloadPaths: string[] }) => string[]; + }, + run: (archivePath: string) => Promise, +) { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), options.tempPrefix)); + const archivePath = path.join(tempDir, "broken.tar.gz"); + const manifestPath = path.join(tempDir, "manifest.json"); + const payloadSpecs = await Promise.all( + options.payloads.map(async (payload) => { + const payloadPath = path.join(tempDir, payload.fileName); + await fs.writeFile(payloadPath, payload.contents, "utf8"); + return { + path: payloadPath, + archivePath: payload.archivePath ?? 
options.manifestAssetArchivePath, + }; + }), + ); + const payloadEntryPathBySource = new Map( + payloadSpecs.map((payload) => [payload.path, payload.archivePath]), + ); + + try { + await fs.writeFile( + manifestPath, + `${JSON.stringify(createBackupManifest(options.manifestAssetArchivePath), null, 2)}\n`, + "utf8", + ); + await tar.c( + { + file: archivePath, + gzip: true, + portable: true, + preservePaths: true, + onWriteEntry: (entry) => { + if (entry.path === manifestPath) { + entry.path = `${TEST_ARCHIVE_ROOT}/manifest.json`; + return; + } + const payloadEntryPath = payloadEntryPathBySource.get(entry.path); + if (payloadEntryPath) { + entry.path = payloadEntryPath; + } + }, + }, + options.buildTarEntries?.({ + manifestPath, + payloadPaths: payloadSpecs.map((payload) => payload.path), + }) ?? [manifestPath, ...payloadSpecs.map((payload) => payload.path)], + ); + await run(archivePath); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } +} + describe("backupVerifyCommand", () => { let tempHome: TempHomeEnv; @@ -26,12 +112,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "hello\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const created = await backupCreateCommand(runtime, { output: archiveDir, nowMs }); const verified = await backupVerifyCommand(runtime, { archive: created.archivePath }); @@ -53,12 +134,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(root, "payload", "data.txt"), "x\n", "utf8"); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, ["root"]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: 
archivePath })).rejects.toThrow( /expected exactly one backup manifest entry/i, ); @@ -95,12 +171,7 @@ describe("backupVerifyCommand", () => { ); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, [rootName]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( /missing payload for manifest asset/i, ); @@ -110,119 +181,37 @@ describe("backupVerifyCommand", () => { }); it("fails when archive paths contain traversal segments", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-traversal-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const traversalPath = `${rootName}/payload/../escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: traversalPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = traversalPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /path traversal segments/i, - ); - } 
finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-traversal-", + manifestAssetArchivePath: traversalPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /path traversal segments/i, + ); + }, + ); }); it("fails when archive paths contain backslashes", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-backslash-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const invalidPath = `${rootName}/payload\\..\\escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: invalidPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = invalidPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /forward 
slashes/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-backslash-", + manifestAssetArchivePath: invalidPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /forward slashes/i, + ); + }, + ); }); it("ignores payload manifest.json files when locating the backup manifest", async () => { @@ -251,12 +240,7 @@ describe("backupVerifyCommand", () => { "utf8", ); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const created = await backupCreateCommand(runtime, { output: archiveDir, includeWorkspace: true, @@ -274,119 +258,44 @@ describe("backupVerifyCommand", () => { }); it("fails when the archive contains duplicate root manifest entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-manifest-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: `${rootName}/payload/posix/tmp/.openclaw/payload.txt`, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-manifest-", + 
manifestAssetArchivePath: payloadArchivePath, + payloads: [{ fileName: "payload.txt", contents: "payload\n" }], + buildTarEntries: ({ manifestPath, payloadPaths }) => [ + manifestPath, + manifestPath, + ...payloadPaths, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - } - }, - }, - [manifestPath, manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /expected exactly one backup manifest entry, found 2/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /expected exactly one backup manifest entry, found 2/i, + ); + }, + ); }); it("fails when the archive contains duplicate payload entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-payload-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPathA = path.join(tempDir, "payload-a.txt"); - const payloadPathB = path.join(tempDir, "payload-b.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const payloadArchivePath = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - 
runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: payloadArchivePath, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-payload-", + manifestAssetArchivePath: payloadArchivePath, + payloads: [ + { fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath }, + { fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath }, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPathA, "payload-a\n", "utf8"); - await fs.writeFile(payloadPathB, "payload-b\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPathA || entry.path === payloadPathB) { - entry.path = payloadArchivePath; - } - }, - }, - [manifestPath, payloadPathA, payloadPathB], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /duplicate entry path/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /duplicate entry path/i, + ); + }, + ); }); }); diff --git a/src/commands/backup.test.ts b/src/commands/backup.test.ts index 349714e4d15..decc55e6c05 100644 --- a/src/commands/backup.test.ts +++ b/src/commands/backup.test.ts @@ -3,6 +3,7 @@ import os from "node:os"; import path from "node:path"; import * as tar from "tar"; import { 
afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js"; import { buildBackupArchiveRoot, @@ -41,6 +42,39 @@ describe("backup commands", () => { await tempHome.restore(); }); + function createRuntime(): RuntimeEnv { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } satisfies RuntimeEnv; + } + + async function withInvalidWorkspaceBackupConfig(fn: (runtime: RuntimeEnv) => Promise) { + const stateDir = path.join(tempHome.home, ".openclaw"); + const configPath = path.join(tempHome.home, "custom-config.json"); + process.env.OPENCLAW_CONFIG_PATH = configPath; + await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); + await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); + const runtime = createRuntime(); + + try { + return await fn(runtime); + } finally { + delete process.env.OPENCLAW_CONFIG_PATH; + } + } + + function expectWorkspaceCoveredByState( + plan: Awaited>, + ) { + expect(plan.included).toHaveLength(1); + expect(plan.included[0]?.kind).toBe("state"); + expect(plan.skipped).toEqual( + expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), + ); + } + it("collapses default config, credentials, and workspace into the state backup root", async () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); @@ -50,12 +84,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "workspace", "SOUL.md"), "# soul\n", "utf8"); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" 
})]), - ); + expectWorkspaceCoveredByState(plan); }); it("orders coverage checks by canonical path so symlinked workspaces do not duplicate state", async () => { @@ -84,12 +113,7 @@ describe("backup commands", () => { ); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); } finally { await fs.rm(symlinkDir, { recursive: true, force: true }); } @@ -116,11 +140,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); await fs.writeFile(path.join(externalWorkspace, "SOUL.md"), "# external\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const result = await backupCreateCommand(runtime, { @@ -189,11 +209,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { output: archiveDir, @@ -214,11 +230,7 @@ describe("backup commands", () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); await expect( backupCreateCommand(runtime, { @@ -239,11 +251,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.symlink(stateDir, 
symlinkPath); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); await expect( backupCreateCommand(runtime, { @@ -263,11 +271,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(workspaceDir, "SOUL.md"), "# soul\n", "utf8"); process.chdir(workspaceDir); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 1, 2, 3); const result = await backupCreateCommand(runtime, { nowMs }); @@ -294,11 +298,7 @@ describe("backup commands", () => { await fs.symlink(workspaceDir, workspaceLink); process.chdir(workspaceLink); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 1, 3, 4); const result = await backupCreateCommand(runtime, { nowMs }); @@ -318,11 +318,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(existingArchive, "already here", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { output: existingArchive, @@ -336,41 +332,15 @@ describe("backup commands", () => { }); it("fails fast when config is invalid and workspace backup is enabled", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { await expect(backupCreateCommand(runtime, { dryRun: true 
})).rejects.toThrow( /--no-include-workspace/i, ); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("allows explicit partial backups when config is invalid", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { const result = await backupCreateCommand(runtime, { dryRun: true, includeWorkspace: false, @@ -378,9 +348,7 @@ describe("backup commands", () => { expect(result.includeWorkspace).toBe(false); expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("backs up only the active config file when --only-config is requested", async () => { @@ -391,11 +359,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); await fs.writeFile(path.join(stateDir, "credentials", "oauth.json"), "{}", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { dryRun: true, @@ -413,11 +377,7 @@ describe("backup commands", () => { process.env.OPENCLAW_CONFIG_PATH = configPath; await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); try { const result = await backupCreateCommand(runtime, { diff --git a/src/commands/channels.config-only-status-output.test.ts b/src/commands/channels.config-only-status-output.test.ts 
index 89ff1cc2614..7019c84bb3a 100644 --- a/src/commands/channels.config-only-status-output.test.ts +++ b/src/commands/channels.config-only-status-output.test.ts @@ -5,24 +5,60 @@ import { makeDirectPlugin } from "../test-utils/channel-plugin-test-fixtures.js" import { createTestRegistry } from "../test-utils/channel-plugins.js"; import { formatConfigChannelsStatusLines } from "./channels/status.js"; +function registerSingleTestPlugin(pluginId: string, plugin: ChannelPlugin) { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId, + source: "test", + plugin, + }, + ]), + ); +} + +async function formatLocalStatusSummary( + cfg: unknown, + options?: { + sourceConfig?: unknown; + }, +) { + const lines = await formatConfigChannelsStatusLines( + cfg as never, + { mode: "local" }, + options?.sourceConfig ? { sourceConfig: options.sourceConfig as never } : undefined, + ); + return lines.join("\n"); +} + +function unresolvedTokenAccount() { + return { + name: "Primary", + enabled: true, + configured: true, + token: "", + tokenSource: "config", + tokenStatus: "configured_unavailable", + } as const; +} + +function tokenOnlyPluginConfig() { + return { + listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + isConfigured: () => true, + isEnabled: () => true, + } as const; +} + function makeUnavailableTokenPlugin(): ChannelPlugin { return makeDirectPlugin({ id: "token-only", label: "TokenOnly", docsPath: "/channels/token-only", config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - resolveAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - token: "", - tokenSource: "config", - tokenStatus: "configured_unavailable", - }), - isConfigured: () => true, - isEnabled: () => true, + ...tokenOnlyPluginConfig(), + resolveAccount: () => unresolvedTokenAccount(), }, }); } @@ -33,8 +69,7 @@ function makeResolvedTokenPlugin(): ChannelPlugin { label: "TokenOnly", docsPath: "/channels/token-only", config: { - 
listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", + ...tokenOnlyPluginConfig(), inspectAccount: (cfg) => (cfg as { secretResolved?: boolean }).secretResolved ? { @@ -46,25 +81,8 @@ function makeResolvedTokenPlugin(): ChannelPlugin { tokenSource: "config", tokenStatus: "available", } - : { - accountId: "primary", - name: "Primary", - enabled: true, - configured: true, - token: "", - tokenSource: "config", - tokenStatus: "configured_unavailable", - }, - resolveAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - token: "", - tokenSource: "config", - tokenStatus: "configured_unavailable", - }), - isConfigured: () => true, - isEnabled: () => true, + : { accountId: "primary", ...unresolvedTokenAccount() }, + resolveAccount: () => unresolvedTokenAccount(), }, }); } @@ -156,92 +174,42 @@ describe("config-only channels status output", () => { }); it("shows configured-but-unavailable credentials distinctly from not configured", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "token-only", - source: "test", - plugin: makeUnavailableTokenPlugin(), - }, - ]), - ); + registerSingleTestPlugin("token-only", makeUnavailableTokenPlugin()); - const lines = await formatConfigChannelsStatusLines({ channels: {} } as never, { - mode: "local", - }); - - const joined = lines.join("\n"); + const joined = await formatLocalStatusSummary({ channels: {} }); expect(joined).toContain("TokenOnly"); expect(joined).toContain("configured, secret unavailable in this command path"); expect(joined).toContain("token:config (unavailable)"); }); it("prefers resolved config snapshots when command-local secret resolution succeeds", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "token-only", - source: "test", - plugin: makeResolvedTokenPlugin(), - }, - ]), - ); + registerSingleTestPlugin("token-only", makeResolvedTokenPlugin()); - const lines = await formatConfigChannelsStatusLines( - { 
secretResolved: true, channels: {} } as never, + const joined = await formatLocalStatusSummary( + { secretResolved: true, channels: {} }, { - mode: "local", - }, - { - sourceConfig: { channels: {} } as never, + sourceConfig: { channels: {} }, }, ); - - const joined = lines.join("\n"); expectResolvedTokenStatusSummary(joined, { includeUnavailableTokenLine: false }); }); it("does not resolve raw source config for extension channels without inspectAccount", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "token-only", - source: "test", - plugin: makeResolvedTokenPluginWithoutInspectAccount(), - }, - ]), - ); + registerSingleTestPlugin("token-only", makeResolvedTokenPluginWithoutInspectAccount()); - const lines = await formatConfigChannelsStatusLines( - { secretResolved: true, channels: {} } as never, + const joined = await formatLocalStatusSummary( + { secretResolved: true, channels: {} }, { - mode: "local", - }, - { - sourceConfig: { channels: {} } as never, + sourceConfig: { channels: {} }, }, ); - - const joined = lines.join("\n"); expectResolvedTokenStatusSummary(joined); }); it("renders Slack HTTP signing-secret availability in config-only status", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "slack", - source: "test", - plugin: makeUnavailableHttpSlackPlugin(), - }, - ]), - ); + registerSingleTestPlugin("slack", makeUnavailableHttpSlackPlugin()); - const lines = await formatConfigChannelsStatusLines({ channels: {} } as never, { - mode: "local", - }); - - const joined = lines.join("\n"); + const joined = await formatLocalStatusSummary({ channels: {} }); expect(joined).toContain("Slack"); expect(joined).toContain("configured, secret unavailable in this command path"); expect(joined).toContain("mode:http"); diff --git a/src/commands/channels/add.ts b/src/commands/channels/add.ts index 882e7f16ca5..ebf80e6a735 100644 --- a/src/commands/channels/add.ts +++ b/src/commands/channels/add.ts @@ -1,5 +1,6 
@@ import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "../../agents/agent-scope.js"; import { listChannelPluginCatalogEntries } from "../../channels/plugins/catalog.js"; +import { parseOptionalDelimitedEntries } from "../../channels/plugins/helpers.js"; import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; import { moveSingleAccountChannelSectionToDefaultAccount } from "../../channels/plugins/setup-helpers.js"; import type { ChannelId, ChannelSetupInput } from "../../channels/plugins/types.js"; @@ -28,17 +29,6 @@ export type ChannelsAddOptions = { dmAllowlist?: string; } & Omit; -function parseList(value: string | undefined): string[] | undefined { - if (!value?.trim()) { - return undefined; - } - const parsed = value - .split(/[\n,;]+/g) - .map((entry) => entry.trim()) - .filter(Boolean); - return parsed.length > 0 ? parsed : undefined; -} - function resolveCatalogChannelEntry(raw: string, cfg: OpenClawConfig | null) { const trimmed = raw.trim().toLowerCase(); if (!trimmed) { @@ -225,8 +215,8 @@ export async function channelsAddCommand( : typeof opts.initialSyncLimit === "string" && opts.initialSyncLimit.trim() ? 
Number.parseInt(opts.initialSyncLimit, 10) : undefined; - const groupChannels = parseList(opts.groupChannels); - const dmAllowlist = parseList(opts.dmAllowlist); + const groupChannels = parseOptionalDelimitedEntries(opts.groupChannels); + const dmAllowlist = parseOptionalDelimitedEntries(opts.dmAllowlist); const input: ChannelSetupInput = { name: opts.name, diff --git a/src/commands/daemon-install-helpers.test.ts b/src/commands/daemon-install-helpers.test.ts index 704c193880c..931a983a8ee 100644 --- a/src/commands/daemon-install-helpers.test.ts +++ b/src/commands/daemon-install-helpers.test.ts @@ -1,6 +1,7 @@ import { afterEach, describe, expect, it, vi } from "vitest"; const mocks = vi.hoisted(() => ({ + loadAuthProfileStoreForSecretsRuntime: vi.fn(), resolvePreferredNodePath: vi.fn(), resolveGatewayProgramArguments: vi.fn(), resolveSystemNodeInfo: vi.fn(), @@ -8,6 +9,10 @@ const mocks = vi.hoisted(() => ({ buildServiceEnvironment: vi.fn(), })); +vi.mock("../agents/auth-profiles.js", () => ({ + loadAuthProfileStoreForSecretsRuntime: mocks.loadAuthProfileStoreForSecretsRuntime, +})); + vi.mock("../daemon/runtime-paths.js", () => ({ resolvePreferredNodePath: mocks.resolvePreferredNodePath, resolveSystemNodeInfo: mocks.resolveSystemNodeInfo, @@ -63,6 +68,10 @@ function mockNodeGatewayPlanFixture( programArguments: ["node", "gateway"], workingDirectory, }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: {}, + }); mocks.resolveSystemNodeInfo.mockResolvedValue({ path: "/opt/node", version, @@ -232,6 +241,67 @@ describe("buildGatewayInstallPlan", () => { expect(plan.environment.HOME).toBe("/Users/service"); expect(plan.environment.OPENCLAW_PORT).toBe("3000"); }); + + it("merges env-backed auth-profile refs into the service environment", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: 
{ + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + "anthropic:default": { + type: "token", + provider: "anthropic", + tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: { + OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret + ANTHROPIC_TOKEN: "ant-test-token", + }, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test"); + expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token"); + }); + + it("skips unresolved auth-profile env refs", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: {}, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBeUndefined(); + }); }); describe("gatewayInstallErrorHint", () => { diff --git a/src/commands/daemon-install-helpers.ts b/src/commands/daemon-install-helpers.ts index 7a3bd42e2fc..91248cb86a7 100644 --- a/src/commands/daemon-install-helpers.ts +++ b/src/commands/daemon-install-helpers.ts @@ -1,3 +1,7 @@ +import { + loadAuthProfileStoreForSecretsRuntime, + type AuthProfileStore, +} from "../agents/auth-profiles.js"; import { formatCliCommand } from "../cli/command-format.js"; import { collectConfigServiceEnvVars } from "../config/env-vars.js"; import type { OpenClawConfig } from "../config/types.js"; @@ -19,6 +23,33 @@ export type GatewayInstallPlan = { environment: Record; }; +function collectAuthProfileServiceEnvVars(params: { + env: Record; + authStore?: AuthProfileStore; +}): Record { + const 
authStore = params.authStore ?? loadAuthProfileStoreForSecretsRuntime(); + const entries: Record = {}; + + for (const credential of Object.values(authStore.profiles)) { + const ref = + credential.type === "api_key" + ? credential.keyRef + : credential.type === "token" + ? credential.tokenRef + : undefined; + if (!ref || ref.source !== "env") { + continue; + } + const value = params.env[ref.id]?.trim(); + if (!value) { + continue; + } + entries[ref.id] = value; + } + + return entries; +} + export async function buildGatewayInstallPlan(params: { env: Record; port: number; @@ -28,6 +59,7 @@ export async function buildGatewayInstallPlan(params: { warn?: DaemonInstallWarnFn; /** Full config to extract env vars from (env vars + inline env keys). */ config?: OpenClawConfig; + authStore?: AuthProfileStore; }): Promise { const { devMode, nodePath } = await resolveDaemonInstallRuntimeInputs({ env: params.env, @@ -61,6 +93,10 @@ export async function buildGatewayInstallPlan(params: { // Config env vars are added first so service-specific vars take precedence. 
const environment: Record = { ...collectConfigServiceEnvVars(params.config), + ...collectAuthProfileServiceEnvVars({ + env: params.env, + authStore: params.authStore, + }), }; Object.assign(environment, serviceEnvironment); diff --git a/src/commands/doctor-cron.test.ts b/src/commands/doctor-cron.test.ts index e7af38f662c..3ad4f2811ed 100644 --- a/src/commands/doctor-cron.test.ts +++ b/src/commands/doctor-cron.test.ts @@ -27,44 +27,55 @@ function makePrompter(confirmResult = true) { }; } +function createCronConfig(storePath: string): OpenClawConfig { + return { + cron: { + store: storePath, + webhook: "https://example.invalid/cron-finished", + }, + }; +} + +function createLegacyCronJob(overrides: Record = {}) { + return { + jobId: "legacy-job", + name: "Legacy job", + notify: true, + createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), + updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), + schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, + payload: { + kind: "systemEvent", + text: "Morning brief", + }, + state: {}, + ...overrides, + }; +} + +async function writeCronStore(storePath: string, jobs: Array>) { + await fs.mkdir(path.dirname(storePath), { recursive: true }); + await fs.writeFile( + storePath, + JSON.stringify( + { + version: 1, + jobs, + }, + null, + 2, + ), + "utf-8", + ); +} + describe("maybeRepairLegacyCronStore", () => { it("repairs legacy cron store fields and migrates notify fallback to webhook delivery", async () => { const storePath = await makeTempStorePath(); - await fs.mkdir(path.dirname(storePath), { recursive: true }); - await fs.writeFile( - storePath, - JSON.stringify( - { - version: 1, - jobs: [ - { - jobId: "legacy-job", - name: "Legacy job", - notify: true, - createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), - updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), - schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, - payload: { - kind: "systemEvent", - text: "Morning brief", - }, - state: {}, - }, - ], - }, - null, - 
2, - ), - "utf-8", - ); + await writeCronStore(storePath, [createLegacyCronJob()]); const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); - const cfg: OpenClawConfig = { - cron: { - store: storePath, - webhook: "https://example.invalid/cron-finished", - }, - }; + const cfg = createCronConfig(storePath); await maybeRepairLegacyCronStore({ cfg, @@ -158,44 +169,13 @@ describe("maybeRepairLegacyCronStore", () => { it("does not auto-repair in non-interactive mode without explicit repair approval", async () => { const storePath = await makeTempStorePath(); - await fs.mkdir(path.dirname(storePath), { recursive: true }); - await fs.writeFile( - storePath, - JSON.stringify( - { - version: 1, - jobs: [ - { - jobId: "legacy-job", - name: "Legacy job", - notify: true, - createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), - updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), - schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, - payload: { - kind: "systemEvent", - text: "Morning brief", - }, - state: {}, - }, - ], - }, - null, - 2, - ), - "utf-8", - ); + await writeCronStore(storePath, [createLegacyCronJob()]); const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); const prompter = makePrompter(false); await maybeRepairLegacyCronStore({ - cfg: { - cron: { - store: storePath, - webhook: "https://example.invalid/cron-finished", - }, - }, + cfg: createCronConfig(storePath), options: { nonInteractive: true }, prompter, }); diff --git a/src/commands/doctor-state-migrations.test.ts b/src/commands/doctor-state-migrations.test.ts index 4116a6fca6e..ec465632cfa 100644 --- a/src/commands/doctor-state-migrations.test.ts +++ b/src/commands/doctor-state-migrations.test.ts @@ -26,6 +26,32 @@ async function makeRootWithEmptyCfg() { return { root, cfg }; } +function writeLegacyTelegramAllowFromStore(oauthDir: string) { + fs.writeFileSync( + path.join(oauthDir, "telegram-allowFrom.json"), + JSON.stringify( + { + version: 1, + allowFrom: 
["123456"], + }, + null, + 2, + ) + "\n", + "utf-8", + ); +} + +async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenClawConfig }) { + const oauthDir = ensureCredentialsDir(params.root); + writeLegacyTelegramAllowFromStore(oauthDir); + const detected = await detectLegacyStateMigrations({ + cfg: params.cfg, + env: { OPENCLAW_STATE_DIR: params.root } as NodeJS.ProcessEnv, + }); + const result = await runLegacyStateMigrations({ detected, now: () => 123 }); + return { oauthDir, detected, result }; +} + afterEach(async () => { resetAutoMigrateLegacyStateForTest(); resetAutoMigrateLegacyStateDirForTest(); @@ -277,30 +303,11 @@ describe("doctor legacy state migrations", () => { it("migrates legacy Telegram pairing allowFrom store to account-scoped default file", async () => { const { root, cfg } = await makeRootWithEmptyCfg(); - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); - - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)), ).toEqual(["telegram-default-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const target = path.join(oauthDir, "telegram-default-allowFrom.json"); @@ -323,30 +330,11 @@ describe("doctor legacy state migrations", () => { }, }, }; - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); 
- - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)).toSorted(), ).toEqual(["telegram-bot1-allowFrom.json", "telegram-bot2-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const bot1Target = path.join(oauthDir, "telegram-bot1-allowFrom.json"); diff --git a/src/commands/gateway-status.test.ts b/src/commands/gateway-status.test.ts index 64d515c0b4d..452bcb3691b 100644 --- a/src/commands/gateway-status.test.ts +++ b/src/commands/gateway-status.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it, vi } from "vitest"; +import type { GatewayProbeResult } from "../gateway/probe.js"; import type { RuntimeEnv } from "../runtime.js"; import { withEnvAsync } from "../test-utils/env.js"; @@ -33,7 +34,7 @@ const startSshPortForward = vi.fn(async (_opts?: unknown) => ({ stderr: [], stop: sshStop, })); -const probeGateway = vi.fn(async (opts: { url: string }) => { +const probeGateway = vi.fn(async (opts: { url: string }): Promise => { const { url } = opts; if (url.includes("127.0.0.1")) { return { @@ -52,7 +53,16 @@ const probeGateway = vi.fn(async (opts: { url: string }) => { }, sessions: { count: 0 }, }, - presence: [{ mode: "gateway", reason: "self", host: "local", ip: "127.0.0.1" }], + presence: [ + { + mode: "gateway", + reason: "self", + host: "local", + ip: "127.0.0.1", + text: "Gateway: local (127.0.0.1) · app test · mode gateway · reason self", + ts: Date.now(), + }, + ], configSnapshot: { path: "/tmp/cfg.json", exists: true, @@ -81,7 +91,16 @@ const probeGateway = vi.fn(async (opts: { url: string }) => { }, sessions: { count: 2 }, }, - presence: [{ mode: "gateway", 
reason: "self", host: "remote", ip: "100.64.0.2" }], + presence: [ + { + mode: "gateway", + reason: "self", + host: "remote", + ip: "100.64.0.2", + text: "Gateway: remote (100.64.0.2) · app test · mode gateway · reason self", + ts: Date.now(), + }, + ], configSnapshot: { path: "/tmp/remote.json", exists: true, @@ -201,6 +220,54 @@ describe("gateway-status command", () => { expect(targets[0]?.summary).toBeTruthy(); }); + it("treats missing-scope RPC probe failures as degraded but reachable", async () => { + const { runtime, runtimeLogs, runtimeErrors } = createRuntimeCapture(); + readBestEffortConfig.mockResolvedValueOnce({ + gateway: { + mode: "local", + auth: { mode: "token", token: "ltok" }, + }, + } as never); + probeGateway.mockResolvedValueOnce({ + ok: false, + url: "ws://127.0.0.1:18789", + connectLatencyMs: 51, + error: "missing scope: operator.read", + close: null, + health: null, + status: null, + presence: null, + configSnapshot: null, + }); + + await runGatewayStatus(runtime, { timeout: "1000", json: true }); + + expect(runtimeErrors).toHaveLength(0); + const parsed = JSON.parse(runtimeLogs.join("\n")) as { + ok?: boolean; + degraded?: boolean; + warnings?: Array<{ code?: string; targetIds?: string[] }>; + targets?: Array<{ + connect?: { + ok?: boolean; + rpcOk?: boolean; + scopeLimited?: boolean; + }; + }>; + }; + expect(parsed.ok).toBe(true); + expect(parsed.degraded).toBe(true); + expect(parsed.targets?.[0]?.connect).toMatchObject({ + ok: true, + rpcOk: false, + scopeLimited: true, + }); + const scopeLimitedWarning = parsed.warnings?.find( + (warning) => warning.code === "probe_scope_limited", + ); + expect(scopeLimitedWarning?.targetIds).toContain("localLoopback"); + }); + it("surfaces unresolved SecretRef auth diagnostics in warnings", async () => { const { runtime, runtimeLogs, runtimeErrors } = createRuntimeCapture(); await withEnvAsync({ MISSING_GATEWAY_TOKEN: undefined }, async () => { @@ -361,7 +428,16 @@ describe("gateway-status command", () 
=> { }, sessions: { count: 1 }, }, - presence: [{ mode: "gateway", reason: "self", host: "remote", ip: "100.64.0.2" }], + presence: [ + { + mode: "gateway", + reason: "self", + host: "remote", + ip: "100.64.0.2", + text: "Gateway: remote (100.64.0.2) · app test · mode gateway · reason self", + ts: Date.now(), + }, + ], configSnapshot: { path: "/tmp/secretref-config.json", exists: true, diff --git a/src/commands/gateway-status.ts b/src/commands/gateway-status.ts index 4ac54eca0c4..be0b9abf69a 100644 --- a/src/commands/gateway-status.ts +++ b/src/commands/gateway-status.ts @@ -10,6 +10,8 @@ import { colorize, isRich, theme } from "../terminal/theme.js"; import { buildNetworkHints, extractConfigSummary, + isProbeReachable, + isScopeLimitedProbeFailure, type GatewayStatusTarget, parseTimeoutMs, pickGatewaySelfPresence, @@ -193,8 +195,10 @@ export async function gatewayStatusCommand( }, ); - const reachable = probed.filter((p) => p.probe.ok); + const reachable = probed.filter((p) => isProbeReachable(p.probe)); const ok = reachable.length > 0; + const degradedScopeLimited = probed.filter((p) => isScopeLimitedProbeFailure(p.probe)); + const degraded = degradedScopeLimited.length > 0; const multipleGateways = reachable.length > 1; const primary = reachable.find((p) => p.target.kind === "explicit") ?? @@ -236,12 +240,21 @@ export async function gatewayStatusCommand( }); } } + for (const result of degradedScopeLimited) { + warnings.push({ + code: "probe_scope_limited", + message: + "Probe diagnostics are limited by gateway scopes (missing operator.read). Connection succeeded, but status details may be incomplete. 
Hint: pair device identity or use credentials with operator.read.", + targetIds: [result.target.id], + }); + } if (opts.json) { runtime.log( JSON.stringify( { ok, + degraded, ts: Date.now(), durationMs: Date.now() - startedAt, timeoutMs: overallTimeoutMs, @@ -274,7 +287,9 @@ export async function gatewayStatusCommand( active: p.target.active, tunnel: p.target.tunnel ?? null, connect: { - ok: p.probe.ok, + ok: isProbeReachable(p.probe), + rpcOk: p.probe.ok, + scopeLimited: isScopeLimitedProbeFailure(p.probe), latencyMs: p.probe.connectLatencyMs, error: p.probe.error, close: p.probe.close, diff --git a/src/commands/gateway-status/helpers.test.ts b/src/commands/gateway-status/helpers.test.ts index c726db00829..e0c1ecee763 100644 --- a/src/commands/gateway-status/helpers.test.ts +++ b/src/commands/gateway-status/helpers.test.ts @@ -1,6 +1,12 @@ import { describe, expect, it } from "vitest"; import { withEnvAsync } from "../../test-utils/env.js"; -import { extractConfigSummary, resolveAuthForTarget } from "./helpers.js"; +import { + extractConfigSummary, + isProbeReachable, + isScopeLimitedProbeFailure, + renderProbeSummaryLine, + resolveAuthForTarget, +} from "./helpers.js"; describe("extractConfigSummary", () => { it("marks SecretRef-backed gateway auth credentials as configured", () => { @@ -67,6 +73,37 @@ describe("extractConfigSummary", () => { }); describe("resolveAuthForTarget", () => { + function createConfigRemoteTarget() { + return { + id: "configRemote", + kind: "configRemote" as const, + url: "wss://remote.example:18789", + active: true, + }; + } + + function createRemoteGatewayTargetConfig(params?: { mode?: "none" | "password" | "token" }) { + return { + secrets: { + providers: { + default: { source: "env" as const }, + }, + }, + gateway: { + ...(params?.mode + ? 
{ + auth: { + mode: params.mode, + }, + } + : {}), + remote: { + token: { source: "env" as const, provider: "default", id: "REMOTE_GATEWAY_TOKEN" }, + }, + }, + }; + } + it("resolves local auth token SecretRef before probing local targets", async () => { await withEnvAsync( { @@ -109,24 +146,8 @@ describe("resolveAuthForTarget", () => { }, async () => { const auth = await resolveAuthForTarget( - { - secrets: { - providers: { - default: { source: "env" }, - }, - }, - gateway: { - remote: { - token: { source: "env", provider: "default", id: "REMOTE_GATEWAY_TOKEN" }, - }, - }, - }, - { - id: "configRemote", - kind: "configRemote", - url: "wss://remote.example:18789", - active: true, - }, + createRemoteGatewayTargetConfig(), + createConfigRemoteTarget(), {}, ); @@ -142,27 +163,8 @@ describe("resolveAuthForTarget", () => { }, async () => { const auth = await resolveAuthForTarget( - { - secrets: { - providers: { - default: { source: "env" }, - }, - }, - gateway: { - auth: { - mode: "none", - }, - remote: { - token: { source: "env", provider: "default", id: "REMOTE_GATEWAY_TOKEN" }, - }, - }, - }, - { - id: "configRemote", - kind: "configRemote", - url: "wss://remote.example:18789", - active: true, - }, + createRemoteGatewayTargetConfig({ mode: "none" }), + createConfigRemoteTarget(), {}, ); @@ -233,3 +235,41 @@ describe("resolveAuthForTarget", () => { ); }); }); + +describe("probe reachability classification", () => { + it("treats missing-scope RPC failures as scope-limited and reachable", () => { + const probe = { + ok: false, + url: "ws://127.0.0.1:18789", + connectLatencyMs: 51, + error: "missing scope: operator.read", + close: null, + health: null, + status: null, + presence: null, + configSnapshot: null, + }; + + expect(isScopeLimitedProbeFailure(probe)).toBe(true); + expect(isProbeReachable(probe)).toBe(true); + expect(renderProbeSummaryLine(probe, false)).toContain("RPC: limited"); + }); + + it("keeps non-scope RPC failures as unreachable", () => { + const probe = 
{ + ok: false, + url: "ws://127.0.0.1:18789", + connectLatencyMs: 43, + error: "unknown method: status", + close: null, + health: null, + status: null, + presence: null, + configSnapshot: null, + }; + + expect(isScopeLimitedProbeFailure(probe)).toBe(false); + expect(isProbeReachable(probe)).toBe(false); + expect(renderProbeSummaryLine(probe, false)).toContain("RPC: failed"); + }); +}); diff --git a/src/commands/gateway-status/helpers.ts b/src/commands/gateway-status/helpers.ts index 24519e6e8be..5f1a5e2f5ee 100644 --- a/src/commands/gateway-status/helpers.ts +++ b/src/commands/gateway-status/helpers.ts @@ -1,3 +1,4 @@ +import { parseTimeoutMsWithFallback } from "../../cli/parse-timeout.js"; import { resolveGatewayPort } from "../../config/config.js"; import type { OpenClawConfig, ConfigFileSnapshot } from "../../config/types.js"; import { hasConfiguredSecretInput } from "../../config/types.secrets.js"; @@ -8,6 +9,8 @@ import { pickPrimaryTailnetIPv4 } from "../../infra/tailnet.js"; import { colorize, theme } from "../../terminal/theme.js"; import { pickGatewaySelfPresence } from "../gateway-presence.js"; +const MISSING_SCOPE_PATTERN = /\bmissing scope:\s*[a-z0-9._-]+/i; + type TargetKind = "explicit" | "configRemote" | "localLoopback" | "sshTunnel"; export type GatewayStatusTarget = { @@ -64,20 +67,7 @@ function parseIntOrNull(value: unknown): number | null { } export function parseTimeoutMs(raw: unknown, fallbackMs: number): number { - const value = - typeof raw === "string" - ? raw.trim() - : typeof raw === "number" || typeof raw === "bigint" - ? 
String(raw) - : ""; - if (!value) { - return fallbackMs; - } - const parsed = Number.parseInt(value, 10); - if (!Number.isFinite(parsed) || parsed <= 0) { - throw new Error(`invalid --timeout: ${value}`); - } - return parsed; + return parseTimeoutMsWithFallback(raw, fallbackMs); } function normalizeWsUrl(value: string): string | null { @@ -336,6 +326,17 @@ export function renderTargetHeader(target: GatewayStatusTarget, rich: boolean) { return `${colorize(rich, theme.heading, kindLabel)} ${colorize(rich, theme.muted, target.url)}`; } +export function isScopeLimitedProbeFailure(probe: GatewayProbeResult): boolean { + if (probe.ok || probe.connectLatencyMs == null) { + return false; + } + return MISSING_SCOPE_PATTERN.test(probe.error ?? ""); +} + +export function isProbeReachable(probe: GatewayProbeResult): boolean { + return probe.ok || isScopeLimitedProbeFailure(probe); +} + export function renderProbeSummaryLine(probe: GatewayProbeResult, rich: boolean) { if (probe.ok) { const latency = @@ -347,7 +348,10 @@ export function renderProbeSummaryLine(probe: GatewayProbeResult, rich: boolean) if (probe.connectLatencyMs != null) { const latency = typeof probe.connectLatencyMs === "number" ? `${probe.connectLatencyMs}ms` : "unknown"; - return `${colorize(rich, theme.success, "Connect: ok")} (${latency}) · ${colorize(rich, theme.error, "RPC: failed")}${detail}`; + const rpcStatus = isScopeLimitedProbeFailure(probe) + ? 
colorize(rich, theme.warn, "RPC: limited") + : colorize(rich, theme.error, "RPC: failed"); + return `${colorize(rich, theme.success, "Connect: ok")} (${latency}) · ${rpcStatus}${detail}`; } return `${colorize(rich, theme.error, "Connect: failed")}${detail}`; diff --git a/src/commands/models/list.auth-overview.test.ts b/src/commands/models/list.auth-overview.test.ts index 69807a5d7a7..65c324d4b42 100644 --- a/src/commands/models/list.auth-overview.test.ts +++ b/src/commands/models/list.auth-overview.test.ts @@ -1,7 +1,28 @@ import { describe, expect, it } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js"; +import { withEnv } from "../../test-utils/env.js"; import { resolveProviderAuthOverview } from "./list.auth-overview.js"; +function resolveOpenAiOverview(apiKey: string) { + return resolveProviderAuthOverview({ + provider: "openai", + cfg: { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey, + models: [], + }, + }, + }, + } as never, + store: { version: 1, profiles: {} } as never, + modelsPath: "/tmp/models.json", + }); +} + describe("resolveProviderAuthOverview", () => { it("does not throw when token profile only has tokenRef", () => { const overview = resolveProviderAuthOverview({ @@ -24,23 +45,9 @@ describe("resolveProviderAuthOverview", () => { }); it("renders marker-backed models.json auth as marker detail", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: NON_ENV_SECRETREF_MARKER, - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview(NON_ENV_SECRETREF_MARKER), + ); expect(overview.effective.kind).toBe("missing"); 
expect(overview.effective.detail).toBe("missing"); @@ -48,23 +55,9 @@ describe("resolveProviderAuthOverview", () => { }); it("keeps env-var-shaped models.json values masked to avoid accidental plaintext exposure", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview("OPENAI_API_KEY"), + ); expect(overview.effective.kind).toBe("missing"); expect(overview.effective.detail).toBe("missing"); @@ -76,23 +69,7 @@ describe("resolveProviderAuthOverview", () => { const prior = process.env.OPENAI_API_KEY; process.env.OPENAI_API_KEY = "sk-openai-from-env"; // pragma: allowlist secret try { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = resolveOpenAiOverview("OPENAI_API_KEY"); expect(overview.effective.kind).toBe("env"); expect(overview.effective.detail).not.toContain("OPENAI_API_KEY"); } finally { diff --git a/src/commands/models/list.configured.ts b/src/commands/models/list.configured.ts index fed70a4fe47..d83dd9d7f1b 100644 --- a/src/commands/models/list.configured.ts +++ b/src/commands/models/list.configured.ts @@ -39,6 +39,17 @@ export function resolveConfiguredEntries(cfg: OpenClawConfig) { tagsByKey.get(key)?.add(tag); }; + const addResolvedModelRef = (raw: string, tag: string) => { + const resolved = 
resolveModelRefFromString({ + raw, + defaultProvider: DEFAULT_PROVIDER, + aliasIndex, + }); + if (resolved) { + addEntry(resolved.ref, tag); + } + }; + addEntry(resolvedDefault, "default"); const modelFallbacks = resolveAgentModelFallbackValues(cfg.agents?.defaults?.model); @@ -46,38 +57,15 @@ export function resolveConfiguredEntries(cfg: OpenClawConfig) { const imagePrimary = resolveAgentModelPrimaryValue(cfg.agents?.defaults?.imageModel) ?? ""; modelFallbacks.forEach((raw, idx) => { - const resolved = resolveModelRefFromString({ - raw: String(raw ?? ""), - defaultProvider: DEFAULT_PROVIDER, - aliasIndex, - }); - if (!resolved) { - return; - } - addEntry(resolved.ref, `fallback#${idx + 1}`); + addResolvedModelRef(String(raw ?? ""), `fallback#${idx + 1}`); }); if (imagePrimary) { - const resolved = resolveModelRefFromString({ - raw: imagePrimary, - defaultProvider: DEFAULT_PROVIDER, - aliasIndex, - }); - if (resolved) { - addEntry(resolved.ref, "image"); - } + addResolvedModelRef(imagePrimary, "image"); } imageFallbacks.forEach((raw, idx) => { - const resolved = resolveModelRefFromString({ - raw: String(raw ?? ""), - defaultProvider: DEFAULT_PROVIDER, - aliasIndex, - }); - if (!resolved) { - return; - } - addEntry(resolved.ref, `img-fallback#${idx + 1}`); + addResolvedModelRef(String(raw ?? ""), `img-fallback#${idx + 1}`); }); for (const key of Object.keys(cfg.agents?.defaults?.models ?? {})) { diff --git a/src/commands/models/list.list-command.forward-compat.test.ts b/src/commands/models/list.list-command.forward-compat.test.ts index b17e8c07b8f..f0cc594ab35 100644 --- a/src/commands/models/list.list-command.forward-compat.test.ts +++ b/src/commands/models/list.list-command.forward-compat.test.ts @@ -96,6 +96,23 @@ function lastPrintedRows() { return (mocks.printModelTable.mock.calls.at(-1)?.[0] ?? 
[]) as T[]; } +function mockDiscoveredCodex53Registry() { + mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] }); + mocks.loadModelRegistry.mockResolvedValueOnce({ + models: [{ ...OPENAI_CODEX_53_MODEL }], + availableKeys: new Set(["openai-codex/gpt-5.3-codex"]), + registry: { + getAll: () => [{ ...OPENAI_CODEX_53_MODEL }], + }, + }); +} + +async function runAllOpenAiCodexCommand() { + const runtime = createRuntime(); + await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never); + expect(mocks.printModelTable).toHaveBeenCalled(); +} + vi.mock("../../config/config.js", () => ({ loadConfig: mocks.loadConfig, getRuntimeConfigSnapshot: vi.fn().mockReturnValue(null), @@ -261,14 +278,7 @@ describe("modelsListCommand forward-compat", () => { describe("--all catalog supplementation", () => { it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => { - mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] }); - mocks.loadModelRegistry.mockResolvedValueOnce({ - models: [{ ...OPENAI_CODEX_53_MODEL }], - availableKeys: new Set(["openai-codex/gpt-5.3-codex"]), - registry: { - getAll: () => [{ ...OPENAI_CODEX_53_MODEL }], - }, - }); + mockDiscoveredCodex53Registry(); mocks.loadModelCatalog.mockResolvedValueOnce([ { provider: "openai-codex", @@ -304,14 +314,7 @@ describe("modelsListCommand forward-compat", () => { return undefined; }, ); - const runtime = createRuntime(); - - await modelsListCommand( - { all: true, provider: "openai-codex", json: true }, - runtime as never, - ); - - expect(mocks.printModelTable).toHaveBeenCalled(); + await runAllOpenAiCodexCommand(); expect(lastPrintedRows<{ key: string; available: boolean }>()).toEqual([ expect.objectContaining({ key: "openai-codex/gpt-5.3-codex", @@ -324,23 +327,9 @@ describe("modelsListCommand forward-compat", () => { }); it("keeps discovered rows in --all output when catalog lookup is empty", async () => { - 
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] }); - mocks.loadModelRegistry.mockResolvedValueOnce({ - models: [{ ...OPENAI_CODEX_53_MODEL }], - availableKeys: new Set(["openai-codex/gpt-5.3-codex"]), - registry: { - getAll: () => [{ ...OPENAI_CODEX_53_MODEL }], - }, - }); + mockDiscoveredCodex53Registry(); mocks.loadModelCatalog.mockResolvedValueOnce([]); - const runtime = createRuntime(); - - await modelsListCommand( - { all: true, provider: "openai-codex", json: true }, - runtime as never, - ); - - expect(mocks.printModelTable).toHaveBeenCalled(); + await runAllOpenAiCodexCommand(); expect(lastPrintedRows<{ key: string }>()).toEqual([ expect.objectContaining({ key: "openai-codex/gpt-5.3-codex", diff --git a/src/commands/models/list.probe.ts b/src/commands/models/list.probe.ts index 5311b004ce2..7b75d1be726 100644 --- a/src/commands/models/list.probe.ts +++ b/src/commands/models/list.probe.ts @@ -431,12 +431,24 @@ async function probeTarget(params: { error: "No model available for probe", }; } + const model = target.model; const sessionId = `probe-${target.provider}-${crypto.randomUUID()}`; const sessionFile = resolveSessionTranscriptPath(sessionId, agentId); await fs.mkdir(sessionDir, { recursive: true }); const start = Date.now(); + const buildResult = (status: AuthProbeResult["status"], error?: string): AuthProbeResult => ({ + provider: target.provider, + model: `${model.provider}/${model.model}`, + profileId: target.profileId, + label: target.label, + source: target.source, + mode: target.mode, + status, + ...(error ? 
{ error } : {}), + latencyMs: Date.now() - start, + }); try { await runEmbeddedPiAgent({ sessionId, @@ -458,29 +470,13 @@ async function probeTarget(params: { verboseLevel: "off", streamParams: { maxTokens }, }); - return { - provider: target.provider, - model: `${target.model.provider}/${target.model.model}`, - profileId: target.profileId, - label: target.label, - source: target.source, - mode: target.mode, - status: "ok", - latencyMs: Date.now() - start, - }; + return buildResult("ok"); } catch (err) { const described = describeFailoverError(err); - return { - provider: target.provider, - model: `${target.model.provider}/${target.model.model}`, - profileId: target.profileId, - label: target.label, - source: target.source, - mode: target.mode, - status: mapFailoverReasonToProbeStatus(described.reason), - error: redactSecrets(described.message), - latencyMs: Date.now() - start, - }; + return buildResult( + mapFailoverReasonToProbeStatus(described.reason), + redactSecrets(described.message), + ); } } diff --git a/src/commands/models/load-config.test.ts b/src/commands/models/load-config.test.ts index b8969fd4681..2d35c012a49 100644 --- a/src/commands/models/load-config.test.ts +++ b/src/commands/models/load-config.test.ts @@ -25,6 +25,27 @@ vi.mock("../../cli/command-secret-targets.js", () => ({ import { loadModelsConfig, loadModelsConfigWithSource } from "./load-config.js"; describe("models load-config", () => { + const runtimeConfig = { + models: { providers: { openai: { apiKey: "sk-runtime" } } }, // pragma: allowlist secret + }; + const resolvedConfig = { + models: { providers: { openai: { apiKey: "sk-resolved" } } }, // pragma: allowlist secret + }; + const targetIds = new Set(["models.providers.*.apiKey"]); + + function mockResolvedConfigFlow(params: { sourceConfig: unknown; diagnostics: string[] }) { + mocks.loadConfig.mockReturnValue(runtimeConfig); + mocks.readConfigFileSnapshotForWrite.mockResolvedValue({ + snapshot: { valid: true, resolved: 
params.sourceConfig }, + writeOptions: {}, + }); + mocks.getModelsCommandSecretTargetIds.mockReturnValue(targetIds); + mocks.resolveCommandSecretRefsViaGateway.mockResolvedValue({ + resolvedConfig, + diagnostics: params.diagnostics, + }); + } + beforeEach(() => { vi.clearAllMocks(); }); @@ -39,25 +60,9 @@ describe("models load-config", () => { }, }, }; - const runtimeConfig = { - models: { providers: { openai: { apiKey: "sk-runtime" } } }, // pragma: allowlist secret - }; - const resolvedConfig = { - models: { providers: { openai: { apiKey: "sk-resolved" } } }, // pragma: allowlist secret - }; - const targetIds = new Set(["models.providers.*.apiKey"]); const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - mocks.loadConfig.mockReturnValue(runtimeConfig); - mocks.readConfigFileSnapshotForWrite.mockResolvedValue({ - snapshot: { valid: true, resolved: sourceConfig }, - writeOptions: {}, - }); - mocks.getModelsCommandSecretTargetIds.mockReturnValue(targetIds); - mocks.resolveCommandSecretRefsViaGateway.mockResolvedValue({ - resolvedConfig, - diagnostics: ["diag-one", "diag-two"], - }); + mockResolvedConfigFlow({ sourceConfig, diagnostics: ["diag-one", "diag-two"] }); const result = await loadModelsConfigWithSource({ commandName: "models list", runtime }); @@ -78,24 +83,7 @@ describe("models load-config", () => { it("loadModelsConfig returns resolved config while preserving runtime snapshot behavior", async () => { const sourceConfig = { models: { providers: {} } }; - const runtimeConfig = { - models: { providers: { openai: { apiKey: "sk-runtime" } } }, // pragma: allowlist secret - }; - const resolvedConfig = { - models: { providers: { openai: { apiKey: "sk-resolved" } } }, // pragma: allowlist secret - }; - const targetIds = new Set(["models.providers.*.apiKey"]); - - mocks.loadConfig.mockReturnValue(runtimeConfig); - mocks.readConfigFileSnapshotForWrite.mockResolvedValue({ - snapshot: { valid: true, resolved: sourceConfig }, - writeOptions: {}, - }); - 
mocks.getModelsCommandSecretTargetIds.mockReturnValue(targetIds); - mocks.resolveCommandSecretRefsViaGateway.mockResolvedValue({ - resolvedConfig, - diagnostics: [], - }); + mockResolvedConfigFlow({ sourceConfig, diagnostics: [] }); await expect(loadModelsConfig({ commandName: "models list" })).resolves.toBe(resolvedConfig); expect(mocks.setRuntimeConfigSnapshot).toHaveBeenCalledWith(resolvedConfig, sourceConfig); diff --git a/src/commands/ollama-setup.test.ts b/src/commands/ollama-setup.test.ts index 124254c53b2..0b9b5d0e414 100644 --- a/src/commands/ollama-setup.test.ts +++ b/src/commands/ollama-setup.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import type { RuntimeEnv } from "../runtime.js"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; import type { WizardPrompter } from "../wizard/prompts.js"; import { configureOllamaNonInteractive, @@ -23,27 +24,6 @@ vi.mock("./oauth-env.js", () => ({ isRemoteEnvironment: isRemoteEnvironmentMock, })); -function jsonResponse(body: unknown, status = 200): Response { - return new Response(JSON.stringify(body), { - status, - headers: { "Content-Type": "application/json" }, - }); -} - -function requestUrl(input: string | URL | Request): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - return input.url; -} - -function requestBody(body: BodyInit | null | undefined): string { - return typeof body === "string" ? body : "{}"; -} - function createOllamaFetchMock(params: { tags?: string[]; show?: Record; @@ -61,7 +41,7 @@ function createOllamaFetchMock(params: { return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) }); } if (url.endsWith("/api/show")) { - const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; const contextWindow = body.name ? 
params.show?.[body.name] : undefined; return contextWindow ? jsonResponse({ model_info: { "llama.context_length": contextWindow } }) @@ -77,6 +57,45 @@ function createOllamaFetchMock(params: { }); } +function createModePrompter( + mode: "local" | "remote", + params?: { confirm?: boolean }, +): WizardPrompter { + return { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce(mode), + ...(params?.confirm !== undefined + ? { confirm: vi.fn().mockResolvedValueOnce(params.confirm) } + : {}), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; +} + +function createSignedOutRemoteFetchMock() { + return createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ + jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), + jsonResponse({ username: "testuser" }), + ], + }); +} + +function createDefaultOllamaConfig(primary: string) { + return { + agents: { defaults: { model: { primary } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }; +} + +function createRuntime() { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; +} + describe("ollama setup", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -86,11 +105,7 @@ describe("ollama setup", () => { }); it("returns suggested default model for local mode", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -101,11 +116,7 @@ describe("ollama setup", () => { }); it("returns suggested default model for remote mode", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: 
vi.fn().mockResolvedValueOnce("remote"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("remote"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -116,11 +127,7 @@ describe("ollama setup", () => { }); it("mode selection affects model ordering (local)", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] }); vi.stubGlobal("fetch", fetchMock); @@ -134,20 +141,8 @@ describe("ollama setup", () => { }); it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - confirm: vi.fn().mockResolvedValueOnce(true), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; - - const fetchMock = createOllamaFetchMock({ - tags: ["llama3:8b"], - meResponses: [ - jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - jsonResponse({ username: "testuser" }), - ], - }); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -158,20 +153,8 @@ describe("ollama setup", () => { it("cloud+local mode does not open browser in remote environment", async () => { isRemoteEnvironmentMock.mockReturnValue(true); - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - confirm: vi.fn().mockResolvedValueOnce(true), - note: vi.fn(async () => 
undefined), - } as unknown as WizardPrompter; - - const fetchMock = createOllamaFetchMock({ - tags: ["llama3:8b"], - meResponses: [ - jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - jsonResponse({ username: "testuser" }), - ], - }); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -180,11 +163,7 @@ describe("ollama setup", () => { }); it("local mode does not trigger cloud auth", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -258,10 +237,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), prompter, }); @@ -276,10 +252,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), prompter, }); @@ -292,10 +265,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/kimi-k2.5:cloud" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } 
}, - }, + config: createDefaultOllamaConfig("ollama/kimi-k2.5:cloud"), prompter, }); @@ -324,12 +294,7 @@ describe("ollama setup", () => { pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }), }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: { @@ -362,12 +327,7 @@ describe("ollama setup", () => { pullResponse: new Response('{"status":"success"}\n', { status: 200 }), }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: {}, @@ -379,7 +339,7 @@ describe("ollama setup", () => { }); const pullRequest = fetchMock.mock.calls[1]?.[1]; - expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); + expect(JSON.parse(requestBodyText(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); expect(result.agents?.defaults?.model).toEqual( expect.objectContaining({ primary: "ollama/llama3.2:latest" }), ); @@ -388,12 +348,7 @@ describe("ollama setup", () => { it("accepts cloud models in non-interactive mode without pulling", async () => { const fetchMock = createOllamaFetchMock({ tags: [] }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: {}, diff --git a/src/commands/onboard-auth.config-core.ts b/src/commands/onboard-auth.config-core.ts index 4bda29df1bf..8c41bfb939c 100644 --- a/src/commands/onboard-auth.config-core.ts +++ b/src/commands/onboard-auth.config-core.ts @@ -85,6 +85,29 @@ import { MODELSTUDIO_DEFAULT_MODEL_REF, } from "./onboard-auth.models.js"; +function 
mergeProviderModels( + existingProvider: Record | undefined, + defaultModels: T[], +): T[] { + const existingModels = Array.isArray(existingProvider?.models) + ? (existingProvider.models as T[]) + : []; + const mergedModels = [...existingModels]; + const seen = new Set(existingModels.map((model) => model.id)); + for (const model of defaultModels) { + if (!seen.has(model.id)) { + mergedModels.push(model); + seen.add(model.id); + } + } + return mergedModels; +} + +function getNormalizedProviderApiKey(existingProvider: Record | undefined) { + const { apiKey } = (existingProvider ?? {}) as { apiKey?: string }; + return typeof apiKey === "string" ? apiKey.trim() || undefined : undefined; +} + export function applyZaiProviderConfig( cfg: OpenClawConfig, params?: { endpoint?: string; modelId?: string }, @@ -100,7 +123,6 @@ export function applyZaiProviderConfig( const providers = { ...cfg.models?.providers }; const existingProvider = providers.zai; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildZaiModelDefinition({ id: "glm-5" }), @@ -109,21 +131,13 @@ export function applyZaiProviderConfig( buildZaiModelDefinition({ id: "glm-4.7-flashx" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? 
existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); const baseUrl = params?.endpoint ? resolveZaiBaseUrl(params.endpoint) @@ -256,12 +270,11 @@ export function applySyntheticProviderConfig(cfg: OpenClawConfig): OpenClawConfi (model) => !existingModels.some((existing) => existing.id === model.id), ), ]; - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.synthetic = { ...existingProviderRest, baseUrl: SYNTHETIC_BASE_URL, @@ -609,7 +622,6 @@ function applyModelStudioProviderConfigWithBaseUrl( const providers = { ...cfg.models?.providers }; const existingProvider = providers.modelstudio; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildModelStudioModelDefinition({ id: "qwen3.5-plus" }), @@ -622,21 +634,13 @@ function applyModelStudioProviderConfigWithBaseUrl( buildModelStudioModelDefinition({ id: "kimi-k2.5" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? 
{}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.modelstudio = { ...existingProviderRest, diff --git a/src/commands/onboard-non-interactive.gateway.test.ts b/src/commands/onboard-non-interactive.gateway.test.ts index f2e0724b53b..83a81f340b3 100644 --- a/src/commands/onboard-non-interactive.gateway.test.ts +++ b/src/commands/onboard-non-interactive.gateway.test.ts @@ -1,9 +1,11 @@ import fs from "node:fs/promises"; import path from "node:path"; import { afterAll, afterEach, beforeAll, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; import { makeTempWorkspace } from "../test-helpers/workspace.js"; import { captureEnv } from "../test-utils/env.js"; import { createThrowingRuntime, readJsonFile } from "./onboard-non-interactive.test-helpers.js"; +import type { installGatewayDaemonNonInteractive } from "./onboard-non-interactive/local/daemon-install.js"; const gatewayClientCalls: Array<{ url?: string; @@ -13,7 +15,23 @@ const gatewayClientCalls: Array<{ onClose?: (code: number, reason: string) => void; }> = []; const ensureWorkspaceAndSessionsMock = vi.fn(async (..._args: unknown[]) => {}); -const installGatewayDaemonNonInteractiveMock = vi.hoisted(() => vi.fn(async () => {})); +type InstallGatewayDaemonResult = Awaited>; +const installGatewayDaemonNonInteractiveMock = vi.hoisted(() => + vi.fn(async (): Promise => ({ installed: true })), +); +const gatewayServiceMock = vi.hoisted(() => ({ + label: "LaunchAgent", + loadedText: "loaded", + isLoaded: vi.fn(async () => true), + readRuntime: vi.fn(async () => ({ + status: "running", + state: "active", + pid: 4242, + })), +})); +const readLastGatewayErrorLineMock = vi.hoisted(() => + vi.fn(async () => "Gateway failed to start: required secrets are 
unavailable."), +); let waitForGatewayReachableMock: | ((params: { url: string; token?: string; password?: string; deadlineMs?: number }) => Promise<{ ok: boolean; @@ -64,6 +82,14 @@ vi.mock("./onboard-non-interactive/local/daemon-install.js", () => ({ installGatewayDaemonNonInteractive: installGatewayDaemonNonInteractiveMock, })); +vi.mock("../daemon/service.js", () => ({ + resolveGatewayService: () => gatewayServiceMock, +})); + +vi.mock("../daemon/diagnostics.js", () => ({ + readLastGatewayErrorLine: readLastGatewayErrorLineMock, +})); + const { runNonInteractiveOnboarding } = await import("./onboard-non-interactive.js"); const { resolveConfigPath: resolveStateConfigPath } = await import("../config/paths.js"); const { resolveConfigPath } = await import("../config/config.js"); @@ -134,6 +160,9 @@ describe("onboard (non-interactive): gateway and remote auth", () => { afterEach(() => { waitForGatewayReachableMock = undefined; installGatewayDaemonNonInteractiveMock.mockClear(); + gatewayServiceMock.isLoaded.mockClear(); + gatewayServiceMock.readRuntime.mockClear(); + readLastGatewayErrorLineMock.mockClear(); }); it("writes gateway token auth into config", async () => { @@ -376,6 +405,157 @@ describe("onboard (non-interactive): gateway and remote auth", () => { }); }, 60_000); + it("emits a daemon-install failure when Linux user systemd is unavailable", async () => { + await withStateDir("state-local-daemon-install-json-fail-", async (stateDir) => { + installGatewayDaemonNonInteractiveMock.mockResolvedValueOnce({ + installed: false, + skippedReason: "systemd-user-unavailable", + }); + + let capturedError = ""; + const runtimeWithCapture: RuntimeEnv = { + log: () => {}, + error: (...args: unknown[]) => { + const firstArg = args[0]; + capturedError = + typeof firstArg === "string" + ? firstArg + : firstArg instanceof Error + ? firstArg.message + : (JSON.stringify(firstArg) ?? 
""); + throw new Error(capturedError); + }, + exit: (_code: number) => { + throw new Error("exit should not be reached after runtime.error"); + }, + }; + + const originalPlatform = process.platform; + Object.defineProperty(process, "platform", { + configurable: true, + value: "linux", + }); + + try { + await expect( + runNonInteractiveOnboarding( + { + nonInteractive: true, + mode: "local", + workspace: path.join(stateDir, "openclaw"), + authChoice: "skip", + skipSkills: true, + skipHealth: false, + installDaemon: true, + gatewayBind: "loopback", + json: true, + }, + runtimeWithCapture, + ), + ).rejects.toThrow(/"phase": "daemon-install"/); + } finally { + Object.defineProperty(process, "platform", { + configurable: true, + value: originalPlatform, + }); + } + + const parsed = JSON.parse(capturedError) as { + ok: boolean; + phase: string; + daemonInstall?: { + requested?: boolean; + installed?: boolean; + skippedReason?: string; + }; + hints?: string[]; + }; + expect(parsed.ok).toBe(false); + expect(parsed.phase).toBe("daemon-install"); + expect(parsed.daemonInstall).toEqual({ + requested: true, + installed: false, + skippedReason: "systemd-user-unavailable", + }); + expect(parsed.hints).toContain( + "Fix: rerun without `--install-daemon` for one-shot setup, or enable a working user-systemd session and retry.", + ); + }); + }, 60_000); + + it("emits structured JSON diagnostics when daemon health fails", async () => { + await withStateDir("state-local-daemon-health-json-fail-", async (stateDir) => { + waitForGatewayReachableMock = vi.fn(async () => ({ + ok: false, + detail: "gateway closed (1006 abnormal closure (no close frame)): no close reason", + })); + + let capturedError = ""; + const runtimeWithCapture: RuntimeEnv = { + log: () => {}, + error: (...args: unknown[]) => { + const firstArg = args[0]; + capturedError = + typeof firstArg === "string" + ? firstArg + : firstArg instanceof Error + ? firstArg.message + : (JSON.stringify(firstArg) ?? 
""); + throw new Error(capturedError); + }, + exit: (_code: number) => { + throw new Error("exit should not be reached after runtime.error"); + }, + }; + + await expect( + runNonInteractiveOnboarding( + { + nonInteractive: true, + mode: "local", + workspace: path.join(stateDir, "openclaw"), + authChoice: "skip", + skipSkills: true, + skipHealth: false, + installDaemon: true, + gatewayBind: "loopback", + json: true, + }, + runtimeWithCapture, + ), + ).rejects.toThrow(/"phase": "gateway-health"/); + + const parsed = JSON.parse(capturedError) as { + ok: boolean; + phase: string; + installDaemon: boolean; + detail?: string; + gateway?: { wsUrl?: string }; + hints?: string[]; + diagnostics?: { + service?: { + label?: string; + loaded?: boolean; + runtimeStatus?: string; + pid?: number; + }; + lastGatewayError?: string; + }; + }; + expect(parsed.ok).toBe(false); + expect(parsed.phase).toBe("gateway-health"); + expect(parsed.installDaemon).toBe(true); + expect(parsed.detail).toContain("1006 abnormal closure"); + expect(parsed.gateway?.wsUrl).toContain("ws://127.0.0.1:"); + expect(parsed.hints).toContain("Run `openclaw gateway status --deep` for more detail."); + expect(parsed.diagnostics?.service?.label).toBe("LaunchAgent"); + expect(parsed.diagnostics?.service?.loaded).toBe(true); + expect(parsed.diagnostics?.service?.runtimeStatus).toBe("running"); + expect(parsed.diagnostics?.service?.pid).toBe(4242); + expect(parsed.diagnostics?.lastGatewayError).toContain("required secrets are unavailable"); + }); + }, 60_000); + it("auto-generates token auth when binding LAN and persists the token", async () => { if (process.platform === "win32") { // Windows runner occasionally drops the temp config write in this flow; skip to keep CI green. 
diff --git a/src/commands/onboard-non-interactive/local.ts b/src/commands/onboard-non-interactive/local.ts index 0765eb1a513..5e26bf50d24 100644 --- a/src/commands/onboard-non-interactive/local.ts +++ b/src/commands/onboard-non-interactive/local.ts @@ -15,13 +15,58 @@ import { import type { OnboardOptions } from "../onboard-types.js"; import { inferAuthChoiceFromFlags } from "./local/auth-choice-inference.js"; import { applyNonInteractiveGatewayConfig } from "./local/gateway-config.js"; -import { logNonInteractiveOnboardingJson } from "./local/output.js"; +import { + type GatewayHealthFailureDiagnostics, + logNonInteractiveOnboardingFailure, + logNonInteractiveOnboardingJson, +} from "./local/output.js"; import { applyNonInteractiveSkillsConfig } from "./local/skills-config.js"; import { resolveNonInteractiveWorkspaceDir } from "./local/workspace.js"; const INSTALL_DAEMON_HEALTH_DEADLINE_MS = 45_000; const ATTACH_EXISTING_GATEWAY_HEALTH_DEADLINE_MS = 15_000; +async function collectGatewayHealthFailureDiagnostics(): Promise< + GatewayHealthFailureDiagnostics | undefined +> { + const diagnostics: GatewayHealthFailureDiagnostics = {}; + + try { + const { resolveGatewayService } = await import("../../daemon/service.js"); + const service = resolveGatewayService(); + const env = process.env as Record; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env }).catch(() => false), + service.readRuntime(env).catch(() => undefined), + ]); + diagnostics.service = { + label: service.label, + loaded, + loadedText: service.loadedText, + runtimeStatus: runtime?.status, + state: runtime?.state, + pid: runtime?.pid, + lastExitStatus: runtime?.lastExitStatus, + lastExitReason: runtime?.lastExitReason, + }; + } catch (err) { + diagnostics.inspectError = `service diagnostics failed: ${String(err)}`; + } + + try { + const { readLastGatewayErrorLine } = await import("../../daemon/diagnostics.js"); + diagnostics.lastGatewayError = (await 
readLastGatewayErrorLine(process.env)) ?? undefined; + } catch (err) { + diagnostics.inspectError = diagnostics.inspectError + ? `${diagnostics.inspectError}; log diagnostics failed: ${String(err)}` + : `log diagnostics failed: ${String(err)}`; + } + + return diagnostics.service || diagnostics.lastGatewayError || diagnostics.inspectError + ? diagnostics + : undefined; +} + export async function runNonInteractiveOnboardingLocal(params: { opts: OnboardOptions; runtime: RuntimeEnv; @@ -88,17 +133,62 @@ export async function runNonInteractiveOnboardingLocal(params: { skipBootstrap: Boolean(nextConfig.agents?.defaults?.skipBootstrap), }); + const daemonRuntimeRaw = opts.daemonRuntime ?? DEFAULT_GATEWAY_DAEMON_RUNTIME; + let daemonInstallStatus: + | { + requested: boolean; + installed: boolean; + skippedReason?: "systemd-user-unavailable"; + } + | undefined; if (opts.installDaemon) { const { installGatewayDaemonNonInteractive } = await import("./local/daemon-install.js"); - await installGatewayDaemonNonInteractive({ + const daemonInstall = await installGatewayDaemonNonInteractive({ nextConfig, opts, runtime, port: gatewayResult.port, }); + daemonInstallStatus = daemonInstall.installed + ? { + requested: true, + installed: true, + } + : { + requested: true, + installed: false, + skippedReason: daemonInstall.skippedReason, + }; + if (!daemonInstall.installed && !opts.skipHealth) { + logNonInteractiveOnboardingFailure({ + opts, + runtime, + mode, + phase: "daemon-install", + message: + daemonInstall.skippedReason === "systemd-user-unavailable" + ? "Gateway service install is unavailable because systemd user services are not reachable in this Linux session." + : "Gateway service install did not complete successfully.", + installDaemon: true, + daemonInstall: { + requested: true, + installed: false, + skippedReason: daemonInstall.skippedReason, + }, + daemonRuntime: daemonRuntimeRaw, + hints: + daemonInstall.skippedReason === "systemd-user-unavailable" + ? 
[ + "Fix: rerun without `--install-daemon` for one-shot setup, or enable a working user-systemd session and retry.", + "If your auth profile uses env-backed refs, keep those env vars set in the shell that runs `openclaw gateway run` or `openclaw agent --local`.", + ] + : [`Run \`${formatCliCommand("openclaw gateway status --deep")}\` for more detail.`], + }); + runtime.exit(1); + return; + } } - const daemonRuntimeRaw = opts.daemonRuntime ?? DEFAULT_GATEWAY_DAEMON_RUNTIME; if (!opts.skipHealth) { const { healthCommand } = await import("../health.js"); const links = resolveControlUiLinks({ @@ -115,24 +205,34 @@ export async function runNonInteractiveOnboardingLocal(params: { : ATTACH_EXISTING_GATEWAY_HEALTH_DEADLINE_MS, }); if (!probe.ok) { - const message = [ - `Gateway did not become reachable at ${links.wsUrl}.`, - probe.detail ? `Last probe: ${probe.detail}` : undefined, - !opts.installDaemon + const diagnostics = opts.installDaemon + ? await collectGatewayHealthFailureDiagnostics() + : undefined; + logNonInteractiveOnboardingFailure({ + opts, + runtime, + mode, + phase: "gateway-health", + message: `Gateway did not become reachable at ${links.wsUrl}.`, + detail: probe.detail, + gateway: { + wsUrl: links.wsUrl, + httpUrl: links.httpUrl, + }, + installDaemon: Boolean(opts.installDaemon), + daemonInstall: daemonInstallStatus, + daemonRuntime: opts.installDaemon ? daemonRuntimeRaw : undefined, + diagnostics, + hints: !opts.installDaemon ? [ "Non-interactive local onboarding only waits for an already-running gateway unless you pass --install-daemon.", `Fix: start \`${formatCliCommand("openclaw gateway run")}\`, re-run with \`--install-daemon\`, or use \`--skip-health\`.`, process.platform === "win32" ? "Native Windows managed gateway install tries Scheduled Tasks first and falls back to a per-user Startup-folder login item when task creation is denied." 
: undefined, - ] - .filter(Boolean) - .join("\n") - : undefined, - ] - .filter(Boolean) - .join("\n"); - runtime.error(message); + ].filter((value): value is string => Boolean(value)) + : [`Run \`${formatCliCommand("openclaw gateway status --deep")}\` for more detail.`], + }); runtime.exit(1); return; } @@ -152,6 +252,7 @@ export async function runNonInteractiveOnboardingLocal(params: { tailscaleMode: gatewayResult.tailscaleMode, }, installDaemon: Boolean(opts.installDaemon), + daemonInstall: daemonInstallStatus, daemonRuntime: opts.installDaemon ? daemonRuntimeRaw : undefined, skipSkills: Boolean(opts.skipSkills), skipHealth: Boolean(opts.skipHealth), diff --git a/src/commands/onboard-non-interactive/local/daemon-install.test.ts b/src/commands/onboard-non-interactive/local/daemon-install.test.ts index c3e87a1d48d..d45cf4cafad 100644 --- a/src/commands/onboard-non-interactive/local/daemon-install.test.ts +++ b/src/commands/onboard-non-interactive/local/daemon-install.test.ts @@ -6,6 +6,7 @@ const gatewayInstallErrorHint = vi.hoisted(() => vi.fn(() => "hint")); const resolveGatewayInstallToken = vi.hoisted(() => vi.fn()); const serviceInstall = vi.hoisted(() => vi.fn(async () => {})); const ensureSystemdUserLingerNonInteractive = vi.hoisted(() => vi.fn(async () => {})); +const isSystemdUserServiceAvailable = vi.hoisted(() => vi.fn(async () => true)); vi.mock("../../daemon-install-helpers.js", () => ({ buildGatewayInstallPlan, @@ -23,7 +24,7 @@ vi.mock("../../../daemon/service.js", () => ({ })); vi.mock("../../../daemon/systemd.js", () => ({ - isSystemdUserServiceAvailable: vi.fn(async () => true), + isSystemdUserServiceAvailable, })); vi.mock("../../daemon-runtime.js", () => ({ @@ -40,6 +41,7 @@ const { installGatewayDaemonNonInteractive } = await import("./daemon-install.js describe("installGatewayDaemonNonInteractive", () => { beforeEach(() => { vi.clearAllMocks(); + isSystemdUserServiceAvailable.mockResolvedValue(true); 
resolveGatewayInstallToken.mockResolvedValue({ token: undefined, tokenRefConfigured: true, @@ -100,4 +102,39 @@ describe("installGatewayDaemonNonInteractive", () => { expect(buildGatewayInstallPlan).not.toHaveBeenCalled(); expect(serviceInstall).not.toHaveBeenCalled(); }); + + it("returns a skipped result when Linux user systemd is unavailable", async () => { + const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; + const originalPlatform = process.platform; + + isSystemdUserServiceAvailable.mockResolvedValue(false); + Object.defineProperty(process, "platform", { + configurable: true, + value: "linux", + }); + + try { + const result = await installGatewayDaemonNonInteractive({ + nextConfig: {} as OpenClawConfig, + opts: { installDaemon: true }, + runtime, + port: 18789, + }); + + expect(result).toEqual({ + installed: false, + skippedReason: "systemd-user-unavailable", + }); + expect(runtime.log).toHaveBeenCalledWith( + expect.stringContaining("Systemd user services are unavailable"), + ); + expect(buildGatewayInstallPlan).not.toHaveBeenCalled(); + expect(serviceInstall).not.toHaveBeenCalled(); + } finally { + Object.defineProperty(process, "platform", { + configurable: true, + value: originalPlatform, + }); + } + }); }); diff --git a/src/commands/onboard-non-interactive/local/daemon-install.ts b/src/commands/onboard-non-interactive/local/daemon-install.ts index d3b759227d6..6236b410f75 100644 --- a/src/commands/onboard-non-interactive/local/daemon-install.ts +++ b/src/commands/onboard-non-interactive/local/daemon-install.ts @@ -13,24 +13,34 @@ export async function installGatewayDaemonNonInteractive(params: { opts: OnboardOptions; runtime: RuntimeEnv; port: number; -}) { +}): Promise< + | { + installed: true; + } + | { + installed: false; + skippedReason?: "systemd-user-unavailable"; + } +> { const { opts, runtime, port } = params; if (!opts.installDaemon) { - return; + return { installed: false }; } const daemonRuntimeRaw = opts.daemonRuntime ?? 
DEFAULT_GATEWAY_DAEMON_RUNTIME; const systemdAvailable = process.platform === "linux" ? await isSystemdUserServiceAvailable() : true; if (process.platform === "linux" && !systemdAvailable) { - runtime.log("Systemd user services are unavailable; skipping service install."); - return; + runtime.log( + "Systemd user services are unavailable; skipping service install. Use a direct shell run (`openclaw gateway run`) or rerun without --install-daemon on this session.", + ); + return { installed: false, skippedReason: "systemd-user-unavailable" }; } if (!isGatewayDaemonRuntime(daemonRuntimeRaw)) { runtime.error("Invalid --daemon-runtime (use node or bun)"); runtime.exit(1); - return; + return { installed: false }; } const service = resolveGatewayService(); @@ -50,7 +60,7 @@ export async function installGatewayDaemonNonInteractive(params: { ].join(" "), ); runtime.exit(1); - return; + return { installed: false }; } const { programArguments, workingDirectory, environment } = await buildGatewayInstallPlan({ env: process.env, @@ -70,7 +80,8 @@ export async function installGatewayDaemonNonInteractive(params: { } catch (err) { runtime.error(`Gateway service install failed: ${String(err)}`); runtime.log(gatewayInstallErrorHint()); - return; + return { installed: false }; } await ensureSystemdUserLingerNonInteractive({ runtime }); + return { installed: true }; } diff --git a/src/commands/onboard-non-interactive/local/output.ts b/src/commands/onboard-non-interactive/local/output.ts index d4296e3500c..a91df06aee6 100644 --- a/src/commands/onboard-non-interactive/local/output.ts +++ b/src/commands/onboard-non-interactive/local/output.ts @@ -1,6 +1,21 @@ import type { RuntimeEnv } from "../../../runtime.js"; import type { OnboardOptions } from "../../onboard-types.js"; +export type GatewayHealthFailureDiagnostics = { + service?: { + label: string; + loaded: boolean; + loadedText: string; + runtimeStatus?: string; + state?: string; + pid?: number; + lastExitStatus?: number; + 
lastExitReason?: string; + }; + lastGatewayError?: string; + inspectError?: string; +}; + export function logNonInteractiveOnboardingJson(params: { opts: OnboardOptions; runtime: RuntimeEnv; @@ -14,6 +29,11 @@ export function logNonInteractiveOnboardingJson(params: { tailscaleMode: string; }; installDaemon?: boolean; + daemonInstall?: { + requested: boolean; + installed: boolean; + skippedReason?: string; + }; daemonRuntime?: string; skipSkills?: boolean; skipHealth?: boolean; @@ -24,11 +44,13 @@ export function logNonInteractiveOnboardingJson(params: { params.runtime.log( JSON.stringify( { + ok: true, mode: params.mode, workspace: params.workspaceDir, authChoice: params.authChoice, gateway: params.gateway, installDaemon: Boolean(params.installDaemon), + daemonInstall: params.daemonInstall, daemonRuntime: params.daemonRuntime, skipSkills: Boolean(params.skipSkills), skipHealth: Boolean(params.skipHealth), @@ -38,3 +60,94 @@ export function logNonInteractiveOnboardingJson(params: { ), ); } + +function formatGatewayRuntimeSummary( + diagnostics: GatewayHealthFailureDiagnostics | undefined, +): string | undefined { + const service = diagnostics?.service; + if (!service?.runtimeStatus) { + return undefined; + } + const parts = [service.runtimeStatus]; + if (typeof service.pid === "number") { + parts.push(`pid ${service.pid}`); + } + if (service.state) { + parts.push(`state ${service.state}`); + } + if (typeof service.lastExitStatus === "number") { + parts.push(`last exit ${service.lastExitStatus}`); + } + if (service.lastExitReason) { + parts.push(`reason ${service.lastExitReason}`); + } + return parts.join(", "); +} + +export function logNonInteractiveOnboardingFailure(params: { + opts: OnboardOptions; + runtime: RuntimeEnv; + mode: "local" | "remote"; + phase: string; + message: string; + detail?: string; + hints?: string[]; + gateway?: { + wsUrl?: string; + httpUrl?: string; + }; + installDaemon?: boolean; + daemonInstall?: { + requested: boolean; + installed: 
boolean; + skippedReason?: string; + }; + daemonRuntime?: string; + diagnostics?: GatewayHealthFailureDiagnostics; +}) { + const hints = params.hints?.filter(Boolean) ?? []; + const gatewayRuntime = formatGatewayRuntimeSummary(params.diagnostics); + + if (params.opts.json) { + params.runtime.error( + JSON.stringify( + { + ok: false, + mode: params.mode, + phase: params.phase, + message: params.message, + detail: params.detail, + gateway: params.gateway, + installDaemon: Boolean(params.installDaemon), + daemonInstall: params.daemonInstall, + daemonRuntime: params.daemonRuntime, + diagnostics: params.diagnostics, + hints: hints.length > 0 ? hints : undefined, + }, + null, + 2, + ), + ); + return; + } + + const lines = [ + params.message, + params.detail ? `Last probe: ${params.detail}` : undefined, + params.diagnostics?.service + ? `Service: ${params.diagnostics.service.label} (${params.diagnostics.service.loaded ? params.diagnostics.service.loadedText : "not loaded"})` + : undefined, + gatewayRuntime ? `Runtime: ${gatewayRuntime}` : undefined, + params.diagnostics?.lastGatewayError + ? `Last gateway error: ${params.diagnostics.lastGatewayError}` + : undefined, + params.diagnostics?.inspectError + ? `Diagnostics warning: ${params.diagnostics.inspectError}` + : undefined, + hints.length > 0 ? 
hints.join("\n") : undefined, + ] + .filter(Boolean) + .join("\n"); + + params.runtime.error(lines); +} diff --git a/src/commands/onboard.test.ts b/src/commands/onboard.test.ts index 1233222bf54..5d1dc20634d 100644 --- a/src/commands/onboard.test.ts +++ b/src/commands/onboard.test.ts @@ -60,6 +60,26 @@ describe("onboardCommand", () => { expect(mocks.runNonInteractiveOnboarding).not.toHaveBeenCalled(); }); + it("logs ASCII-safe Windows guidance before onboarding", async () => { + const runtime = makeRuntime(); + const platformSpy = vi.spyOn(process, "platform", "get").mockReturnValue("win32"); + + try { + await onboardCommand({}, runtime); + + expect(runtime.log).toHaveBeenCalledWith( + [ + "Windows detected - OpenClaw runs great on WSL2!", + "Native Windows might be trickier.", + "Quick setup: wsl --install (one command, one reboot)", + "Guide: https://docs.openclaw.ai/windows", + ].join("\n"), + ); + } finally { + platformSpy.mockRestore(); + } + }); + it("defaults --reset to config+creds+sessions scope", async () => { const runtime = makeRuntime(); diff --git a/src/commands/onboard.ts b/src/commands/onboard.ts index 9c55bddf1d6..6762998f815 100644 --- a/src/commands/onboard.ts +++ b/src/commands/onboard.ts @@ -77,7 +77,7 @@ export async function onboardCommand(opts: OnboardOptions, runtime: RuntimeEnv = if (process.platform === "win32") { runtime.log( [ - "Windows detected — OpenClaw runs great on WSL2!", + "Windows detected - OpenClaw runs great on WSL2!", "Native Windows might be trickier.", "Quick setup: wsl --install (one command, one reboot)", "Guide: https://docs.openclaw.ai/windows", diff --git a/src/commands/self-hosted-provider-setup.ts b/src/commands/self-hosted-provider-setup.ts index 6a50820ce91..c067d797f15 100644 --- a/src/commands/self-hosted-provider-setup.ts +++ b/src/commands/self-hosted-provider-setup.ts @@ -2,6 +2,8 @@ import { upsertAuthProfileWithLock } from "../agents/auth-profiles.js"; import type { ApiKeyCredential, AuthProfileCredential 
} from "../agents/auth-profiles/types.js"; import type { OpenClawConfig } from "../config/config.js"; import type { + ProviderDiscoveryContext, + ProviderAuthResult, ProviderAuthMethodNonInteractiveContext, ProviderNonInteractiveApiKeyResult, } from "../plugins/types.js"; @@ -85,7 +87,7 @@ function buildOpenAICompatibleSelfHostedProviderConfig(params: { }; } -export async function promptAndConfigureOpenAICompatibleSelfHostedProvider(params: { +type OpenAICompatibleSelfHostedProviderSetupParams = { cfg: OpenClawConfig; prompter: WizardPrompter; providerId: string; @@ -97,13 +99,34 @@ export async function promptAndConfigureOpenAICompatibleSelfHostedProvider(param reasoning?: boolean; contextWindow?: number; maxTokens?: number; -}): Promise<{ +}; + +type OpenAICompatibleSelfHostedProviderPromptResult = { config: OpenClawConfig; credential: AuthProfileCredential; modelId: string; modelRef: string; profileId: string; -}> { +}; + +function buildSelfHostedProviderAuthResult( + result: OpenAICompatibleSelfHostedProviderPromptResult, +): ProviderAuthResult { + return { + profiles: [ + { + profileId: result.profileId, + credential: result.credential, + }, + ], + configPatch: result.config, + defaultModel: result.modelRef, + }; +} + +export async function promptAndConfigureOpenAICompatibleSelfHostedProvider( + params: OpenAICompatibleSelfHostedProviderSetupParams, +): Promise { const baseUrlRaw = await params.prompter.text({ message: `${params.providerLabel} base URL`, initialValue: params.defaultBaseUrl, @@ -152,6 +175,35 @@ export async function promptAndConfigureOpenAICompatibleSelfHostedProvider(param }; } +export async function promptAndConfigureOpenAICompatibleSelfHostedProviderAuth( + params: OpenAICompatibleSelfHostedProviderSetupParams, +): Promise { + const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider(params); + return buildSelfHostedProviderAuthResult(result); +} + +export async function discoverOpenAICompatibleSelfHostedProvider< + T 
extends Record, +>(params: { + ctx: ProviderDiscoveryContext; + providerId: string; + buildProvider: (params: { apiKey?: string }) => Promise; +}): Promise<{ provider: T & { apiKey: string } } | null> { + if (params.ctx.config.models?.providers?.[params.providerId]) { + return null; + } + const { apiKey, discoveryApiKey } = params.ctx.resolveProviderApiKey(params.providerId); + if (!apiKey) { + return null; + } + return { + provider: { + ...(await params.buildProvider({ apiKey: discoveryApiKey })), + apiKey, + }, + }; +} + function buildMissingNonInteractiveModelIdMessage(params: { authChoice: string; providerLabel: string; diff --git a/src/commands/status-all/channels.mattermost-token-summary.test.ts b/src/commands/status-all/channels.mattermost-token-summary.test.ts index a797d028d9f..a012a3a3647 100644 --- a/src/commands/status-all/channels.mattermost-token-summary.test.ts +++ b/src/commands/status-all/channels.mattermost-token-summary.test.ts @@ -37,139 +37,94 @@ function makeMattermostPlugin(): ChannelPlugin { }; } -function makeSlackPlugin(params?: { botToken?: string; appToken?: string }): ChannelPlugin { - return { - id: "slack", - meta: { - id: "slack", - label: "Slack", - selectionLabel: "Slack", - docsPath: "/channels/slack", - blurb: "test", - }, - capabilities: { chatTypes: ["direct"] }, - config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - inspectAccount: () => ({ - name: "Primary", - enabled: true, - botToken: params?.botToken ?? "bot-token", - appToken: params?.appToken ?? "app-token", - }), - resolveAccount: () => ({ - name: "Primary", - enabled: true, - botToken: params?.botToken ?? "bot-token", - appToken: params?.appToken ?? 
"app-token", - }), - isConfigured: () => true, - isEnabled: () => true, - }, - actions: { - listActions: () => ["send"], - }, - }; -} +type TestTable = Awaited>; -function makeUnavailableSlackPlugin(): ChannelPlugin { - return { - id: "slack", - meta: { - id: "slack", - label: "Slack", - selectionLabel: "Slack", - docsPath: "/channels/slack", - blurb: "test", - }, - capabilities: { chatTypes: ["direct"] }, - config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - inspectAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - botToken: "", - appToken: "", - botTokenSource: "config", - appTokenSource: "config", - botTokenStatus: "configured_unavailable", - appTokenStatus: "configured_unavailable", - }), - resolveAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - botToken: "", - appToken: "", - botTokenSource: "config", - appTokenSource: "config", - botTokenStatus: "configured_unavailable", - appTokenStatus: "configured_unavailable", - }), - isConfigured: () => true, - isEnabled: () => true, - }, - actions: { - listActions: () => ["send"], - }, - }; -} - -function makeSourceAwareUnavailablePlugin(): ChannelPlugin { +function makeSlackDirectPlugin(config: ChannelPlugin["config"]): ChannelPlugin { return makeDirectPlugin({ id: "slack", label: "Slack", docsPath: "/channels/slack", - config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - inspectAccount: (cfg) => - (cfg as { marker?: string }).marker === "source" - ? 
{ - name: "Primary", - enabled: true, - configured: true, - botToken: "", - appToken: "", - botTokenSource: "config", - appTokenSource: "config", - botTokenStatus: "configured_unavailable", - appTokenStatus: "configured_unavailable", - } - : { - name: "Primary", - enabled: true, - configured: false, - botToken: "", - appToken: "", - botTokenSource: "none", - appTokenSource: "none", - }, - resolveAccount: () => ({ - name: "Primary", - enabled: true, - botToken: "", - appToken: "", - }), - isConfigured: (account) => Boolean((account as { configured?: boolean }).configured), - isEnabled: () => true, - }, + config, + }); +} + +function createSlackTokenAccount(params?: { botToken?: string; appToken?: string }) { + return { + name: "Primary", + enabled: true, + botToken: params?.botToken ?? "bot-token", + appToken: params?.appToken ?? "app-token", + }; +} + +function createUnavailableSlackTokenAccount() { + return { + name: "Primary", + enabled: true, + configured: true, + botToken: "", + appToken: "", + botTokenSource: "config", + appTokenSource: "config", + botTokenStatus: "configured_unavailable", + appTokenStatus: "configured_unavailable", + }; +} + +function makeSlackPlugin(params?: { botToken?: string; appToken?: string }): ChannelPlugin { + return makeSlackDirectPlugin({ + listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + inspectAccount: () => createSlackTokenAccount(params), + resolveAccount: () => createSlackTokenAccount(params), + isConfigured: () => true, + isEnabled: () => true, + }); +} + +function makeUnavailableSlackPlugin(): ChannelPlugin { + return makeSlackDirectPlugin({ + listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + inspectAccount: () => createUnavailableSlackTokenAccount(), + resolveAccount: () => createUnavailableSlackTokenAccount(), + isConfigured: () => true, + isEnabled: () => true, + }); +} + +function makeSourceAwareUnavailablePlugin(): ChannelPlugin { + return makeSlackDirectPlugin({ + 
listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + inspectAccount: (cfg) => + (cfg as { marker?: string }).marker === "source" + ? createUnavailableSlackTokenAccount() + : { + name: "Primary", + enabled: true, + configured: false, + botToken: "", + appToken: "", + botTokenSource: "none", + appTokenSource: "none", + }, + resolveAccount: () => ({ + name: "Primary", + enabled: true, + botToken: "", + appToken: "", + }), + isConfigured: (account) => Boolean((account as { configured?: boolean }).configured), + isEnabled: () => true, }); } function makeSourceUnavailableResolvedAvailablePlugin(): ChannelPlugin { - return { + return makeDirectPlugin({ id: "discord", - meta: { - id: "discord", - label: "Discord", - selectionLabel: "Discord", - docsPath: "/channels/discord", - blurb: "test", - }, - capabilities: { chatTypes: ["direct"] }, + label: "Discord", + docsPath: "/channels/discord", config: { listAccountIds: () => ["primary"], defaultAccountId: () => "primary", @@ -199,10 +154,7 @@ function makeSourceUnavailableResolvedAvailablePlugin(): ChannelPlugin { isConfigured: (account) => Boolean((account as { configured?: boolean }).configured), isEnabled: () => true, }, - actions: { - listActions: () => ["send"], - }, - }; + }); } function makeHttpSlackUnavailablePlugin(): ChannelPlugin { @@ -263,64 +215,76 @@ function makeTokenPlugin(): ChannelPlugin { }); } +async function buildTestTable( + plugins: ChannelPlugin[], + params?: { cfg?: Record; sourceConfig?: Record }, +) { + vi.mocked(listChannelPlugins).mockReturnValue(plugins); + return await buildChannelsTable((params?.cfg ?? 
{ channels: {} }) as never, { + showSecrets: false, + sourceConfig: params?.sourceConfig as never, + }); +} + +function expectTableRow( + table: TestTable, + params: { id: string; state: string; detailContains?: string; detailEquals?: string }, +) { + const row = table.rows.find((entry) => entry.id === params.id); + expect(row).toBeDefined(); + expect(row?.state).toBe(params.state); + if (params.detailContains) { + expect(row?.detail).toContain(params.detailContains); + } + if (params.detailEquals) { + expect(row?.detail).toBe(params.detailEquals); + } + return row; +} + +function expectTableDetailRows( + table: TestTable, + title: string, + rows: Array>, +) { + const detail = table.details.find((entry) => entry.title === title); + expect(detail).toBeDefined(); + expect(detail?.rows).toEqual(rows); +} + describe("buildChannelsTable - mattermost token summary", () => { it("does not require appToken for mattermost accounts", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeMattermostPlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, - }); - - const mattermostRow = table.rows.find((row) => row.id === "mattermost"); - expect(mattermostRow).toBeDefined(); - expect(mattermostRow?.state).toBe("ok"); + const table = await buildTestTable([makeMattermostPlugin()]); + const mattermostRow = expectTableRow(table, { id: "mattermost", state: "ok" }); expect(mattermostRow?.detail).not.toContain("need bot+app"); }); it("keeps bot+app requirement when both fields exist", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([ - makeSlackPlugin({ botToken: "bot-token", appToken: "" }), - ]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, - }); - - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("need bot+app"); + const table = await 
buildTestTable([makeSlackPlugin({ botToken: "bot-token", appToken: "" })]); + expectTableRow(table, { id: "slack", state: "warn", detailContains: "need bot+app" }); }); it("reports configured-but-unavailable Slack credentials as warn", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeUnavailableSlackPlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, + const table = await buildTestTable([makeUnavailableSlackPlugin()]); + expectTableRow(table, { + id: "slack", + state: "warn", + detailContains: "unavailable in this command path", }); - - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("unavailable in this command path"); }); it("preserves unavailable credential state from the source config snapshot", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeSourceAwareUnavailablePlugin()]); - - const table = await buildChannelsTable({ marker: "resolved", channels: {} } as never, { - showSecrets: false, - sourceConfig: { marker: "source", channels: {} } as never, + const table = await buildTestTable([makeSourceAwareUnavailablePlugin()], { + cfg: { marker: "resolved", channels: {} }, + sourceConfig: { marker: "source", channels: {} }, }); - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("unavailable in this command path"); - - const slackDetails = table.details.find((detail) => detail.title === "Slack accounts"); - expect(slackDetails).toBeDefined(); - expect(slackDetails?.rows).toEqual([ + expectTableRow(table, { + id: "slack", + state: "warn", + detailContains: "unavailable in this command path", + }); + expectTableDetailRows(table, "Slack accounts", [ { Account: "primary (Primary)", Notes: "bot:config · app:config · secret unavailable in 
this command path", @@ -330,21 +294,13 @@ describe("buildChannelsTable - mattermost token summary", () => { }); it("treats status-only available credentials as resolved", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeSourceUnavailableResolvedAvailablePlugin()]); - - const table = await buildChannelsTable({ marker: "resolved", channels: {} } as never, { - showSecrets: false, - sourceConfig: { marker: "source", channels: {} } as never, + const table = await buildTestTable([makeSourceUnavailableResolvedAvailablePlugin()], { + cfg: { marker: "resolved", channels: {} }, + sourceConfig: { marker: "source", channels: {} }, }); - const discordRow = table.rows.find((row) => row.id === "discord"); - expect(discordRow).toBeDefined(); - expect(discordRow?.state).toBe("ok"); - expect(discordRow?.detail).toBe("configured"); - - const discordDetails = table.details.find((detail) => detail.title === "Discord accounts"); - expect(discordDetails).toBeDefined(); - expect(discordDetails?.rows).toEqual([ + expectTableRow(table, { id: "discord", state: "ok", detailEquals: "configured" }); + expectTableDetailRows(table, "Discord accounts", [ { Account: "primary (Primary)", Notes: "token:config", @@ -354,20 +310,13 @@ describe("buildChannelsTable - mattermost token summary", () => { }); it("treats Slack HTTP signing-secret availability as required config", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeHttpSlackUnavailablePlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, + const table = await buildTestTable([makeHttpSlackUnavailablePlugin()]); + expectTableRow(table, { + id: "slack", + state: "warn", + detailContains: "configured http credentials unavailable", }); - - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("configured http credentials unavailable"); - - const 
slackDetails = table.details.find((detail) => detail.title === "Slack accounts"); - expect(slackDetails).toBeDefined(); - expect(slackDetails?.rows).toEqual([ + expectTableDetailRows(table, "Slack accounts", [ { Account: "primary (Primary)", Notes: "bot:config · signing:config · secret unavailable in this command path", @@ -377,15 +326,7 @@ describe("buildChannelsTable - mattermost token summary", () => { }); it("still reports single-token channels as ok", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeTokenPlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, - }); - - const tokenRow = table.rows.find((row) => row.id === "token-only"); - expect(tokenRow).toBeDefined(); - expect(tokenRow?.state).toBe("ok"); - expect(tokenRow?.detail).toContain("token"); + const table = await buildTestTable([makeTokenPlugin()]); + expectTableRow(table, { id: "token-only", state: "ok", detailContains: "token" }); }); }); diff --git a/src/commands/status.service-summary.test.ts b/src/commands/status.service-summary.test.ts index f1a688ea092..f730137a111 100644 --- a/src/commands/status.service-summary.test.ts +++ b/src/commands/status.service-summary.test.ts @@ -1,5 +1,6 @@ import { describe, expect, it, vi } from "vitest"; import type { GatewayService } from "../daemon/service.js"; +import type { GatewayServiceEnvArgs } from "../daemon/service.js"; import { readServiceStatusSummary } from "./status.service-summary.js"; function createService(overrides: Partial): GatewayService { @@ -57,4 +58,41 @@ describe("readServiceStatusSummary", () => { expect(summary.externallyManaged).toBe(false); expect(summary.loadedText).toBe("disabled"); }); + + it("passes command environment to runtime and loaded checks", async () => { + const isLoaded = vi.fn(async ({ env }: GatewayServiceEnvArgs) => { + return env?.OPENCLAW_GATEWAY_PORT === "18789"; + }); + const readRuntime = vi.fn(async (env?: NodeJS.ProcessEnv) => ({ + status: 
env?.OPENCLAW_GATEWAY_PORT === "18789" ? ("running" as const) : ("unknown" as const), + })); + + const summary = await readServiceStatusSummary( + createService({ + isLoaded, + readCommand: vi.fn(async () => ({ + programArguments: ["openclaw", "gateway", "run", "--port", "18789"], + environment: { OPENCLAW_GATEWAY_PORT: "18789" }, + })), + readRuntime, + }), + "Daemon", + ); + + expect(isLoaded).toHaveBeenCalledWith( + expect.objectContaining({ + env: expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "18789", + }), + }), + ); + expect(readRuntime).toHaveBeenCalledWith( + expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "18789", + }), + ); + expect(summary.installed).toBe(true); + expect(summary.loaded).toBe(true); + expect(summary.runtime).toMatchObject({ status: "running" }); + }); }); diff --git a/src/commands/status.service-summary.ts b/src/commands/status.service-summary.ts index d750fe7eb02..cc366c2c7ba 100644 --- a/src/commands/status.service-summary.ts +++ b/src/commands/status.service-summary.ts @@ -16,10 +16,16 @@ export async function readServiceStatusSummary( fallbackLabel: string, ): Promise { try { - const [loaded, runtime, command] = await Promise.all([ - service.isLoaded({ env: process.env }).catch(() => false), - service.readRuntime(process.env).catch(() => undefined), - service.readCommand(process.env).catch(() => null), + const command = await service.readCommand(process.env).catch(() => null); + const serviceEnv = command?.environment + ? 
({ + ...process.env, + ...command.environment, + } satisfies NodeJS.ProcessEnv) + : process.env; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env: serviceEnv }).catch(() => false), + service.readRuntime(serviceEnv).catch(() => undefined), ]); const managedByOpenClaw = command != null; const externallyManaged = !managedByOpenClaw && runtime?.status === "running"; diff --git a/src/config/channel-capabilities.test.ts b/src/config/channel-capabilities.test.ts index 423cc3e2f74..75083317e82 100644 --- a/src/config/channel-capabilities.test.ts +++ b/src/config/channel-capabilities.test.ts @@ -125,6 +125,23 @@ describe("resolveChannelCapabilities", () => { }), ).toBeUndefined(); }); + + it("handles Slack object-format capabilities gracefully", () => { + const cfg = { + channels: { + slack: { + capabilities: { interactiveReplies: true }, + }, + }, + } as unknown as Partial; + + expect( + resolveChannelCapabilities({ + cfg, + channel: "slack", + }), + ).toBeUndefined(); + }); }); const createStubPlugin = (id: string): ChannelPlugin => ({ diff --git a/src/config/channel-capabilities.ts b/src/config/channel-capabilities.ts index 0e66f755e3b..b7edc354596 100644 --- a/src/config/channel-capabilities.ts +++ b/src/config/channel-capabilities.ts @@ -2,9 +2,10 @@ import { normalizeChannelId } from "../channels/plugins/index.js"; import { resolveAccountEntry } from "../routing/account-lookup.js"; import { normalizeAccountId } from "../routing/session-key.js"; import type { OpenClawConfig } from "./config.js"; +import type { SlackCapabilitiesConfig } from "./types.slack.js"; import type { TelegramCapabilitiesConfig } from "./types.telegram.js"; -type CapabilitiesConfig = TelegramCapabilitiesConfig; +type CapabilitiesConfig = TelegramCapabilitiesConfig | SlackCapabilitiesConfig; const isStringArray = (value: unknown): value is string[] => Array.isArray(value) && value.every((entry) => typeof entry === "string"); diff --git 
a/src/config/config.pruning-defaults.test.ts b/src/config/config.pruning-defaults.test.ts index f2f66ce6bac..92d46f4ab75 100644 --- a/src/config/config.pruning-defaults.test.ts +++ b/src/config/config.pruning-defaults.test.ts @@ -15,6 +15,22 @@ async function writeConfigForTest(home: string, config: unknown): Promise ); } +async function loadConfigForHome(config: unknown) { + return await withTempHome(async (home) => { + await writeConfigForTest(home, config); + return loadConfig(); + }); +} + +function expectAnthropicPruningDefaults( + cfg: ReturnType, + heartbeatEvery = "30m", +) { + expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("cache-ttl"); + expect(cfg.agents?.defaults?.contextPruning?.ttl).toBe("1h"); + expect(cfg.agents?.defaults?.heartbeat?.every).toBe(heartbeatEvery); +} + describe("config pruning defaults", () => { it("does not enable contextPruning by default", async () => { await withEnvAsync({ ANTHROPIC_API_KEY: "", ANTHROPIC_OAUTH_TOKEN: "" }, async () => { @@ -29,105 +45,103 @@ describe("config pruning defaults", () => { }); it("enables cache-ttl pruning + 1h heartbeat for Anthropic OAuth", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:me": { provider: "anthropic", mode: "oauth", email: "me@example.com" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:me": { provider: "anthropic", mode: "oauth", email: "me@example.com" }, }, - agents: { defaults: {} }, - }); - - const cfg = loadConfig(); - - expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("cache-ttl"); - expect(cfg.agents?.defaults?.contextPruning?.ttl).toBe("1h"); - expect(cfg.agents?.defaults?.heartbeat?.every).toBe("1h"); + }, + agents: { defaults: {} }, }); + + expectAnthropicPruningDefaults(cfg, "1h"); }); it("enables cache-ttl pruning + 1h cache TTL for Anthropic API keys", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - 
auth: { - profiles: { - "anthropic:api": { provider: "anthropic", mode: "api_key" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, }, - }); - - const cfg = loadConfig(); - - expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("cache-ttl"); - expect(cfg.agents?.defaults?.contextPruning?.ttl).toBe("1h"); - expect(cfg.agents?.defaults?.heartbeat?.every).toBe("30m"); - expect( - cfg.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.params?.cacheRetention, - ).toBe("short"); + }, }); + + expectAnthropicPruningDefaults(cfg); + expect( + cfg.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.params?.cacheRetention, + ).toBe("short"); + }); + + it("adds cacheRetention defaults for dated Anthropic primary model refs", async () => { + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, + }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-sonnet-4-20250514" }, + }, + }, + }); + + expectAnthropicPruningDefaults(cfg); + expect( + cfg.agents?.defaults?.models?.["anthropic/claude-sonnet-4-20250514"]?.params?.cacheRetention, + ).toBe("short"); }); it("adds default cacheRetention for Anthropic Claude models on Bedrock", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:api": { provider: "anthropic", mode: "api_key" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, }, - agents: { - defaults: { - model: { primary: "amazon-bedrock/us.anthropic.claude-opus-4-6-v1" }, - }, + }, + agents: { + defaults: { + model: { primary: "amazon-bedrock/us.anthropic.claude-opus-4-6-v1" 
}, }, - }); - - const cfg = loadConfig(); - - expect( - cfg.agents?.defaults?.models?.["amazon-bedrock/us.anthropic.claude-opus-4-6-v1"]?.params - ?.cacheRetention, - ).toBe("short"); + }, }); + + expect( + cfg.agents?.defaults?.models?.["amazon-bedrock/us.anthropic.claude-opus-4-6-v1"]?.params + ?.cacheRetention, + ).toBe("short"); }); it("does not add default cacheRetention for non-Anthropic Bedrock models", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:api": { provider: "anthropic", mode: "api_key" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, }, - agents: { - defaults: { - model: { primary: "amazon-bedrock/amazon.nova-micro-v1:0" }, - }, + }, + agents: { + defaults: { + model: { primary: "amazon-bedrock/amazon.nova-micro-v1:0" }, }, - }); - - const cfg = loadConfig(); - - expect( - cfg.agents?.defaults?.models?.["amazon-bedrock/amazon.nova-micro-v1:0"]?.params - ?.cacheRetention, - ).toBeUndefined(); + }, }); + + expect( + cfg.agents?.defaults?.models?.["amazon-bedrock/amazon.nova-micro-v1:0"]?.params + ?.cacheRetention, + ).toBeUndefined(); }); it("does not override explicit contextPruning mode", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { agents: { defaults: { contextPruning: { mode: "off" } } } }); - - const cfg = loadConfig(); - - expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("off"); + const cfg = await loadConfigForHome({ + agents: { defaults: { contextPruning: { mode: "off" } } }, }); + + expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("off"); }); }); diff --git a/src/config/config.talk-validation.test.ts b/src/config/config.talk-validation.test.ts index cb948d75c75..d2fb463613c 100644 --- a/src/config/config.talk-validation.test.ts +++ b/src/config/config.talk-validation.test.ts @@ -8,38 +8,42 @@ describe("talk config validation 
fail-closed behavior", () => { vi.restoreAllMocks(); }); + async function expectInvalidTalkConfig(config: unknown, messagePattern: RegExp) { + await withTempHomeConfig(config, async () => { + const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + + let thrown: unknown; + try { + loadConfig(); + } catch (error) { + thrown = error; + } + + expect(thrown).toBeInstanceOf(Error); + expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); + expect((thrown as Error).message).toMatch(messagePattern); + expect(consoleSpy).toHaveBeenCalled(); + }); + } + it.each([ ["boolean", true], ["string", "1500"], ["float", 1500.5], ])("rejects %s talk.silenceTimeoutMs during config load", async (_label, value) => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { silenceTimeoutMs: value, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/silenceTimeoutMs|talk/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /silenceTimeoutMs|talk/i, ); }); it("rejects talk.provider when it does not match talk.providers during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -51,26 +55,12 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as 
Error).message).toMatch(/talk\.provider|talk\.providers|acme/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|talk\.providers|acme/i, ); }); it("rejects multi-provider talk config without talk.provider during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -84,21 +74,7 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|required/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|required/i, ); }); }); diff --git a/src/config/paths.test.ts b/src/config/paths.test.ts index b8afe7674cb..6d2ffcfaf08 100644 --- a/src/config/paths.test.ts +++ b/src/config/paths.test.ts @@ -1,7 +1,7 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; import { resolveDefaultConfigCandidates, resolveConfigPathCandidate, @@ -37,15 +37,6 @@ describe("oauth paths", () => { }); describe("state + config path candidates", () => { - async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { - const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); - try { - await run(root); - } finally { - await fs.rm(root, { recursive: true, force: true }); - } - } - function expectOpenClawHomeDefaults(env: NodeJS.ProcessEnv): void { const configuredHome = env.OPENCLAW_HOME; if (!configuredHome) { @@ -107,7 +98,7 @@ describe("state + config path candidates", () => { }); it("prefers ~/.openclaw when it exists and legacy dir 
is missing", async () => { - await withTempRoot("openclaw-state-", async (root) => { + await withTempDir({ prefix: "openclaw-state-" }, async (root) => { const newDir = path.join(root, ".openclaw"); await fs.mkdir(newDir, { recursive: true }); const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root); @@ -116,7 +107,7 @@ describe("state + config path candidates", () => { }); it("falls back to existing legacy state dir when ~/.openclaw is missing", async () => { - await withTempRoot("openclaw-state-legacy-", async (root) => { + await withTempDir({ prefix: "openclaw-state-legacy-" }, async (root) => { const legacyDir = path.join(root, ".clawdbot"); await fs.mkdir(legacyDir, { recursive: true }); const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root); @@ -125,7 +116,7 @@ describe("state + config path candidates", () => { }); it("CONFIG_PATH prefers existing config when present", async () => { - await withTempRoot("openclaw-config-", async (root) => { + await withTempDir({ prefix: "openclaw-config-" }, async (root) => { const legacyDir = path.join(root, ".openclaw"); await fs.mkdir(legacyDir, { recursive: true }); const legacyPath = path.join(legacyDir, "openclaw.json"); @@ -137,7 +128,7 @@ describe("state + config path candidates", () => { }); it("respects state dir overrides when config is missing", async () => { - await withTempRoot("openclaw-config-override-", async (root) => { + await withTempDir({ prefix: "openclaw-config-override-" }, async (root) => { const legacyDir = path.join(root, ".openclaw"); await fs.mkdir(legacyDir, { recursive: true }); const legacyConfig = path.join(legacyDir, "openclaw.json"); diff --git a/src/config/paths.ts b/src/config/paths.ts index 5f9afc85a46..84c27749bcf 100644 --- a/src/config/paths.ts +++ b/src/config/paths.ts @@ -1,7 +1,7 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; -import { expandHomePrefix, resolveRequiredHomeDir } from "../infra/home-dir.js"; +import { 
resolveHomeRelativePath, resolveRequiredHomeDir } from "../infra/home-dir.js"; import type { OpenClawConfig } from "./types.js"; /** @@ -93,19 +93,7 @@ function resolveUserPath( env: NodeJS.ProcessEnv = process.env, homedir: () => string = envHomedir(env), ): string { - const trimmed = input.trim(); - if (!trimmed) { - return trimmed; - } - if (trimmed.startsWith("~")) { - const expanded = expandHomePrefix(trimmed, { - home: resolveRequiredHomeDir(env, homedir), - env, - homedir, - }); - return path.resolve(expanded); - } - return path.resolve(trimmed); + return resolveHomeRelativePath(input, { env, homedir }); } export const STATE_DIR = resolveStateDir(); diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts index 7038c1effd9..215a17d77d8 100644 --- a/src/config/schema.help.ts +++ b/src/config/schema.help.ts @@ -1431,6 +1431,8 @@ export const FIELD_HELP: Record = { "Optional Slack user token for workflows requiring user-context API access beyond bot permissions. Use sparingly and audit scopes because this token can carry broader authority.", "channels.slack.userTokenReadOnly": "When true, treat configured Slack user token usage as read-only helper behavior where possible. Keep enabled if you only need supplemental reads without user-context writes.", + "channels.slack.capabilities.interactiveReplies": + "Enable agent-authored Slack interactive reply directives (`[[slack_buttons: ...]]`, `[[slack_select: ...]]`). 
Default: false.", "channels.mattermost.configWrites": "Allow Mattermost to write config in response to channel events/commands (default: true).", "channels.discord.configWrites": diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts index 774597463a8..9b1fdb73445 100644 --- a/src/config/schema.labels.ts +++ b/src/config/schema.labels.ts @@ -813,6 +813,7 @@ export const FIELD_LABELS: Record = { "channels.slack.appToken": "Slack App Token", "channels.slack.userToken": "Slack User Token", "channels.slack.userTokenReadOnly": "Slack User Token Read Only", + "channels.slack.capabilities.interactiveReplies": "Slack Interactive Replies", "channels.slack.streaming": "Slack Streaming Mode", "channels.slack.nativeStreaming": "Slack Native Streaming", "channels.slack.streamMode": "Slack Stream Mode (Legacy)", diff --git a/src/config/sessions/sessions.test.ts b/src/config/sessions/sessions.test.ts index 6866d6c10c1..2773b6d0fe7 100644 --- a/src/config/sessions/sessions.test.ts +++ b/src/config/sessions/sessions.test.ts @@ -283,18 +283,25 @@ describe("session store lock (Promise chain mutex)", () => { describe("appendAssistantMessageToSessionTranscript", () => { const fixture = useTempSessionsFixture("transcript-test-"); + const sessionId = "test-session-id"; + const sessionKey = "test-session"; + + function writeTranscriptStore() { + fs.writeFileSync( + fixture.storePath(), + JSON.stringify({ + [sessionKey]: { + sessionId, + chatType: "direct", + channel: "discord", + }, + }), + "utf-8", + ); + } it("creates transcript file and appends message for valid session", async () => { - const sessionId = "test-session-id"; - const sessionKey = "test-session"; - const store = { - [sessionKey]: { - sessionId, - chatType: "direct", - channel: "discord", - }, - }; - fs.writeFileSync(fixture.storePath(), JSON.stringify(store), "utf-8"); + writeTranscriptStore(); const result = await appendAssistantMessageToSessionTranscript({ sessionKey, @@ -326,16 +333,7 @@ 
describe("appendAssistantMessageToSessionTranscript", () => { }); it("does not append a duplicate delivery mirror for the same idempotency key", async () => { - const sessionId = "test-session-id"; - const sessionKey = "test-session"; - const store = { - [sessionKey]: { - sessionId, - chatType: "direct", - channel: "discord", - }, - }; - fs.writeFileSync(fixture.storePath(), JSON.stringify(store), "utf-8"); + writeTranscriptStore(); await appendAssistantMessageToSessionTranscript({ sessionKey, @@ -360,16 +358,7 @@ describe("appendAssistantMessageToSessionTranscript", () => { }); it("ignores malformed transcript lines when checking mirror idempotency", async () => { - const sessionId = "test-session-id"; - const sessionKey = "test-session"; - const store = { - [sessionKey]: { - sessionId, - chatType: "direct", - channel: "discord", - }, - }; - fs.writeFileSync(fixture.storePath(), JSON.stringify(store), "utf-8"); + writeTranscriptStore(); const sessionFile = resolveSessionTranscriptPathInDir(sessionId, fixture.sessionsDir()); fs.writeFileSync( diff --git a/src/config/sessions/targets.test.ts b/src/config/sessions/targets.test.ts index 720cc3e892e..43674233a3a 100644 --- a/src/config/sessions/targets.test.ts +++ b/src/config/sessions/targets.test.ts @@ -40,6 +40,14 @@ function createCustomRootCfg(customRoot: string, defaultAgentId = "ops"): OpenCl }; } +async function resolveTargetsForCustomRoot(home: string, agentIds: string[]) { + const customRoot = path.join(home, "custom-state"); + const storePaths = await createAgentSessionStores(customRoot, agentIds); + const cfg = createCustomRootCfg(customRoot); + const targets = await resolveAllAgentSessionStoreTargets(cfg, { env: process.env }); + return { storePaths, targets }; +} + function expectTargetsToContainStores( targets: Array<{ agentId: string; storePath: string }>, stores: Record, @@ -152,11 +160,7 @@ describe("resolveAllAgentSessionStoreTargets", () => { it("discovers retired agent stores under a configured 
custom session root", async () => { await withTempHome(async (home) => { - const customRoot = path.join(home, "custom-state"); - const storePaths = await createAgentSessionStores(customRoot, ["ops", "retired"]); - const cfg = createCustomRootCfg(customRoot); - - const targets = await resolveAllAgentSessionStoreTargets(cfg, { env: process.env }); + const { storePaths, targets } = await resolveTargetsForCustomRoot(home, ["ops", "retired"]); expectTargetsToContainStores(targets, storePaths); expect(targets.filter((target) => target.storePath === storePaths.ops)).toHaveLength(1); @@ -165,11 +169,10 @@ describe("resolveAllAgentSessionStoreTargets", () => { it("keeps the actual on-disk store path for discovered retired agents", async () => { await withTempHome(async (home) => { - const customRoot = path.join(home, "custom-state"); - const storePaths = await createAgentSessionStores(customRoot, ["ops", "Retired Agent"]); - const cfg = createCustomRootCfg(customRoot); - - const targets = await resolveAllAgentSessionStoreTargets(cfg, { env: process.env }); + const { storePaths, targets } = await resolveTargetsForCustomRoot(home, [ + "ops", + "Retired Agent", + ]); expect(targets).toEqual( expect.arrayContaining([ diff --git a/src/config/types.browser.ts b/src/config/types.browser.ts index 57d036bd88c..5f8e28a0ebe 100644 --- a/src/config/types.browser.ts +++ b/src/config/types.browser.ts @@ -4,7 +4,7 @@ export type BrowserProfileConfig = { /** CDP URL for this profile (use for remote Chrome). */ cdpUrl?: string; /** Profile driver (default: openclaw). */ - driver?: "openclaw" | "clawd" | "extension"; + driver?: "openclaw" | "clawd" | "extension" | "existing-session"; /** If true, never launch a browser for this profile; only attach. Falls back to browser.attachOnly. */ attachOnly?: boolean; /** Profile color (hex). Auto-assigned at creation. 
*/ diff --git a/src/config/types.slack.ts b/src/config/types.slack.ts index 96abe2641d6..a90f1ed5020 100644 --- a/src/config/types.slack.ts +++ b/src/config/types.slack.ts @@ -47,6 +47,11 @@ export type SlackChannelConfig = { export type SlackReactionNotificationMode = "off" | "own" | "all" | "allowlist"; export type SlackStreamingMode = "off" | "partial" | "block" | "progress"; export type SlackLegacyStreamMode = "replace" | "status_final" | "append"; +export type SlackCapabilitiesConfig = + | string[] + | { + interactiveReplies?: boolean; + }; export type SlackActionConfig = { reactions?: boolean; @@ -89,7 +94,7 @@ export type SlackAccountConfig = { /** Slack Events API webhook path (default: /slack/events). */ webhookPath?: string; /** Optional provider capability tags used for agent/runtime guidance. */ - capabilities?: string[]; + capabilities?: SlackCapabilitiesConfig; /** Markdown formatting overrides (tables). */ markdown?: MarkdownConfig; /** Override native command registration for Slack (bool or "auto"). 
*/ diff --git a/src/config/zod-schema.providers-core.ts b/src/config/zod-schema.providers-core.ts index 47f76614dd8..ced89bd8512 100644 --- a/src/config/zod-schema.providers-core.ts +++ b/src/config/zod-schema.providers-core.ts @@ -59,6 +59,14 @@ const TelegramCapabilitiesSchema = z.union([ }) .strict(), ]); +const SlackCapabilitiesSchema = z.union([ + z.array(z.string()), + z + .object({ + interactiveReplies: z.boolean().optional(), + }) + .strict(), +]); export const TelegramTopicSchema = z .object({ @@ -831,7 +839,7 @@ export const SlackAccountSchema = z mode: z.enum(["socket", "http"]).optional(), signingSecret: SecretInputSchema.optional().register(sensitive), webhookPath: z.string().optional(), - capabilities: z.array(z.string()).optional(), + capabilities: SlackCapabilitiesSchema.optional(), markdown: MarkdownConfigSchema, enabled: z.boolean().optional(), commands: ProviderCommandsSchema, diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index 0064afddd20..8c78d049d0e 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -360,15 +360,23 @@ export const OpenClawSchema = z cdpPort: z.number().int().min(1).max(65535).optional(), cdpUrl: z.string().optional(), driver: z - .union([z.literal("openclaw"), z.literal("clawd"), z.literal("extension")]) + .union([ + z.literal("openclaw"), + z.literal("clawd"), + z.literal("extension"), + z.literal("existing-session"), + ]) .optional(), attachOnly: z.boolean().optional(), color: HexColorSchema, }) .strict() - .refine((value) => value.cdpPort || value.cdpUrl, { - message: "Profile must set cdpPort or cdpUrl", - }), + .refine( + (value) => value.driver === "existing-session" || value.cdpPort || value.cdpUrl, + { + message: "Profile must set cdpPort or cdpUrl", + }, + ), ) .optional(), extraArgs: z.array(z.string()).optional(), diff --git a/src/cron/isolated-agent.lane.test.ts b/src/cron/isolated-agent.lane.test.ts index 5d26faff327..3790c5e511a 100644 --- 
a/src/cron/isolated-agent.lane.test.ts +++ b/src/cron/isolated-agent.lane.test.ts @@ -1,6 +1,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -9,27 +10,6 @@ import { writeSessionStoreEntries, } from "./isolated-agent.test-harness.js"; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - function lastEmbeddedLane(): string | undefined { const calls = vi.mocked(runEmbeddedPiAgent).mock.calls; expect(calls.length).toBeGreaterThan(0); @@ -45,11 +25,11 @@ async function runLaneCase(home: string, lane?: string) { lastTo: "", }, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob({ kind: "agentTurn", message: "do it", deliver: false }), message: "do it", sessionKey: "cron:job-1", diff --git a/src/cron/isolated-agent.model-formatting.test.ts b/src/cron/isolated-agent.model-formatting.test.ts index e78f251dc8b..b09a9db5ea1 100644 --- a/src/cron/isolated-agent.model-formatting.test.ts +++ b/src/cron/isolated-agent.model-formatting.test.ts @@ -2,6 +2,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { runEmbeddedPiAgent } from 
"../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -13,27 +14,6 @@ import type { CronJob } from "./types.js"; const withTempHome = withTempCronHome; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - /** * Extract the provider and model from the last runEmbeddedPiAgent call. */ @@ -44,6 +24,8 @@ function lastEmbeddedCall(): { provider?: string; model?: string } { } const DEFAULT_MESSAGE = "do it"; +const DEFAULT_PROVIDER = "anthropic"; +const DEFAULT_MODEL = "claude-opus-4-5"; type TurnOptions = { cfgOverrides?: Parameters[2]; @@ -62,7 +44,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { }, ...options.storeEntries, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const jobPayload = options.jobPayload ?? { kind: "agentTurn" as const, @@ -72,7 +54,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { const res = await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath, options.cfgOverrides), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob(jobPayload), message: DEFAULT_MESSAGE, sessionKey: options.sessionKey ?? 
"cron:job-1", @@ -93,6 +75,50 @@ async function runTurn(home: string, options: TurnOptions = {}) { return { res, call: lastEmbeddedCall() }; } +function expectSelectedModel( + call: { provider?: string; model?: string }, + params: { provider: string; model: string }, +) { + expect(call.provider).toBe(params.provider); + expect(call.model).toBe(params.model); +} + +function expectDefaultSelectedModel(call: { provider?: string; model?: string }) { + expectSelectedModel(call, { provider: DEFAULT_PROVIDER, model: DEFAULT_MODEL }); +} + +function createCronSessionOverrideStore( + overrides: Record, + sessionId = "existing-session", +) { + return { + "agent:main:cron:job-1": { + sessionId, + updatedAt: Date.now(), + ...overrides, + }, + }; +} + +async function expectTurnModel( + home: string, + options: TurnOptions, + expected: { provider: string; model: string }, +) { + const { res, call } = await runTurn(home, options); + expect(res.status).toBe("ok"); + expectSelectedModel(call, expected); +} + +async function expectInvalidModel(home: string, model: string) { + const { res } = await runErrorTurn(home, { + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model }, + }); + expect(res.status).toBe("error"); + expect(res.error).toMatch(/invalid model/i); + expect(vi.mocked(runEmbeddedPiAgent)).not.toHaveBeenCalled(); +} + // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- @@ -119,16 +145,17 @@ describe("cron model formatting and precedence edge cases", () => { it("handles leading/trailing whitespace in model string", async () => { await withTempHome(async (home) => { - const { res, call } = await runTurn(home, { - jobPayload: { - kind: "agentTurn", - message: DEFAULT_MESSAGE, - model: " openai/gpt-4.1-mini ", + await expectTurnModel( + home, + { + jobPayload: { + kind: "agentTurn", + message: DEFAULT_MESSAGE, + model: " openai/gpt-4.1-mini ", + }, 
}, - }); - expect(res.status).toBe("ok"); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); @@ -149,38 +176,29 @@ describe("cron model formatting and precedence edge cases", () => { it("rejects model with trailing slash (empty model name)", async () => { await withTempHome(async (home) => { - const { res } = await runErrorTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: "openai/" }, - }); - expect(res.status).toBe("error"); - expect(res.error).toMatch(/invalid model/i); - expect(vi.mocked(runEmbeddedPiAgent)).not.toHaveBeenCalled(); + await expectInvalidModel(home, "openai/"); }); }); it("rejects model with leading slash (empty provider)", async () => { await withTempHome(async (home) => { - const { res } = await runErrorTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: "/gpt-4.1-mini" }, - }); - expect(res.status).toBe("error"); - expect(res.error).toMatch(/invalid model/i); - expect(vi.mocked(runEmbeddedPiAgent)).not.toHaveBeenCalled(); + await expectInvalidModel(home, "/gpt-4.1-mini"); }); }); it("normalizes provider casing", async () => { await withTempHome(async (home) => { - const { res, call } = await runTurn(home, { - jobPayload: { - kind: "agentTurn", - message: DEFAULT_MESSAGE, - model: "OpenAI/gpt-4.1-mini", + await expectTurnModel( + home, + { + jobPayload: { + kind: "agentTurn", + message: DEFAULT_MESSAGE, + model: "OpenAI/gpt-4.1-mini", + }, }, - }); - expect(res.status).toBe("ok"); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); @@ -237,43 +255,39 @@ describe("cron model formatting and precedence edge cases", () => { // No model in job payload. Session store has openai override. // Provider must be openai, not the default anthropic. 
await withTempHome(async (home) => { - const { call } = await runTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), + await expectTurnModel( + home, + { + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, + storeEntries: createCronSessionOverrideStore({ providerOverride: "openai", modelOverride: "gpt-4.1-mini", - }, + }), }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); it("job payload model wins over conflicting session override", async () => { // Job payload says anthropic. Session says openai. Job must win. await withTempHome(async (home) => { - const { call } = await runTurn(home, { - jobPayload: { - kind: "agentTurn", - message: DEFAULT_MESSAGE, - model: "anthropic/claude-sonnet-4-5", - deliver: false, - }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), + await expectTurnModel( + home, + { + jobPayload: { + kind: "agentTurn", + message: DEFAULT_MESSAGE, + model: "anthropic/claude-sonnet-4-5", + deliver: false, + }, + storeEntries: createCronSessionOverrideStore({ providerOverride: "openai", modelOverride: "gpt-4.1-mini", - }, + }), }, - }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-sonnet-4-5"); + { provider: "anthropic", model: "claude-sonnet-4-5" }, + ); }); }); @@ -282,9 +296,7 @@ describe("cron model formatting and precedence edge cases", () => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }); - // makeCfg default is anthropic/claude-opus-4-5 - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); }); @@ -310,24 +322,19 @@ describe("cron model 
formatting and precedence edge cases", () => { // Step 2: No job model, session store says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: "gpt-4.1-mini", - }, - }, + storeEntries: createCronSessionOverrideStore({ + providerOverride: "openai", + modelOverride: "gpt-4.1-mini", + }), }); - expect(step2.call.provider).toBe("openai"); - expect(step2.call.model).toBe("gpt-4.1-mini"); + expectSelectedModel(step2.call, { provider: "openai", model: "gpt-4.1-mini" }); // Step 3: Job payload says anthropic, session store still says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step3 = await runTurn(home, { jobPayload: { kind: "agentTurn", @@ -335,17 +342,12 @@ describe("cron model formatting and precedence edge cases", () => { model: "anthropic/claude-opus-4-5", deliver: false, }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: "gpt-4.1-mini", - }, - }, + storeEntries: createCronSessionOverrideStore({ + providerOverride: "openai", + modelOverride: "gpt-4.1-mini", + }), }); - expect(step3.call.provider).toBe("anthropic"); - expect(step3.call.model).toBe("claude-opus-4-5"); + expectSelectedModel(step3.call, { provider: "anthropic", model: "claude-opus-4-5" }); }); }); @@ -365,12 +367,11 @@ describe("cron model formatting and precedence edge cases", () => { // Run 2: no override — must revert to default anthropic vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const r2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: 
DEFAULT_MESSAGE, deliver: false }, }); - expect(r2.call.provider).toBe("anthropic"); - expect(r2.call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(r2.call); }); }); }); @@ -383,19 +384,20 @@ describe("cron model formatting and precedence edge cases", () => { // The stored modelOverride/providerOverride must still be read and applied // (resolveCronSession spreads ...entry before overriding core fields). await withTempHome(async (home) => { - const { call } = await runTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "old-session-id", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: "gpt-4.1-mini", - }, + await expectTurnModel( + home, + { + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, + storeEntries: createCronSessionOverrideStore( + { + providerOverride: "openai", + modelOverride: "gpt-4.1-mini", + }, + "old-session-id", + ), }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); @@ -403,16 +405,9 @@ describe("cron model formatting and precedence edge cases", () => { await withTempHome(async (home) => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "old-session-id", - updatedAt: Date.now(), - // No providerOverride or modelOverride - }, - }, + storeEntries: createCronSessionOverrideStore({}, "old-session-id"), }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); }); @@ -425,8 +420,7 @@ describe("cron model formatting and precedence edge cases", () => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: " " }, }); - 
expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); @@ -435,8 +429,7 @@ describe("cron model formatting and precedence edge cases", () => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: "" }, }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); @@ -444,18 +437,13 @@ describe("cron model formatting and precedence edge cases", () => { await withTempHome(async (home) => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "old", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: " ", - }, - }, + storeEntries: createCronSessionOverrideStore( + { providerOverride: "openai", modelOverride: " " }, + "old", + ), }); // Whitespace modelOverride should be ignored → default - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); }); @@ -465,35 +453,39 @@ describe("cron model formatting and precedence edge cases", () => { describe("config model format variations", () => { it("default model as string 'provider/model'", async () => { await withTempHome(async (home) => { - const { call } = await runTurn(home, { - cfgOverrides: { - agents: { - defaults: { - model: "openai/gpt-4.1", + await expectTurnModel( + home, + { + cfgOverrides: { + agents: { + defaults: { + model: "openai/gpt-4.1", + }, }, }, + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }, - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1"); + { provider: "openai", model: "gpt-4.1" }, + ); }); }); it("default model as object with primary field", 
async () => { await withTempHome(async (home) => { - const { call } = await runTurn(home, { - cfgOverrides: { - agents: { - defaults: { - model: { primary: "openai/gpt-4.1" }, + await expectTurnModel( + home, + { + cfgOverrides: { + agents: { + defaults: { + model: { primary: "openai/gpt-4.1" }, + }, }, }, + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }, - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1"); + { provider: "openai", model: "gpt-4.1" }, + ); }); }); diff --git a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts index b9c0fddb3a3..5abbb453f35 100644 --- a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts +++ b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts @@ -84,18 +84,13 @@ async function expectStructuredTelegramFailure(params: { }, }); - expect(res.status).toBe(params.expectedStatus); - if (params.expectedStatus === "ok") { - expect(res.delivered).toBe(false); - } - if (params.expectDeliveryAttempted !== undefined) { - expect(res.deliveryAttempted).toBe(params.expectDeliveryAttempted); - } - if (params.expectedErrorFragment) { - expect(res.error).toContain(params.expectedErrorFragment); - } - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); + expectFailedTelegramDeliveryResult({ + res, + deps, + expectedStatus: params.expectedStatus, + expectedErrorFragment: params.expectedErrorFragment, + expectDeliveryAttempted: params.expectDeliveryAttempted, + }); }, { deps: { @@ -105,6 +100,29 @@ async function expectStructuredTelegramFailure(params: { ); } +function expectFailedTelegramDeliveryResult(params: { + 
res: Awaited>; + deps: CliDeps; + expectedStatus: "ok" | "error"; + expectedErrorFragment?: string; + expectDeliveryAttempted?: boolean; +}) { + expect(params.res.status).toBe(params.expectedStatus); + if (params.expectedStatus === "ok") { + expect(params.res.delivered).toBe(false); + } else { + expect(params.res.delivered).toBeUndefined(); + } + if (params.expectDeliveryAttempted !== undefined) { + expect(params.res.deliveryAttempted).toBe(params.expectDeliveryAttempted); + } + if (params.expectedErrorFragment) { + expect(params.res.error).toContain(params.expectedErrorFragment); + } + expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expect(params.deps.sendMessageTelegram).toHaveBeenCalledTimes(1); +} + async function runTelegramDeliveryResult(bestEffort: boolean) { let outcome: | { @@ -112,6 +130,35 @@ async function runTelegramDeliveryResult(bestEffort: boolean) { deps: CliDeps; } | undefined; + await withTelegramTextDelivery({ bestEffort }, async ({ res, deps }) => { + outcome = { res, deps }; + }); + if (!outcome) { + throw new Error("telegram delivery did not produce an outcome"); + } + return outcome; +} + +function expectSuccessfulTelegramTextDelivery(params: { + res: Awaited>; + deps: CliDeps; +}): void { + expect(params.res.status).toBe("ok"); + expect(params.res.delivered).toBe(true); + expect(params.res.deliveryAttempted).toBe(true); + expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); +} + +async function withTelegramTextDelivery( + params: { bestEffort: boolean }, + run: (params: { + home: string; + storePath: string; + deps: CliDeps; + res: Awaited>; + }) => Promise, + fixtureParams?: Parameters[1], +) { await withTelegramAnnounceFixture(async ({ home, storePath, deps }) => { mockAgentPayloads([{ text: "hello from cron" }]); const res = await runTelegramAnnounceTurn({ @@ -122,15 +169,35 @@ async function runTelegramDeliveryResult(bestEffort: boolean) { mode: "announce", channel: "telegram", to: "123", - bestEffort, + bestEffort: 
params.bestEffort, }, }); - outcome = { res, deps }; - }); - if (!outcome) { - throw new Error("telegram delivery did not produce an outcome"); - } - return outcome; + await run({ home, storePath, deps, res }); + }, fixtureParams); +} + +async function expectTelegramTextDeliveryFailure(params: { + bestEffort: boolean; + expectedStatus: "ok" | "error"; + expectedErrorFragment?: string; +}) { + await withTelegramTextDelivery( + { bestEffort: params.bestEffort }, + async ({ deps, res }) => { + expectFailedTelegramDeliveryResult({ + res, + deps, + expectedStatus: params.expectedStatus, + expectedErrorFragment: params.expectedErrorFragment, + expectDeliveryAttempted: true, + }); + }, + { + deps: { + sendMessageTelegram: vi.fn().mockRejectedValue(new Error("boom")), + }, + }, + ); } async function runSignalDeliveryResult(bestEffort: boolean) { @@ -348,121 +415,37 @@ describe("runCronIsolatedAgentTurn", () => { }); it("reports not-delivered when text direct delivery fails and best-effort is enabled", async () => { - await withTelegramAnnounceFixture( - async ({ home, storePath, deps }) => { - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: true, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(false); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); - }, - { - deps: { - sendMessageTelegram: vi.fn().mockRejectedValue(new Error("boom")), - }, - }, - ); + await expectTelegramTextDeliveryFailure({ + bestEffort: true, + expectedStatus: "ok", + }); }); it("delivers text directly when best-effort is disabled", async () => { - await withTempHome(async (home) => { - const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); - const deps = 
createCliDeps(); - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expectDirectTelegramDelivery(deps, { - chatId: "123", - text: "hello from cron", - }); + const { res, deps } = await runTelegramDeliveryResult(false); + expectSuccessfulTelegramTextDelivery({ res, deps }); + expectDirectTelegramDelivery(deps, { + chatId: "123", + text: "hello from cron", }); }); it("returns error when text direct delivery fails and best-effort is disabled", async () => { - await withTelegramAnnounceFixture( - async ({ home, storePath, deps }) => { - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("error"); - expect(res.delivered).toBeUndefined(); - expect(res.deliveryAttempted).toBe(true); - expect(res.error).toContain("boom"); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); - }, - { - deps: { - sendMessageTelegram: vi.fn().mockRejectedValue(new Error("boom")), - }, - }, - ); + await expectTelegramTextDeliveryFailure({ + bestEffort: false, + expectedStatus: "error", + expectedErrorFragment: "boom", + }); }); it("retries transient text direct delivery failures before succeeding", async () => { const previousFastMode = process.env.OPENCLAW_TEST_FAST; process.env.OPENCLAW_TEST_FAST = "1"; try { - await withTelegramAnnounceFixture( - async ({ home, storePath, deps }) => { - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await 
runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + await withTelegramTextDelivery( + { bestEffort: false }, + async ({ deps, res }) => { + expectSuccessfulTelegramTextDelivery({ res, deps }); expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(2); expect(deps.sendMessageTelegram).toHaveBeenLastCalledWith( "123", @@ -490,10 +473,7 @@ describe("runCronIsolatedAgentTurn", () => { it("delivers text directly when best-effort is enabled", async () => { const { res, deps } = await runTelegramDeliveryResult(true); - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expectSuccessfulTelegramTextDelivery({ res, deps }); expectDirectTelegramDelivery(deps, { chatId: "123", text: "hello from cron", diff --git a/src/cron/isolated-agent/delivery-target.test.ts b/src/cron/isolated-agent/delivery-target.test.ts index cfc492abe3b..df7d29d419f 100644 --- a/src/cron/isolated-agent/delivery-target.test.ts +++ b/src/cron/isolated-agent/delivery-target.test.ts @@ -64,6 +64,23 @@ function setMainSessionEntry(entry?: SessionStore[string]) { vi.mocked(loadSessionStore).mockReturnValue(store); } +function setLastSessionEntry(params: { + sessionId: string; + lastChannel: string; + lastTo: string; + lastThreadId?: string; + lastAccountId?: string; +}) { + setMainSessionEntry({ + sessionId: params.sessionId, + updatedAt: 1000, + lastChannel: params.lastChannel, + lastTo: params.lastTo, + ...(params.lastThreadId ? { lastThreadId: params.lastThreadId } : {}), + ...(params.lastAccountId ? 
{ lastAccountId: params.lastAccountId } : {}), + }); +} + function setWhatsAppAllowFrom(allowFrom: string[]) { vi.mocked(resolveWhatsAppAccount).mockReturnValue({ allowFrom, @@ -86,11 +103,17 @@ async function resolveForAgent(params: { }); } +async function resolveLastTarget(cfg: OpenClawConfig) { + return resolveForAgent({ + cfg, + target: { channel: "last", to: undefined }, + }); +} + describe("resolveDeliveryTarget", () => { it("reroutes implicit whatsapp delivery to authorized allowFrom recipient", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-w1", - updatedAt: 1000, lastChannel: "whatsapp", lastTo: "+15550000099", }); @@ -98,16 +121,15 @@ describe("resolveDeliveryTarget", () => { setStoredWhatsAppAllowFrom(["+15550000001"]); const cfg = makeCfg({ bindings: [] }); - const result = await resolveDeliveryTarget(cfg, AGENT_ID, { channel: "last", to: undefined }); + const result = await resolveLastTarget(cfg); expect(result.channel).toBe("whatsapp"); expect(result.to).toBe("+15550000001"); }); it("keeps explicit whatsapp target unchanged", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-w2", - updatedAt: 1000, lastChannel: "whatsapp", lastTo: "+15550000099", }); @@ -220,9 +242,8 @@ describe("resolveDeliveryTarget", () => { }); it("drops session threadId when destination does not match the previous recipient", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-2", - updatedAt: 1000, lastChannel: "telegram", lastTo: "999999", lastThreadId: "thread-1", @@ -233,9 +254,8 @@ describe("resolveDeliveryTarget", () => { }); it("keeps session threadId when destination matches the previous recipient", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-3", - updatedAt: 1000, lastChannel: "telegram", lastTo: "123456", lastThreadId: "thread-2", @@ -248,10 +268,7 @@ describe("resolveDeliveryTarget", () => { it("uses single configured channel when neither explicit 
nor session channel exists", async () => { setMainSessionEntry(undefined); - const result = await resolveForAgent({ - cfg: makeCfg({ bindings: [] }), - target: { channel: "last", to: undefined }, - }); + const result = await resolveLastTarget(makeCfg({ bindings: [] })); expect(result.channel).toBe("telegram"); expect(result.ok).toBe(false); if (result.ok) { @@ -268,10 +285,7 @@ describe("resolveDeliveryTarget", () => { new Error("Channel is required when multiple channels are configured: telegram, slack"), ); - const result = await resolveForAgent({ - cfg: makeCfg({ bindings: [] }), - target: { channel: "last", to: undefined }, - }); + const result = await resolveLastTarget(makeCfg({ bindings: [] })); expect(result.channel).toBeUndefined(); expect(result.to).toBeUndefined(); expect(result.ok).toBe(false); @@ -308,17 +322,13 @@ describe("resolveDeliveryTarget", () => { }); it("uses main session channel when channel=last and session route exists", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-4", - updatedAt: 1000, lastChannel: "telegram", lastTo: "987654", }); - const result = await resolveForAgent({ - cfg: makeCfg({ bindings: [] }), - target: { channel: "last", to: undefined }, - }); + const result = await resolveLastTarget(makeCfg({ bindings: [] })); expect(result.channel).toBe("telegram"); expect(result.to).toBe("987654"); @@ -326,9 +336,8 @@ describe("resolveDeliveryTarget", () => { }); it("explicit delivery.accountId overrides session-derived accountId", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-5", - updatedAt: 1000, lastChannel: "telegram", lastTo: "chat-999", lastAccountId: "default", diff --git a/src/cron/isolated-agent/run.fast-mode.test.ts b/src/cron/isolated-agent/run.fast-mode.test.ts index 471471e9ecd..abe50ea5554 100644 --- a/src/cron/isolated-agent/run.fast-mode.test.ts +++ b/src/cron/isolated-agent/run.fast-mode.test.ts @@ -14,169 +14,102 @@ import { const runCronIsolatedAgentTurn 
= await loadRunCronIsolatedAgentTurn(); +const OPENAI_GPT4_MODEL = "openai/gpt-4"; + +function mockSuccessfulModelFallback() { + runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { + await run(provider, model); + return { + result: { + payloads: [{ text: "ok" }], + meta: { agentMeta: { usage: { input: 10, output: 20 } } }, + }, + provider, + model, + attempts: [], + }; + }); +} + +async function runFastModeCase(params: { + configFastMode: boolean; + expectedFastMode: boolean; + message: string; + sessionFastMode?: boolean; +}) { + const baseSession = makeCronSession(); + resolveCronSessionMock.mockReturnValue( + params.sessionFastMode === undefined + ? baseSession + : makeCronSession({ + sessionEntry: { + ...baseSession.sessionEntry, + fastMode: params.sessionFastMode, + }, + }), + ); + mockSuccessfulModelFallback(); + + const result = await runCronIsolatedAgentTurn( + makeIsolatedAgentTurnParams({ + cfg: { + agents: { + defaults: { + models: { + [OPENAI_GPT4_MODEL]: { + params: { + fastMode: params.configFastMode, + }, + }, + }, + }, + }, + }, + job: makeIsolatedAgentTurnJob({ + payload: { + kind: "agentTurn", + message: params.message, + model: OPENAI_GPT4_MODEL, + }, + }), + }), + ); + + expect(result.status).toBe("ok"); + expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); + expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ + provider: "openai", + model: "gpt-4", + fastMode: params.expectedFastMode, + }); +} + describe("runCronIsolatedAgentTurn — fast mode", () => { setupRunCronIsolatedAgentTurnSuite(); it("passes config-driven fast mode into embedded cron runs", async () => { - const cronSession = makeCronSession(); - resolveCronSessionMock.mockReturnValue(cronSession); - - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - await run(provider, model); - return { - result: { - payloads: [{ text: "ok" }], - meta: { agentMeta: { usage: { input: 10, output: 20 } } }, - }, - provider, - 
model, - attempts: [], - }; - }); - - const result = await runCronIsolatedAgentTurn( - makeIsolatedAgentTurnParams({ - cfg: { - agents: { - defaults: { - models: { - "openai/gpt-4": { - params: { - fastMode: true, - }, - }, - }, - }, - }, - }, - job: makeIsolatedAgentTurnJob({ - payload: { - kind: "agentTurn", - message: "test fast mode", - model: "openai/gpt-4", - }, - }), - }), - ); - - expect(result.status).toBe("ok"); - expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); - expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ - provider: "openai", - model: "gpt-4", - fastMode: true, + await runFastModeCase({ + configFastMode: true, + expectedFastMode: true, + message: "test fast mode", }); }); it("honors session fastMode=false over config fastMode=true", async () => { - const cronSession = makeCronSession({ - sessionEntry: { - ...makeCronSession().sessionEntry, - fastMode: false, - }, - }); - resolveCronSessionMock.mockReturnValue(cronSession); - - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - await run(provider, model); - return { - result: { - payloads: [{ text: "ok" }], - meta: { agentMeta: { usage: { input: 10, output: 20 } } }, - }, - provider, - model, - attempts: [], - }; - }); - - const result = await runCronIsolatedAgentTurn( - makeIsolatedAgentTurnParams({ - cfg: { - agents: { - defaults: { - models: { - "openai/gpt-4": { - params: { - fastMode: true, - }, - }, - }, - }, - }, - }, - job: makeIsolatedAgentTurnJob({ - payload: { - kind: "agentTurn", - message: "test fast mode override", - model: "openai/gpt-4", - }, - }), - }), - ); - - expect(result.status).toBe("ok"); - expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); - expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ - provider: "openai", - model: "gpt-4", - fastMode: false, + await runFastModeCase({ + configFastMode: true, + expectedFastMode: false, + message: "test fast mode override", + sessionFastMode: false, }); }); 
it("honors session fastMode=true over config fastMode=false", async () => { - const cronSession = makeCronSession({ - sessionEntry: { - ...makeCronSession().sessionEntry, - fastMode: true, - }, - }); - resolveCronSessionMock.mockReturnValue(cronSession); - - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - await run(provider, model); - return { - result: { - payloads: [{ text: "ok" }], - meta: { agentMeta: { usage: { input: 10, output: 20 } } }, - }, - provider, - model, - attempts: [], - }; - }); - - const result = await runCronIsolatedAgentTurn( - makeIsolatedAgentTurnParams({ - cfg: { - agents: { - defaults: { - models: { - "openai/gpt-4": { - params: { - fastMode: false, - }, - }, - }, - }, - }, - }, - job: makeIsolatedAgentTurnJob({ - payload: { - kind: "agentTurn", - message: "test fast mode session override", - model: "openai/gpt-4", - }, - }), - }), - ); - - expect(result.status).toBe("ok"); - expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); - expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ - provider: "openai", - model: "gpt-4", - fastMode: true, + await runFastModeCase({ + configFastMode: false, + expectedFastMode: true, + message: "test fast mode session override", + sessionFastMode: true, }); }); }); diff --git a/src/cron/isolated-agent/run.interim-retry.test.ts b/src/cron/isolated-agent/run.interim-retry.test.ts index 90d663ed020..6f01a2e9232 100644 --- a/src/cron/isolated-agent/run.interim-retry.test.ts +++ b/src/cron/isolated-agent/run.interim-retry.test.ts @@ -7,6 +7,7 @@ import { countActiveDescendantRunsMock, listDescendantRunsForRequesterMock, loadRunCronIsolatedAgentTurn, + mockRunCronFallbackPassthrough, pickLastNonEmptyTextFromPayloadsMock, runEmbeddedPiAgentMock, runWithModelFallbackMock, @@ -17,13 +18,6 @@ const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn(); describe("runCronIsolatedAgentTurn — interim ack retry", () => { setupRunCronIsolatedAgentTurnSuite(); - const 
mockFallbackPassthrough = () => { - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - const result = await run(provider, model); - return { result, provider, model, attempts: [] }; - }); - }; - const runTurnAndExpectOk = async (expectedFallbackCalls: number, expectedAgentCalls: number) => { const result = await runCronIsolatedAgentTurn(makeIsolatedAgentTurnParams()); expect(result.status).toBe("ok"); @@ -62,7 +56,7 @@ describe("runCronIsolatedAgentTurn — interim ack retry", () => { meta: { agentMeta: { usage: { input: 10, output: 20 } } }, }); - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); await runTurnAndExpectOk(2, 2); expect(runEmbeddedPiAgentMock.mock.calls[1]?.[0]?.prompt).toContain( "previous response was only an acknowledgement", @@ -76,7 +70,7 @@ describe("runCronIsolatedAgentTurn — interim ack retry", () => { meta: { agentMeta: { usage: { input: 10, output: 20 } } }, }); - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); await runTurnAndExpectOk(1, 1); }); @@ -93,7 +87,7 @@ describe("runCronIsolatedAgentTurn — interim ack retry", () => { ]); countActiveDescendantRunsMock.mockReturnValue(0); - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); await runTurnAndExpectOk(1, 1); }); }); diff --git a/src/cron/isolated-agent/run.message-tool-policy.test.ts b/src/cron/isolated-agent/run.message-tool-policy.test.ts index 2d576900b9d..a92b19f5337 100644 --- a/src/cron/isolated-agent/run.message-tool-policy.test.ts +++ b/src/cron/isolated-agent/run.message-tool-policy.test.ts @@ -2,12 +2,12 @@ import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { clearFastTestEnv, loadRunCronIsolatedAgentTurn, + mockRunCronFallbackPassthrough, resetRunCronIsolatedAgentTurnHarness, resolveCronDeliveryPlanMock, resolveDeliveryTargetMock, restoreFastTestEnv, runEmbeddedPiAgentMock, - runWithModelFallbackMock, } from "./run.test-harness.js"; const runCronIsolatedAgentTurn = await 
loadRunCronIsolatedAgentTurn(); @@ -32,12 +32,18 @@ function makeParams() { describe("runCronIsolatedAgentTurn message tool policy", () => { let previousFastTestEnv: string | undefined; - const mockFallbackPassthrough = () => { - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - const result = await run(provider, model); - return { result, provider, model, attempts: [] }; - }); - }; + async function expectMessageToolDisabledForPlan(plan: { + requested: boolean; + mode: "none" | "announce"; + channel?: string; + to?: string; + }) { + mockRunCronFallbackPassthrough(); + resolveCronDeliveryPlanMock.mockReturnValue(plan); + await runCronIsolatedAgentTurn(makeParams()); + expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1); + expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(true); + } beforeEach(() => { previousFastTestEnv = clearFastTestEnv(); @@ -56,35 +62,23 @@ describe("runCronIsolatedAgentTurn message tool policy", () => { }); it('disables the message tool when delivery.mode is "none"', async () => { - mockFallbackPassthrough(); - resolveCronDeliveryPlanMock.mockReturnValue({ + await expectMessageToolDisabledForPlan({ requested: false, mode: "none", }); - - await runCronIsolatedAgentTurn(makeParams()); - - expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1); - expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(true); }); it("disables the message tool when cron delivery is active", async () => { - mockFallbackPassthrough(); - resolveCronDeliveryPlanMock.mockReturnValue({ + await expectMessageToolDisabledForPlan({ requested: true, mode: "announce", channel: "telegram", to: "123", }); - - await runCronIsolatedAgentTurn(makeParams()); - - expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1); - expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(true); }); it("keeps the message tool enabled for shared callers when delivery is not requested", async () => 
{ - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); resolveCronDeliveryPlanMock.mockReturnValue({ requested: false, mode: "none", diff --git a/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts b/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts index 28f3d87cb09..edaee62daa6 100644 --- a/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts +++ b/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts @@ -54,6 +54,31 @@ function makeParams(overrides?: Record) { }; } +function expectDefaultSandboxPreserved( + runCfg: + | { + agents?: { defaults?: { sandbox?: unknown } }; + } + | undefined, +) { + expect(runCfg?.agents?.defaults?.sandbox).toEqual({ + mode: "all", + workspaceAccess: "rw", + docker: { + network: "none", + dangerouslyAllowContainerNamespaceJoin: true, + dangerouslyAllowExternalBindSources: true, + }, + browser: { + enabled: true, + autoStart: false, + }, + prune: { + maxAgeDays: 7, + }, + }); +} + describe("runCronIsolatedAgentTurn sandbox config preserved", () => { let previousFastTestEnv: string | undefined; @@ -79,22 +104,7 @@ describe("runCronIsolatedAgentTurn sandbox config preserved", () => { expect(runWithModelFallbackMock).toHaveBeenCalledTimes(1); const runCfg = runWithModelFallbackMock.mock.calls[0]?.[0]?.cfg; - expect(runCfg?.agents?.defaults?.sandbox).toEqual({ - mode: "all", - workspaceAccess: "rw", - docker: { - network: "none", - dangerouslyAllowContainerNamespaceJoin: true, - dangerouslyAllowExternalBindSources: true, - }, - browser: { - enabled: true, - autoStart: false, - }, - prune: { - maxAgeDays: 7, - }, - }); + expectDefaultSandboxPreserved(runCfg); }); it("keeps global sandbox defaults when agent override is partial", async () => { @@ -118,22 +128,7 @@ describe("runCronIsolatedAgentTurn sandbox config preserved", () => { const runCfg = runWithModelFallbackMock.mock.calls[0]?.[0]?.cfg; const resolvedSandbox = resolveSandboxConfigForAgent(runCfg, "specialist"); - 
expect(runCfg?.agents?.defaults?.sandbox).toEqual({ - mode: "all", - workspaceAccess: "rw", - docker: { - network: "none", - dangerouslyAllowContainerNamespaceJoin: true, - dangerouslyAllowExternalBindSources: true, - }, - browser: { - enabled: true, - autoStart: false, - }, - prune: { - maxAgeDays: 7, - }, - }); + expectDefaultSandboxPreserved(runCfg); expect(resolvedSandbox.mode).toBe("all"); expect(resolvedSandbox.workspaceAccess).toBe("rw"); expect(resolvedSandbox.docker).toMatchObject({ diff --git a/src/cron/isolated-agent/run.test-harness.ts b/src/cron/isolated-agent/run.test-harness.ts index 74b5eed43e1..81e4c8b902b 100644 --- a/src/cron/isolated-agent/run.test-harness.ts +++ b/src/cron/isolated-agent/run.test-harness.ts @@ -341,6 +341,13 @@ function makeDefaultEmbeddedResult() { }; } +export function mockRunCronFallbackPassthrough(): void { + runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { + const result = await run(provider, model); + return { result, provider, model, attempts: [] }; + }); +} + export function resetRunCronIsolatedAgentTurnHarness(): void { vi.clearAllMocks(); diff --git a/src/cron/isolated-agent/subagent-followup.test.ts b/src/cron/isolated-agent/subagent-followup.test.ts index c670e4c8c13..7861c75ff35 100644 --- a/src/cron/isolated-agent/subagent-followup.test.ts +++ b/src/cron/isolated-agent/subagent-followup.test.ts @@ -33,6 +33,29 @@ async function resolveAfterAdvancingTimers(promise: Promise, advanceMs = 1 return promise; } +function createDescendantRun(params?: { + runId?: string; + childSessionKey?: string; + task?: string; + cleanup?: "keep" | "delete"; + endedAt?: number; + frozenResultText?: string | null; +}) { + return { + runId: params?.runId ?? "run-1", + childSessionKey: params?.childSessionKey ?? "child-1", + requesterSessionKey: "test-session", + requesterDisplayKey: "test-session", + task: params?.task ?? "task-1", + cleanup: params?.cleanup ?? 
"keep", + createdAt: 1000, + endedAt: params?.endedAt ?? 2000, + ...(params?.frozenResultText === undefined + ? {} + : { frozenResultText: params.frozenResultText }), + }; +} + describe("isLikelyInterimCronMessage", () => { it("detects 'on it' as interim", () => { expect(isLikelyInterimCronMessage("on it")).toBe(true); @@ -85,18 +108,7 @@ describe("readDescendantSubagentFallbackReply", () => { }); it("reads reply from child session transcript", async () => { - vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - }, - ]); + vi.mocked(listDescendantRunsForRequester).mockReturnValue([createDescendantRun()]); vi.mocked(readLatestAssistantReply).mockResolvedValue("child output text"); const result = await readDescendantSubagentFallbackReply({ sessionKey: "test-session", @@ -107,17 +119,10 @@ describe("readDescendantSubagentFallbackReply", () => { it("falls back to frozenResultText when session transcript unavailable", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", + createDescendantRun({ cleanup: "delete", - createdAt: 1000, - endedAt: 2000, frozenResultText: "frozen child output", - }, + }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue(undefined); const result = await readDescendantSubagentFallbackReply({ @@ -129,17 +134,7 @@ describe("readDescendantSubagentFallbackReply", () => { it("prefers session transcript over frozenResultText", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - 
createdAt: 1000, - endedAt: 2000, - frozenResultText: "frozen text", - }, + createDescendantRun({ frozenResultText: "frozen text" }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue("live transcript text"); const result = await readDescendantSubagentFallbackReply({ @@ -151,28 +146,14 @@ describe("readDescendantSubagentFallbackReply", () => { it("joins replies from multiple descendants", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - frozenResultText: "first child output", - }, - { + createDescendantRun({ frozenResultText: "first child output" }), + createDescendantRun({ runId: "run-2", childSessionKey: "child-2", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", task: "task-2", - cleanup: "keep", - createdAt: 1000, endedAt: 3000, frozenResultText: "second child output", - }, + }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue(undefined); const result = await readDescendantSubagentFallbackReply({ @@ -184,27 +165,14 @@ describe("readDescendantSubagentFallbackReply", () => { it("skips SILENT_REPLY_TOKEN descendants", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - }, - { + createDescendantRun(), + createDescendantRun({ runId: "run-2", childSessionKey: "child-2", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", task: "task-2", - cleanup: "keep", - createdAt: 1000, endedAt: 3000, frozenResultText: "useful output", - }, + }), ]); vi.mocked(readLatestAssistantReply).mockImplementation(async (params) => { if (params.sessionKey === "child-1") 
{ @@ -221,17 +189,10 @@ describe("readDescendantSubagentFallbackReply", () => { it("returns undefined when frozenResultText is null", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", + createDescendantRun({ cleanup: "delete", - createdAt: 1000, - endedAt: 2000, frozenResultText: null, - }, + }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue(undefined); const result = await readDescendantSubagentFallbackReply({ diff --git a/src/cron/isolated-agent/subagent-followup.ts b/src/cron/isolated-agent/subagent-followup.ts index 9d6ec7e78ac..a337fe528b7 100644 --- a/src/cron/isolated-agent/subagent-followup.ts +++ b/src/cron/isolated-agent/subagent-followup.ts @@ -169,7 +169,7 @@ export async function waitForDescendantSubagentSummary(params: { // CRON_SUBAGENT_FINAL_REPLY_GRACE_MS) to capture that synthesis. const gracePeriodDeadline = Math.min(Date.now() + CRON_SUBAGENT_FINAL_REPLY_GRACE_MS, deadline); - while (Date.now() < gracePeriodDeadline) { + const resolveUsableLatestReply = async () => { const latest = (await readLatestAssistantReply({ sessionKey: params.sessionKey }))?.trim(); if ( latest && @@ -178,16 +178,20 @@ export async function waitForDescendantSubagentSummary(params: { ) { return latest; } + return undefined; + }; + + while (Date.now() < gracePeriodDeadline) { + const latest = await resolveUsableLatestReply(); + if (latest) { + return latest; + } await new Promise((resolve) => setTimeout(resolve, CRON_SUBAGENT_GRACE_POLL_MS)); } // Final read after grace period expires. 
- const latest = (await readLatestAssistantReply({ sessionKey: params.sessionKey }))?.trim(); - if ( - latest && - latest.toUpperCase() !== SILENT_REPLY_TOKEN.toUpperCase() && - (latest !== initialReply || !isLikelyInterimCronMessage(latest)) - ) { + const latest = await resolveUsableLatestReply(); + if (latest) { return latest; } diff --git a/src/cron/service.restart-catchup.test.ts b/src/cron/service.restart-catchup.test.ts index f0c9c3e4dc9..70da886b9a0 100644 --- a/src/cron/service.restart-catchup.test.ts +++ b/src/cron/service.restart-catchup.test.ts @@ -47,326 +47,274 @@ describe("CronService restart catch-up", () => { }; } - it("executes an overdue recurring job immediately on start", async () => { + async function withRestartedCron( + jobs: unknown[], + run: (params: { + cron: CronService; + enqueueSystemEvent: ReturnType; + requestHeartbeatNow: ReturnType; + }) => Promise, + ) { const store = await makeStorePath(); const enqueueSystemEvent = vi.fn(); const requestHeartbeatNow = vi.fn(); + await writeStoreJobs(store.storePath, jobs); + + const cron = createRestartCronService({ + storePath: store.storePath, + enqueueSystemEvent, + requestHeartbeatNow, + }); + + try { + await cron.start(); + await run({ cron, enqueueSystemEvent, requestHeartbeatNow }); + } finally { + cron.stop(); + await store.cleanup(); + } + } + + it("executes an overdue recurring job immediately on start", async () => { const dueAt = Date.parse("2025-12-13T15:00:00.000Z"); const lastRunAt = Date.parse("2025-12-12T15:00:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-overdue-job", - name: "daily digest", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), - schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "digest now" }, - state: { - nextRunAtMs: dueAt, - lastRunAtMs: lastRunAt, - 
lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-overdue-job", + name: "daily digest", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), + schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "digest now" }, + state: { + nextRunAtMs: dueAt, + lastRunAtMs: lastRunAt, + lastStatus: "ok", + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "digest now", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-overdue-job"); + expect(updated?.state.lastStatus).toBe("ok"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); + expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "digest now", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-overdue-job"); - expect(updated?.state.lastStatus).toBe("ok"); - expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); - expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); - - cron.stop(); - await store.cleanup(); }); it("clears stale running markers without replaying interrupted startup jobs", async () => { - const store = await makeStorePath(); - const enqueueSystemEvent = 
vi.fn(); - const requestHeartbeatNow = vi.fn(); - const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-stale-running", - name: "daily stale marker", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), - schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "resume stale marker" }, - state: { - nextRunAtMs: dueAt, - runningAtMs: staleRunningAt, + await withRestartedCron( + [ + { + id: "restart-stale-running", + name: "daily stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "resume stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, }, + ], + async ({ cron, enqueueSystemEvent }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(noopLogger.warn).toHaveBeenCalledWith( + expect.objectContaining({ jobId: "restart-stale-running" }), + "cron: clearing stale running marker on startup", + ); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-stale-running"); + expect(updated?.state.runningAtMs).toBeUndefined(); + expect(updated?.state.lastStatus).toBeUndefined(); + expect(updated?.state.lastRunAtMs).toBeUndefined(); + expect((updated?.state.nextRunAtMs ?? 
0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe( + true, + ); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(noopLogger.warn).toHaveBeenCalledWith( - expect.objectContaining({ jobId: "restart-stale-running" }), - "cron: clearing stale running marker on startup", ); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-stale-running"); - expect(updated?.state.runningAtMs).toBeUndefined(); - expect(updated?.state.lastStatus).toBeUndefined(); - expect(updated?.state.lastRunAtMs).toBeUndefined(); - expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(true); - - cron.stop(); - await store.cleanup(); }); it("replays the most recent missed cron slot after restart when nextRunAtMs already advanced", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-missed-slot", - name: "every ten minutes +1", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "catch missed slot" }, - state: { - // Persisted state may already be recomputed from restart time and - // point to the future slot, even though 04:01 was missed. 
- nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-missed-slot", + name: "every ten minutes +1", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "catch missed slot" }, + state: { + // Persisted state may already be recomputed from restart time and + // point to the future slot, even though 04:01 was missed. + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), + lastStatus: "ok", + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "catch missed slot", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-missed-slot"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z")); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "catch missed slot", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-missed-slot"); - expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z")); - - cron.stop(); - await store.cleanup(); }); it("does not replay interrupted one-shot jobs on startup", async () => { - const store = 
await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-stale-one-shot", - name: "one shot stale marker", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), - schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "one-shot stale marker" }, - state: { - nextRunAtMs: dueAt, - runningAtMs: staleRunningAt, + await withRestartedCron( + [ + { + id: "restart-stale-one-shot", + name: "one shot stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "one-shot stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-stale-one-shot"); + expect(updated?.state.runningAtMs).toBeUndefined(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-stale-one-shot"); - 
expect(updated?.state.runningAtMs).toBeUndefined(); - - cron.stop(); - await store.cleanup(); + ); }); it("does not replay cron slot when the latest slot already ran before restart", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-no-duplicate-slot", - name: "every ten minutes +1 no duplicate", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "already ran" }, - state: { - nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-no-duplicate-slot", + name: "every ten minutes +1 no duplicate", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "already ran" }, + state: { + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + lastStatus: "ok", + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - cron.stop(); - await 
store.cleanup(); + ); }); it("does not replay missed cron slots while error backoff is pending after restart", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-backoff-pending", - name: "backoff pending", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), - schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "do not run during backoff" }, - state: { - // Next retry is intentionally delayed by backoff despite a newer cron slot. - nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - lastStatus: "error", - consecutiveErrors: 4, + await withRestartedCron( + [ + { + id: "restart-backoff-pending", + name: "backoff pending", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), + schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "do not run during backoff" }, + state: { + // Next retry is intentionally delayed by backoff despite a newer cron slot. 
+ nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + lastStatus: "error", + consecutiveErrors: 4, + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - - cron.stop(); - await store.cleanup(); + ); }); it("replays missed cron slot after restart when error backoff has already elapsed", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-backoff-elapsed-replay", - name: "backoff elapsed replay", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "replay after backoff elapsed" }, - state: { - // Startup maintenance may already point to a future slot (04:11) even - // though 04:01 was missed and the 30s error backoff has elapsed. 
- nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), - lastStatus: "error", - consecutiveErrors: 1, + await withRestartedCron( + [ + { + id: "restart-backoff-elapsed-replay", + name: "backoff elapsed replay", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "replay after backoff elapsed" }, + state: { + // Startup maintenance may already point to a future slot (04:11) even + // though 04:01 was missed and the 30s error backoff has elapsed. + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), + lastStatus: "error", + consecutiveErrors: 1, + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "replay after backoff elapsed", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "replay after backoff elapsed", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - cron.stop(); - await store.cleanup(); }); it("reschedules deferred missed jobs from the post-catchup clock so they stay in the future", async () => { diff --git a/src/daemon/inspect.ts b/src/daemon/inspect.ts index 29ac8094ceb..c3025ae8b8a 100644 --- a/src/daemon/inspect.ts +++ b/src/daemon/inspect.ts @@ -7,6 +7,7 @@ import { resolveGatewaySystemdServiceName, resolveGatewayWindowsTaskName, } from "./constants.js"; +import { resolveHomeDir } from "./paths.js"; import { 
execSchtasks } from "./schtasks-exec.js"; export type ExtraGatewayService = { @@ -49,14 +50,6 @@ export function renderGatewayServiceCleanupHints( } } -function resolveHomeDir(env: Record): string { - const home = env.HOME?.trim() || env.USERPROFILE?.trim(); - if (!home) { - throw new Error("Missing HOME"); - } - return home; -} - type Marker = (typeof EXTRA_MARKERS)[number]; function detectMarker(content: string): Marker | null { diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index ba43715ba28..4c624cfeec1 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -31,6 +31,25 @@ const launchdRestartHandoffState = vi.hoisted(() => ({ })); const defaultProgramArguments = ["node", "-e", "process.exit(0)"]; +function expectLaunchctlEnableBootstrapOrder(env: Record) { + const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const plistPath = resolveLaunchAgentPlistPath(env); + const serviceId = `${domain}/${label}`; + const enableIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "enable" && c[1] === serviceId, + ); + const bootstrapIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, + ); + + expect(enableIndex).toBeGreaterThanOrEqual(0); + expect(bootstrapIndex).toBeGreaterThanOrEqual(0); + expect(enableIndex).toBeLessThan(bootstrapIndex); + + return { domain, label, serviceId, bootstrapIndex }; +} + function normalizeLaunchctlArgs(file: string, args: string[]): string[] { if (file === "launchctl") { return args; @@ -219,25 +238,12 @@ describe("launchd bootstrap repair", () => { const repair = await repairLaunchAgentBootstrap({ env }); expect(repair.ok).toBe(true); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId, bootstrapIndex } = expectLaunchctlEnableBootstrapOrder(env); const kickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(bootstrapIndex).toBeLessThan(kickstartIndex); }); }); @@ -258,23 +264,10 @@ describe("launchd install", () => { programArguments: defaultProgramArguments, }); - const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); const installKickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(installKickstartIndex).toBe(-1); }); @@ -360,24 +353,13 @@ describe("launchd install", () => { stdout: new PassThrough(), }); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); const kickstartCalls = state.launchctlCalls.filter( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); expect(result).toEqual({ outcome: "completed" }); expect(kickstartCalls).toHaveLength(2); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); }); diff --git a/src/daemon/launchd.ts b/src/daemon/launchd.ts index 0e6d8610931..29d0933558c 100644 --- a/src/daemon/launchd.ts +++ b/src/daemon/launchd.ts @@ -120,6 +120,58 @@ function resolveGuiDomain(): string { return `gui/${process.getuid()}`; } +function throwBootstrapGuiSessionError(params: { + detail: string; + domain: string; + actionHint: string; +}) { + throw new Error( + [ + `launchctl bootstrap failed: ${params.detail}`, + `LaunchAgent ${params.actionHint} requires a logged-in macOS GUI session for this user (${params.domain}).`, + "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", + `Fix: sign in to the macOS desktop as the target user and rerun \`${params.actionHint}\`.`, + "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", + ].join("\n"), + ); +} + +function writeLaunchAgentActionLine( + stdout: NodeJS.WritableStream, + label: string, + value: string, +): void { + try { + stdout.write(`${formatLine(label, value)}\n`); + } catch (err: unknown) { + 
if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { + throw err; + } + } +} + +async function bootstrapLaunchAgentOrThrow(params: { + domain: string; + serviceTarget: string; + plistPath: string; + actionHint: string; +}) { + await execLaunchctl(["enable", params.serviceTarget]); + const boot = await execLaunchctl(["bootstrap", params.domain, params.plistPath]); + if (boot.code === 0) { + return; + } + const detail = (boot.stderr || boot.stdout).trim(); + if (isUnsupportedGuiDomain(detail)) { + throwBootstrapGuiSessionError({ + detail, + domain: params.domain, + actionHint: params.actionHint, + }); + } + throw new Error(`launchctl bootstrap failed: ${detail}`); +} + async function ensureSecureDirectory(targetPath: string): Promise { await fs.mkdir(targetPath, { recursive: true, mode: LAUNCH_AGENT_DIR_MODE }); try { @@ -414,23 +466,12 @@ export async function installLaunchAgent({ await execLaunchctl(["bootout", domain, plistPath]); await execLaunchctl(["unload", plistPath]); // launchd can persist "disabled" state even after bootout + plist removal; clear it before bootstrap. 
- await execLaunchctl(["enable", `${domain}/${label}`]); - const boot = await execLaunchctl(["bootstrap", domain, plistPath]); - if (boot.code !== 0) { - const detail = (boot.stderr || boot.stdout).trim(); - if (isUnsupportedGuiDomain(detail)) { - throw new Error( - [ - `launchctl bootstrap failed: ${detail}`, - `LaunchAgent install requires a logged-in macOS GUI session for this user (${domain}).`, - "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", - "Fix: sign in to the macOS desktop as the target user and rerun `openclaw gateway install --force`.", - "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", - ].join("\n"), - ); - } - throw new Error(`launchctl bootstrap failed: ${detail}`); - } + await bootstrapLaunchAgentOrThrow({ + domain, + serviceTarget: `${domain}/${label}`, + plistPath, + actionHint: "openclaw gateway install --force", + }); // `bootstrap` already loads RunAtLoad agents. Avoid `kickstart -k` here: // on slow macOS guests it SIGTERMs the freshly booted gateway and pushes the // real listener startup past onboarding's health deadline. @@ -469,25 +510,13 @@ export async function restartLaunchAgent({ if (!handoff.ok) { throw new Error(`launchd restart handoff failed: ${handoff.detail ?? 
"unknown error"}`); } - try { - stdout.write(`${formatLine("Scheduled LaunchAgent restart", serviceTarget)}\n`); - } catch (err: unknown) { - if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { - throw err; - } - } + writeLaunchAgentActionLine(stdout, "Scheduled LaunchAgent restart", serviceTarget); return { outcome: "scheduled" }; } const start = await execLaunchctl(["kickstart", "-k", serviceTarget]); if (start.code === 0) { - try { - stdout.write(`${formatLine("Restarted LaunchAgent", serviceTarget)}\n`); - } catch (err: unknown) { - if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { - throw err; - } - } + writeLaunchAgentActionLine(stdout, "Restarted LaunchAgent", serviceTarget); return { outcome: "completed" }; } @@ -496,34 +525,17 @@ export async function restartLaunchAgent({ } // If the service was previously booted out, re-register the plist and retry. - await execLaunchctl(["enable", serviceTarget]); - const boot = await execLaunchctl(["bootstrap", domain, plistPath]); - if (boot.code !== 0) { - const detail = (boot.stderr || boot.stdout).trim(); - if (isUnsupportedGuiDomain(detail)) { - throw new Error( - [ - `launchctl bootstrap failed: ${detail}`, - `LaunchAgent restart requires a logged-in macOS GUI session for this user (${domain}).`, - "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", - "Fix: sign in to the macOS desktop as the target user and rerun `openclaw gateway restart`.", - "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", - ].join("\n"), - ); - } - throw new Error(`launchctl bootstrap failed: ${detail}`); - } + await bootstrapLaunchAgentOrThrow({ + domain, + serviceTarget, + plistPath, + actionHint: "openclaw gateway restart", + }); const retry = await execLaunchctl(["kickstart", "-k", serviceTarget]); if (retry.code !== 0) { throw new Error(`launchctl kickstart failed: ${retry.stderr || 
retry.stdout}`.trim()); } - try { - stdout.write(`${formatLine("Restarted LaunchAgent", serviceTarget)}\n`); - } catch (err: unknown) { - if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { - throw err; - } - } + writeLaunchAgentActionLine(stdout, "Restarted LaunchAgent", serviceTarget); return { outcome: "completed" }; } diff --git a/src/daemon/schtasks.startup-fallback.test.ts b/src/daemon/schtasks.startup-fallback.test.ts index 8b26a98e4ed..55e678052f3 100644 --- a/src/daemon/schtasks.startup-fallback.test.ts +++ b/src/daemon/schtasks.startup-fallback.test.ts @@ -1,37 +1,27 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { PassThrough } from "node:stream"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { quoteCmdScriptArg } from "./cmd-argv.js"; - -const schtasksResponses = vi.hoisted( - () => [] as Array<{ code: number; stdout: string; stderr: string }>, -); -const schtasksCalls = vi.hoisted(() => [] as string[][]); -const inspectPortUsage = vi.hoisted(() => vi.fn()); -const killProcessTree = vi.hoisted(() => vi.fn()); +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksResponses, + withWindowsEnv, + writeGatewayScript, +} from "./test-helpers/schtasks-fixtures.js"; const childUnref = vi.hoisted(() => vi.fn()); const spawn = vi.hoisted(() => vi.fn(() => ({ unref: childUnref }))); -vi.mock("./schtasks-exec.js", () => ({ - execSchtasks: async (argv: string[]) => { - schtasksCalls.push(argv); - return schtasksResponses.shift() ?? 
{ code: 0, stdout: "", stderr: "" }; - }, -})); - -vi.mock("../infra/ports.js", () => ({ - inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args), -})); - -vi.mock("../process/kill-tree.js", () => ({ - killProcessTree: (...args: unknown[]) => killProcessTree(...args), -})); - -vi.mock("node:child_process", () => ({ - spawn, -})); +vi.mock("node:child_process", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + spawn, + }; +}); const { installScheduledTask, @@ -39,6 +29,7 @@ const { readScheduledTaskRuntime, restartScheduledTask, resolveTaskScriptPath, + stopScheduledTask, } = await import("./schtasks.js"); function resolveStartupEntryPath(env: Record) { @@ -53,28 +44,40 @@ function resolveStartupEntryPath(env: Record) { ); } -async function withWindowsEnv( - run: (params: { tmpDir: string; env: Record }) => Promise, -) { - const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-startup-")); - const env = { - USERPROFILE: tmpDir, - APPDATA: path.join(tmpDir, "AppData", "Roaming"), - OPENCLAW_PROFILE: "default", - OPENCLAW_GATEWAY_PORT: "18789", - }; - try { - await run({ tmpDir, env }); - } finally { - await fs.rm(tmpDir, { recursive: true, force: true }); - } +async function writeStartupFallbackEntry(env: Record) { + const startupEntryPath = resolveStartupEntryPath(env); + await fs.mkdir(path.dirname(startupEntryPath), { recursive: true }); + await fs.writeFile(startupEntryPath, "@echo off\r\n", "utf8"); + return startupEntryPath; } +function expectStartupFallbackSpawn(env: Record) { + expect(spawn).toHaveBeenCalledWith( + "cmd.exe", + ["/d", "/s", "/c", quoteCmdScriptArg(resolveTaskScriptPath(env))], + expect.objectContaining({ detached: true, stdio: "ignore", windowsHide: true }), + ); +} + +function expectGatewayTermination(pid: number) { + if (process.platform === "win32") { + expect(killProcessTree).not.toHaveBeenCalled(); + return; + } + expect(killProcessTree).toHaveBeenCalledWith(pid, { 
graceMs: 300 }); +} + +function addStartupFallbackMissingResponses( + extraResponses: Array<{ code: number; stdout: string; stderr: string }> = [], +) { + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 1, stdout: "", stderr: "not found" }, + ...extraResponses, + ); +} beforeEach(() => { - schtasksResponses.length = 0; - schtasksCalls.length = 0; - inspectPortUsage.mockReset(); - killProcessTree.mockReset(); + resetSchtasksBaseMocks(); spawn.mockClear(); childUnref.mockClear(); }); @@ -85,7 +88,7 @@ afterEach(() => { describe("Windows startup fallback", () => { it("falls back to a Startup-folder launcher when schtasks create is denied", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 5, stdout: "", stderr: "ERROR: Access is denied." }, @@ -120,7 +123,7 @@ describe("Windows startup fallback", () => { }); it("falls back to a Startup-folder launcher when schtasks create hangs", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 124, stdout: "", stderr: "schtasks timed out after 15000ms" }, @@ -135,35 +138,23 @@ describe("Windows startup fallback", () => { }); await expect(fs.access(resolveStartupEntryPath(env))).resolves.toBeUndefined(); - expect(spawn).toHaveBeenCalledWith( - "cmd.exe", - ["/d", "/s", "/c", quoteCmdScriptArg(resolveTaskScriptPath(env))], - expect.objectContaining({ detached: true, stdio: "ignore", windowsHide: true }), - ); + expectStartupFallbackSpawn(env); }); }); it("treats an installed Startup-folder launcher as loaded", async () => { - await withWindowsEnv(async ({ env }) => { - schtasksResponses.push( - { code: 0, stdout: "", stderr: "" }, - { code: 1, stdout: "", stderr: "not found" }, - ); - await 
fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true }); - await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8"); + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + addStartupFallbackMissingResponses(); + await writeStartupFallbackEntry(env); await expect(isScheduledTaskInstalled({ env })).resolves.toBe(true); }); }); it("reports runtime from the gateway listener when using the Startup fallback", async () => { - await withWindowsEnv(async ({ env }) => { - schtasksResponses.push( - { code: 0, stdout: "", stderr: "" }, - { code: 1, stdout: "", stderr: "not found" }, - ); - await fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true }); - await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8"); + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + addStartupFallbackMissingResponses(); + await writeStartupFallbackEntry(env); inspectPortUsage.mockResolvedValue({ port: 18789, status: "busy", @@ -179,15 +170,12 @@ describe("Windows startup fallback", () => { }); it("restarts the Startup fallback by killing the current pid and relaunching the entry", async () => { - await withWindowsEnv(async ({ env }) => { - schtasksResponses.push( + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + addStartupFallbackMissingResponses([ { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, - { code: 0, stdout: "", stderr: "" }, - { code: 1, stdout: "", stderr: "not found" }, - ); - await fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true }); - await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8"); + ]); + await writeStartupFallbackEntry(env); inspectPortUsage.mockResolvedValue({ port: 18789, status: "busy", @@ -199,12 +187,42 @@ describe("Windows startup fallback", () => { await expect(restartScheduledTask({ env, stdout })).resolves.toEqual({ outcome: "completed", }); - 
expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 }); - expect(spawn).toHaveBeenCalledWith( - "cmd.exe", - ["/d", "/s", "/c", quoteCmdScriptArg(resolveTaskScriptPath(env))], - expect.objectContaining({ detached: true, stdio: "ignore", windowsHide: true }), - ); + expectGatewayTermination(5151); + expectStartupFallbackSpawn(env); + }); + }); + + it("kills the Startup fallback runtime even when the CLI env omits the gateway port", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + schtasksResponses.push({ code: 0, stdout: "", stderr: "" }); + await writeGatewayScript(env); + await writeStartupFallbackEntry(env); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + const envWithoutPort = { ...env }; + delete envWithoutPort.OPENCLAW_GATEWAY_PORT; + await stopScheduledTask({ env: envWithoutPort, stdout }); + + expectGatewayTermination(5151); }); }); }); diff --git a/src/daemon/schtasks.stop.test.ts b/src/daemon/schtasks.stop.test.ts new file mode 100644 index 00000000000..04e5f1fced1 --- /dev/null +++ b/src/daemon/schtasks.stop.test.ts @@ -0,0 +1,170 @@ +import { PassThrough } from "node:stream"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksCalls, + schtasksResponses, + withWindowsEnv, + writeGatewayScript, +} from "./test-helpers/schtasks-fixtures.js"; +const findVerifiedGatewayListenerPidsOnPortSync = vi.hoisted(() => + vi.fn<(port: number) => number[]>(() => []), +); + 
+vi.mock("../infra/gateway-processes.js", () => ({ + findVerifiedGatewayListenerPidsOnPortSync: (port: number) => + findVerifiedGatewayListenerPidsOnPortSync(port), +})); + +const { restartScheduledTask, stopScheduledTask } = await import("./schtasks.js"); +const GATEWAY_PORT = 18789; +const SUCCESS_RESPONSE = { code: 0, stdout: "", stderr: "" } as const; + +function pushSuccessfulSchtasksResponses(count: number) { + for (let i = 0; i < count; i += 1) { + schtasksResponses.push({ ...SUCCESS_RESPONSE }); + } +} + +function freePortUsage() { + return { + port: GATEWAY_PORT, + status: "free" as const, + listeners: [], + hints: [], + }; +} + +function busyPortUsage( + pid: number, + options: { + command?: string; + commandLine?: string; + } = {}, +) { + return { + port: GATEWAY_PORT, + status: "busy" as const, + listeners: [ + { + pid, + command: options.command ?? "node.exe", + ...(options.commandLine ? { commandLine: options.commandLine } : {}), + }, + ], + hints: [], + }; +} + +function expectGatewayTermination(pid: number) { + if (process.platform === "win32") { + expect(killProcessTree).not.toHaveBeenCalled(); + return; + } + expect(killProcessTree).toHaveBeenCalledWith(pid, { graceMs: 300 }); +} + +async function withPreparedGatewayTask( + run: (context: { env: Record; stdout: PassThrough }) => Promise, +) { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { + await writeGatewayScript(env, GATEWAY_PORT); + const stdout = new PassThrough(); + await run({ env, stdout }); + }); +} + +beforeEach(() => { + resetSchtasksBaseMocks(); + findVerifiedGatewayListenerPidsOnPortSync.mockReset(); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); + inspectPortUsage.mockResolvedValue(freePortUsage()); +}); + +afterEach(() => { + vi.restoreAllMocks(); +}); + +describe("Scheduled Task stop/restart cleanup", () => { + it("kills lingering verified gateway listeners after schtasks stop", async () => { + await withPreparedGatewayTask(async ({ env, stdout 
}) => { + pushSuccessfulSchtasksResponses(3); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]); + inspectPortUsage + .mockResolvedValueOnce(busyPortUsage(4242)) + .mockResolvedValueOnce(freePortUsage()); + + await stopScheduledTask({ env, stdout }); + + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(GATEWAY_PORT); + expectGatewayTermination(4242); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + }); + }); + + it("force-kills remaining busy port listeners when the first stop pass does not free the port", async () => { + await withPreparedGatewayTask(async ({ env, stdout }) => { + pushSuccessfulSchtasksResponses(3); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]); + inspectPortUsage.mockResolvedValueOnce(busyPortUsage(4242)); + for (let i = 0; i < 20; i += 1) { + inspectPortUsage.mockResolvedValueOnce(busyPortUsage(4242)); + } + inspectPortUsage + .mockResolvedValueOnce(busyPortUsage(5252)) + .mockResolvedValueOnce(freePortUsage()); + + await stopScheduledTask({ env, stdout }); + + if (process.platform !== "win32") { + expect(killProcessTree).toHaveBeenNthCalledWith(1, 4242, { graceMs: 300 }); + expect(killProcessTree).toHaveBeenNthCalledWith(2, expect.any(Number), { graceMs: 300 }); + } else { + expect(killProcessTree).not.toHaveBeenCalled(); + } + expect(inspectPortUsage.mock.calls.length).toBeGreaterThanOrEqual(22); + }); + }); + + it("falls back to inspected gateway listeners when sync verification misses on Windows", async () => { + await withPreparedGatewayTask(async ({ env, stdout }) => { + pushSuccessfulSchtasksResponses(3); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); + inspectPortUsage + .mockResolvedValueOnce( + busyPortUsage(6262, { + commandLine: + '"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port 18789', + }), + ) + .mockResolvedValueOnce(freePortUsage()); + + await 
stopScheduledTask({ env, stdout }); + + expectGatewayTermination(6262); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + }); + }); + + it("kills lingering verified gateway listeners and waits for port release before restart", async () => { + await withPreparedGatewayTask(async ({ env, stdout }) => { + pushSuccessfulSchtasksResponses(4); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([5151]); + inspectPortUsage + .mockResolvedValueOnce(busyPortUsage(5151)) + .mockResolvedValueOnce(freePortUsage()); + + await expect(restartScheduledTask({ env, stdout })).resolves.toEqual({ + outcome: "completed", + }); + + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(GATEWAY_PORT); + expectGatewayTermination(5151); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + expect(schtasksCalls.at(-1)).toEqual(["/Run", "/TN", "OpenClaw Gateway"]); + }); + }); +}); diff --git a/src/daemon/schtasks.ts b/src/daemon/schtasks.ts index 2c74cf26a61..2216e93bfd9 100644 --- a/src/daemon/schtasks.ts +++ b/src/daemon/schtasks.ts @@ -1,8 +1,11 @@ -import { spawn } from "node:child_process"; +import { spawn, spawnSync } from "node:child_process"; import fs from "node:fs/promises"; import path from "node:path"; +import { isGatewayArgv } from "../infra/gateway-process-argv.js"; +import { findVerifiedGatewayListenerPidsOnPortSync } from "../infra/gateway-processes.js"; import { inspectPortUsage } from "../infra/ports.js"; import { killProcessTree } from "../process/kill-tree.js"; +import { sleep } from "../utils.js"; import { parseCmdScriptCommandLine, quoteCmdScriptArg } from "./cmd-argv.js"; import { assertNoCmdLineBreak, parseCmdSetAssignment, renderCmdSetAssignment } from "./cmd-set.js"; import { resolveGatewayServiceDescription, resolveGatewayWindowsTaskName } from "./constants.js"; @@ -158,6 +161,12 @@ export type ScheduledTaskInfo = { lastRunResult?: string; }; +function hasListenerPid( + listener: T, +): listener is T & { pid: number } { + return 
typeof listener.pid === "number"; +} + export function parseSchtasksQuery(output: string): ScheduledTaskInfo { const entries = parseKeyValueOutput(output, ":"); const info: ScheduledTaskInfo = {}; @@ -311,8 +320,175 @@ function resolveConfiguredGatewayPort(env: GatewayServiceEnv): number | null { return Number.isFinite(parsed) && parsed > 0 ? parsed : null; } +function parsePositivePort(raw: string | undefined): number | null { + const value = raw?.trim(); + if (!value) { + return null; + } + if (!/^\d+$/.test(value)) { + return null; + } + const parsed = Number.parseInt(value, 10); + return Number.isFinite(parsed) && parsed > 0 && parsed <= 65535 ? parsed : null; +} + +function parsePortFromProgramArguments(programArguments?: string[]): number | null { + if (!programArguments?.length) { + return null; + } + for (let i = 0; i < programArguments.length; i += 1) { + const arg = programArguments[i]; + if (!arg) { + continue; + } + const inlineMatch = arg.match(/^--port=(\d+)$/); + if (inlineMatch) { + return parsePositivePort(inlineMatch[1]); + } + if (arg === "--port") { + return parsePositivePort(programArguments[i + 1]); + } + } + return null; +} + +async function resolveScheduledTaskPort(env: GatewayServiceEnv): Promise { + const command = await readScheduledTaskCommand(env).catch(() => null); + return ( + parsePortFromProgramArguments(command?.programArguments) ?? + parsePositivePort(command?.environment?.OPENCLAW_GATEWAY_PORT) ?? 
+ resolveConfiguredGatewayPort(env) + ); +} + +async function resolveScheduledTaskGatewayListenerPids(port: number): Promise { + const verified = findVerifiedGatewayListenerPidsOnPortSync(port); + if (verified.length > 0) { + return verified; + } + + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status !== "busy") { + return []; + } + + const matchedGatewayPids = Array.from( + new Set( + diagnostics.listeners + .filter( + (listener) => + typeof listener.pid === "number" && + listener.commandLine && + isGatewayArgv(parseCmdScriptCommandLine(listener.commandLine), { + allowGatewayBinary: true, + }), + ) + .map((listener) => listener.pid as number), + ), + ); + if (matchedGatewayPids.length > 0) { + return matchedGatewayPids; + } + + return Array.from( + new Set( + diagnostics.listeners + .map((listener) => listener.pid) + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), + ), + ); +} + +async function terminateScheduledTaskGatewayListeners(env: GatewayServiceEnv): Promise { + const port = await resolveScheduledTaskPort(env); + if (!port) { + return []; + } + const pids = await resolveScheduledTaskGatewayListenerPids(port); + for (const pid of pids) { + await terminateGatewayProcessTree(pid, 300); + } + return pids; +} + +function isProcessAlive(pid: number): boolean { + try { + process.kill(pid, 0); + return true; + } catch { + return false; + } +} + +async function waitForProcessExit(pid: number, timeoutMs: number): Promise { + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + if (!isProcessAlive(pid)) { + return true; + } + await sleep(100); + } + return !isProcessAlive(pid); +} + +async function terminateGatewayProcessTree(pid: number, graceMs: number): Promise { + if (process.platform !== "win32") { + killProcessTree(pid, { graceMs }); + return; + } + const taskkillPath = path.join( + process.env.SystemRoot ?? 
"C:\\Windows", + "System32", + "taskkill.exe", + ); + spawnSync(taskkillPath, ["/T", "/PID", String(pid)], { + stdio: "ignore", + timeout: 5_000, + windowsHide: true, + }); + if (await waitForProcessExit(pid, graceMs)) { + return; + } + spawnSync(taskkillPath, ["/F", "/T", "/PID", String(pid)], { + stdio: "ignore", + timeout: 5_000, + windowsHide: true, + }); + await waitForProcessExit(pid, 5_000); +} + +async function waitForGatewayPortRelease(port: number, timeoutMs = 5_000): Promise { + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status === "free") { + return true; + } + await sleep(250); + } + return false; +} + +async function terminateBusyPortListeners(port: number): Promise { + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status !== "busy") { + return []; + } + const pids = Array.from( + new Set( + diagnostics.listeners + .map((listener) => listener.pid) + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), + ), + ); + for (const pid of pids) { + await terminateGatewayProcessTree(pid, 300); + } + return pids; +} + async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise { - const port = resolveConfiguredGatewayPort(env); + const port = (await resolveScheduledTaskPort(env)) ?? resolveConfiguredGatewayPort(env); if (!port) { return { status: "unknown", @@ -326,7 +502,7 @@ async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise typeof item.pid === "number"); + const listener = diagnostics.listeners.find(hasListenerPid); return { status: diagnostics.status === "busy" ? "running" : "stopped", ...(listener?.pid ? 
{ pid: listener.pid } : {}), @@ -343,18 +519,28 @@ async function stopStartupEntry( ): Promise { const runtime = await resolveFallbackRuntime(env); if (typeof runtime.pid === "number" && runtime.pid > 0) { - killProcessTree(runtime.pid, { graceMs: 300 }); + await terminateGatewayProcessTree(runtime.pid, 300); } stdout.write(`${formatLine("Stopped Windows login item", resolveTaskName(env))}\n`); } +async function terminateInstalledStartupRuntime(env: GatewayServiceEnv): Promise { + if (!(await isStartupEntryInstalled(env))) { + return; + } + const runtime = await resolveFallbackRuntime(env); + if (typeof runtime.pid === "number" && runtime.pid > 0) { + await terminateGatewayProcessTree(runtime.pid, 300); + } +} + async function restartStartupEntry( env: GatewayServiceEnv, stdout: NodeJS.WritableStream, ): Promise { const runtime = await resolveFallbackRuntime(env); if (typeof runtime.pid === "number" && runtime.pid > 0) { - killProcessTree(runtime.pid, { graceMs: 300 }); + await terminateGatewayProcessTree(runtime.pid, 300); } launchFallbackTaskScript(resolveTaskScriptPath(env)); stdout.write(`${formatLine("Restarted Windows login item", resolveTaskName(env))}\n`); @@ -489,6 +675,19 @@ export async function stopScheduledTask({ stdout, env }: GatewayServiceControlAr if (res.code !== 0 && !isTaskNotRunning(res)) { throw new Error(`schtasks end failed: ${res.stderr || res.stdout}`.trim()); } + const stopPort = await resolveScheduledTaskPort(effectiveEnv); + await terminateScheduledTaskGatewayListeners(effectiveEnv); + await terminateInstalledStartupRuntime(effectiveEnv); + if (stopPort) { + const released = await waitForGatewayPortRelease(stopPort); + if (!released) { + await terminateBusyPortListeners(stopPort); + const releasedAfterForce = await waitForGatewayPortRelease(stopPort, 2_000); + if (!releasedAfterForce) { + throw new Error(`gateway port ${stopPort} is still busy after stop`); + } + } + } stdout.write(`${formatLine("Stopped Scheduled Task", taskName)}\n`); 
} @@ -512,6 +711,19 @@ export async function restartScheduledTask({ } const taskName = resolveTaskName(effectiveEnv); await execSchtasks(["/End", "/TN", taskName]); + const restartPort = await resolveScheduledTaskPort(effectiveEnv); + await terminateScheduledTaskGatewayListeners(effectiveEnv); + await terminateInstalledStartupRuntime(effectiveEnv); + if (restartPort) { + const released = await waitForGatewayPortRelease(restartPort); + if (!released) { + await terminateBusyPortListeners(restartPort); + const releasedAfterForce = await waitForGatewayPortRelease(restartPort, 2_000); + if (!releasedAfterForce) { + throw new Error(`gateway port ${restartPort} is still busy before restart`); + } + } + } const res = await execSchtasks(["/Run", "/TN", taskName]); if (res.code !== 0) { throw new Error(`schtasks run failed: ${res.stderr || res.stdout}`.trim()); diff --git a/src/daemon/service-audit.test.ts b/src/daemon/service-audit.test.ts index ffdd0fa526d..f7e87b6a518 100644 --- a/src/daemon/service-audit.test.ts +++ b/src/daemon/service-audit.test.ts @@ -6,6 +6,53 @@ import { } from "./service-audit.js"; import { buildMinimalServicePath } from "./service-env.js"; +function hasIssue( + audit: Awaited>, + code: (typeof SERVICE_AUDIT_CODES)[keyof typeof SERVICE_AUDIT_CODES], +) { + return audit.issues.some((issue) => issue.code === code); +} + +function createGatewayAudit({ + expectedGatewayToken, + path = "/usr/local/bin:/usr/bin:/bin", + serviceToken, + environmentValueSources, +}: { + expectedGatewayToken?: string; + path?: string; + serviceToken?: string; + environmentValueSources?: Record; +} = {}) { + return auditGatewayServiceConfig({ + env: { HOME: "/tmp" }, + platform: "linux", + expectedGatewayToken, + command: { + programArguments: ["/usr/bin/node", "gateway"], + environment: { + PATH: path, + ...(serviceToken ? { OPENCLAW_GATEWAY_TOKEN: serviceToken } : {}), + }, + ...(environmentValueSources ? 
{ environmentValueSources } : {}), + }, + }); +} + +function expectTokenAudit( + audit: Awaited>, + { + embedded, + mismatch, + }: { + embedded: boolean; + mismatch: boolean; + }, +) { + expect(hasIssue(audit, SERVICE_AUDIT_CODES.gatewayTokenEmbedded)).toBe(embedded); + expect(hasIssue(audit, SERVICE_AUDIT_CODES.gatewayTokenMismatch)).toBe(mismatch); +} + describe("auditGatewayServiceConfig", () => { it("flags bun runtime", async () => { const audit = await auditGatewayServiceConfig({ @@ -66,89 +113,37 @@ describe("auditGatewayServiceConfig", () => { }); it("flags gateway token mismatch when service token is stale", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - OPENCLAW_GATEWAY_TOKEN: "old-token", - }, - }, + serviceToken: "old-token", }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(true); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - ).toBe(true); + expectTokenAudit(audit, { embedded: true, mismatch: true }); }); it("flags embedded service token even when it matches config token", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - OPENCLAW_GATEWAY_TOKEN: "new-token", - }, - }, + serviceToken: "new-token", }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(true); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - 
).toBe(false); + expectTokenAudit(audit, { embedded: true, mismatch: false }); }); it("does not flag token issues when service token is not embedded", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - }, - }, }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(false); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - ).toBe(false); + expectTokenAudit(audit, { embedded: false, mismatch: false }); }); it("does not treat EnvironmentFile-backed tokens as embedded", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - OPENCLAW_GATEWAY_TOKEN: "old-token", - }, - environmentValueSources: { - OPENCLAW_GATEWAY_TOKEN: "file", - }, + serviceToken: "old-token", + environmentValueSources: { + OPENCLAW_GATEWAY_TOKEN: "file", }, }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(false); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - ).toBe(false); + expectTokenAudit(audit, { embedded: false, mismatch: false }); }); }); diff --git a/src/daemon/systemd.test.ts b/src/daemon/systemd.test.ts index 1d72adaaf43..0041107264a 100644 --- a/src/daemon/systemd.test.ts +++ b/src/daemon/systemd.test.ts @@ -25,6 +25,10 @@ type ExecFileError = Error & { code?: string | number; }; +const TEST_SERVICE_HOME = "/home/test"; +const TEST_MANAGED_HOME = 
"/tmp/openclaw-test-home"; +const GATEWAY_SERVICE = "openclaw-gateway.service"; + const createExecFileError = ( message: string, options: { stderr?: string; code?: string | number } = {}, @@ -58,6 +62,48 @@ function pathLikeToString(pathname: unknown): string { return ""; } +function assertUserSystemctlArgs(args: string[], ...command: string[]) { + expect(args).toEqual(["--user", ...command]); +} + +function assertMachineUserSystemctlArgs(args: string[], user: string, ...command: string[]) { + expect(args).toEqual(["--machine", `${user}@`, "--user", ...command]); +} + +async function readManagedServiceEnabled(env: NodeJS.ProcessEnv = { HOME: TEST_MANAGED_HOME }) { + const { isSystemdServiceEnabled } = await import("./systemd.js"); + vi.spyOn(fs, "access").mockResolvedValue(undefined); + return isSystemdServiceEnabled({ env }); +} + +function mockReadGatewayServiceFile( + unitLines: string[], + extraFiles: Record = {}, +) { + return vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { + const pathValue = pathLikeToString(pathname); + if (pathValue.endsWith(`/${GATEWAY_SERVICE}`)) { + return unitLines.join("\n"); + } + const extraFile = extraFiles[pathValue]; + if (typeof extraFile === "string") { + return extraFile; + } + if (extraFile instanceof Error) { + throw extraFile; + } + throw new Error(`unexpected readFile path: ${pathValue}`); + }); +} + +async function expectExecStartWithoutEnvironment(envFileLine: string) { + mockReadGatewayServiceFile(["[Service]", "ExecStart=/usr/bin/openclaw gateway run", envFileLine]); + + const command = await readSystemdServiceExecStart({ HOME: TEST_SERVICE_HOME }); + expect(command?.programArguments).toEqual(["/usr/bin/openclaw", "gateway", "run"]); + expect(command?.environment).toBeUndefined(); +} + const assertRestartSuccess = async (env: NodeJS.ProcessEnv) => { const { write, stdout } = createWritableStreamMock(); await restartSystemdService({ stdout, env }); @@ -118,24 +164,18 @@ describe("systemd availability", 
() => { }); describe("isSystemdServiceEnabled", () => { - const mockManagedUnitPresent = () => { - vi.spyOn(fs, "access").mockResolvedValue(undefined); - }; - beforeEach(() => { vi.restoreAllMocks(); execFileMock.mockReset(); }); it("returns false when systemctl is not present", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementation((_cmd, _args, _opts, cb) => { const err = new Error("spawn systemctl EACCES") as Error & { code?: string }; err.code = "EACCES"; cb(err, "", ""); }); - const result = await isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }); + const result = await readManagedServiceEnabled(); expect(result).toBe(false); }); @@ -152,55 +192,45 @@ describe("isSystemdServiceEnabled", () => { }); it("calls systemctl is-enabled when systemctl is present", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); cb(null, "enabled", ""); }); - const result = await isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }); + const result = await readManagedServiceEnabled(); expect(result).toBe(true); }); it("returns false when systemctl reports disabled", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, _args, _opts, cb) => { const err = new Error("disabled") as Error & { code?: number }; err.code = 1; cb(err, "disabled", ""); }); - const result = await isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }); + const result = await readManagedServiceEnabled(); expect(result).toBe(false); }); it("returns false for the WSL2 Ubuntu 24.04 wrapper-only is-enabled failure", async 
() => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); const err = new Error( - "Command failed: systemctl --user is-enabled openclaw-gateway.service", + `Command failed: systemctl --user is-enabled ${GATEWAY_SERVICE}`, ) as Error & { code?: number }; err.code = 1; cb(err, "", ""); }); - await expect( - isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }), - ).rejects.toThrow( - "systemctl is-enabled unavailable: Command failed: systemctl --user is-enabled openclaw-gateway.service", + await expect(readManagedServiceEnabled()).rejects.toThrow( + `systemctl is-enabled unavailable: Command failed: systemctl --user is-enabled ${GATEWAY_SERVICE}`, ); }); it("returns false when is-enabled cannot connect to the user bus without machine fallback", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); vi.spyOn(os, "userInfo").mockImplementationOnce(() => { throw new Error("no user info"); }); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); cb( createExecFileError("Failed to connect to bus", { stderr: "Failed to connect to bus" }), "", @@ -209,18 +239,14 @@ describe("isSystemdServiceEnabled", () => { }); await expect( - isSystemdServiceEnabled({ - env: { HOME: "/tmp/openclaw-test-home", USER: "", LOGNAME: "" }, - }), + readManagedServiceEnabled({ HOME: TEST_MANAGED_HOME, USER: "", LOGNAME: "" }), ).rejects.toThrow("systemctl is-enabled unavailable: Failed to connect to bus"); }); it("returns false when both direct and machine-scope is-enabled checks report bus unavailability", async () => { - 
const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); cb( createExecFileError("Failed to connect to bus", { stderr: "Failed to connect to bus" }), "", @@ -228,13 +254,7 @@ describe("isSystemdServiceEnabled", () => { ); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual([ - "--machine", - "debian@", - "--user", - "is-enabled", - "openclaw-gateway.service", - ]); + assertMachineUserSystemctlArgs(args, "debian", "is-enabled", GATEWAY_SERVICE); cb( createExecFileError("Failed to connect to user scope bus via local transport", { stderr: @@ -246,32 +266,28 @@ describe("isSystemdServiceEnabled", () => { }); await expect( - isSystemdServiceEnabled({ - env: { HOME: "/tmp/openclaw-test-home", USER: "debian" }, - }), + readManagedServiceEnabled({ HOME: TEST_MANAGED_HOME, USER: "debian" }), ).rejects.toThrow("systemctl is-enabled unavailable: Failed to connect to user scope bus"); }); it("throws when generic wrapper errors report infrastructure failures", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); const err = new Error( - "Command failed: systemctl --user is-enabled openclaw-gateway.service", + `Command failed: systemctl --user is-enabled ${GATEWAY_SERVICE}`, ) as Error & { code?: number }; err.code = 1; cb(err, "", "read-only file system"); }); - await expect( - isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }), - ).rejects.toThrow("systemctl is-enabled unavailable: read-only file system"); + await 
expect(readManagedServiceEnabled()).rejects.toThrow( + "systemctl is-enabled unavailable: read-only file system", + ); }); it("throws when systemctl is-enabled fails for non-state errors", async () => { const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); + vi.spyOn(fs, "access").mockResolvedValue(undefined); execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); @@ -294,7 +310,7 @@ describe("isSystemdServiceEnabled", () => { it("returns false when systemctl is-enabled exits with code 4 (not-found)", async () => { const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); + vi.spyOn(fs, "access").mockResolvedValue(undefined); execFileMock.mockImplementationOnce((_cmd, _args, _opts, cb) => { // On Ubuntu 24.04, `systemctl --user is-enabled ` exits with // code 4 and prints "not-found" to stdout when the unit doesn't exist. @@ -463,82 +479,38 @@ describe("readSystemdServiceExecStart", () => { }); it("loads OPENCLAW_GATEWAY_TOKEN from EnvironmentFile", async () => { - const readFileSpy = vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=%h/.openclaw/.env", - ].join("\n"); - } - if (pathValue === "/home/test/.openclaw/.env") { - return "OPENCLAW_GATEWAY_TOKEN=env-file-token\n"; - } - throw new Error(`unexpected readFile path: ${pathValue}`); - }); + const readFileSpy = mockReadGatewayServiceFile( + ["[Service]", "ExecStart=/usr/bin/openclaw gateway run", "EnvironmentFile=%h/.openclaw/.env"], + { [`${TEST_SERVICE_HOME}/.openclaw/.env`]: "OPENCLAW_GATEWAY_TOKEN=env-file-token\n" }, + ); - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); + const command = await 
readSystemdServiceExecStart({ HOME: TEST_SERVICE_HOME }); expect(command?.environment?.OPENCLAW_GATEWAY_TOKEN).toBe("env-file-token"); expect(readFileSpy).toHaveBeenCalledTimes(2); }); it("lets EnvironmentFile override inline Environment values", async () => { - vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=%h/.openclaw/.env", - 'Environment="OPENCLAW_GATEWAY_TOKEN=inline-token"', - ].join("\n"); - } - if (pathValue === "/home/test/.openclaw/.env") { - return "OPENCLAW_GATEWAY_TOKEN=env-file-token\n"; - } - throw new Error(`unexpected readFile path: ${pathValue}`); - }); + mockReadGatewayServiceFile( + [ + "[Service]", + "ExecStart=/usr/bin/openclaw gateway run", + "EnvironmentFile=%h/.openclaw/.env", + 'Environment="OPENCLAW_GATEWAY_TOKEN=inline-token"', + ], + { [`${TEST_SERVICE_HOME}/.openclaw/.env`]: "OPENCLAW_GATEWAY_TOKEN=env-file-token\n" }, + ); - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); + const command = await readSystemdServiceExecStart({ HOME: TEST_SERVICE_HOME }); expect(command?.environment?.OPENCLAW_GATEWAY_TOKEN).toBe("env-file-token"); expect(command?.environmentValueSources?.OPENCLAW_GATEWAY_TOKEN).toBe("file"); }); it("ignores missing optional EnvironmentFile entries", async () => { - vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=-%h/.openclaw/missing.env", - ].join("\n"); - } - throw new Error(`missing: ${pathValue}`); - }); - - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); - expect(command?.programArguments).toEqual(["/usr/bin/openclaw", "gateway", "run"]); - 
expect(command?.environment).toBeUndefined(); + await expectExecStartWithoutEnvironment("EnvironmentFile=-%h/.openclaw/missing.env"); }); it("keeps parsing when non-optional EnvironmentFile entries are missing", async () => { - vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=%h/.openclaw/missing.env", - ].join("\n"); - } - throw new Error(`missing: ${pathValue}`); - }); - - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); - expect(command?.programArguments).toEqual(["/usr/bin/openclaw", "gateway", "run"]); - expect(command?.environment).toBeUndefined(); + await expectExecStartWithoutEnvironment("EnvironmentFile=%h/.openclaw/missing.env"); }); it("supports multiple EnvironmentFile entries and quoted paths", async () => { @@ -631,7 +603,7 @@ describe("readSystemdServiceExecStart", () => { describe("systemd service control", () => { const assertMachineRestartArgs = (args: string[]) => { - expect(args).toEqual(["--machine", "debian@", "--user", "restart", "openclaw-gateway.service"]); + assertMachineUserSystemctlArgs(args, "debian", "restart", GATEWAY_SERVICE); }; beforeEach(() => { @@ -642,7 +614,7 @@ describe("systemd service control", () => { execFileMock .mockImplementationOnce((_cmd, _args, _opts, cb) => cb(null, "", "")) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "stop", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "stop", GATEWAY_SERVICE); cb(null, "", ""); }); const write = vi.fn(); @@ -664,7 +636,7 @@ describe("systemd service control", () => { ), ) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "stop", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "stop", GATEWAY_SERVICE); cb(null, "", ""); }); @@ -678,7 
+650,7 @@ describe("systemd service control", () => { execFileMock .mockImplementationOnce((_cmd, _args, _opts, cb) => cb(null, "", "")) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "restart", "openclaw-gateway-work.service"]); + assertUserSystemctlArgs(args, "restart", "openclaw-gateway-work.service"); cb(null, "", ""); }); await assertRestartSuccess({ OPENCLAW_PROFILE: "work" }); @@ -724,7 +696,7 @@ describe("systemd service control", () => { it("targets the sudo caller's user scope when SUDO_USER is set", async () => { execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--machine", "debian@", "--user", "status"]); + assertMachineUserSystemctlArgs(args, "debian", "status"); cb(null, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { @@ -737,11 +709,11 @@ describe("systemd service control", () => { it("keeps direct --user scope when SUDO_USER is root", async () => { execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "status"]); + assertUserSystemctlArgs(args, "status"); cb(null, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "restart", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "restart", GATEWAY_SERVICE); cb(null, "", ""); }); await assertRestartSuccess({ SUDO_USER: "root", USER: "root" }); @@ -750,7 +722,7 @@ describe("systemd service control", () => { it("falls back to machine user scope for restart when user bus env is missing", async () => { execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "status"]); + assertUserSystemctlArgs(args, "status"); const err = createExecFileError("Failed to connect to user scope bus", { stderr: "Failed to connect to user scope bus via local transport: $DBUS_SESSION_BUS_ADDRESS and $XDG_RUNTIME_DIR not defined", @@ -758,11 +730,11 @@ describe("systemd service control", 
() => { cb(err, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--machine", "debian@", "--user", "status"]); + assertMachineUserSystemctlArgs(args, "debian", "status"); cb(null, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "restart", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "restart", GATEWAY_SERVICE); const err = createExecFileError("Failed to connect to user scope bus", { stderr: "Failed to connect to user scope bus", }); diff --git a/src/daemon/test-helpers/schtasks-base-mocks.ts b/src/daemon/test-helpers/schtasks-base-mocks.ts new file mode 100644 index 00000000000..e3f0f950482 --- /dev/null +++ b/src/daemon/test-helpers/schtasks-base-mocks.ts @@ -0,0 +1,22 @@ +import { vi } from "vitest"; +import { + inspectPortUsage, + killProcessTree, + schtasksCalls, + schtasksResponses, +} from "./schtasks-fixtures.js"; + +vi.mock("../schtasks-exec.js", () => ({ + execSchtasks: async (argv: string[]) => { + schtasksCalls.push(argv); + return schtasksResponses.shift() ?? 
{ code: 0, stdout: "", stderr: "" }; +}, +})); + +vi.mock("../../infra/ports.js", () => ({ + inspectPortUsage: (port: number) => inspectPortUsage(port), +})); + +vi.mock("../../process/kill-tree.js", () => ({ + killProcessTree: (pid: number, opts?: { graceMs?: number }) => killProcessTree(pid, opts), +})); diff --git a/src/daemon/test-helpers/schtasks-fixtures.ts b/src/daemon/test-helpers/schtasks-fixtures.ts new file mode 100644 index 00000000000..9755acefae7 --- /dev/null +++ b/src/daemon/test-helpers/schtasks-fixtures.ts @@ -0,0 +1,57 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { vi } from "vitest"; +import type { PortUsage } from "../../infra/ports-types.js"; +import type { killProcessTree as killProcessTreeImpl } from "../../process/kill-tree.js"; +import type { MockFn } from "../../test-utils/vitest-mock-fn.js"; +import { resolveTaskScriptPath } from "../schtasks.js"; + +export const schtasksResponses: Array<{ code: number; stdout: string; stderr: string }> = []; +export const schtasksCalls: string[][] = []; + +export const inspectPortUsage: MockFn<(port: number) => Promise<PortUsage>> = vi.fn(); +export const killProcessTree: MockFn<typeof killProcessTreeImpl> = vi.fn(); + +export async function withWindowsEnv( + prefix: string, + run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>, +) { + const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const env = { + USERPROFILE: tmpDir, + APPDATA: path.join(tmpDir, "AppData", "Roaming"), + OPENCLAW_PROFILE: "default", + OPENCLAW_GATEWAY_PORT: "18789", + }; + try { + await run({ tmpDir, env }); + } finally { + await fs.rm(tmpDir, { recursive: true, force: true }); + } +} + +export function resetSchtasksBaseMocks() { + schtasksResponses.length = 0; + schtasksCalls.length = 0; + inspectPortUsage.mockReset(); + killProcessTree.mockReset(); +} + +export async function writeGatewayScript( + env: Record<string, string>, + port = Number(env.OPENCLAW_GATEWAY_PORT || "18789"), +) { + const scriptPath 
= resolveTaskScriptPath(env); + await fs.mkdir(path.dirname(scriptPath), { recursive: true }); + await fs.writeFile( + scriptPath, + [ + "@echo off", + `set "OPENCLAW_GATEWAY_PORT=${port}"`, + `"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`, + "", + ].join("\r\n"), + "utf8", + ); +} diff --git a/src/discord/monitor.test.ts b/src/discord/monitor.test.ts index 9471a3fe6bc..d3289155699 100644 --- a/src/discord/monitor.test.ts +++ b/src/discord/monitor.test.ts @@ -247,6 +247,18 @@ describe("discord guild/channel resolution", () => { expect(resolved?.slug).toBe("friends-of-openclaw"); }); + it("resolves guild entry by raw guild id when guild object is missing", () => { + const guildEntries = makeEntries({ + "123": { slug: "friends-of-openclaw" }, + }); + const resolved = resolveDiscordGuildEntry({ + guildId: "123", + guildEntries, + }); + expect(resolved?.id).toBe("123"); + expect(resolved?.slug).toBe("friends-of-openclaw"); + }); + it("resolves guild entry by slug key", () => { const guildEntries = makeEntries({ "friends-of-openclaw": { slug: "friends-of-openclaw" }, diff --git a/src/discord/monitor/agent-components.ts b/src/discord/monitor/agent-components.ts index 56e7dfe3240..80239ea51d7 100644 --- a/src/discord/monitor/agent-components.ts +++ b/src/discord/monitor/agent-components.ts @@ -360,6 +360,7 @@ async function ensureAgentComponentInteractionAllowed(params: { }): Promise<{ parentId: string | undefined } | null> { const guildInfo = resolveDiscordGuildEntry({ guild: params.interaction.guild ?? 
undefined, + guildId: params.rawGuildId, guildEntries: params.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(params.interaction); @@ -1094,6 +1095,7 @@ async function handleDiscordComponentEvent(params: { const { channelId, user, replyOpts, rawGuildId, memberRoleIds } = interactionCtx; const guildInfo = resolveDiscordGuildEntry({ guild: params.interaction.guild ?? undefined, + guildId: rawGuildId, guildEntries: params.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(params.interaction); @@ -1246,6 +1248,7 @@ async function handleDiscordModalTrigger(params: { const { channelId, user, replyOpts, rawGuildId, memberRoleIds } = interactionCtx; const guildInfo = resolveDiscordGuildEntry({ guild: params.interaction.guild ?? undefined, + guildId: rawGuildId, guildEntries: params.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(params.interaction); @@ -1696,6 +1699,7 @@ class DiscordComponentModal extends Modal { const { channelId, user, replyOpts, rawGuildId, memberRoleIds } = interactionCtx; const guildInfo = resolveDiscordGuildEntry({ guild: interaction.guild ?? 
 undefined, + guildId: rawGuildId, + guildEntries: this.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(interaction); diff --git a/src/discord/monitor/allow-list.ts b/src/discord/monitor/allow-list.ts index 7c1250cb8ef..353ab8635be 100644 --- a/src/discord/monitor/allow-list.ts +++ b/src/discord/monitor/allow-list.ts @@ -19,34 +19,7 @@ export type DiscordAllowListMatch = AllowlistMatch<"wildcard" | "id" | "name" | const DISCORD_OWNER_ALLOWLIST_PREFIXES = ["discord:", "user:", "pk:"]; -export type DiscordGuildEntryResolved = { - id?: string; - slug?: string; - requireMention?: boolean; - ignoreOtherMentions?: boolean; - reactionNotifications?: "off" | "own" | "all" | "allowlist"; - users?: string[]; - roles?: string[]; - channels?: Record< - string, - { - allow?: boolean; - requireMention?: boolean; - ignoreOtherMentions?: boolean; - skills?: string[]; - enabled?: boolean; - users?: string[]; - roles?: string[]; - systemPrompt?: string; - includeThreadStarter?: boolean; - autoThread?: boolean; - autoArchiveDuration?: "60" | "1440" | "4320" | "10080" | 60 | 1440 | 4320 | 10080; - } - >; -}; - -export type DiscordChannelConfigResolved = { - allowed: boolean; +type DiscordChannelOverrideConfig = { requireMention?: boolean; ignoreOtherMentions?: boolean; skills?: string[]; @@ -57,6 +30,21 @@ export type DiscordChannelConfigResolved = { includeThreadStarter?: boolean; autoThread?: boolean; autoArchiveDuration?: "60" | "1440" | "4320" | "10080" | 60 | 1440 | 4320 | 10080; +}; + +export type DiscordGuildEntryResolved = { + id?: string; + slug?: string; + requireMention?: boolean; + ignoreOtherMentions?: boolean; + reactionNotifications?: "off" | "own" | "all" | "allowlist"; + users?: string[]; + roles?: string[]; + channels?: Record<string, DiscordChannelOverrideConfig>; +}; + +export type DiscordChannelConfigResolved = DiscordChannelOverrideConfig & { + allowed: boolean; matchKey?: string; matchSource?: ChannelMatchSource; }; @@ -103,6 +91,21 @@ export function normalizeDiscordSlug(value: 
string) { .replace(/^-+|-+$/g, ""); } +function resolveDiscordAllowListNameMatch( + list: DiscordAllowList, + candidate: { name?: string; tag?: string }, +): { matchKey: string; matchSource: "name" | "tag" } | null { + const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; + if (nameSlug && list.names.has(nameSlug)) { + return { matchKey: nameSlug, matchSource: "name" }; + } + const tagSlug = candidate.tag ? normalizeDiscordSlug(candidate.tag) : ""; + if (tagSlug && list.names.has(tagSlug)) { + return { matchKey: tagSlug, matchSource: "tag" }; + } + return null; +} + export function allowListMatches( list: DiscordAllowList, candidate: { id?: string; name?: string; tag?: string }, @@ -115,11 +118,7 @@ export function allowListMatches( return true; } if (params?.allowNameMatching === true) { - const slug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; - if (slug && list.names.has(slug)) { - return true; - } - if (candidate.tag && list.names.has(normalizeDiscordSlug(candidate.tag))) { + if (resolveDiscordAllowListNameMatch(list, candidate)) { return true; } } @@ -139,13 +138,9 @@ export function resolveDiscordAllowListMatch(params: { return { allowed: true, matchKey: candidate.id, matchSource: "id" }; } if (params.allowNameMatching === true) { - const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; - if (nameSlug && allowList.names.has(nameSlug)) { - return { allowed: true, matchKey: nameSlug, matchSource: "name" }; - } - const tagSlug = candidate.tag ? 
normalizeDiscordSlug(candidate.tag) : ""; - if (tagSlug && allowList.names.has(tagSlug)) { - return { allowed: true, matchKey: tagSlug, matchSource: "tag" }; + const namedMatch = resolveDiscordAllowListNameMatch(allowList, candidate); + if (namedMatch) { + return { allowed: true, ...namedMatch }; } } return { allowed: false }; @@ -326,25 +321,30 @@ export function resolveDiscordCommandAuthorized(params: { export function resolveDiscordGuildEntry(params: { guild?: Guild | Guild | null; + guildId?: string | null; guildEntries?: Record; }): DiscordGuildEntryResolved | null { const guild = params.guild; const entries = params.guildEntries; - if (!guild || !entries) { + const guildId = params.guildId?.trim() || guild?.id; + if (!entries) { return null; } - const byId = entries[guild.id]; + const byId = guildId ? entries[guildId] : undefined; if (byId) { - return { ...byId, id: guild.id }; + return { ...byId, id: guildId }; + } + if (!guild) { + return null; } const slug = normalizeDiscordSlug(guild.name ?? ""); const bySlug = entries[slug]; if (bySlug) { - return { ...bySlug, id: guild.id, slug: slug || bySlug.slug }; + return { ...bySlug, id: guildId ?? guild.id, slug: slug || bySlug.slug }; } const wildcard = entries["*"]; if (wildcard) { - return { ...wildcard, id: guild.id, slug: slug || wildcard.slug }; + return { ...wildcard, id: guildId ?? 
 guild.id, slug: slug || wildcard.slug }; } return null; } diff --git a/src/discord/monitor/auto-presence.test.ts b/src/discord/monitor/auto-presence.test.ts index b5a83d5242d..d901a76d642 100644 --- a/src/discord/monitor/auto-presence.test.ts +++ b/src/discord/monitor/auto-presence.test.ts @@ -29,45 +29,33 @@ function createStore(params?: { }; } +function expectExhaustedDecision(params: { failureCounts: Record<string, number> }) { + const now = Date.now(); + const decision = resolveDiscordAutoPresenceDecision({ + discordConfig: { + autoPresence: { + enabled: true, + exhaustedText: "token exhausted", + }, + }, + authStore: createStore({ cooldownUntil: now + 60_000, failureCounts: params.failureCounts }), + gatewayConnected: true, + now, + }); + + expect(decision).toBeTruthy(); + expect(decision?.state).toBe("exhausted"); + expect(decision?.presence.status).toBe("dnd"); + expect(decision?.presence.activities[0]?.state).toBe("token exhausted"); +} + describe("discord auto presence", () => { it("maps exhausted runtime signal to dnd", () => { - const now = Date.now(); - const decision = resolveDiscordAutoPresenceDecision({ - discordConfig: { - autoPresence: { - enabled: true, - exhaustedText: "token exhausted", - }, - }, - authStore: createStore({ cooldownUntil: now + 60_000, failureCounts: { rate_limit: 2 } }), - gatewayConnected: true, - now, - }); - - expect(decision).toBeTruthy(); - expect(decision?.state).toBe("exhausted"); - expect(decision?.presence.status).toBe("dnd"); - expect(decision?.presence.activities[0]?.state).toBe("token exhausted"); + expectExhaustedDecision({ failureCounts: { rate_limit: 2 } }); }); it("treats overloaded cooldown as exhausted", () => { - const now = Date.now(); - const decision = resolveDiscordAutoPresenceDecision({ - discordConfig: { - autoPresence: { - enabled: true, - exhaustedText: "token exhausted", - }, - }, - authStore: createStore({ cooldownUntil: now + 60_000, failureCounts: { overloaded: 2 } }), - gatewayConnected: true, - now, - }); - - 
expect(decision).toBeTruthy(); - expect(decision?.state).toBe("exhausted"); - expect(decision?.presence.status).toBe("dnd"); - expect(decision?.presence.activities[0]?.state).toBe("token exhausted"); + expectExhaustedDecision({ failureCounts: { overloaded: 2 } }); }); it("recovers from exhausted to online once a profile becomes usable", () => { diff --git a/src/discord/monitor/exec-approvals.test.ts b/src/discord/monitor/exec-approvals.test.ts index 8f9430393a2..c7cb72b82ec 100644 --- a/src/discord/monitor/exec-approvals.test.ts +++ b/src/discord/monitor/exec-approvals.test.ts @@ -116,6 +116,62 @@ function createHandler(config: DiscordExecApprovalConfig, accountId = "default") }); } +function mockSuccessfulDmDelivery(params?: { + noteChannelId?: string; + expectedNoteText?: string; + throwOnUnexpectedRoute?: boolean; +}) { + mockRestPost.mockImplementation( + async (route: string, requestParams?: { body?: { content?: string } }) => { + if (params?.noteChannelId && route === Routes.channelMessages(params.noteChannelId)) { + if (params.expectedNoteText) { + expect(requestParams?.body?.content).toContain(params.expectedNoteText); + } + return { id: "note-1", channel_id: params.noteChannelId }; + } + if (route === Routes.userChannels()) { + return { id: "dm-1" }; + } + if (route === Routes.channelMessages("dm-1")) { + return { id: "msg-1", channel_id: "dm-1" }; + } + if (params?.throwOnUnexpectedRoute) { + throw new Error(`unexpected route: ${route}`); + } + return { id: "msg-unknown" }; + }, + ); +} + +async function expectGatewayAuthStart(params: { + handler: DiscordExecApprovalHandler; + expectedUrl: string; + expectedSource: "cli" | "env"; + expectedToken?: string; + expectedPassword?: string; +}) { + await params.handler.start(); + + expect(mockResolveGatewayConnectionAuth).toHaveBeenCalledWith( + expect.objectContaining({ + env: process.env, + urlOverride: params.expectedUrl, + urlOverrideSource: params.expectedSource, + }), + ); + + const expectedClientParams: 
Record<string, string> = { + url: params.expectedUrl, + }; + if (params.expectedToken !== undefined) { + expectedClientParams.token = params.expectedToken; + } + if (params.expectedPassword !== undefined) { + expectedClientParams.password = params.expectedPassword; + } + expect(mockGatewayClientCtor).toHaveBeenCalledWith(expect.objectContaining(expectedClientParams)); +} + type ExecApprovalHandlerInternals = { pending: Map< string, @@ -772,15 +828,7 @@ describe("DiscordExecApprovalHandler delivery routing", () => { }); const internals = getHandlerInternals(handler); - mockRestPost.mockImplementation(async (route: string) => { - if (route === Routes.userChannels()) { - return { id: "dm-1" }; - } - if (route === Routes.channelMessages("dm-1")) { - return { id: "msg-1", channel_id: "dm-1" }; - } - return { id: "msg-unknown" }; - }); + mockSuccessfulDmDelivery(); const request = createRequest({ sessionKey: "agent:main:discord:dm:123" }); await internals.handleApprovalRequested(request); @@ -809,21 +857,11 @@ describe("DiscordExecApprovalHandler delivery routing", () => { }); const internals = getHandlerInternals(handler); - mockRestPost.mockImplementation( - async (route: string, params?: { body?: { content?: string } }) => { - if (route === Routes.channelMessages("999888777")) { - expect(params?.body?.content).toContain("I sent the allowed approvers DMs"); - return { id: "note-1", channel_id: "999888777" }; - } - if (route === Routes.userChannels()) { - return { id: "dm-1" }; - } - if (route === Routes.channelMessages("dm-1")) { - return { id: "msg-1", channel_id: "dm-1" }; - } - throw new Error(`unexpected route: ${route}`); - }, - ); + mockSuccessfulDmDelivery({ + noteChannelId: "999888777", + expectedNoteText: "I sent the allowed approvers DMs", + throwOnUnexpectedRoute: true, + }); await internals.handleApprovalRequested(createRequest()); @@ -853,15 +891,7 @@ describe("DiscordExecApprovalHandler delivery routing", () => { }); const internals = getHandlerInternals(handler); - 
mockRestPost.mockImplementation(async (route: string) => { - if (route === Routes.userChannels()) { - return { id: "dm-1" }; - } - if (route === Routes.channelMessages("dm-1")) { - return { id: "msg-1", channel_id: "dm-1" }; - } - throw new Error(`unexpected route: ${route}`); - }); + mockSuccessfulDmDelivery({ throwOnUnexpectedRoute: true }); await internals.handleApprovalRequested( createRequest({ sessionKey: "agent:main:discord:dm:123" }), @@ -890,22 +920,13 @@ describe("DiscordExecApprovalHandler gateway auth resolution", () => { cfg: { session: { store: STORE_PATH } }, }); - await handler.start(); - - expect(mockResolveGatewayConnectionAuth).toHaveBeenCalledWith( - expect.objectContaining({ - env: process.env, - urlOverride: "wss://override.example/ws", - urlOverrideSource: "cli", - }), - ); - expect(mockGatewayClientCtor).toHaveBeenCalledWith( - expect.objectContaining({ - url: "wss://override.example/ws", - token: "resolved-token", - password: "resolved-password", // pragma: allowlist secret - }), - ); + await expectGatewayAuthStart({ + handler, + expectedUrl: "wss://override.example/ws", + expectedSource: "cli", + expectedToken: "resolved-token", + expectedPassword: "resolved-password", // pragma: allowlist secret + }); await handler.stop(); }); @@ -921,20 +942,11 @@ describe("DiscordExecApprovalHandler gateway auth resolution", () => { cfg: { session: { store: STORE_PATH } }, }); - await handler.start(); - - expect(mockResolveGatewayConnectionAuth).toHaveBeenCalledWith( - expect.objectContaining({ - env: process.env, - urlOverride: "wss://gateway-from-env.example/ws", - urlOverrideSource: "env", - }), - ); - expect(mockGatewayClientCtor).toHaveBeenCalledWith( - expect.objectContaining({ - url: "wss://gateway-from-env.example/ws", - }), - ); + await expectGatewayAuthStart({ + handler, + expectedUrl: "wss://gateway-from-env.example/ws", + expectedSource: "env", + }); await handler.stop(); } finally { diff --git a/src/discord/monitor/exec-approvals.ts 
b/src/discord/monitor/exec-approvals.ts index 87dc0c9a07d..8dd3156e991 100644 --- a/src/discord/monitor/exec-approvals.ts +++ b/src/discord/monitor/exec-approvals.ts @@ -252,17 +252,30 @@ function formatOptionalCommandPreview( return formatCommandPreview(commandText, maxChars); } +function resolveExecApprovalPreviews( + request: ExecApprovalRequest["request"], + maxChars: number, + secondaryMaxChars: number, +): { commandPreview: string; commandSecondaryPreview: string | null } { + const { commandText, commandPreview: secondaryPreview } = + resolveExecApprovalCommandDisplay(request); + return { + commandPreview: formatCommandPreview(commandText, maxChars), + commandSecondaryPreview: formatOptionalCommandPreview(secondaryPreview, secondaryMaxChars), + }; +} + function createExecApprovalRequestContainer(params: { request: ExecApprovalRequest; cfg: OpenClawConfig; accountId: string; actionRow?: Row