diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2761a7b0d3b..344cb400b85 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: concurrency: group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + cancel-in-progress: true env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" @@ -38,9 +38,8 @@ jobs: id: check uses: ./.github/actions/detect-docs-changes - # Detect which heavy areas are touched so PRs can skip unrelated expensive jobs. - # Push to main keeps broad coverage, but this job still needs to run so - # downstream jobs that list it in `needs` are not skipped. + # Detect which heavy areas are touched so CI can skip unrelated expensive jobs. + # Fail-safe: if detection fails, downstream jobs run. changed-scope: needs: [docs-scope] if: needs.docs-scope.outputs.docs_only != 'true' @@ -82,7 +81,7 @@ jobs: # Build dist once for Node-relevant changes and share it with downstream jobs. 
build-artifacts: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -141,7 +140,7 @@ jobs: checks: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false @@ -149,6 +148,13 @@ jobs: include: - runtime: node task: test + shard_index: 1 + shard_count: 2 + command: pnpm canvas:a2ui:bundle && pnpm test + - runtime: node + task: test + shard_index: 2 + shard_count: 2 command: pnpm canvas:a2ui:bundle && pnpm test - runtime: node task: extensions @@ -179,11 +185,18 @@ jobs: - name: Configure Node test resources if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + env: + SHARD_COUNT: ${{ matrix.shard_count || '' }} + SHARD_INDEX: ${{ matrix.shard_index || '' }} run: | # `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes. # Default heap limits have been too low on Linux CI (V8 OOM near 4GB). 
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV" echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV" + if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then + echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV" + echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV" + fi - name: Run ${{ matrix.task }} (${{ matrix.runtime }}) if: matrix.runtime != 'bun' || github.event_name != 'push' @@ -193,7 +206,7 @@ jobs: check: name: "check" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -239,7 +252,7 @@ jobs: compat-node22: name: "compat-node22" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -272,7 +285,7 @@ jobs: skills-python: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -365,7 +378,7 @@ jobs: checks-windows: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true' runs-on: blacksmith-32vcpu-windows-2025 
timeout-minutes: 45 env: @@ -727,7 +740,7 @@ jobs: android: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 00000000000..777b025b0c8 --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,16 @@ +{ + "gitignore": true, + "noSymlinks": true, + "ignore": [ + "**/node_modules/**", + "**/dist/**", + "dist/**", + "**/.git/**", + "**/coverage/**", + "**/build/**", + "**/.build/**", + "**/.artifacts/**", + "docs/zh-CN/**", + "**/CHANGELOG.md" + ] +} diff --git a/AGENTS.md b/AGENTS.md index de30fb15068..f7c2f34ce39 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -202,6 +202,13 @@ - Vocabulary: "makeup" = "mac app". - Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested. +- Parallels macOS smoke playbook: + - `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`. + - Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed. + - Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`. 
+ - For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green. + - Don’t run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially. + - Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading. - Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`. - When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`). - Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`. diff --git a/CHANGELOG.md b/CHANGELOG.md index f6cd4a438fa..6d602aa0761 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,11 @@ Docs: https://docs.openclaw.ai - Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus. - Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei. - iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman. 
+- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome’s own setup guides. ### Fixes +- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata. - Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang. - Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups. - Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding. @@ -22,6 +24,7 @@ Docs: https://docs.openclaw.ai - Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei. - Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference. - Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97. 
+- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart. - Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179. - Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus. - Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec. @@ -29,12 +32,14 @@ Docs: https://docs.openclaw.ai - Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh. - Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates. - Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path. +- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization. 
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark. - macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots. - Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello. - Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin. - Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss. - Telegram/native commands: preserve native command routing for `/fast` option-button callbacks even when text commands are disabled, so `/fast status|on|off` no longer falls through into normal model runs. Thanks @vincentkoc. +- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han. ## 2026.3.12 diff --git a/docs/ci.md b/docs/ci.md index 16a7e670964..fb4c4a252e5 100644 --- a/docs/ci.md +++ b/docs/ci.md @@ -9,32 +9,32 @@ read_when: # CI Pipeline -The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed. +The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed. 
## Job Overview -| Job | Purpose | When it runs | -| ----------------- | ------------------------------------------------------- | ------------------------------------------------- | -| `docs-scope` | Detect docs-only changes | Always | -| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs | -| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes | -| `check-docs` | Markdown lint + broken link check | Docs changed | -| `code-analysis` | LOC threshold check (1000 lines) | PRs only | -| `secrets` | Detect leaked secrets | Always | -| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes | -| `release-check` | Validate npm pack contents | After build | -| `checks` | Node/Bun tests + protocol check | Non-docs, node changes | -| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | -| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | -| `android` | Gradle build + tests | Non-docs, android changes | +| Job | Purpose | When it runs | +| ----------------- | ------------------------------------------------------- | ---------------------------------- | +| `docs-scope` | Detect docs-only changes | Always | +| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes | +| `check` | TypeScript types, lint, format | Non-docs, node changes | +| `check-docs` | Markdown lint + broken link check | Docs changed | +| `code-analysis` | LOC threshold check (1000 lines) | PRs only | +| `secrets` | Detect leaked secrets | Always | +| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes | +| `release-check` | Validate npm pack contents | After build | +| `checks` | Node/Bun tests + protocol check | Non-docs, node changes | +| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | +| `macos` | Swift lint/build/test + TS tests | PRs with macos 
changes | +| `android` | Gradle build + tests | Non-docs, android changes | ## Fail-Fast Order Jobs are ordered so cheap checks fail before expensive ones run: -1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min) -2. `build-artifacts` (blocked on above) -3. `checks`, `checks-windows`, `macos`, `android` (blocked on build) +1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first) +2. `build-artifacts` + `release-check` +3. `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android` Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`. diff --git a/docs/gateway/openresponses-http-api.md b/docs/gateway/openresponses-http-api.md index bcba166db9d..fa86f912ef5 100644 --- a/docs/gateway/openresponses-http-api.md +++ b/docs/gateway/openresponses-http-api.md @@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first. Under the hood, requests are executed as a normal Gateway agent run (same codepath as `openclaw agent`), so routing/permissions/config match your Gateway. -## Authentication +## Authentication, security, and routing -Uses the Gateway auth configuration. Send a bearer token: +Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api): -- `Authorization: Bearer ` +- use `Authorization: Bearer ` with the normal Gateway auth config +- treat the endpoint as full operator access for the gateway instance +- select agents with `model: "openclaw:"`, `model: "agent:"`, or `x-openclaw-agent-id` +- use `x-openclaw-session-key` for explicit session routing -Notes: - -- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`). -- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`). -- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`. 
- -## Security boundary (important) - -Treat this endpoint as a **full operator-access** surface for the gateway instance. - -- HTTP bearer auth here is not a narrow per-user scope model. -- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential. -- Requests run through the same control-plane agent path as trusted operator actions. -- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway. -- If the target agent policy allows sensitive tools, this endpoint can use them. -- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet. - -See [Security](/gateway/security) and [Remote access](/gateway/remote). - -## Choosing an agent - -No custom headers required: encode the agent id in the OpenResponses `model` field: - -- `model: "openclaw:"` (example: `"openclaw:main"`, `"openclaw:beta"`) -- `model: "agent:"` (alias) - -Or target a specific OpenClaw agent by header: - -- `x-openclaw-agent-id: ` (default: `main`) - -Advanced: - -- `x-openclaw-session-key: ` to fully control session routing. - -## Enabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `true`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: true }, - }, - }, - }, -} -``` - -## Disabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `false`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: false }, - }, - }, - }, -} -``` +Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`. 
## Session behavior diff --git a/docs/install/docker-vm-runtime.md b/docs/install/docker-vm-runtime.md new file mode 100644 index 00000000000..77436f44486 --- /dev/null +++ b/docs/install/docker-vm-runtime.md @@ -0,0 +1,138 @@ +--- +summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts" +read_when: + - You are deploying OpenClaw on a cloud VM with Docker + - You need the shared binary bake, persistence, and update flow +title: "Docker VM Runtime" +--- + +# Docker VM Runtime + +Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers. + +## Bake required binaries into the image + +Installing binaries inside a running container is a trap. +Anything installed at runtime will be lost on restart. + +All external binaries required by skills must be installed at image build time. + +The examples below show three common binaries only: + +- `gog` for Gmail access +- `goplaces` for Google Places +- `wacli` for WhatsApp + +These are examples, not a complete list. +You may install as many binaries as needed using the same pattern. + +If you add new skills later that depend on additional binaries, you must: + +1. Update the Dockerfile +2. Rebuild the image +3. 
Restart the containers + +**Example Dockerfile** + +```dockerfile +FROM node:24-bookworm + +RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* + +# Example binary 1: Gmail CLI +RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog + +# Example binary 2: Google Places CLI +RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces + +# Example binary 3: WhatsApp CLI +RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli + +# Add more binaries below using the same pattern + +WORKDIR /app +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ +COPY ui/package.json ./ui/package.json +COPY scripts ./scripts + +RUN corepack enable +RUN pnpm install --frozen-lockfile + +COPY . . +RUN pnpm build +RUN pnpm ui:install +RUN pnpm ui:build + +ENV NODE_ENV=production + +CMD ["node","dist/index.js"] +``` + +## Build and launch + +```bash +docker compose build +docker compose up -d openclaw-gateway +``` + +If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. +Use a larger machine class before retrying. + +Verify binaries: + +```bash +docker compose exec openclaw-gateway which gog +docker compose exec openclaw-gateway which goplaces +docker compose exec openclaw-gateway which wacli +``` + +Expected output: + +``` +/usr/local/bin/gog +/usr/local/bin/goplaces +/usr/local/bin/wacli +``` + +Verify Gateway: + +```bash +docker compose logs -f openclaw-gateway +``` + +Expected output: + +``` +[gateway] listening on ws://0.0.0.0:18789 +``` + +## What persists where + +OpenClaw runs in Docker, but Docker is not the source of truth. 
+All long-lived state must survive restarts, rebuilds, and reboots. + +| Component | Location | Persistence mechanism | Notes | +| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | +| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | +| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | +| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | +| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | +| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | +| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | +| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | +| Node runtime | Container filesystem | Docker image | Rebuilt every image build | +| OS packages | Container filesystem | Docker image | Do not install at runtime | +| Docker container | Ephemeral | Restartable | Safe to destroy | + +## Updates + +To update OpenClaw on the VM: + +```bash +git pull +docker compose build +docker compose up -d +``` diff --git a/docs/install/gcp.md b/docs/install/gcp.md index dfedfe4ba38..7ff4a00d087 100644 --- a/docs/install/gcp.md +++ b/docs/install/gcp.md @@ -281,77 +281,20 @@ services: --- -## 10) Bake required binaries into the image (critical) +## 10) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. - -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. 
-You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:24-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . -RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 11) Build and launch +## 11) GCP-specific launch notes -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. 
+On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing: @@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins If you changed the gateway port, replace `18789` with your configured port. -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 12) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - ---- - -## 13) Access from your laptop +## 12) Access from your laptop Create an SSH tunnel to forward the Gateway port: @@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list docker compose run --rm openclaw-cli devices approve ``` ---- - -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- - -## Updates - -To update OpenClaw on the VM: - -```bash -cd ~/openclaw -git pull -docker compose build -docker compose up -d -``` +Need the shared persistence and update reference again? +See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates). --- diff --git a/docs/install/hetzner.md b/docs/install/hetzner.md index 4c27840cee0..46bc76d6243 100644 --- a/docs/install/hetzner.md +++ b/docs/install/hetzner.md @@ -202,107 +202,20 @@ services: --- -## 7) Bake required binaries into the image (critical) +## 7) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. 
- -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. -You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:24-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . 
-RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 8) Build and launch +## 8) Hetzner-specific access -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 9) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - -From your laptop: +After the shared build and launch steps, tunnel from your laptop: ```bash ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP @@ -316,25 +229,7 @@ Paste your gateway token. --- -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- +The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where). ## Infrastructure as Code (Terraform) diff --git a/docs/plugins/voice-call.md b/docs/plugins/voice-call.md index 17263ca0509..14198fdba36 100644 --- a/docs/plugins/voice-call.md +++ b/docs/plugins/voice-call.md @@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set: } ``` +`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin +normalizes the provider-supplied `From` value and compares it to `allowFrom`. +Webhook verification authenticates provider delivery and payload integrity, but +it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as +caller-ID filtering, not strong caller identity. + Auto-responses use the agent system. 
Tune with: - `responseModel` diff --git a/docs/reference/wizard.md b/docs/reference/wizard.md index 60e88fe4226..bbaebbdc84f 100644 --- a/docs/reference/wizard.md +++ b/docs/reference/wizard.md @@ -167,93 +167,8 @@ openclaw onboard --non-interactive \ `--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts. - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice gemini-api-key \ - --gemini-api-key "$GEMINI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice zai-api-key \ - --zai-api-key "$ZAI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ai-gateway-api-key \ - --ai-gateway-api-key "$AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice cloudflare-ai-gateway-api-key \ - --cloudflare-ai-gateway-account-id "your-account-id" \ - --cloudflare-ai-gateway-gateway-id "your-gateway-id" \ - --cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice moonshot-api-key \ - --moonshot-api-key "$MOONSHOT_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice synthetic-api-key \ - --synthetic-api-key "$SYNTHETIC_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice opencode-zen \ - --opencode-zen-api-key "$OPENCODE_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - Swap to `--auth-choice opencode-go --opencode-go-api-key 
"$OPENCODE_API_KEY"` for the Go catalog. - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ollama \ - --custom-model-id "qwen3.5:27b" \ - --accept-risk \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance. - - +Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples). +Use this reference page for flag semantics and step ordering. ### Add agent (non-interactive) diff --git a/docs/tools/browser.md b/docs/tools/browser.md index d632e713068..8a7abe93209 100644 --- a/docs/tools/browser.md +++ b/docs/tools/browser.md @@ -48,6 +48,8 @@ Gateway. - `openclaw`: managed, isolated browser (no extension required). - `chrome`: extension relay to your **system browser** (requires the OpenClaw extension to be attached to a tab). +- `existing-session`: official Chrome MCP attach flow for a running Chrome + profile. Set `browser.defaultProfile: "openclaw"` if you want managed mode by default. @@ -77,6 +79,12 @@ Browser settings live in `~/.openclaw/openclaw.json`. profiles: { openclaw: { cdpPort: 18800, color: "#FF4500" }, work: { cdpPort: 18801, color: "#0066CC" }, + chromeLive: { + cdpPort: 18802, + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }, remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" }, }, }, @@ -100,6 +108,8 @@ Notes: - Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay. - Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary. - Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP. +- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do + not set `cdpUrl` for that driver. 
## Use Brave (or another Chromium-based browser) @@ -264,11 +274,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be: - **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port - **remote**: an explicit CDP URL (Chromium-based browser running elsewhere) - **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension +- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect Defaults: - The `openclaw` profile is auto-created if missing. - The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). +- Existing-session profiles are opt-in; create them with `--driver existing-session`. - Local CDP ports allocate from **18800–18899** by default. - Deleting a profile moves its local data directory to Trash. @@ -328,6 +340,66 @@ Notes: - This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions). - Detach by clicking the extension icon again. + +## Chrome existing-session via MCP + +OpenClaw can also attach to a running Chrome profile through the official +Chrome DevTools MCP server. This reuses the tabs and login state already open in +that Chrome profile. + +Official background and setup references: + +- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp) + +Create a profile: + +```bash +openclaw browser create-profile \ + --name chrome-live \ + --driver existing-session \ + --color "#00AA00" +``` + +Then in Chrome: + +1. Open `chrome://inspect/#remote-debugging` +2. Enable remote debugging +3. 
Keep Chrome running and approve the connection prompt when OpenClaw attaches + +Live attach smoke test: + +```bash +openclaw browser --browser-profile chrome-live start +openclaw browser --browser-profile chrome-live status +openclaw browser --browser-profile chrome-live tabs +openclaw browser --browser-profile chrome-live snapshot --format ai +``` + +What success looks like: + +- `status` shows `driver: existing-session` +- `status` shows `running: true` +- `tabs` lists your already-open Chrome tabs +- `snapshot` returns refs from the selected live tab + +What to check if attach does not work: + +- Chrome is version `144+` +- remote debugging is enabled at `chrome://inspect/#remote-debugging` +- Chrome showed and you accepted the attach consent prompt +- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect` + +Notes: + +- This path is higher-risk than the isolated `openclaw` profile because it can + act inside your signed-in browser session. +- OpenClaw does not launch Chrome for this driver; it attaches to an existing + session only. +- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not + the legacy default-profile remote debugging port workflow. +- Some features still require the extension relay or managed browser path, such + as PDF export and download interception. - Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated. 
WSL2 / cross-namespace example: diff --git a/docs/tools/chrome-extension.md b/docs/tools/chrome-extension.md index ce4b271ae9c..dcf2150409b 100644 --- a/docs/tools/chrome-extension.md +++ b/docs/tools/chrome-extension.md @@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs Attach/detach happens via a **single Chrome toolbar button**. +If you want Chrome’s official DevTools MCP attach flow instead of the OpenClaw +extension relay, use an `existing-session` browser profile instead. See +[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome’s own +setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your +browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp). + ## What it is (concept) There are three parts: diff --git a/extensions/acpx/src/ensure.test.ts b/extensions/acpx/src/ensure.test.ts index cae52f29f9b..c0bb5469b29 100644 --- a/extensions/acpx/src/ensure.test.ts +++ b/extensions/acpx/src/ensure.test.ts @@ -54,6 +54,49 @@ describe("acpx ensure", () => { } }); + function mockEnsureInstallFlow() { + spawnAndCollectMock + .mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: "added 1 package\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: `acpx ${ACPX_PINNED_VERSION}\n`, + stderr: "", + code: 0, + error: null, + }); + } + + function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) { + expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ + command: "npm", + args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], + 
cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + } + it("accepts the pinned acpx version", async () => { spawnAndCollectMock.mockResolvedValueOnce({ stdout: `acpx ${ACPX_PINNED_VERSION}\n`, @@ -177,25 +220,7 @@ describe("acpx ensure", () => { }); it("installs and verifies pinned acpx when precheck fails", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: "/plugin/node_modules/.bin/acpx", @@ -204,33 +229,11 @@ describe("acpx ensure", () => { }); expect(spawnAndCollectMock).toHaveBeenCalledTimes(3); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", - }); + expectEnsureInstallCalls(); }); it("threads stripProviderAuthEnvVars through version probes and install", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: "/plugin/node_modules/.bin/acpx", @@ -239,24 +242,7 @@ describe("acpx ensure", () => { stripProviderAuthEnvVars: true, }); - expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ - command: "/plugin/node_modules/.bin/acpx", - args: 
["--version"], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); - expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ - command: "/plugin/node_modules/.bin/acpx", - args: ["--version"], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); + expectEnsureInstallCalls(true); }); it("fails with actionable error when npm install fails", async () => { diff --git a/extensions/mattermost/src/mattermost/interactions.test.ts b/extensions/mattermost/src/mattermost/interactions.test.ts index 3f52982cc52..62c7bdb757f 100644 --- a/extensions/mattermost/src/mattermost/interactions.test.ts +++ b/extensions/mattermost/src/mattermost/interactions.test.ts @@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => { return res as unknown as ServerResponse & { headers: Record; body: string }; } + function createActionContext(actionId = "approve", channelId = "chan-1") { + const context = { action_id: actionId, __openclaw_channel_id: channelId }; + return { context, token: generateInteractionToken(context, "acct") }; + } + + function createInteractionBody(params: { + context: Record; + token: string; + channelId?: string; + postId?: string; + userId?: string; + userName?: string; + }) { + return { + user_id: params.userId ?? "user-1", + ...(params.userName ? { user_name: params.userName } : {}), + channel_id: params.channelId ?? "chan-1", + post_id: params.postId ?? 
"post-1", + context: { ...params.context, _token: params.token }, + }; + } + + async function runHandler( + handler: ReturnType, + params: { + body: unknown; + remoteAddress?: string; + headers?: Record; + }, + ) { + const req = createReq({ + remoteAddress: params.remoteAddress, + headers: params.headers, + body: params.body, + }); + const res = createRes(); + await handler(req, res); + return res; + } + + function expectForbiddenResponse( + res: ServerResponse & { body: string }, + expectedMessage: string, + ) { + expect(res.statusCode).toBe(403); + expect(res.body).toContain(expectedMessage); + } + + function expectSuccessfulApprovalUpdate( + res: ServerResponse & { body: string }, + requestLog?: Array<{ path: string; method?: string }>, + ) { + expect(res.statusCode).toBe(200); + expect(res.body).toBe("{}"); + if (requestLog) { + expect(requestLog).toEqual([ + { path: "/posts/post-1", method: undefined }, + { path: "/posts/post-1", method: "PUT" }, + ]); + } + } + + function createActionPost(params?: { + actionId?: string; + actionName?: string; + channelId?: string; + rootId?: string; + }): MattermostPost { + return { + id: "post-1", + channel_id: params?.channelId ?? "chan-1", + ...(params?.rootId ? { root_id: params.rootId } : {}), + message: "Choose", + props: { + attachments: [ + { + actions: [ + { + id: params?.actionId ?? "approve", + name: params?.actionName ?? 
"Approve", + }, + ], + }, + ], + }, + }; + } + + function createUnusedInteractionHandler() { + return createMattermostInteractionHandler({ + client: { + request: async () => ({ message: "unused" }), + } as unknown as MattermostClient, + botUserId: "bot", + accountId: "acct", + }); + } + async function runApproveInteraction(params?: { actionName?: string; allowedSourceIps?: string[]; @@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress?: string; headers?: Record; }) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const requestLog: Array<{ path: string; method?: string }> = []; const handler = createMattermostInteractionHandler({ client: { @@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => { if (init?.method === "PUT") { return { id: "post-1" }; } - return { - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [ - { actions: [{ id: "approve", name: params?.actionName ?? 
"Approve" }] }, - ], - }, - }; + return createActionPost({ actionName: params?.actionName }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => { trustedProxies: params?.trustedProxies, }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: params?.remoteAddress, headers: params?.headers, - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - await handler(req, res); return { res, requestLog }; } async function runInvalidActionRequest(actionId: string) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: actionId, name: actionId }] }], - }, - }), + request: async () => createActionPost({ actionId, actionName: actionId }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + return await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - await handler(req, res); - return res; } it("accepts callback requests from an allowlisted source IP", async () => { @@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress: "198.51.100.8", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", 
method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => { @@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => { }); it("rejects callback requests from non-allowlisted source IPs", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { request: async () => { @@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => { allowedSourceIps: ["127.0.0.1"], }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: "198.51.100.8", - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Forbidden origin"); + expectForbiddenResponse(res, "Forbidden origin"); }); it("rejects requests with an invalid interaction token", async () => { - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", - }); + const handler = createUnusedInteractionHandler(); - const req = createReq({ + const res = await runHandler(handler, { body: { user_id: "user-1", channel_id: "chan-1", @@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => { context: { action_id: "approve", _token: "deadbeef" }, }, }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Invalid token"); + expectForbiddenResponse(res, "Invalid token"); }); it("rejects requests when the signed channel 
does not match the callback payload", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", + const { context, token } = createActionContext(); + const handler = createUnusedInteractionHandler(); + + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, channelId: "chan-2" }), }); - - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-2", - post_id: "post-1", - context: { ...context, _token: token }, - }, - }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Channel mismatch"); + expectForbiddenResponse(res, "Channel mismatch"); }); it("rejects requests when the fetched post does not belong to the callback channel", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }), + request: async () => createActionPost({ channelId: "chan-9" }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Post/channel 
mismatch"); + expectForbiddenResponse(res, "Post/channel mismatch"); }); it("rejects requests when the action is not present on the fetched post", async () => { @@ -730,12 +735,7 @@ describe("createMattermostInteractionHandler", () => { actionName: "approve", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("forwards fetched post threading metadata to session and button callbacks", async () => { @@ -745,19 +745,10 @@ describe("createMattermostInteractionHandler", () => { enqueueSystemEvent, }, } as unknown as Parameters[0]); - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9"); const dispatchButtonClick = vi.fn(); - const fetchedPost: MattermostPost = { - id: "post-1", - channel_id: "chan-1", - root_id: "root-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }; + const fetchedPost = createActionPost({ rootId: "root-9" }); const handler = createMattermostInteractionHandler({ client: { request: async (_path: string, init?: { method?: string }) => @@ -769,19 +760,9 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - - await handler(req, res); - expect(res.statusCode).toBe(200); expect(resolveSessionKey).toHaveBeenCalledWith({ channelId: "chan-1", @@ 
-803,8 +784,7 @@ describe("createMattermostInteractionHandler", () => { }); it("lets a custom interaction handler short-circuit generic completion updates", async () => { - const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext("mdlprov"); const requestLog: Array<{ path: string; method?: string }> = []; const handleInteraction = vi.fn().mockResolvedValue({ ephemeral_text: "Only the original requester can use this picker.", @@ -814,14 +794,10 @@ describe("createMattermostInteractionHandler", () => { client: { request: async (path: string, init?: { method?: string }) => { requestLog.push({ path, method: init?.method }); - return { - id: "post-1", - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }], - }, - }; + return createActionPost({ + actionId: "mdlprov", + actionName: "Browse providers", + }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -830,18 +806,14 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-2", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ + context, + token, + userId: "user-2", + userName: "alice", + }), }); - const res = createRes(); - - await handler(req, res); expect(res.statusCode).toBe(200); expect(res.body).toBe( diff --git a/extensions/open-prose/skills/prose/alts/arabian-nights.md b/extensions/open-prose/skills/prose/alts/arabian-nights.md index cc0d146664e..c637c883bb6 100644 --- a/extensions/open-prose/skills/prose/alts/arabian-nights.md +++ b/extensions/open-prose/skills/prose/alts/arabian-nights.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night 
| `prompt` | `command` | What is commanded of the djinn | | `model` | `spirit` | Which spirit answers | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. -- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Arabian Nights rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Nights -conjure "@alice/research" as research -wish topic: "What to investigate" - -djinn helper: - spirit: sonnet - -name findings = tale: helper - command: "Research {topic}" - -gift summary = tale "Summarize" - scroll: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Nights -bazaar: - security = tale "Check security" - perf = tale "Check performance" - style = tale "Check style" - -tale "Synthesize review" - scroll: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Nights -telling until **the code is bug-free** (max: 5): - tale "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err 
-finally: - session "Cleanup" -``` - -```prose -# Nights -venture: - tale "Risky operation" -should misfortune strike as err: - tale "Handle error" - scroll: err -and so it was: - tale "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Nights -crossroads **the severity level**: - path "Critical": - tale "Escalate immediately" - path "Minor": - tale "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `tale` +- `parallel` sample -> `bazaar` +- `loop` sample -> `telling` +- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was` +- `choice` sample -> `crossroads` / `path` ```prose # Nights diff --git a/extensions/open-prose/skills/prose/alts/homer.md b/extensions/open-prose/skills/prose/alts/homer.md index bc27905cf78..716f2052e34 100644 --- a/extensions/open-prose/skills/prose/alts/homer.md +++ b/extensions/open-prose/skills/prose/alts/homer.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il | `prompt` | `charge` | The quest given | | `model` | `muse` | Which muse inspires | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Homeric rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Homeric -invoke "@alice/research" as research -omen topic: "What to investigate" - -hero helper: - muse: sonnet - -decree findings = trial: helper - charge: "Research {topic}" - -glory summary = trial "Summarize" - tidings: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Homeric -host: - security = trial "Check security" - perf = trial "Check performance" - style = trial "Check style" - -trial "Synthesize review" - tidings: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Homeric -ordeal until **the code is bug-free** (max: 5): - trial "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Homeric -venture: - trial "Risky operation" -should ruin come as err: - trial "Handle error" - tidings: err -in the end: - trial "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Homeric -crossroads **the severity level**: - path "Critical": - trial "Escalate immediately" - path "Minor": - trial "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `trial` +- `parallel` sample -> `host` +- `loop` sample -> `ordeal` +- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end` +- `choice` sample -> `crossroads` / `path` ```prose # Homeric diff --git a/extensions/open-prose/skills/prose/alts/shared-appendix.md b/extensions/open-prose/skills/prose/alts/shared-appendix.md new file mode 100644 index 00000000000..32a4fcbcd17 --- /dev/null +++ b/extensions/open-prose/skills/prose/alts/shared-appendix.md @@ -0,0 +1,35 @@ +--- +role: reference +summary: Shared appendix for experimental OpenProse alternate registers. +status: draft +requires: prose.md +--- + +# OpenProse Alternate Register Appendix + +Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`. + +## Unchanged keywords + +These keywords already work or are too functional to replace sensibly: + +- `**...**` discretion markers +- `until`, `while` +- `map`, `filter`, `reduce`, `pmap` +- `max` +- `as` +- model names such as `sonnet`, `opus`, and `haiku` + +## Comparison pattern + +Use the translation map in each register file to rewrite the same functional sample programs: + +- simple program +- parallel execution +- loop with condition +- error handling +- choice block +- conditionals + +The goal is consistency, not one canonical wording. +Keep the functional version intact and rewrite only the register-specific aliases. 
diff --git a/extensions/open-prose/skills/prose/state/sqlite.md b/extensions/open-prose/skills/prose/state/sqlite.md index cfec757567c..352a8705cd5 100644 --- a/extensions/open-prose/skills/prose/state/sqlite.md +++ b/extensions/open-prose/skills/prose/state/sqlite.md @@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro ## Responsibility Separation -This section defines **who does what**. This is the contract between the VM and subagents. +The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation). -### VM Responsibilities +SQLite-specific differences: -The VM (the orchestrating agent running the .prose program) is responsible for: +- the VM creates `state.db` instead of an `openprose` schema +- subagent confirmation messages point at a local database path, for example `.prose/runs//state.db` +- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects -| Responsibility | Description | -| ------------------------- | -------------------------------------------------------------------------------------------------------- | -| **Database creation** | Create `state.db` and initialize core tables at run start | -| **Program registration** | Store the program source and metadata | -| **Execution tracking** | Update position, status, and timing as statements execute | -| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions | -| **Parallel coordination** | Track branch status, implement join strategies | -| **Loop management** | Track iteration counts, evaluate conditions | -| **Error aggregation** | Record failures, manage retry state | -| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed | -| **Completion detection** | Mark the run as complete when finished | +Example return values: -**Critical:** The VM must preserve enough context in its own 
conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory. - -### Subagent Responsibilities - -Subagents (sessions spawned by the VM) are responsible for: - -| Responsibility | Description | -| ----------------------- | ----------------------------------------------------------------- | -| **Writing own outputs** | Insert/update their binding in the `bindings` table | -| **Memory management** | For persistent agents: read and update their memory record | -| **Segment recording** | For persistent agents: append segment history | -| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB | -| **Atomic writes** | Use transactions when updating multiple related records | - -**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates. - -**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes. - -**What subagents return to the VM:** A confirmation message with the binding location—not the full content: - -**Root scope:** - -``` +```text Binding written: research Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL) -Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations. ``` -**Inside block invocation:** - -``` +```text Binding written: result Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43) Execution ID: 43 -Summary: Processed chunk into 3 sub-parts for recursive processing. ``` -The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values. 
- -### Shared Concerns - -| Concern | Who Handles | -| ---------------- | ------------------------------------------------------------------ | -| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) | -| Custom tables | Either (prefix with `x_` for extensions) | -| Indexing | Either (add indexes for frequently-queried columns) | -| Cleanup | VM (at run end, optionally vacuum) | +The VM still tracks locations, not full values. --- diff --git a/extensions/voice-call/README.md b/extensions/voice-call/README.md index 9acc9aec987..fe228537ee8 100644 --- a/extensions/voice-call/README.md +++ b/extensions/voice-call/README.md @@ -89,56 +89,18 @@ Notes: - Twilio/Telnyx/Plivo require a **publicly reachable** webhook URL. - `mock` is a local dev provider (no network calls). - Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true. -- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only. - -Streaming security defaults: - -- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame. -- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets. -- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP. -- `streaming.maxConnections` caps total open media stream sockets (pending + active). +- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call` ## Stale call reaper -Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook -(for example, notify-mode calls that never complete). The default is `0` -(disabled). - -Recommended ranges: - -- **Production:** `120`–`300` seconds for notify-style flows. -- Keep this value **higher than `maxDurationSeconds`** so normal calls can - finish. 
A good starting point is `maxDurationSeconds + 30–60` seconds. - -Example: - -```json5 -{ - staleCallReaperSeconds: 360, -} -``` +See the plugin docs for recommended ranges and production examples: +`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper` ## TTS for calls Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for -streaming speech on calls. You can override it under the plugin config with the -same shape — overrides deep-merge with `messages.tts`. - -```json5 -{ - tts: { - provider: "openai", - openai: { - voice: "alloy", - }, - }, -} -``` - -Notes: - -- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable). -- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices. +streaming speech on calls. Override examples and provider caveats live here: +`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls` ## CLI diff --git a/extensions/zalouser/src/monitor.group-gating.test.ts b/extensions/zalouser/src/monitor.group-gating.test.ts index f6723cad3d7..ca42edde43a 100644 --- a/extensions/zalouser/src/monitor.group-gating.test.ts +++ b/extensions/zalouser/src/monitor.group-gating.test.ts @@ -187,6 +187,31 @@ function installRuntime(params: { }; } +function installGroupCommandAuthRuntime() { + return installRuntime({ + resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => + useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), + }); +} + +async function processGroupControlCommand(params: { + account: ResolvedZalouserAccount; + content?: string; + commandContent?: string; +}) { + await __testing.processMessage({ + message: createGroupMessage({ + content: params.content ?? "/new", + commandContent: params.commandContent ?? 
"/new", + hasAnyMention: true, + wasExplicitlyMentioned: true, + }), + account: params.account, + config: createConfig(), + runtime: createRuntimeEnv(), + }); +} + function createGroupMessage(overrides: Partial = {}): ZaloInboundMessage { return { threadId: "g-1", @@ -229,57 +254,152 @@ describe("zalouser monitor group mention gating", () => { sendSeenZalouserMock.mockClear(); }); - it("skips unmentioned group messages when requireMention=true", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); + async function processMessageWithDefaults(params: { + message: ZaloInboundMessage; + account?: ResolvedZalouserAccount; + historyState?: { + historyLimit: number; + groupHistories: Map< + string, + Array<{ sender: string; body: string; timestamp?: number; messageId?: string }> + >; + }; + }) { await __testing.processMessage({ - message: createGroupMessage(), - account: createAccount(), + message: params.message, + account: params.account ?? 
createAccount(), config: createConfig(), runtime: createRuntimeEnv(), + historyState: params.historyState, }); + } - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); - expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); - - it("fails closed when requireMention=true but mention detection is unavailable", async () => { + async function expectSkippedGroupMessage(message?: Partial) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ - message: createGroupMessage({ - canResolveExplicitMention: false, - hasAnyMention: false, - wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + await processMessageWithDefaults({ + message: createGroupMessage(message), }); - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); + } - it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { + async function expectGroupCommandAuthorizers(params: { + accountConfig: ResolvedZalouserAccount["config"]; + expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>; + }) { + const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = + installGroupCommandAuthRuntime(); + await processGroupControlCommand({ + account: { + ...createAccount(), + config: params.accountConfig, + }, + }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; + expect(authCall?.authorizers).toEqual(params.expectedAuthorizers); + } + + async function processOpenDmMessage(params?: { + message?: Partial; + readSessionUpdatedAt?: (input?: { + storePath: string; + sessionKey: string; + }) => number | undefined; + }) { + const runtime = installRuntime({ + commandAuthorized: false, + }); 
+ if (params?.readSessionUpdatedAt) { + runtime.readSessionUpdatedAt.mockImplementation(params.readSessionUpdatedAt); + } + const account = createAccount(); + await processMessageWithDefaults({ + message: createDmMessage(params?.message), + account: { + ...account, + config: { + ...account.config, + dmPolicy: "open", + }, + }, + }); + return runtime; + } + + async function expectDangerousNameMatching(params: { + dangerouslyAllowNameMatching?: boolean; + expectedDispatches: number; + }) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ + await processMessageWithDefaults({ message: createGroupMessage({ + threadId: "g-attacker-001", + groupName: "Trusted Team", + senderId: "666", hasAnyMention: true, wasExplicitlyMentioned: true, content: "ping @bot", }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + account: { + ...createAccount(), + config: { + ...createAccount().config, + ...(params.dangerouslyAllowNameMatching ? 
{ dangerouslyAllowNameMatching: true } : {}), + groupPolicy: "allowlist", + groupAllowFrom: ["*"], + groups: { + "group:g-trusted-001": { allow: true }, + "Trusted Team": { allow: true }, + }, + }, + }, }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes( + params.expectedDispatches, + ); + return dispatchReplyWithBufferedBlockDispatcher; + } + async function dispatchGroupMessage(params: { + commandAuthorized: boolean; + message: Partial; + }) { + const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + commandAuthorized: params.commandAuthorized, + }); + await processMessageWithDefaults({ + message: createGroupMessage(params.message), + }); expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + } + + it("skips unmentioned group messages when requireMention=true", async () => { + await expectSkippedGroupMessage(); + }); + + it("fails closed when requireMention=true but mention detection is unavailable", async () => { + await expectSkippedGroupMessage({ + canResolveExplicitMention: false, + hasAnyMention: false, + wasExplicitlyMentioned: false, + }); + }); + + it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { + const callArg = await dispatchGroupMessage({ + commandAuthorized: false, + message: { + hasAnyMention: true, + wasExplicitlyMentioned: true, + content: "ping @bot", + }, + }); expect(callArg?.ctx?.WasMentioned).toBe(true); expect(callArg?.ctx?.To).toBe("zalouser:group:g-1"); expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1"); @@ -290,22 +410,14 @@ describe("zalouser monitor group mention gating", () => { }); it("allows authorized control commands to bypass mention gating", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await 
dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "/status", hasAnyMention: false, wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.WasMentioned).toBe(true); }); @@ -346,57 +458,30 @@ describe("zalouser monitor group mention gating", () => { }); it("uses commandContent for mention-prefixed control commands", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "@Bot /new", commandContent: "/new", hasAnyMention: true, wasExplicitlyMentioned: true, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.CommandBody).toBe("/new"); expect(callArg?.ctx?.BodyForCommands).toBe("/new"); }); it("allows group control commands when only allowFrom is configured", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: 
["123"], - }, + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["123"], }, - config: createConfig(), - runtime: createRuntimeEnv(), + expectedAuthorizers: [ + { configured: true, allowed: true }, + { configured: true, allowed: true }, + ], }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, allowed: true }, - { configured: true, allowed: true }, - ]); }); it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => { @@ -425,123 +510,35 @@ describe("zalouser monitor group mention gating", () => { }); it("does not accept a different group id by matching only the mutable group name by default", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); - await __testing.processMessage({ - message: createGroupMessage({ - threadId: "g-attacker-001", - groupName: "Trusted Team", - senderId: "666", - hasAnyMention: true, - wasExplicitlyMentioned: true, - content: "ping @bot", - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - groupPolicy: "allowlist", - groupAllowFrom: ["*"], - groups: { - "group:g-trusted-001": { allow: true }, - "Trusted Team": { allow: true }, - }, - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); + await expectDangerousNameMatching({ expectedDispatches: 0 }); }); it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, + const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({ + dangerouslyAllowNameMatching: true, + expectedDispatches: 1, 
}); - await __testing.processMessage({ - message: createGroupMessage({ - threadId: "g-attacker-001", - groupName: "Trusted Team", - senderId: "666", - hasAnyMention: true, - wasExplicitlyMentioned: true, - content: "ping @bot", - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - dangerouslyAllowNameMatching: true, - groupPolicy: "allowlist", - groupAllowFrom: ["*"], - groups: { - "group:g-trusted-001": { allow: true }, - "Trusted Team": { allow: true }, - }, - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001"); }); it("allows group control commands when sender is in groupAllowFrom", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: ["999"], - groupAllowFrom: ["123"], - }, + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["999"], + groupAllowFrom: ["123"], }, - config: createConfig(), - runtime: createRuntimeEnv(), + expectedAuthorizers: [ + { configured: true, allowed: false }, + { configured: true, allowed: true }, + ], }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, 
allowed: false }, - { configured: true, allowed: true }, - ]); }); it("routes DM messages with direct peer kind", async () => { const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } = - installRuntime({ - commandAuthorized: false, - }); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); + await processOpenDmMessage(); expect(resolveAgentRoute).toHaveBeenCalledWith( expect.objectContaining({ @@ -559,24 +556,9 @@ describe("zalouser monitor group mention gating", () => { }); it("reuses the legacy DM session key when only the old group-shaped session exists", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({ - commandAuthorized: false, - }); - readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) => - input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined, - ); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), + const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({ + readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) => + input?.sessionKey === "agent:main:zalouser:group:321" ? 
123 : undefined, }); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; diff --git a/package.json b/package.json index c63e72f66fa..54d897eb66f 100644 --- a/package.json +++ b/package.json @@ -353,6 +353,7 @@ "@mariozechner/pi-ai": "0.57.1", "@mariozechner/pi-coding-agent": "0.57.1", "@mariozechner/pi-tui": "0.57.1", + "@modelcontextprotocol/sdk": "1.27.1", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ac32d145c57..ecc73c421c7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,16 +60,19 @@ importers: version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': specifier: 0.57.1 version: 0.57.1 + '@modelcontextprotocol/sdk': + specifier: 1.27.1 + version: 1.27.1(zod@4.3.6) '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 @@ -346,7 +349,7 @@ importers: version: 10.6.1 openclaw: specifier: '>=2026.3.11' - version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) + version: 2026.3.11(@discordjs/opus@0.10.0)(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/imessage: {} @@ -377,7 +380,7 @@ importers: dependencies: '@mariozechner/pi-agent-core': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 
0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@matrix-org/matrix-sdk-crypto-nodejs': specifier: ^0.4.0 version: 0.4.0 @@ -407,7 +410,7 @@ importers: dependencies: openclaw: specifier: '>=2026.3.11' - version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) + version: 2026.3.11(@discordjs/opus@0.10.0)(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/memory-lancedb: dependencies: @@ -1828,6 +1831,16 @@ packages: '@mistralai/mistralai@1.14.1': resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==} + '@modelcontextprotocol/sdk@1.27.1': + resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==} + engines: {node: '>=18'} + peerDependencies: + '@cfworker/json-schema': ^4.1.1 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + '@cfworker/json-schema': + optional: true + '@mozilla/readability@0.6.0': resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==} engines: {node: '>=14.0.0'} @@ -4271,6 +4284,10 @@ packages: core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + cors@2.8.6: + resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} + engines: {node: '>= 0.10'} + croner@10.0.1: resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==} engines: {node: '>=18.0'} @@ -4550,6 +4567,14 @@ packages: events-universal@1.0.1: resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==} + 
eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.7: + resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} + engines: {node: '>=18.0.0'} + execa@4.1.0: resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==} engines: {node: '>=10'} @@ -4561,6 +4586,12 @@ packages: exponential-backoff@3.1.3: resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==} + express-rate-limit@8.3.1: + resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==} + engines: {node: '>= 16'} + peerDependencies: + express: '>= 4.11' + express@4.22.1: resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==} engines: {node: '>= 0.10.0'} @@ -5058,6 +5089,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + jose@6.2.1: + resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==} + js-stringify@1.0.2: resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==} @@ -5102,6 +5136,9 @@ packages: json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + json-schema-typed@8.0.2: + resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + json-schema@0.4.0: resolution: {integrity: 
sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} @@ -5870,6 +5907,10 @@ packages: resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==} hasBin: true + pkce-challenge@5.0.1: + resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} + engines: {node: '>=16.20.0'} + playwright-core@1.58.2: resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} engines: {node: '>=18'} @@ -8645,12 +8686,14 @@ snapshots: optionalDependencies: '@noble/hashes': 2.0.1 - '@google/genai@1.44.0': + '@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))': dependencies: google-auth-library: 10.6.1 p-retry: 4.6.2 protobufjs: 7.5.4 ws: 8.19.0 + optionalDependencies: + '@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6) transitivePeerDependencies: - bufferutil - supports-color @@ -8698,7 +8741,6 @@ snapshots: '@hono/node-server@1.19.10(hono@4.12.7)': dependencies: hono: 4.12.7 - optional: true '@huggingface/jinja@0.5.5': {} @@ -9025,9 +9067,9 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - '@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-agent-core@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -9037,11 +9079,11 @@ snapshots: - ws - zod - '@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) '@aws-sdk/client-bedrock-runtime': 3.1004.0 - '@google/genai': 1.44.0 + '@google/genai': 
1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) '@mistralai/mistralai': 1.14.1 '@sinclair/typebox': 0.34.48 ajv: 8.18.0 @@ -9061,11 +9103,11 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-coding-agent@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': 0.57.1 '@silvia-odwyer/photon-node': 0.3.4 chalk: 5.6.2 @@ -9141,6 +9183,28 @@ snapshots: - bufferutil - utf-8-validate + '@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)': + dependencies: + '@hono/node-server': 1.19.10(hono@4.12.7) + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + content-type: 1.0.5 + cors: 2.8.6 + cross-spawn: 7.0.6 + eventsource: 3.0.7 + eventsource-parser: 3.0.6 + express: 5.2.1 + express-rate-limit: 8.3.1(express@5.2.1) + hono: 4.12.7 + jose: 6.2.1 + json-schema-typed: 8.0.2 + pkce-challenge: 5.0.1 + raw-body: 3.0.2 + zod: 4.3.6 + zod-to-json-schema: 3.25.1(zod@4.3.6) + transitivePeerDependencies: + - supports-color + '@mozilla/readability@0.6.0': {} '@napi-rs/canvas-android-arm64@0.1.95': @@ -11916,6 +11980,11 @@ snapshots: core-util-is@1.0.3: {} + cors@2.8.6: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + croner@10.0.1: {} cross-spawn@7.0.6: @@ -12167,6 +12236,12 @@ snapshots: transitivePeerDependencies: - bare-abort-controller + eventsource-parser@3.0.6: {} + + eventsource@3.0.7: + dependencies: + eventsource-parser: 3.0.6 + execa@4.1.0: dependencies: cross-spawn: 7.0.6 @@ -12183,6 +12258,11 @@ snapshots: exponential-backoff@3.1.3: {} + express-rate-limit@8.3.1(express@5.2.1): + dependencies: + express: 5.2.1 + 
ip-address: 10.1.0 + express@4.22.1: dependencies: accepts: 1.3.8 @@ -12826,6 +12906,8 @@ snapshots: jose@4.15.9: {} + jose@6.2.1: {} + js-stringify@1.0.2: {} js-tokens@10.0.0: {} @@ -12893,6 +12975,8 @@ snapshots: json-schema-traverse@1.0.0: {} + json-schema-typed@8.0.2: {} + json-schema@0.4.0: {} json-stringify-safe@5.0.1: {} @@ -13497,7 +13581,7 @@ snapshots: ws: 8.19.0 zod: 4.3.6 - openclaw@2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)): + openclaw@2026.3.11(@discordjs/opus@0.10.0)(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)): dependencies: '@agentclientprotocol/sdk': 0.16.1(zod@4.3.6) '@aws-sdk/client-bedrock': 3.1007.0 @@ -13510,9 +13594,9 @@ snapshots: '@larksuiteoapi/node-sdk': 1.59.0 '@line/bot-sdk': 10.6.0 '@lydell/node-pty': 1.2.0-beta.3 - '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-coding-agent': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': 0.57.1 '@mozilla/readability': 0.6.0 '@napi-rs/canvas': 0.1.95 @@ -13784,6 +13868,8 @@ snapshots: sonic-boom: 4.2.1 thread-stream: 3.1.0 + pkce-challenge@5.0.1: {} + playwright-core@1.58.2: {} playwright@1.58.2: diff --git a/scripts/ci-changed-scope.mjs b/scripts/ci-changed-scope.mjs index a4018b30a2c..c5ed28319b1 100644 --- a/scripts/ci-changed-scope.mjs +++ b/scripts/ci-changed-scope.mjs @@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs"; const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/; const 
SKILLS_PYTHON_SCOPE_RE = /^skills\//; +const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/; const MACOS_PROTOCOL_GEN_RE = /^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/; const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/; @@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) { runSkillsPython = true; } + if (CI_WORKFLOW_SCOPE_RE.test(path)) { + runMacos = true; + runAndroid = true; + runSkillsPython = true; + } + if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) { runMacos = true; } diff --git a/src/acp/translator.prompt-prefix.test.ts b/src/acp/translator.prompt-prefix.test.ts index 38c186519c0..9d53e3aa103 100644 --- a/src/acp/translator.prompt-prefix.test.ts +++ b/src/acp/translator.prompt-prefix.test.ts @@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js"; import { AcpGatewayAgent } from "./translator.js"; import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js"; +const TEST_SESSION_ID = "session-1"; +const TEST_SESSION_KEY = "agent:main:main"; +const TEST_PROMPT = { + sessionId: TEST_SESSION_ID, + prompt: [{ type: "text", text: "hello" }], + _meta: {}, +} as unknown as PromptRequest; + describe("acp prompt cwd prefix", () => { + const createStopAfterSendSpy = () => + vi.fn(async (method: string) => { + if (method === "chat.send") { + throw new Error("stop-after-send"); + } + return {}; + }); + + async function runPromptAndCaptureRequest( + options: { + cwd?: string; + prefixCwd?: boolean; + provenanceMode?: "meta" | "meta+receipt"; + } = {}, + ) { + const sessionStore = createInMemorySessionStore(); + sessionStore.createSession({ + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + cwd: options.cwd ?? 
path.join(os.homedir(), "openclaw-test"), + }); + + const requestSpy = createStopAfterSendSpy(); + const agent = new AcpGatewayAgent( + createAcpConnection(), + createAcpGateway(requestSpy as unknown as GatewayClient["request"]), + { + sessionStore, + prefixCwd: options.prefixCwd, + provenanceMode: options.provenanceMode, + }, + ); + + await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send"); + return requestSpy; + } + async function runPromptWithCwd(cwd: string) { const pinnedHome = os.homedir(); const previousOpenClawHome = process.env.OPENCLAW_HOME; @@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => { delete process.env.OPENCLAW_HOME; process.env.HOME = pinnedHome; - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd, - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - prefixCwd: true, - }, - ); - try { - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - return requestSpy; + return await runPromptAndCaptureRequest({ cwd, prefixCwd: true }); } finally { if (previousOpenClawHome === undefined) { delete process.env.OPENCLAW_HOME; @@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects system provenance metadata when enabled", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw 
new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects a system provenance receipt when requested", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta+receipt", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -182,14 
+140,14 @@ describe("acp prompt cwd prefix", () => { expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"), + systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`), }), { expectFinal: true }, ); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"), + systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`), }), { expectFinal: true }, ); diff --git a/src/agents/bash-tools.exec.approval-id.test.ts b/src/agents/bash-tools.exec.approval-id.test.ts index cc94f83d665..211d8e3dcaa 100644 --- a/src/agents/bash-tools.exec.approval-id.test.ts +++ b/src/agents/bash-tools.exec.approval-id.test.ts @@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) { return buildSystemRunPreparePayload(params); } +function getTestConfigPath() { + return path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json"); +} + +async function writeOpenClawConfig(config: Record, pretty = false) { + const configPath = getTestConfigPath(); + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined)); +} + +async function writeExecApprovalsConfig(config: Record) { + const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); + await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); + await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2)); +} + +function acceptedApprovalResponse(params: unknown) { + return { status: "accepted", id: (params as { id?: string })?.id }; +} + +function getResultText(result: { content: Array<{ type?: string; text?: string }> }) { + return result.content.find((part) => part.type === "text")?.text ?? 
""; +} + +function expectPendingApprovalText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + options: { + command: string; + host: "gateway" | "node"; + nodeId?: string; + interactive?: boolean; + }, +) { + expect(result.details.status).toBe("approval-pending"); + const details = result.details as { approvalId: string; approvalSlug: string }; + const pendingText = getResultText(result); + expect(pendingText).toContain( + `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, + ); + expect(pendingText).toContain(`full ${details.approvalId}`); + expect(pendingText).toContain(`Host: ${options.host}`); + if (options.nodeId) { + expect(pendingText).toContain(`Node: ${options.nodeId}`); + } + expect(pendingText).toContain(`CWD: ${process.cwd()}`); + expect(pendingText).toContain("Command:\n```sh\n"); + expect(pendingText).toContain(options.command); + if (options.interactive) { + expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); + expect(pendingText).toContain("Background mode requires pre-approved policy"); + } + return details; +} + +function expectPendingCommandText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + command: string, +) { + expect(result.details.status).toBe("approval-pending"); + const text = getResultText(result); + expect(text).toContain("Command:\n```sh\n"); + expect(text).toContain(command); +} + +function mockGatewayOkCalls(calls: string[]) { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + calls.push(method); + return { ok: true }; + }); +} + +function createElevatedAllowlistExecTool() { + return createExecTool({ + ask: "on-miss", + security: "allowlist", + approvalRunningNoticeMs: 0, + elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, + }); +} + +async function expectGatewayExecWithoutApproval(options: { + config: Record; + command: string; + 
ask?: "always" | "on-miss" | "off"; +}) { + await writeExecApprovalsConfig(options.config); + const calls: string[] = []; + mockGatewayOkCalls(calls); + + const tool = createExecTool({ + host: "gateway", + ask: options.ask, + security: "full", + approvalRunningNoticeMs: 0, + }); + + const result = await tool.execute("call-no-approval", { command: options.command }); + expect(result.details.status).toBe("completed"); + expect(calls).not.toContain("exec.approval.request"); + expect(calls).not.toContain("exec.approval.waitDecision"); +} + +function mockAcceptedApprovalFlow(options: { + onAgent?: (params: Record) => void; + onNodeInvoke?: (params: unknown) => unknown; +}) { + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + if (method === "exec.approval.request") { + return acceptedApprovalResponse(params); + } + if (method === "exec.approval.waitDecision") { + return { decision: "allow-once" }; + } + if (method === "agent" && options.onAgent) { + options.onAgent(params as Record); + return { status: "ok" }; + } + if (method === "node.invoke" && options.onNodeInvoke) { + return await options.onNodeInvoke(params); + } + return { ok: true }; + }); +} + +function mockPendingApprovalRegistration() { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + if (method === "exec.approval.request") { + return { status: "accepted", id: "approval-id" }; + } + if (method === "exec.approval.waitDecision") { + return { decision: null }; + } + return { ok: true }; + }); +} + +function expectApprovalUnavailableText(result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; +}) { + expect(result.details.status).toBe("approval-unavailable"); + const text = result.content.find((part) => part.type === "text")?.text ?? 
""; + expect(text).not.toContain("/approve"); + expect(text).not.toContain("npm view diver name version description"); + expect(text).not.toContain("Pending command:"); + expect(text).not.toContain("Host:"); + expect(text).not.toContain("CWD:"); + return text; +} + describe("exec approvals", () => { let previousHome: string | undefined; let previousUserProfile: string | undefined; @@ -81,18 +237,11 @@ describe("exec approvals", () => { let invokeParams: unknown; let agentParams: unknown; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "agent") { + mockAcceptedApprovalFlow({ + onAgent: (params) => { agentParams = params; - return { status: "ok" }; - } - if (method === "node.invoke") { + }, + onNodeInvoke: (params) => { const invoke = params as { command?: string }; if (invoke.command === "system.run.prepare") { return buildPreparedSystemRunPayload(params); @@ -101,8 +250,7 @@ describe("exec approvals", () => { invokeParams = params; return { payload: { success: true, stdout: "ok" } }; } - } - return { ok: true }; + }, }); const tool = createExecTool({ @@ -113,19 +261,12 @@ describe("exec approvals", () => { }); const result = await tool.execute("call1", { command: "ls -la" }); - expect(result.details.status).toBe("approval-pending"); - const details = result.details as { approvalId: string; approvalSlug: string }; - const pendingText = result.content.find((part) => part.type === "text")?.text ?? 
""; - expect(pendingText).toContain( - `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, - ); - expect(pendingText).toContain(`full ${details.approvalId}`); - expect(pendingText).toContain("Host: node"); - expect(pendingText).toContain("Node: node-1"); - expect(pendingText).toContain(`CWD: ${process.cwd()}`); - expect(pendingText).toContain("Command:\n```sh\nls -la\n```"); - expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); - expect(pendingText).toContain("Background mode requires pre-approved policy"); + const details = expectPendingApprovalText(result, { + command: "ls -la", + host: "node", + nodeId: "node-1", + interactive: true, + }); const approvalId = details.approvalId; await expect @@ -214,74 +355,28 @@ describe("exec approvals", () => { }); it("uses exec-approvals ask=off to suppress gateway prompts", async () => { - const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: { - main: { security: "full", ask: "off", askFallback: "full" }, - }, + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: { + main: { security: "full", ask: "off", askFallback: "full" }, }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; - }); - - const tool = createExecTool({ - host: "gateway", + }, + command: "echo ok", ask: "on-miss", - security: "full", - approvalRunningNoticeMs: 0, }); - - const result = await tool.execute("call3b", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - 
expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => { - const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: {}, - }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: {}, + }, + command: "echo ok", }); - - const tool = createExecTool({ - host: "gateway", - security: "full", - approvalRunningNoticeMs: 0, - }); - - const result = await tool.execute("call3c", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("requires approval for elevated ask when allowlist misses", async () => { @@ -296,7 +391,7 @@ describe("exec approvals", () => { if (method === "exec.approval.request") { resolveApproval?.(); // Return registration confirmation - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -304,24 +399,10 @@ describe("exec approvals", () => { return { ok: true }; }); - const tool = createExecTool({ - ask: "on-miss", - security: "allowlist", - approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, - }); + const tool = createElevatedAllowlistExecTool(); const result = await 
tool.execute("call4", { command: "echo ok", elevated: true }); - expect(result.details.status).toBe("approval-pending"); - const details = result.details as { approvalId: string; approvalSlug: string }; - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, - ); - expect(pendingText).toContain(`full ${details.approvalId}`); - expect(pendingText).toContain("Host: gateway"); - expect(pendingText).toContain(`CWD: ${process.cwd()}`); - expect(pendingText).toContain("Command:\n```sh\necho ok\n```"); + expectPendingApprovalText(result, { command: "echo ok", host: "gateway" }); await approvalSeen; expect(calls).toContain("exec.approval.request"); expect(calls).toContain("exec.approval.waitDecision"); @@ -330,18 +411,10 @@ describe("exec approvals", () => { it("starts a direct agent follow-up after approved gateway exec completes", async () => { const agentCalls: Array> = []; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "agent") { - agentCalls.push(params as Record); - return { status: "ok" }; - } - return { ok: true }; + mockAcceptedApprovalFlow({ + onAgent: (params) => { + agentCalls.push(params); + }, }); const tool = createExecTool({ @@ -388,7 +461,7 @@ describe("exec approvals", () => { if (typeof request.id === "string") { requestIds.push(request.id); } - return { status: "accepted", id: request.id }; + return acceptedApprovalResponse(request); } if (method === "exec.approval.waitDecision") { const wait = params as { id?: string }; @@ -400,12 +473,7 @@ describe("exec approvals", () => { return { ok: true }; }); - const tool = createExecTool({ - ask: "on-miss", - 
security: "allowlist", - approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, - }); + const tool = createElevatedAllowlistExecTool(); const first = await tool.execute("call-seq-1", { command: "npm view diver --json", @@ -429,7 +497,7 @@ describe("exec approvals", () => { vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { calls.push(method); if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -448,11 +516,7 @@ describe("exec approvals", () => { command: "npm view diver --json | jq .name && brew outdated", }); - expect(result.details.status).toBe("approval-pending"); - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - "Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```", - ); + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); }); @@ -480,11 +544,7 @@ describe("exec approvals", () => { command: "npm view diver --json | jq .name && brew outdated", }); - expect(result.details.status).toBe("approval-pending"); - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - "Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```", - ); + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); }); @@ -551,30 +611,17 @@ describe("exec approvals", () => { }); it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => { - const configPath = path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); - await fs.mkdir(path.dirname(configPath), { recursive: true }); - await fs.writeFile( - configPath, - JSON.stringify({ - channels: { - discord: { - enabled: true, - execApprovals: { enabled: false }, - }, + await writeOpenClawConfig({ + channels: { + discord: { + enabled: true, + execApprovals: { enabled: false }, }, - }), - ); - - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: "approval-id" }; - } - if (method === "exec.approval.waitDecision") { - return { decision: null }; - } - return { ok: true }; + }, }); + mockPendingApprovalRegistration(); + const tool = createExecTool({ host: "gateway", ask: "always", @@ -588,49 +635,29 @@ describe("exec approvals", () => { command: "npm view diver name version description", }); - expect(result.details.status).toBe("approval-unavailable"); - const text = result.content.find((part) => part.type === "text")?.text ?? ""; + const text = expectApprovalUnavailableText(result); expect(text).toContain("chat exec approvals are not enabled on Discord"); expect(text).toContain("Web UI or terminal UI"); - expect(text).not.toContain("/approve"); - expect(text).not.toContain("npm view diver name version description"); - expect(text).not.toContain("Pending command:"); - expect(text).not.toContain("Host:"); - expect(text).not.toContain("CWD:"); }); it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => { - const configPath = path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); - await fs.mkdir(path.dirname(configPath), { recursive: true }); - await fs.writeFile( - configPath, - JSON.stringify( - { - channels: { - telegram: { - enabled: true, - execApprovals: { enabled: false }, - }, - discord: { - enabled: true, - execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, - }, + await writeOpenClawConfig( + { + channels: { + telegram: { + enabled: true, + execApprovals: { enabled: false }, + }, + discord: { + enabled: true, + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, }, }, - null, - 2, - ), + }, + true, ); - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: "approval-id" }; - } - if (method === "exec.approval.waitDecision") { - return { decision: null }; - } - return { ok: true }; - }); + mockPendingApprovalRegistration(); const tool = createExecTool({ host: "gateway", @@ -645,14 +672,8 @@ describe("exec approvals", () => { command: "npm view diver name version description", }); - expect(result.details.status).toBe("approval-unavailable"); - const text = result.content.find((part) => part.type === "text")?.text ?? ""; + const text = expectApprovalUnavailableText(result); expect(text).toContain("Approval required. 
I sent the allowed approvers DMs."); - expect(text).not.toContain("/approve"); - expect(text).not.toContain("npm view diver name version description"); - expect(text).not.toContain("Pending command:"); - expect(text).not.toContain("Host:"); - expect(text).not.toContain("CWD:"); }); it("denies node obfuscated command when approval request times out", async () => { diff --git a/src/agents/model-fallback.probe.test.ts b/src/agents/model-fallback.probe.test.ts index d08bd0d4beb..3969416cd38 100644 --- a/src/agents/model-fallback.probe.test.ts +++ b/src/agents/model-fallback.probe.test.ts @@ -46,6 +46,20 @@ function expectFallbackUsed( expect(result.attempts[0]?.reason).toBe("rate_limit"); } +function expectPrimarySkippedForReason( + result: { result: unknown; attempts: Array<{ reason?: string }> }, + run: { + (...args: unknown[]): unknown; + mock: { calls: unknown[][] }; + }, + reason: string, +) { + expect(result.result).toBe("ok"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); + expect(result.attempts[0]?.reason).toBe(reason); +} + function expectPrimaryProbeSuccess( result: { result: unknown }, run: { @@ -183,11 +197,7 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); it("probes primary model when within 2-min margin of cooldown expiry", async () => { @@ -540,10 +550,6 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", 
"claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); }); diff --git a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts index 1d214e2cc1a..36944d67601 100644 --- a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts +++ b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts @@ -113,6 +113,92 @@ function createMoonshotConfig(overrides: { }; } +function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig { + return { + models: { + ...(mergeMode ? { mode: "merge" as const } : {}), + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} + api: "openai-completions", + models: [ + { + id: "gpt-4.1", + name: "GPT-4.1", + input: ["text"], + reasoning: false, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 16384, + }, + ], + }, + }, + }, + }; +} + +async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) { + await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { + await withTempHome(async () => { + if (options?.seedMergedProvider) { + await writeAgentModelsJson({ + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret + api: "openai-completions", + models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], + }, + }, + }); + } + + await ensureOpenClawModelsJson( + createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider), + ); + const result = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + }); + }); +} + +async 
function expectMoonshotTokenLimits(params: { + contextWindow: number; + maxTokens: number; + expectedContextWindow: number; + expectedMaxTokens: number; +}) { + await withTempHome(async () => { + await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { + await ensureOpenClawModelsJson( + createMoonshotConfig({ + contextWindow: params.contextWindow, + maxTokens: params.maxTokens, + }), + ); + const parsed = await readGeneratedModelsJson<{ + providers: Record< + string, + { + models?: Array<{ + id: string; + contextWindow?: number; + maxTokens?: number; + }>; + } + >; + }>(); + const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); + expect(kimi?.contextWindow).toBe(params.expectedContextWindow); + expect(kimi?.maxTokens).toBe(params.expectedMaxTokens); + }); + }); +} + describe("models-config", () => { it("keeps anthropic api defaults when model entries omit api", async () => { await withTempHome(async () => { @@ -444,131 +530,28 @@ describe("models-config", () => { }); it("does not persist resolved env var value as plaintext in models.json", async () => { - await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig - api: "openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); - }); - }); + await expectOpenAiEnvMarkerApiKey(); }); it("replaces stale merged apiKey when config 
key normalizes to a known env marker", async () => { - await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret - api: "openai-completions", - models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], - }, - }, - }); - const cfg: OpenClawConfig = { - models: { - mode: "merge", - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} - api: "openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret - }); - }); + await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true }); }); it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(350000); - expect(kimi?.maxTokens).toBe(16384); - }); + await expectMoonshotTokenLimits({ + contextWindow: 350000, + 
maxTokens: 16384, + expectedContextWindow: 350000, + expectedMaxTokens: 16384, }); }); it("falls back to implicit token limits when explicit values are invalid", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(256000); - expect(kimi?.maxTokens).toBe(8192); - }); + await expectMoonshotTokenLimits({ + contextWindow: 0, + maxTokens: -1, + expectedContextWindow: 256000, + expectedMaxTokens: 8192, }); }); }); diff --git a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts index 8414fb10d08..e10fcbc4ee4 100644 --- a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts +++ b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts @@ -4,88 +4,80 @@ import { installModelsConfigTestHooks, withModelsTempHome } from "./models-confi import { ensureOpenClawModelsJson } from "./models-config.js"; import { readGeneratedModelsJson } from "./models-config.test-utils.js"; +function createGoogleModelsConfig( + models: NonNullable["providers"]["google"]["models"], +): OpenClawConfig { + return { + models: { + providers: { + google: { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + apiKey: "GEMINI_KEY", // pragma: allowlist secret + api: "google-generative-ai", + models, + }, + }, + }, + }; +} + +async function expectGeneratedGoogleModelIds(ids: string[]) { + const parsed = await readGeneratedModelsJson<{ + 
providers: Record }>; + }>(); + expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids); +} + describe("models-config", () => { installModelsConfigTestHooks(); it("normalizes gemini 3 ids to preview for google providers", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3-pro", - name: "Gemini 3 Pro", - api: "google-generative-ai", - reasoning: true, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - { - id: "gemini-3-flash", - name: "Gemini 3 Flash", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3-pro", + name: "Gemini 3 Pro", + api: "google-generative-ai", + reasoning: true, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + { + id: "gemini-3-flash", + name: "Gemini 3 Flash", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, + }, + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]); }); }); it("normalizes the deprecated google flash preview id to the 
working preview id", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3.1-flash-preview", - name: "Gemini 3.1 Flash Preview", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3.1-flash-preview", + name: "Gemini 3.1 Flash Preview", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]); }); }); }); diff --git a/src/agents/ollama-models.test.ts b/src/agents/ollama-models.test.ts index 7877d40bdf9..d7b7d066c6f 100644 --- a/src/agents/ollama-models.test.ts +++ b/src/agents/ollama-models.test.ts @@ -1,31 +1,11 @@ import { afterEach, describe, expect, it, vi } from "vitest"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; import { enrichOllamaModelsWithContext, resolveOllamaApiBase, type OllamaTagModel, } from "./ollama-models.js"; -function jsonResponse(body: unknown, status = 200): Response { - return new Response(JSON.stringify(body), { - status, - headers: { "Content-Type": "application/json" }, - }); -} - -function requestUrl(input: string | URL | Request): string { - if (typeof input === "string") { - return input; - } - 
if (input instanceof URL) { - return input.toString(); - } - return input.url; -} - -function requestBody(body: BodyInit | null | undefined): string { - return typeof body === "string" ? body : "{}"; -} - describe("ollama-models", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -43,7 +23,7 @@ describe("ollama-models", () => { if (!url.endsWith("/api/show")) { throw new Error(`Unexpected fetch: ${url}`); } - const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; if (body.name === "llama3:8b") { return jsonResponse({ model_info: { "llama.context_length": 65536 } }); } diff --git a/src/agents/ollama-stream.test.ts b/src/agents/ollama-stream.test.ts index 241c7a0f858..ded8064ea19 100644 --- a/src/agents/ollama-stream.test.ts +++ b/src/agents/ollama-stream.test.ts @@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader; } +async function expectDoneEventContent(lines: string[], expectedContent: unknown) { + await withMockNdjsonFetch(lines, async () => { + const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); + const events = await collectStreamEvents(stream); + + const doneEvent = events.at(-1); + if (!doneEvent || doneEvent.type !== "done") { + throw new Error("Expected done event"); + } + + expect(doneEvent.message.content).toEqual(expectedContent); + }); +} + describe("parseNdjsonStream", () => { it("parses text-only streaming chunks", async () => { const reader = mockNdjsonReader([ @@ -486,88 +500,48 @@ describe("createOllamaStreamFn", () => { }); it("drops thinking chunks when no final content is emitted", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}', 
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([]); - }, + [], ); }); it("prefers streamed content over earlier thinking chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); it("drops reasoning chunks when no final content is emitted", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async 
() => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([]); - }, + [], ); }); it("prefers streamed content over earlier reasoning chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); }); diff --git a/src/agents/openclaw-tools.session-status.test.ts b/src/agents/openclaw-tools.session-status.test.ts index 8b2d9fc467f..0bc079d4ced 100644 --- a/src/agents/openclaw-tools.session-status.test.ts +++ b/src/agents/openclaw-tools.session-status.test.ts @@ -115,6 +115,50 @@ function resetSessionStore(store: Record) { mockConfig = createMockConfig(); } +function installSandboxedSessionStatusConfig() { + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: 
"anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; +} + +function mockSpawnedSessionList( + resolveSessions: (spawnedBy: string | undefined) => Array>, +) { + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { sessions: resolveSessions(request.params?.spawnedBy as string | undefined) }; + } + return {}; + }); +} + +function expectSpawnedSessionLookupCalls(spawnedBy: string) { + const expectedCall = { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy, + }, + }; + expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, expectedCall); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, expectedCall); +} + function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) { const tool = createOpenClawTools({ agentSessionKey, @@ -242,27 +286,8 @@ describe("session_status tool", () => { updatedAt: 10, }, }); - mockConfig = { - session: { mainKey: "main", scope: "per-sender" }, - tools: { - sessions: { visibility: "all" }, - agentToAgent: { enabled: true, allow: ["*"] }, - }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - sandbox: { sessionToolsVisibility: "spawned" }, - }, - }, - }; - callGatewayMock.mockImplementation(async (opts: unknown) => { - const request = opts as { method?: string; params?: Record }; - if (request.method === "sessions.list") { - return { sessions: [] }; - } - return {}; - }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList(() => []); const tool = getSessionStatusTool("agent:main:subagent:child", { sandboxed: true, @@ -284,25 +309,7 @@ describe("session_status tool", () => { expect(loadSessionStoreMock).not.toHaveBeenCalled(); 
expect(updateSessionStoreMock).not.toHaveBeenCalled(); - expect(callGatewayMock).toHaveBeenCalledTimes(2); - expect(callGatewayMock).toHaveBeenNthCalledWith(1, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "agent:main:subagent:child", - }, - }); - expect(callGatewayMock).toHaveBeenNthCalledWith(2, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "agent:main:subagent:child", - }, - }); + expectSpawnedSessionLookupCalls("agent:main:subagent:child"); }); it("keeps legacy main requester keys for sandboxed session tree checks", async () => { @@ -316,30 +323,10 @@ describe("session_status tool", () => { updatedAt: 20, }, }); - mockConfig = { - session: { mainKey: "main", scope: "per-sender" }, - tools: { - sessions: { visibility: "all" }, - agentToAgent: { enabled: true, allow: ["*"] }, - }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - sandbox: { sessionToolsVisibility: "spawned" }, - }, - }, - }; - callGatewayMock.mockImplementation(async (opts: unknown) => { - const request = opts as { method?: string; params?: Record }; - if (request.method === "sessions.list") { - return { - sessions: - request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [], - }; - } - return {}; - }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList((spawnedBy) => + spawnedBy === "main" ? 
[{ key: "agent:main:subagent:child" }] : [], + ); const tool = getSessionStatusTool("main", { sandboxed: true, @@ -357,25 +344,7 @@ describe("session_status tool", () => { expect(childDetails.ok).toBe(true); expect(childDetails.sessionKey).toBe("agent:main:subagent:child"); - expect(callGatewayMock).toHaveBeenCalledTimes(2); - expect(callGatewayMock).toHaveBeenNthCalledWith(1, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "main", - }, - }); - expect(callGatewayMock).toHaveBeenNthCalledWith(2, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "main", - }, - }); + expectSpawnedSessionLookupCalls("main"); }); it("scopes bare session keys to the requester agent", async () => { diff --git a/src/agents/pi-embedded-runner.e2e.test.ts b/src/agents/pi-embedded-runner.e2e.test.ts index 31056f6ffe1..5c7722b5d16 100644 --- a/src/agents/pi-embedded-runner.e2e.test.ts +++ b/src/agents/pi-embedded-runner.e2e.test.ts @@ -1,9 +1,14 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => { let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: 
string; let workspaceDir: string; let sessionCounter = 0; @@ -98,50 +103,21 @@ beforeAll(async () => { vi.useRealTimers(); ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); ({ SessionManager } = await import("@mariozechner/pi-coding-agent")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true }); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeOpenAiConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: "sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - const nextSessionFile = () => { sessionCounter += 1; return path.join(workspaceDir, `session-${sessionCounter}.jsonl`); }; const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`; const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`; -const immediateEnqueue = async (task: () => Promise) => task(); const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => { const sessionFile = nextSessionFile(); @@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string timestamp: Date.now(), }); - const cfg = 
makeOpenAiConfig(["mock-1"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]); return await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => { }; const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => { - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi describe("runEmbeddedPiAgent", () => { it("handles prompt error paths without dropping user state", async () => { const sessionFile = nextSessionFile(); - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); const sessionKey = nextSessionKey(); const result = await runEmbeddedPiAgent({ sessionId: "session:test", diff --git a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts index 18f439cd01f..d91cf63539b 100644 --- a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts +++ b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts @@ -8,12 +8,17 @@ * Follows the same pattern as pi-embedded-runner.e2e.test.ts. 
*/ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => { }); let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: string; let workspaceDir: string; @@ -136,45 +141,15 @@ beforeAll(async () => { responsePlan = []; observedContexts = []; ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true }); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: "sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - 
id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - -const immediateEnqueue = async (task: () => Promise) => task(); - const readSessionMessages = async (sessionFile: string) => { const raw = await fs.readFile(sessionFile, "utf-8"); return raw @@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => { const sessionId = "yield-e2e-parent"; const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl"); - const cfg = makeConfig(["mock-yield"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]); const result = await runEmbeddedPiAgent({ sessionId, @@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => { const sessionId = "yield-e2e-abort"; const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl"); - const cfg = makeConfig(["mock-yield-abort"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]); const result = await runEmbeddedPiAgent({ sessionId, diff --git a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts index efed941762d..19b5701eaaa 100644 --- a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts @@ -7,6 +7,7 @@ import { usesOpenAiStringModeAnthropicToolChoice, } from "../provider-capabilities.js"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07"; const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const; @@ -341,18 +342,10 @@ export function createAnthropicFastModeWrapper( return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return underlying(model, context, { - ...options, - onPayload: (payload) => { - if (payload 
&& typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, model); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts index e3ef243b429..a35060173ff 100644 --- a/src/agents/pi-embedded-runner/compact.hooks.test.ts +++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts @@ -278,6 +278,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({ })); vi.mock("../../utils/message-channel.js", () => ({ + INTERNAL_MESSAGE_CHANNEL: "webchat", normalizeMessageChannel: vi.fn(() => undefined), })); @@ -375,6 +376,16 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { unregisterApiProviders(getCustomApiRegistrySourceId("ollama")); }); + async function runDirectCompaction(customInstructions = "focus on decisions") { + return await compactEmbeddedPiSessionDirect({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions, + }); + } + it("bootstraps runtime plugins with the resolved workspace", async () => { await compactEmbeddedPiSessionDirect({ sessionId: "session-1", @@ -472,13 +483,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { hookRunner.hasHooks.mockReturnValue(true); sanitizeSessionHistoryMock.mockResolvedValue([]); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await runDirectCompaction(); expect(result.ok).toBe(true); const beforeContext = 
sessionHook("compact:before")?.context; @@ -528,13 +533,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { details: { ok: true }, }); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await runDirectCompaction(); expect(result).toMatchObject({ ok: true, diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts index d0b483e83ec..8542f329cbe 100644 --- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts @@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core"; import type { SimpleStreamOptions } from "@mariozechner/pi-ai"; import { streamSimple } from "@mariozechner/pi-ai"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; type OpenAIServiceTier = "auto" | "default" | "flex" | "priority"; type OpenAIReasoningEffort = "low" | "medium" | "high"; @@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper( ) { return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return underlying(model, context, { - ...options, - onPayload: (payload) => { - if (payload && typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, model); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } diff --git a/src/agents/pi-embedded-runner/stream-payload-utils.ts b/src/agents/pi-embedded-runner/stream-payload-utils.ts new file mode 100644 index 
00000000000..580bf5b1391 --- /dev/null +++ b/src/agents/pi-embedded-runner/stream-payload-utils.ts @@ -0,0 +1,20 @@ +import type { StreamFn } from "@mariozechner/pi-agent-core"; + +export function streamWithPayloadPatch( + underlying: StreamFn, + model: Parameters[0], + context: Parameters[1], + options: Parameters[2], + patchPayload: (payload: Record) => void, +) { + const originalOnPayload = options?.onPayload; + return underlying(model, context, { + ...options, + onPayload: (payload) => { + if (payload && typeof payload === "object") { + patchPayload(payload as Record); + } + return originalOnPayload?.(payload, model); + }, + }); +} diff --git a/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts new file mode 100644 index 00000000000..1d987c44d1a --- /dev/null +++ b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts @@ -0,0 +1,57 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { OpenClawConfig } from "../../config/config.js"; + +export type EmbeddedPiRunnerTestWorkspace = { + tempRoot: string; + agentDir: string; + workspaceDir: string; +}; + +export async function createEmbeddedPiRunnerTestWorkspace( + prefix: string, +): Promise { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const agentDir = path.join(tempRoot, "agent"); + const workspaceDir = path.join(tempRoot, "workspace"); + await fs.mkdir(agentDir, { recursive: true }); + await fs.mkdir(workspaceDir, { recursive: true }); + return { tempRoot, agentDir, workspaceDir }; +} + +export async function cleanupEmbeddedPiRunnerTestWorkspace( + workspace: EmbeddedPiRunnerTestWorkspace | undefined, +): Promise { + if (!workspace) { + return; + } + await fs.rm(workspace.tempRoot, { recursive: true, force: true }); +} + +export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig { + return { + models: { + providers: { + openai: 
{ + api: "openai-responses", + apiKey: "sk-test", + baseUrl: "https://example.com", + models: modelIds.map((id) => ({ + id, + name: `Mock ${id}`, + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 16_000, + maxTokens: 2048, + })), + }, + }, + }, + }; +} + +export async function immediateEnqueue(task: () => Promise): Promise { + return await task(); +} diff --git a/src/browser/chrome-mcp.snapshot.test.ts b/src/browser/chrome-mcp.snapshot.test.ts new file mode 100644 index 00000000000..3fe3288848f --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.test.ts @@ -0,0 +1,68 @@ +import { describe, expect, it } from "vitest"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "./chrome-mcp.snapshot.js"; + +const snapshot = { + id: "root", + role: "document", + name: "Example", + children: [ + { + id: "btn-1", + role: "button", + name: "Continue", + }, + { + id: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + }, + ], +}; + +describe("chrome MCP snapshot conversion", () => { + it("flattens structured snapshots into aria-style nodes", () => { + const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10); + expect(nodes).toEqual([ + { + ref: "root", + role: "document", + name: "Example", + value: undefined, + description: undefined, + depth: 0, + }, + { + ref: "btn-1", + role: "button", + name: "Continue", + value: undefined, + description: undefined, + depth: 1, + }, + { + ref: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + description: undefined, + depth: 1, + }, + ]); + }); + + it("builds AI snapshots that preserve Chrome MCP uids as refs", () => { + const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot }); + + expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]'); + expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"'); + 
expect(result.refs).toEqual({ + "btn-1": { role: "button", name: "Continue" }, + "txt-1": { role: "textbox", name: "Email" }, + }); + expect(result.stats.refs).toBe(2); + }); +}); diff --git a/src/browser/chrome-mcp.snapshot.ts b/src/browser/chrome-mcp.snapshot.ts new file mode 100644 index 00000000000..e92709df6f2 --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.ts @@ -0,0 +1,246 @@ +import type { SnapshotAriaNode } from "./client.js"; +import { + getRoleSnapshotStats, + type RoleRefMap, + type RoleSnapshotOptions, +} from "./pw-role-snapshot.js"; + +export type ChromeMcpSnapshotNode = { + id?: string; + role?: string; + name?: string; + value?: string | number | boolean; + description?: string; + children?: ChromeMcpSnapshotNode[]; +}; + +const INTERACTIVE_ROLES = new Set([ + "button", + "checkbox", + "combobox", + "link", + "listbox", + "menuitem", + "menuitemcheckbox", + "menuitemradio", + "option", + "radio", + "searchbox", + "slider", + "spinbutton", + "switch", + "tab", + "textbox", + "treeitem", +]); + +const CONTENT_ROLES = new Set([ + "article", + "cell", + "columnheader", + "gridcell", + "heading", + "listitem", + "main", + "navigation", + "region", + "rowheader", +]); + +const STRUCTURAL_ROLES = new Set([ + "application", + "directory", + "document", + "generic", + "group", + "ignored", + "list", + "menu", + "menubar", + "none", + "presentation", + "row", + "rowgroup", + "tablist", + "table", + "toolbar", + "tree", + "treegrid", +]); + +function normalizeRole(node: ChromeMcpSnapshotNode): string { + const role = typeof node.role === "string" ? 
node.role.trim().toLowerCase() : ""; + return role || "generic"; +} + +function normalizeString(value: unknown): string | undefined { + if (typeof value === "string") { + const trimmed = value.trim(); + return trimmed || undefined; + } + if (typeof value === "number" || typeof value === "boolean") { + return String(value); + } + return undefined; +} + +function escapeQuoted(value: string): string { + return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); +} + +function shouldIncludeNode(params: { + role: string; + name?: string; + options?: RoleSnapshotOptions; +}): boolean { + if (params.options?.interactive && !INTERACTIVE_ROLES.has(params.role)) { + return false; + } + if (params.options?.compact && STRUCTURAL_ROLES.has(params.role) && !params.name) { + return false; + } + return true; +} + +function shouldCreateRef(role: string, name?: string): boolean { + return INTERACTIVE_ROLES.has(role) || (CONTENT_ROLES.has(role) && Boolean(name)); +} + +type DuplicateTracker = { + counts: Map; + keysByRef: Map; + duplicates: Set; +}; + +function createDuplicateTracker(): DuplicateTracker { + return { + counts: new Map(), + keysByRef: new Map(), + duplicates: new Set(), + }; +} + +function registerRef( + tracker: DuplicateTracker, + ref: string, + role: string, + name?: string, +): number | undefined { + const key = `${role}:${name ?? ""}`; + const count = tracker.counts.get(key) ?? 
0; + tracker.counts.set(key, count + 1); + tracker.keysByRef.set(ref, key); + if (count > 0) { + tracker.duplicates.add(key); + return count; + } + return undefined; +} + +export function flattenChromeMcpSnapshotToAriaNodes( + root: ChromeMcpSnapshotNode, + limit = 500, +): SnapshotAriaNode[] { + const boundedLimit = Math.max(1, Math.min(2000, Math.floor(limit))); + const out: SnapshotAriaNode[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + if (out.length >= boundedLimit) { + return; + } + const ref = normalizeString(node.id); + if (ref) { + out.push({ + ref, + role: normalizeRole(node), + name: normalizeString(node.name) ?? "", + value: normalizeString(node.value), + description: normalizeString(node.description), + depth, + }); + } + for (const child of node.children ?? []) { + visit(child, depth + 1); + if (out.length >= boundedLimit) { + return; + } + } + }; + + visit(root, 0); + return out; +} + +export function buildAiSnapshotFromChromeMcpSnapshot(params: { + root: ChromeMcpSnapshotNode; + options?: RoleSnapshotOptions; + maxChars?: number; +}): { + snapshot: string; + truncated?: boolean; + refs: RoleRefMap; + stats: { lines: number; chars: number; refs: number; interactive: number }; +} { + const refs: RoleRefMap = {}; + const tracker = createDuplicateTracker(); + const lines: string[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + const role = normalizeRole(node); + const name = normalizeString(node.name); + const value = normalizeString(node.value); + const description = normalizeString(node.description); + const maxDepth = params.options?.maxDepth; + if (maxDepth !== undefined && depth > maxDepth) { + return; + } + + const includeNode = shouldIncludeNode({ role, name, options: params.options }); + if (includeNode) { + let line = `${" ".repeat(depth)}- ${role}`; + if (name) { + line += ` "${escapeQuoted(name)}"`; + } + const ref = normalizeString(node.id); + if (ref && shouldCreateRef(role, name)) 
{ + const nth = registerRef(tracker, ref, role, name); + refs[ref] = nth === undefined ? { role, name } : { role, name, nth }; + line += ` [ref=${ref}]`; + } + if (value) { + line += ` value="${escapeQuoted(value)}"`; + } + if (description) { + line += ` description="${escapeQuoted(description)}"`; + } + lines.push(line); + } + + for (const child of node.children ?? []) { + visit(child, depth + 1); + } + }; + + visit(params.root, 0); + + for (const [ref, data] of Object.entries(refs)) { + const key = tracker.keysByRef.get(ref); + if (key && !tracker.duplicates.has(key)) { + delete data.nth; + } + } + + let snapshot = lines.join("\n"); + let truncated = false; + const maxChars = + typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0 + ? Math.floor(params.maxChars) + : undefined; + if (maxChars && snapshot.length > maxChars) { + snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`; + truncated = true; + } + + const stats = getRoleSnapshotStats(snapshot, refs); + return truncated ? 
{ snapshot, truncated, refs, stats } : { snapshot, refs, stats }; +} diff --git a/src/browser/chrome-mcp.test.ts b/src/browser/chrome-mcp.test.ts new file mode 100644 index 00000000000..82149b67792 --- /dev/null +++ b/src/browser/chrome-mcp.test.ts @@ -0,0 +1,100 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + listChromeMcpTabs, + openChromeMcpTab, + resetChromeMcpSessionsForTest, + setChromeMcpSessionFactoryForTest, +} from "./chrome-mcp.js"; + +type ToolCall = { + name: string; + arguments?: Record; +}; + +function createFakeSession() { + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "list_pages") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]", + "2: https://github.com/openclaw/openclaw/pull/45318", + ].join("\n"), + }, + ], + }; + } + if (name === "new_page") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + "2: https://github.com/openclaw/openclaw/pull/45318", + "3: https://example.com/ [selected]", + ].join("\n"), + }, + ], + }; + } + throw new Error(`unexpected tool ${name}`); + }); + + return { + client: { + callTool, + listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }), + close: vi.fn().mockResolvedValue(undefined), + connect: vi.fn().mockResolvedValue(undefined), + }, + transport: { + pid: 123, + }, + ready: Promise.resolve(), + }; +} + +describe("chrome MCP page parsing", () => { + beforeEach(async () => { + await resetChromeMcpSessionsForTest(); + }); + + it("parses list_pages text responses when structuredContent is missing", async () => { + setChromeMcpSessionFactoryForTest(async () => createFakeSession()); + + const tabs = await listChromeMcpTabs("chrome-live"); + + expect(tabs).toEqual([ + { + targetId: "1", + title: "", + url: 
"https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + type: "page", + }, + { + targetId: "2", + title: "", + url: "https://github.com/openclaw/openclaw/pull/45318", + type: "page", + }, + ]); + }); + + it("parses new_page text responses and returns the created tab", async () => { + setChromeMcpSessionFactoryForTest(async () => createFakeSession()); + + const tab = await openChromeMcpTab("chrome-live", "https://example.com/"); + + expect(tab).toEqual({ + targetId: "3", + title: "", + url: "https://example.com/", + type: "page", + }); + }); +}); diff --git a/src/browser/chrome-mcp.ts b/src/browser/chrome-mcp.ts new file mode 100644 index 00000000000..7719a2338e3 --- /dev/null +++ b/src/browser/chrome-mcp.ts @@ -0,0 +1,488 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { Client } from "@modelcontextprotocol/sdk/client/index.js"; +import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js"; +import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js"; +import type { BrowserTab } from "./client.js"; +import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js"; + +type ChromeMcpStructuredPage = { + id: number; + url?: string; + selected?: boolean; +}; + +type ChromeMcpToolResult = { + structuredContent?: Record; + content?: Array>; + isError?: boolean; +}; + +type ChromeMcpSession = { + client: Client; + transport: StdioClientTransport; + ready: Promise; +}; + +type ChromeMcpSessionFactory = (profileName: string) => Promise; + +const DEFAULT_CHROME_MCP_COMMAND = "npx"; +const DEFAULT_CHROME_MCP_ARGS = [ + "-y", + "chrome-devtools-mcp@latest", + "--autoConnect", + "--experimental-page-id-routing", +]; + +const sessions = new Map(); +let sessionFactory: ChromeMcpSessionFactory | null = null; + +function asRecord(value: unknown): Record | null { + return value && typeof value === 
"object" && !Array.isArray(value) + ? (value as Record) + : null; +} + +function asPages(value: unknown): ChromeMcpStructuredPage[] { + if (!Array.isArray(value)) { + return []; + } + const out: ChromeMcpStructuredPage[] = []; + for (const entry of value) { + const record = asRecord(entry); + if (!record || typeof record.id !== "number") { + continue; + } + out.push({ + id: record.id, + url: typeof record.url === "string" ? record.url : undefined, + selected: record.selected === true, + }); + } + return out; +} + +function parsePageId(targetId: string): number { + const parsed = Number.parseInt(targetId.trim(), 10); + if (!Number.isFinite(parsed)) { + throw new BrowserTabNotFoundError(); + } + return parsed; +} + +function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] { + return pages.map((page) => ({ + targetId: String(page.id), + title: "", + url: page.url ?? "", + type: "page", + })); +} + +function extractStructuredContent(result: ChromeMcpToolResult): Record { + return asRecord(result.structuredContent) ?? {}; +} + +function extractTextContent(result: ChromeMcpToolResult): string[] { + const content = Array.isArray(result.content) ? result.content : []; + return content + .map((entry) => { + const record = asRecord(entry); + return record && typeof record.text === "string" ? record.text : ""; + }) + .filter(Boolean); +} + +function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const pages: ChromeMcpStructuredPage[] = []; + for (const block of extractTextContent(result)) { + for (const line of block.split(/\r?\n/)) { + const match = line.match(/^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i); + if (!match) { + continue; + } + pages.push({ + id: Number.parseInt(match[1] ?? 
"", 10), + url: match[2]?.trim() || undefined, + selected: Boolean(match[3]), + }); + } + } + return pages; +} + +function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const structured = asPages(extractStructuredContent(result).pages); + return structured.length > 0 ? structured : extractTextPages(result); +} + +function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode { + const structured = extractStructuredContent(result); + const snapshot = asRecord(structured.snapshot); + if (!snapshot) { + throw new Error("Chrome MCP snapshot response was missing structured snapshot data."); + } + return snapshot as unknown as ChromeMcpSnapshotNode; +} + +function extractJsonBlock(text: string): unknown { + const match = text.match(/```json\s*([\s\S]*?)\s*```/i); + const raw = match?.[1]?.trim() || text.trim(); + return raw ? JSON.parse(raw) : null; +} + +async function createRealSession(profileName: string): Promise { + const transport = new StdioClientTransport({ + command: DEFAULT_CHROME_MCP_COMMAND, + args: DEFAULT_CHROME_MCP_ARGS, + stderr: "pipe", + }); + const client = new Client( + { + name: "openclaw-browser", + version: "0.0.0", + }, + {}, + ); + + const ready = (async () => { + try { + await client.connect(transport); + const tools = await client.listTools(); + if (!tools.tools.some((tool) => tool.name === "list_pages")) { + throw new Error("Chrome MCP server did not expose the expected navigation tools."); + } + } catch (err) { + await client.close().catch(() => {}); + throw new BrowserProfileUnavailableError( + `Chrome MCP existing-session attach failed for profile "${profileName}". ` + + `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. 
` + + `Details: ${String(err)}`, + ); + } + })(); + + return { + client, + transport, + ready, + }; +} + +async function getSession(profileName: string): Promise { + let session = sessions.get(profileName); + if (session && session.transport.pid === null) { + sessions.delete(profileName); + session = undefined; + } + if (!session) { + session = await (sessionFactory ?? createRealSession)(profileName); + sessions.set(profileName, session); + } + try { + await session.ready; + return session; + } catch (err) { + const current = sessions.get(profileName); + if (current?.transport === session.transport) { + sessions.delete(profileName); + } + throw err; + } +} + +async function callTool( + profileName: string, + name: string, + args: Record = {}, +): Promise { + const session = await getSession(profileName); + try { + return (await session.client.callTool({ + name, + arguments: args, + })) as ChromeMcpToolResult; + } catch (err) { + sessions.delete(profileName); + await session.client.close().catch(() => {}); + throw err; + } +} + +async function withTempFile(fn: (filePath: string) => Promise): Promise { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-")); + const filePath = path.join(dir, randomUUID()); + try { + return await fn(filePath); + } finally { + await fs.rm(dir, { recursive: true, force: true }).catch(() => {}); + } +} + +async function findPageById(profileName: string, pageId: number): Promise { + const pages = await listChromeMcpPages(profileName); + const page = pages.find((entry) => entry.id === pageId); + if (!page) { + throw new BrowserTabNotFoundError(); + } + return page; +} + +export async function ensureChromeMcpAvailable(profileName: string): Promise { + await getSession(profileName); +} + +export function getChromeMcpPid(profileName: string): number | null { + return sessions.get(profileName)?.transport.pid ?? 
null; +} + +export async function closeChromeMcpSession(profileName: string): Promise { + const session = sessions.get(profileName); + if (!session) { + return false; + } + sessions.delete(profileName); + await session.client.close().catch(() => {}); + return true; +} + +export async function stopAllChromeMcpSessions(): Promise { + const names = [...sessions.keys()]; + for (const name of names) { + await closeChromeMcpSession(name).catch(() => {}); + } +} + +export async function listChromeMcpPages(profileName: string): Promise { + const result = await callTool(profileName, "list_pages"); + return extractStructuredPages(result); +} + +export async function listChromeMcpTabs(profileName: string): Promise { + return toBrowserTabs(await listChromeMcpPages(profileName)); +} + +export async function openChromeMcpTab(profileName: string, url: string): Promise { + const result = await callTool(profileName, "new_page", { url }); + const pages = extractStructuredPages(result); + const chosen = pages.find((page) => page.selected) ?? pages.at(-1); + if (!chosen) { + throw new Error("Chrome MCP did not return the created page."); + } + return { + targetId: String(chosen.id), + title: "", + url: chosen.url ?? url, + type: "page", + }; +} + +export async function focusChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "select_page", { + pageId: parsePageId(targetId), + bringToFront: true, + }); +} + +export async function closeChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "close_page", { pageId: parsePageId(targetId) }); +} + +export async function navigateChromeMcpPage(params: { + profileName: string; + targetId: string; + url: string; + timeoutMs?: number; +}): Promise<{ url: string }> { + await callTool(params.profileName, "navigate_page", { + pageId: parsePageId(params.targetId), + type: "url", + url: params.url, + ...(typeof params.timeoutMs === "number" ? 
{ timeout: params.timeoutMs } : {}), + }); + const page = await findPageById(params.profileName, parsePageId(params.targetId)); + return { url: page.url ?? params.url }; +} + +export async function takeChromeMcpSnapshot(params: { + profileName: string; + targetId: string; +}): Promise { + const result = await callTool(params.profileName, "take_snapshot", { + pageId: parsePageId(params.targetId), + }); + return extractSnapshot(result); +} + +export async function takeChromeMcpScreenshot(params: { + profileName: string; + targetId: string; + uid?: string; + fullPage?: boolean; + format?: "png" | "jpeg"; +}): Promise { + return await withTempFile(async (filePath) => { + await callTool(params.profileName, "take_screenshot", { + pageId: parsePageId(params.targetId), + filePath, + format: params.format ?? "png", + ...(params.uid ? { uid: params.uid } : {}), + ...(params.fullPage ? { fullPage: true } : {}), + }); + return await fs.readFile(filePath); + }); +} + +export async function clickChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + doubleClick?: boolean; +}): Promise { + await callTool(params.profileName, "click", { + pageId: parsePageId(params.targetId), + uid: params.uid, + ...(params.doubleClick ? 
{ dblClick: true } : {}), + }); +} + +export async function fillChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + value: string; +}): Promise { + await callTool(params.profileName, "fill", { + pageId: parsePageId(params.targetId), + uid: params.uid, + value: params.value, + }); +} + +export async function fillChromeMcpForm(params: { + profileName: string; + targetId: string; + elements: Array<{ uid: string; value: string }>; +}): Promise { + await callTool(params.profileName, "fill_form", { + pageId: parsePageId(params.targetId), + elements: params.elements, + }); +} + +export async function hoverChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; +}): Promise { + await callTool(params.profileName, "hover", { + pageId: parsePageId(params.targetId), + uid: params.uid, + }); +} + +export async function dragChromeMcpElement(params: { + profileName: string; + targetId: string; + fromUid: string; + toUid: string; +}): Promise { + await callTool(params.profileName, "drag", { + pageId: parsePageId(params.targetId), + from_uid: params.fromUid, + to_uid: params.toUid, + }); +} + +export async function uploadChromeMcpFile(params: { + profileName: string; + targetId: string; + uid: string; + filePath: string; +}): Promise { + await callTool(params.profileName, "upload_file", { + pageId: parsePageId(params.targetId), + uid: params.uid, + filePath: params.filePath, + }); +} + +export async function pressChromeMcpKey(params: { + profileName: string; + targetId: string; + key: string; +}): Promise { + await callTool(params.profileName, "press_key", { + pageId: parsePageId(params.targetId), + key: params.key, + }); +} + +export async function resizeChromeMcpPage(params: { + profileName: string; + targetId: string; + width: number; + height: number; +}): Promise { + await callTool(params.profileName, "resize_page", { + pageId: parsePageId(params.targetId), + width: params.width, + height: params.height, + }); +} 
+ +export async function handleChromeMcpDialog(params: { + profileName: string; + targetId: string; + action: "accept" | "dismiss"; + promptText?: string; +}): Promise { + await callTool(params.profileName, "handle_dialog", { + pageId: parsePageId(params.targetId), + action: params.action, + ...(params.promptText ? { promptText: params.promptText } : {}), + }); +} + +export async function evaluateChromeMcpScript(params: { + profileName: string; + targetId: string; + fn: string; + args?: string[]; +}): Promise { + const result = await callTool(params.profileName, "evaluate_script", { + pageId: parsePageId(params.targetId), + function: params.fn, + ...(params.args?.length ? { args: params.args } : {}), + }); + const message = extractStructuredContent(result).message; + const text = typeof message === "string" ? message : ""; + if (!text.trim()) { + return null; + } + return extractJsonBlock(text); +} + +export async function waitForChromeMcpText(params: { + profileName: string; + targetId: string; + text: string[]; + timeoutMs?: number; +}): Promise { + await callTool(params.profileName, "wait_for", { + pageId: parsePageId(params.targetId), + text: params.text, + ...(typeof params.timeoutMs === "number" ? 
{ timeout: params.timeoutMs } : {}), + }); +} + +export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void { + sessionFactory = factory; +} + +export async function resetChromeMcpSessionsForTest(): Promise { + sessionFactory = null; + await stopAllChromeMcpSessions(); +} diff --git a/src/browser/client.ts b/src/browser/client.ts index 953c9efcd11..dc418cf3b4a 100644 --- a/src/browser/client.ts +++ b/src/browser/client.ts @@ -3,6 +3,7 @@ import { fetchBrowserJson } from "./client-fetch.js"; export type BrowserStatus = { enabled: boolean; profile?: string; + driver?: "openclaw" | "extension" | "existing-session"; running: boolean; cdpReady?: boolean; cdpHttp?: boolean; @@ -26,6 +27,7 @@ export type ProfileStatus = { cdpPort: number; cdpUrl: string; color: string; + driver: "openclaw" | "extension" | "existing-session"; running: boolean; tabCount: number; isDefault: boolean; @@ -165,7 +167,7 @@ export async function browserCreateProfile( name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | "extension"; + driver?: "openclaw" | "extension" | "existing-session"; }, ): Promise { return await fetchBrowserJson( diff --git a/src/browser/config.ts b/src/browser/config.ts index 6d24a07a287..529ee791c40 100644 --- a/src/browser/config.ts +++ b/src/browser/config.ts @@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = { cdpHost: string; cdpIsLoopback: boolean; color: string; - driver: "openclaw" | "extension"; + driver: "openclaw" | "extension" | "existing-session"; attachOnly: boolean; }; @@ -335,7 +335,12 @@ export function resolveProfile( let cdpHost = resolved.cdpHost; let cdpPort = profile.cdpPort ?? 0; let cdpUrl = ""; - const driver = profile.driver === "extension" ? "extension" : "openclaw"; + const driver = + profile.driver === "extension" + ? "extension" + : profile.driver === "existing-session" + ? 
"existing-session" + : "openclaw"; if (rawProfileUrl) { const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`); @@ -356,7 +361,7 @@ export function resolveProfile( cdpIsLoopback: isLoopbackHost(cdpHost), color: profile.color, driver, - attachOnly: profile.attachOnly ?? resolved.attachOnly, + attachOnly: driver === "existing-session" ? true : (profile.attachOnly ?? resolved.attachOnly), }; } diff --git a/src/browser/profile-capabilities.ts b/src/browser/profile-capabilities.ts index 07a70ba00c4..2bcf4f8fe9e 100644 --- a/src/browser/profile-capabilities.ts +++ b/src/browser/profile-capabilities.ts @@ -1,6 +1,10 @@ import type { ResolvedBrowserProfile } from "./config.js"; -export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp"; +export type BrowserProfileMode = + | "local-managed" + | "local-extension-relay" + | "local-existing-session" + | "remote-cdp"; export type BrowserProfileCapabilities = { mode: BrowserProfileMode; @@ -31,6 +35,20 @@ export function getBrowserProfileCapabilities( }; } + if (profile.driver === "existing-session") { + return { + mode: "local-existing-session", + isRemote: false, + requiresRelay: false, + requiresAttachedTab: false, + usesPersistentPlaywright: false, + supportsPerTabWs: false, + supportsJsonTabEndpoints: false, + supportsReset: false, + supportsManagedTabLimit: false, + }; + } + if (!profile.cdpIsLoopback) { return { mode: "remote-cdp", @@ -75,6 +93,9 @@ export function resolveDefaultSnapshotFormat(params: { if (capabilities.mode === "local-extension-relay") { return "aria"; } + if (capabilities.mode === "local-existing-session") { + return "ai"; + } return params.hasPlaywright ? 
"ai" : "aria"; } diff --git a/src/browser/profiles-service.test.ts b/src/browser/profiles-service.test.ts index 3dc714d33f3..f70e23ddb67 100644 --- a/src/browser/profiles-service.test.ts +++ b/src/browser/profiles-service.test.ts @@ -1,6 +1,6 @@ import fs from "node:fs"; import path from "node:path"; -import { describe, expect, it, vi } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; import { resolveBrowserConfig } from "./config.js"; import { createBrowserProfilesService } from "./profiles-service.js"; import type { BrowserRouteContext, BrowserServerState } from "./server-context.js"; @@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: { } describe("BrowserProfilesService", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + it("allocates next local port for new profiles", async () => { const { result, state } = await createWorkProfileWithConfig({ resolved: resolveBrowserConfig({}), @@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => { ).rejects.toThrow(/requires an explicit loopback cdpUrl/i); }); + it("creates existing-session profiles as attach-only local entries", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx, state } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + const result = await service.createProfile({ + name: "chrome-live", + driver: "existing-session", + }); + + expect(result.cdpPort).toBe(18801); + expect(result.isRemote).toBe(false); + expect(state.resolved.profiles["chrome-live"]).toEqual({ + cdpPort: 18801, + driver: "existing-session", + attachOnly: true, + color: expect.any(String), + }); + expect(writeConfigFile).toHaveBeenCalledWith( + expect.objectContaining({ + browser: expect.objectContaining({ + profiles: expect.objectContaining({ + "chrome-live": expect.objectContaining({ + cdpPort: 18801, + driver: "existing-session", + attachOnly: true, + }), 
+ }), + }), + }), + ); + }); + + it("rejects driver=existing-session when cdpUrl is provided", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + + await expect( + service.createProfile({ + name: "chrome-live", + driver: "existing-session", + cdpUrl: "http://127.0.0.1:9222", + }), + ).rejects.toThrow(/does not accept cdpUrl/i); + }); + it("deletes remote profiles without stopping or removing local data", async () => { const resolved = resolveBrowserConfig({ profiles: { @@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => { expect(result.deleted).toBe(true); expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir)); }); + + it("deletes existing-session profiles without touching local browser data", async () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }); + const { ctx } = createCtx(resolved); + + vi.mocked(loadConfig).mockReturnValue({ + browser: { + defaultProfile: "openclaw", + profiles: { + openclaw: { cdpPort: 18800, color: "#FF4500" }, + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }, + }); + + const service = createBrowserProfilesService(ctx); + const result = await service.deleteProfile("chrome-live"); + + expect(result.deleted).toBe(false); + expect(ctx.forProfile).not.toHaveBeenCalled(); + expect(movePathToTrash).not.toHaveBeenCalled(); + }); }); diff --git a/src/browser/profiles-service.ts b/src/browser/profiles-service.ts index 962c6408522..936a55c1ffa 100644 --- a/src/browser/profiles-service.ts +++ b/src/browser/profiles-service.ts @@ -27,7 +27,7 @@ export type CreateProfileParams = { name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | 
"extension"; + driver?: "openclaw" | "extension" | "existing-session"; }; export type CreateProfileResult = { @@ -79,7 +79,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { const createProfile = async (params: CreateProfileParams): Promise => { const name = params.name.trim(); const rawCdpUrl = params.cdpUrl?.trim() || undefined; - const driver = params.driver === "extension" ? "extension" : undefined; + const driver = + params.driver === "extension" + ? "extension" + : params.driver === "existing-session" + ? "existing-session" + : undefined; if (!isValidProfileName(name)) { throw new BrowserValidationError( @@ -118,6 +123,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { ); } } + if (driver === "existing-session") { + throw new BrowserValidationError( + "driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow", + ); + } profileConfig = { cdpUrl: parsed.normalized, ...(driver ? { driver } : {}), @@ -136,6 +146,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { profileConfig = { cdpPort, ...(driver ? { driver } : {}), + ...(driver === "existing-session" ? 
{ attachOnly: true } : {}), color: profileColor, }; } @@ -195,7 +206,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { const state = ctx.state(); const resolved = resolveProfile(state.resolved, name); - if (resolved?.cdpIsLoopback) { + if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") { try { await ctx.forProfile(name).stopRunningBrowser(); } catch { diff --git a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts index 43f1a6c7e09..8f64b2bf575 100644 --- a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts +++ b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts @@ -12,40 +12,49 @@ afterEach(async () => { await closePlaywrightBrowserConnection().catch(() => {}); }); +function createExtensionFallbackBrowserHarness(options?: { + urls?: string[]; + newCDPSessionError?: string; +}) { + const pageOn = vi.fn(); + const contextOn = vi.fn(); + const browserOn = vi.fn(); + const browserClose = vi.fn(async () => {}); + const newCDPSession = vi.fn(async () => { + throw new Error(options?.newCDPSessionError ?? "Not allowed"); + }); + + const context = { + pages: () => [], + on: contextOn, + newCDPSession, + } as unknown as import("playwright-core").BrowserContext; + + const pages = (options?.urls ?? [undefined]).map( + (url) => + ({ + on: pageOn, + context: () => context, + ...(url ? 
{ url: () => url } : {}), + }) as unknown as import("playwright-core").Page, + ); + (context as unknown as { pages: () => unknown[] }).pages = () => pages; + + const browser = { + contexts: () => [context], + on: browserOn, + close: browserClose, + } as unknown as import("playwright-core").Browser; + + connectOverCdpSpy.mockResolvedValue(browser); + getChromeWebSocketUrlSpy.mockResolvedValue(null); + return { browserClose, newCDPSession, pages }; +} + describe("pw-session getPageForTargetId", () => { it("falls back to the only page when CDP session attachment is blocked (extension relays)", async () => { - connectOverCdpSpy.mockClear(); - getChromeWebSocketUrlSpy.mockClear(); - - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const page = { - on: pageOn, - context: () => context, - } as unknown as import("playwright-core").Page; - - // Fill pages() after page exists. 
- (context as unknown as { pages: () => unknown[] }).pages = () => [page]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const { browserClose, pages } = createExtensionFallbackBrowserHarness(); + const [page] = pages; const resolved = await getPageForTargetId({ cdpUrl: "http://127.0.0.1:18792", @@ -58,40 +67,9 @@ describe("pw-session getPageForTargetId", () => { }); it("uses the shared HTTP-base normalization when falling back to /json/list for direct WebSocket CDP URLs", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + }).pages; const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue({ ok: true, @@ -117,41 +95,11 @@ describe("pw-session getPageForTargetId", () => { }); it("resolves extension-relay pages from /json/list without probing page 
CDP sessions first", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - const newCDPSession = vi.fn(async () => { - throw new Error("Target.attachToBrowserTarget: Not allowed"); + const { newCDPSession, pages } = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + newCDPSessionError: "Target.attachToBrowserTarget: Not allowed", }); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession, - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = pages; const fetchSpy = vi.spyOn(globalThis, "fetch"); fetchSpy diff --git a/src/browser/routes/agent.act.download.ts b/src/browser/routes/agent.act.download.ts index d08287fea59..9ed04469c26 100644 --- a/src/browser/routes/agent.act.download.ts +++ b/src/browser/routes/agent.act.download.ts @@ -1,5 +1,10 @@ import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { ensureOutputRootDir, resolveWritableOutputPathOrRespond } from "./output-paths.js"; import { DEFAULT_DOWNLOAD_DIR } from "./path-output.js"; 
import type { BrowserRouteRegistrar } from "./types.js"; @@ -23,13 +28,23 @@ export function registerBrowserAgentActDownloadRoutes( const out = toStringOrEmpty(body.path) || ""; const timeoutMs = toNumber(body.timeoutMs); - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "wait for download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "download waiting is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "wait for download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); let downloadPath: string | undefined; if (out.trim()) { @@ -67,13 +82,23 @@ export function registerBrowserAgentActDownloadRoutes( return jsonError(res, 400, "path is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "downloads are not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); const downloadPath = await resolveWritableOutputPathOrRespond({ res, diff --git a/src/browser/routes/agent.act.hooks.ts b/src/browser/routes/agent.act.hooks.ts index 56d97bb03d3..bb1f03b7a7c 100644 --- a/src/browser/routes/agent.act.hooks.ts +++ b/src/browser/routes/agent.act.hooks.ts @@ -1,5 +1,11 @@ +import { evaluateChromeMcpScript, uploadChromeMcpFile } from "../chrome-mcp.js"; import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + 
requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "./path-output.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; @@ -20,13 +26,12 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "paths are required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "file chooser hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const uploadPathsResult = await resolveExistingPathsWithinRoot({ rootDir: DEFAULT_UPLOAD_DIR, requestedPaths: paths, @@ -38,6 +43,39 @@ export function registerBrowserAgentActHookRoutes( } const resolvedPaths = uploadPathsResult.paths; + if (profileCtx.profile.driver === "existing-session") { + if (element) { + return jsonError( + res, + 501, + "existing-session file uploads do not support element selectors; use ref/inputRef.", + ); + } + if (resolvedPaths.length !== 1) { + return jsonError( + res, + 501, + "existing-session file uploads currently support one file at a time.", + ); + } + const uid = inputRef || ref; + if (!uid) { + return jsonError(res, 501, "existing-session file uploads require ref or inputRef."); + } + await uploadChromeMcpFile({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid, + filePath: resolvedPaths[0] ?? 
"", + }); + return res.json({ ok: true }); + } + + const pw = await requirePwAi(res, "file chooser hook"); + if (!pw) { + return; + } + if (inputRef || element) { if (ref) { return jsonError(res, 400, "ref cannot be combined with inputRef/element"); @@ -79,13 +117,69 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "accept is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "dialog hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session dialog handling does not support timeoutMs.", + ); + } + await evaluateChromeMcpScript({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + fn: `() => { + const state = (window.__openclawDialogHook ??= {}); + if (!state.originals) { + state.originals = { + alert: window.alert.bind(window), + confirm: window.confirm.bind(window), + prompt: window.prompt.bind(window), + }; + } + const originals = state.originals; + const restore = () => { + window.alert = originals.alert; + window.confirm = originals.confirm; + window.prompt = originals.prompt; + delete window.__openclawDialogHook; + }; + window.alert = (...args) => { + try { + return undefined; + } finally { + restore(); + } + }; + window.confirm = (...args) => { + try { + return ${accept ? "true" : "false"}; + } finally { + restore(); + } + }; + window.prompt = (...args) => { + try { + return ${accept ? JSON.stringify(promptText ?? 
"") : "null"}; + } finally { + restore(); + } + }; + return true; + }`, + }); + return res.json({ ok: true }); + } + const pw = await requirePwAi(res, "dialog hook"); + if (!pw) { + return; + } await pw.armDialogViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.act.ts b/src/browser/routes/agent.act.ts index 2ae6073c7cf..8928a8a7d06 100644 --- a/src/browser/routes/agent.act.ts +++ b/src/browser/routes/agent.act.ts @@ -1,3 +1,14 @@ +import { + clickChromeMcpElement, + closeChromeMcpTab, + dragChromeMcpElement, + evaluateChromeMcpScript, + fillChromeMcpElement, + fillChromeMcpForm, + hoverChromeMcpElement, + pressChromeMcpKey, + resizeChromeMcpPage, +} from "../chrome-mcp.js"; import type { BrowserFormField } from "../client-actions-core.js"; import { normalizeBrowserFormField } from "../form-fields.js"; import type { BrowserRouteContext } from "../server-context.js"; @@ -11,13 +22,88 @@ import { } from "./agent.act.shared.js"; import { readBody, + requirePwAi, resolveTargetIdFromBody, - withPlaywrightRouteContext, + withRouteTabContext, SELECTOR_UNSUPPORTED_MESSAGE, } from "./agent.shared.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +function buildExistingSessionWaitPredicate(params: { + text?: string; + textGone?: string; + selector?: string; + url?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; +}): string | null { + const checks: string[] = []; + if (params.text) { + checks.push(`Boolean(document.body?.innerText?.includes(${JSON.stringify(params.text)}))`); + } + if (params.textGone) { + checks.push(`!document.body?.innerText?.includes(${JSON.stringify(params.textGone)})`); + } + if (params.selector) { + 
checks.push(`Boolean(document.querySelector(${JSON.stringify(params.selector)}))`); + } + if (params.url) { + checks.push(`window.location.href === ${JSON.stringify(params.url)}`); + } + if (params.loadState === "domcontentloaded") { + checks.push(`document.readyState === "interactive" || document.readyState === "complete"`); + } else if (params.loadState === "load" || params.loadState === "networkidle") { + checks.push(`document.readyState === "complete"`); + } + if (params.fn) { + checks.push(`Boolean(await (${params.fn})())`); + } + if (checks.length === 0) { + return null; + } + return checks.length === 1 ? checks[0] : checks.map((check) => `(${check})`).join(" && "); +} + +async function waitForExistingSessionCondition(params: { + profileName: string; + targetId: string; + timeMs?: number; + text?: string; + textGone?: string; + selector?: string; + url?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; + timeoutMs?: number; +}): Promise { + if (params.timeMs && params.timeMs > 0) { + await sleep(params.timeMs); + } + const predicate = buildExistingSessionWaitPredicate(params); + if (!predicate) { + return; + } + const timeoutMs = Math.max(250, params.timeoutMs ?? 
10_000); + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + const ready = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `async () => ${predicate}`, + }); + if (ready) { + return; + } + await sleep(250); + } + throw new Error("Timed out waiting for condition"); +} + export function registerBrowserAgentActRoutes( app: BrowserRouteRegistrar, ctx: BrowserRouteContext, @@ -34,14 +120,15 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, SELECTOR_UNSUPPORTED_MESSAGE); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: `act:${kind}`, - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const evaluateEnabled = ctx.state().resolved.evaluateEnabled; + const isExistingSession = profileCtx.profile.driver === "existing-session"; + const profileName = profileCtx.profile.name; switch (kind) { case "click": { @@ -63,6 +150,26 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, parsedModifiers.error); } const modifiers = parsedModifiers.modifiers; + if (isExistingSession) { + if ((button && button !== "left") || (modifiers && modifiers.length > 0)) { + return jsonError( + res, + 501, + "existing-session click currently supports left-click only (no button overrides/modifiers).", + ); + } + await clickChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref, + doubleClick, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const clickRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -93,6 +200,33 @@ export function registerBrowserAgentActRoutes( const submit = toBoolean(body.submit) ?? false; const slowly = toBoolean(body.slowly) ?? 
false; const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (slowly) { + return jsonError( + res, + 501, + "existing-session type does not support slowly=true; use fill/press instead.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref, + value: text, + }); + if (submit) { + await pressChromeMcpKey({ + profileName, + targetId: tab.targetId, + key: "Enter", + }); + } + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const typeRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -113,6 +247,17 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "key is required"); } const delayMs = toNumber(body.delayMs); + if (isExistingSession) { + if (delayMs) { + return jsonError(res, 501, "existing-session press does not support delayMs."); + } + await pressChromeMcpKey({ profileName, targetId: tab.targetId, key }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.pressKeyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -127,6 +272,21 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session hover does not support timeoutMs overrides.", + ); + } + await hoverChromeMcpElement({ profileName, targetId: tab.targetId, uid: ref }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.hoverViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -141,6 +301,26 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + 
if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session scrollIntoView does not support timeoutMs overrides.", + ); + } + await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn: `(el) => { el.scrollIntoView({ block: "center", inline: "center" }); return true; }`, + args: [ref], + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const scrollRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -159,6 +339,26 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "startRef and endRef are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session drag does not support timeoutMs overrides.", + ); + } + await dragChromeMcpElement({ + profileName, + targetId: tab.targetId, + fromUid: startRef, + toUid: endRef, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.dragViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -175,6 +375,33 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref and values are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (values.length !== 1) { + return jsonError( + res, + 501, + "existing-session select currently supports a single value only.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session select does not support timeoutMs overrides.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref, + value: values[0] ?? 
"", + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.selectOptionViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -198,6 +425,28 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "fields are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session fill does not support timeoutMs overrides.", + ); + } + await fillChromeMcpForm({ + profileName, + targetId: tab.targetId, + elements: fields.map((field) => ({ + uid: field.ref, + value: String(field.value ?? ""), + })), + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.fillFormViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -212,6 +461,19 @@ export function registerBrowserAgentActRoutes( if (!width || !height) { return jsonError(res, 400, "width and height are required"); } + if (isExistingSession) { + await resizeChromeMcpPage({ + profileName, + targetId: tab.targetId, + width, + height, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.resizeViewportViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -260,6 +522,25 @@ export function registerBrowserAgentActRoutes( "wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn", ); } + if (isExistingSession) { + await waitForExistingSessionCondition({ + profileName, + targetId: tab.targetId, + timeMs, + text, + textGone, + selector, + url, + loadState, + fn, + timeoutMs, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.waitForViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -291,6 +572,31 
@@ export function registerBrowserAgentActRoutes( } const ref = toStringOrEmpty(body.ref) || undefined; const evalTimeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (evalTimeoutMs !== undefined) { + return jsonError( + res, + 501, + "existing-session evaluate does not support timeoutMs overrides.", + ); + } + const result = await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn, + args: ref ? [ref] : undefined, + }); + return res.json({ + ok: true, + targetId: tab.targetId, + url: tab.url, + result, + }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const evalRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -310,6 +616,14 @@ export function registerBrowserAgentActRoutes( }); } case "close": { + if (isExistingSession) { + await closeChromeMcpTab(profileName, tab.targetId); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.closePageViaPlaywright({ cdpUrl, targetId: tab.targetId }); return res.json({ ok: true, targetId: tab.targetId }); } @@ -334,13 +648,23 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "response body", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "response body is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "response body"); + if (!pw) { + return; + } const result = await pw.responseBodyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -361,13 +685,39 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, 
ctx, targetId, - feature: "highlight", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + await evaluateChromeMcpScript({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + args: [ref], + fn: `(el) => { + if (!(el instanceof Element)) { + return false; + } + el.scrollIntoView({ block: "center", inline: "center" }); + const previousOutline = el.style.outline; + const previousOffset = el.style.outlineOffset; + el.style.outline = "3px solid #FF4500"; + el.style.outlineOffset = "2px"; + setTimeout(() => { + el.style.outline = previousOutline; + el.style.outlineOffset = previousOffset; + }, 2000); + return true; + }`, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, "highlight"); + if (!pw) { + return; + } await pw.highlightViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.snapshot.ts b/src/browser/routes/agent.snapshot.ts index c750cafe723..1b8626141b5 100644 --- a/src/browser/routes/agent.snapshot.ts +++ b/src/browser/routes/agent.snapshot.ts @@ -1,6 +1,20 @@ import path from "node:path"; import { ensureMediaDir, saveMediaBuffer } from "../../media/store.js"; import { captureScreenshot, snapshotAria } from "../cdp.js"; +import { + evaluateChromeMcpScript, + navigateChromeMcpPage, + takeChromeMcpScreenshot, + takeChromeMcpSnapshot, +} from "../chrome-mcp.js"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "../chrome-mcp.snapshot.js"; +import { + assertBrowserNavigationAllowed, + assertBrowserNavigationResultAllowed, +} from "../navigation-guard.js"; import { withBrowserNavigationPolicy } from "../navigation-guard.js"; import { DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, @@ -25,6 +39,89 @@ import { import type { BrowserResponse, BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toStringOrEmpty } from 
"./utils.js"; +const CHROME_MCP_OVERLAY_ATTR = "data-openclaw-mcp-overlay"; + +async function clearChromeMcpOverlay(params: { + profileName: string; + targetId: string; +}): Promise { + await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `() => { + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + return true; + }`, + }).catch(() => {}); +} + +async function renderChromeMcpLabels(params: { + profileName: string; + targetId: string; + refs: string[]; +}): Promise<{ labels: number; skipped: number }> { + const refList = JSON.stringify(params.refs); + const result = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + args: params.refs, + fn: `(...elements) => { + const refs = ${refList}; + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + const root = document.createElement("div"); + root.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "labels"); + root.style.position = "fixed"; + root.style.inset = "0"; + root.style.pointerEvents = "none"; + root.style.zIndex = "2147483647"; + let labels = 0; + let skipped = 0; + elements.forEach((el, index) => { + if (!(el instanceof Element)) { + skipped += 1; + return; + } + const rect = el.getBoundingClientRect(); + if (rect.width <= 0 && rect.height <= 0) { + skipped += 1; + return; + } + labels += 1; + const badge = document.createElement("div"); + badge.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "label"); + badge.textContent = refs[index] || String(labels); + badge.style.position = "fixed"; + badge.style.left = \`\${Math.max(0, rect.left)}px\`; + badge.style.top = \`\${Math.max(0, rect.top)}px\`; + badge.style.transform = "translateY(-100%)"; + badge.style.padding = "2px 6px"; + badge.style.borderRadius = "999px"; + badge.style.background = "#FF4500"; + badge.style.color = "#fff"; + badge.style.font = "600 12px ui-monospace, SFMono-Regular, Menlo, 
monospace"; + badge.style.boxShadow = "0 2px 6px rgba(0,0,0,0.35)"; + badge.style.whiteSpace = "nowrap"; + root.appendChild(badge); + }); + document.documentElement.appendChild(root); + return { labels, skipped }; + }`, + }); + const labels = + result && + typeof result === "object" && + typeof (result as { labels?: unknown }).labels === "number" + ? (result as { labels: number }).labels + : 0; + const skipped = + result && + typeof result === "object" && + typeof (result as { skipped?: unknown }).skipped === "number" + ? (result as { skipped: number }).skipped + : 0; + return { labels, skipped }; +} + async function saveBrowserMediaResponse(params: { res: BrowserResponse; buffer: Buffer; @@ -96,13 +193,27 @@ export function registerBrowserAgentSnapshotRoutes( if (!url) { return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "navigate", - run: async ({ cdpUrl, tab, pw, profileCtx }) => { + run: async ({ profileCtx, tab, cdpUrl }) => { + if (profileCtx.profile.driver === "existing-session") { + const ssrfPolicyOpts = withBrowserNavigationPolicy(ctx.state().resolved.ssrfPolicy); + await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const result = await navigateChromeMcpPage({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + url, + }); + await assertBrowserNavigationResultAllowed({ url: result.url, ...ssrfPolicyOpts }); + return res.json({ ok: true, targetId: tab.targetId, ...result }); + } + const pw = await requirePwAi(res, "navigate"); + if (!pw) { + return; + } const result = await pw.navigateViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -122,6 +233,17 @@ export function registerBrowserAgentSnapshotRoutes( app.post("/pdf", async (req, res) => { const body = readBody(req); const targetId = toStringOrEmpty(body.targetId) || undefined; + const profileCtx = resolveProfileContext(req, res, ctx); + if (!profileCtx) { + return; + } + 
if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "pdf is not supported for existing-session profiles yet; use screenshot/snapshot instead.", + ); + } await withPlaywrightRouteContext({ req, res, @@ -163,6 +285,36 @@ export function registerBrowserAgentSnapshotRoutes( ctx, targetId, run: async ({ profileCtx, tab, cdpUrl }) => { + if (profileCtx.profile.driver === "existing-session") { + if (element) { + return jsonError( + res, + 400, + "element screenshots are not supported for existing-session profiles; use ref from snapshot.", + ); + } + const buffer = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid: ref, + fullPage, + format: type, + }); + const normalized = await normalizeBrowserScreenshot(buffer, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await saveBrowserMediaResponse({ + res, + buffer: normalized.buffer, + contentType: normalized.contentType ?? 
`image/${type}`, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + targetId: tab.targetId, + url: tab.url, + }); + return; + } + let buffer: Buffer; const shouldUsePlaywright = shouldUsePlaywrightForScreenshot({ profile: profileCtx.profile, @@ -227,6 +379,90 @@ export function registerBrowserAgentSnapshotRoutes( if ((plan.labels || plan.mode === "efficient") && plan.format === "aria") { return jsonError(res, 400, "labels/mode=efficient require format=ai"); } + if (profileCtx.profile.driver === "existing-session") { + if (plan.labels) { + return jsonError(res, 501, "labels are not supported for existing-session profiles yet."); + } + if (plan.selectorValue || plan.frameSelectorValue) { + return jsonError( + res, + 400, + "selector/frame snapshots are not supported for existing-session profiles; snapshot the whole page and use refs.", + ); + } + const snapshot = await takeChromeMcpSnapshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + if (plan.format === "aria") { + return res.json({ + ok: true, + format: "aria", + targetId: tab.targetId, + url: tab.url, + nodes: flattenChromeMcpSnapshotToAriaNodes(snapshot, plan.limit), + }); + } + const built = buildAiSnapshotFromChromeMcpSnapshot({ + root: snapshot, + options: { + interactive: plan.interactive ?? undefined, + compact: plan.compact ?? undefined, + maxDepth: plan.depth ?? 
undefined, + }, + maxChars: plan.resolvedMaxChars, + }); + if (plan.labels) { + const refs = Object.keys(built.refs); + const labelResult = await renderChromeMcpLabels({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + refs, + }); + try { + const labeled = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + format: "png", + }); + const normalized = await normalizeBrowserScreenshot(labeled, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await ensureMediaDir(); + const saved = await saveMediaBuffer( + normalized.buffer, + normalized.contentType ?? "image/png", + "browser", + DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + ); + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + labels: true, + labelsCount: labelResult.labels, + labelsSkipped: labelResult.skipped, + imagePath: path.resolve(saved.path), + imageType: normalized.contentType?.includes("jpeg") ? 
"jpeg" : "png", + ...built, + }); + } finally { + await clearChromeMcpOverlay({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + } + } + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + ...built, + }); + } if (plan.format === "ai") { const pw = await requirePwAi(res, "ai snapshot"); if (!pw) { diff --git a/src/browser/routes/basic.ts b/src/browser/routes/basic.ts index 5f32c86729b..9991744107d 100644 --- a/src/browser/routes/basic.ts +++ b/src/browser/routes/basic.ts @@ -1,3 +1,4 @@ +import { getChromeMcpPid } from "../chrome-mcp.js"; import { resolveBrowserExecutableForPlatform } from "../chrome.executables.js"; import { toBrowserErrorResponse } from "../errors.js"; import { createBrowserProfilesService } from "../profiles-service.js"; @@ -76,10 +77,14 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow res.json({ enabled: current.resolved.enabled, profile: profileCtx.profile.name, + driver: profileCtx.profile.driver, running: cdpReady, cdpReady, cdpHttp, - pid: profileState?.running?.pid ?? null, + pid: + profileCtx.profile.driver === "existing-session" + ? getChromeMcpPid(profileCtx.profile.name) + : (profileState?.running?.pid ?? null), cdpPort: profileCtx.profile.cdpPort, cdpUrl: profileCtx.profile.cdpUrl, chosenBrowser: profileState?.running?.exe.kind ?? null, @@ -146,6 +151,7 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow const driver = toStringOrEmpty((req.body as { driver?: unknown })?.driver) as | "openclaw" | "extension" + | "existing-session" | ""; if (!name) { @@ -158,7 +164,12 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow name, color: color || undefined, cdpUrl: cdpUrl || undefined, - driver: driver === "extension" ? "extension" : undefined, + driver: + driver === "extension" + ? "extension" + : driver === "existing-session" + ? 
"existing-session" + : undefined, }); res.json(result); } catch (err) { diff --git a/src/browser/server-context.availability.ts b/src/browser/server-context.availability.ts index 3b00ff99dff..d2d9944d964 100644 --- a/src/browser/server-context.availability.ts +++ b/src/browser/server-context.availability.ts @@ -3,6 +3,11 @@ import { PROFILE_POST_RESTART_WS_TIMEOUT_MS, resolveCdpReachabilityTimeouts, } from "./cdp-timeouts.js"; +import { + closeChromeMcpSession, + ensureChromeMcpAvailable, + listChromeMcpTabs, +} from "./chrome-mcp.js"; import { isChromeCdpReady, isChromeReachable, @@ -60,11 +65,19 @@ export function createProfileAvailability({ }); const isReachable = async (timeoutMs?: number) => { + if (profile.driver === "existing-session") { + await ensureChromeMcpAvailable(profile.name); + await listChromeMcpTabs(profile.name); + return true; + } const { httpTimeoutMs, wsTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeCdpReady(profile.cdpUrl, httpTimeoutMs, wsTimeoutMs); }; const isHttpReachable = async (timeoutMs?: number) => { + if (profile.driver === "existing-session") { + return await isReachable(timeoutMs); + } const { httpTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeReachable(profile.cdpUrl, httpTimeoutMs); }; @@ -109,6 +122,9 @@ export function createProfileAvailability({ if (previousProfile.driver === "extension") { await stopChromeExtensionRelayServer({ cdpUrl: previousProfile.cdpUrl }).catch(() => false); } + if (previousProfile.driver === "existing-session") { + await closeChromeMcpSession(previousProfile.name).catch(() => false); + } await closePlaywrightBrowserConnectionForProfile(previousProfile.cdpUrl); if (previousProfile.cdpUrl !== profile.cdpUrl) { await closePlaywrightBrowserConnectionForProfile(profile.cdpUrl); @@ -138,6 +154,10 @@ export function createProfileAvailability({ const ensureBrowserAvailable = async (): Promise => { await reconcileProfileRuntime(); + if (profile.driver === "existing-session") { + 
await ensureChromeMcpAvailable(profile.name); + return; + } const current = state(); const remoteCdp = capabilities.isRemote; const attachOnly = profile.attachOnly; @@ -238,6 +258,10 @@ export function createProfileAvailability({ const stopRunningBrowser = async (): Promise<{ stopped: boolean }> => { await reconcileProfileRuntime(); + if (profile.driver === "existing-session") { + const stopped = await closeChromeMcpSession(profile.name); + return { stopped }; + } if (capabilities.requiresRelay) { const stopped = await stopChromeExtensionRelayServer({ cdpUrl: profile.cdpUrl, diff --git a/src/browser/server-context.existing-session.test.ts b/src/browser/server-context.existing-session.test.ts new file mode 100644 index 00000000000..abbd222342e --- /dev/null +++ b/src/browser/server-context.existing-session.test.ts @@ -0,0 +1,102 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createBrowserRouteContext } from "./server-context.js"; +import type { BrowserServerState } from "./server-context.js"; + +vi.mock("./chrome-mcp.js", () => ({ + closeChromeMcpSession: vi.fn(async () => true), + ensureChromeMcpAvailable: vi.fn(async () => {}), + focusChromeMcpTab: vi.fn(async () => {}), + listChromeMcpTabs: vi.fn(async () => [ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]), + openChromeMcpTab: vi.fn(async () => ({ + targetId: "8", + title: "", + url: "https://openclaw.ai", + type: "page", + })), + closeChromeMcpTab: vi.fn(async () => {}), + getChromeMcpPid: vi.fn(() => 4321), +})); + +import * as chromeMcp from "./chrome-mcp.js"; + +function makeState(): BrowserServerState { + return { + server: null, + port: 0, + resolved: { + enabled: true, + evaluateEnabled: true, + controlPort: 18791, + cdpPortRangeStart: 18800, + cdpPortRangeEnd: 18899, + cdpProtocol: "http", + cdpHost: "127.0.0.1", + cdpIsLoopback: true, + remoteCdpTimeoutMs: 1500, + remoteCdpHandshakeTimeoutMs: 3000, + color: "#FF4500", + headless: false, + 
noSandbox: false, + attachOnly: false, + defaultProfile: "chrome-live", + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + extraArgs: [], + ssrfPolicy: { dangerouslyAllowPrivateNetwork: true }, + }, + profiles: new Map(), + }; +} + +afterEach(() => { + vi.clearAllMocks(); +}); + +describe("browser server-context existing-session profile", () => { + it("routes tab operations through the Chrome MCP backend", async () => { + const state = makeState(); + const ctx = createBrowserRouteContext({ getState: () => state }); + const live = ctx.forProfile("chrome-live"); + + vi.mocked(chromeMcp.listChromeMcpTabs) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]); + + await live.ensureBrowserAvailable(); + const tabs = await live.listTabs(); + expect(tabs.map((tab) => tab.targetId)).toEqual(["7"]); + + const opened = await live.openTab("https://openclaw.ai"); + expect(opened.targetId).toBe("8"); + + const selected = await live.ensureTabAvailable(); + expect(selected.targetId).toBe("8"); + + await live.focusTab("7"); + await live.stopRunningBrowser(); + + expect(chromeMcp.ensureChromeMcpAvailable).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.listChromeMcpTabs).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.openChromeMcpTab).toHaveBeenCalledWith("chrome-live", "https://openclaw.ai"); + expect(chromeMcp.focusChromeMcpTab).toHaveBeenCalledWith("chrome-live", "7"); + expect(chromeMcp.closeChromeMcpSession).toHaveBeenCalledWith("chrome-live"); + }); +}); diff --git a/src/browser/server-context.selection.ts 
b/src/browser/server-context.selection.ts index 8a9cfa19c42..9e1fb728b2a 100644 --- a/src/browser/server-context.selection.ts +++ b/src/browser/server-context.selection.ts @@ -1,5 +1,6 @@ import { fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath } from "./cdp.js"; +import { closeChromeMcpTab, focusChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { BrowserTabNotFoundError, BrowserTargetAmbiguousError } from "./errors.js"; import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; @@ -111,6 +112,13 @@ export function createProfileSelectionOps({ const focusTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (profile.driver === "existing-session") { + await focusChromeMcpTab(profile.name, resolvedTargetId); + const profileState = getProfileState(); + profileState.lastTargetId = resolvedTargetId; + return; + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const focusPageByTargetIdViaPlaywright = (mod as Partial | null) @@ -134,6 +142,11 @@ export function createProfileSelectionOps({ const closeTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (profile.driver === "existing-session") { + await closeChromeMcpTab(profile.name, resolvedTargetId); + return; + } + // For remote profiles, use Playwright's persistent connection to close tabs if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); diff --git a/src/browser/server-context.tab-ops.ts b/src/browser/server-context.tab-ops.ts index 24985430bdc..067536fd017 100644 --- a/src/browser/server-context.tab-ops.ts +++ b/src/browser/server-context.tab-ops.ts @@ -1,6 +1,7 @@ import { CDP_JSON_NEW_TIMEOUT_MS } from "./cdp-timeouts.js"; import { fetchJson, fetchOk, 
normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath, createTargetViaCdp, normalizeCdpWsUrl } from "./cdp.js"; +import { listChromeMcpTabs, openChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { assertBrowserNavigationAllowed, @@ -65,6 +66,10 @@ export function createProfileTabOps({ const capabilities = getBrowserProfileCapabilities(profile); const listTabs = async (): Promise => { + if (profile.driver === "existing-session") { + return await listChromeMcpTabs(profile.name); + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const listPagesViaPlaywright = (mod as Partial | null)?.listPagesViaPlaywright; @@ -134,6 +139,15 @@ export function createProfileTabOps({ const openTab = async (url: string): Promise => { const ssrfPolicyOpts = withBrowserNavigationPolicy(state().resolved.ssrfPolicy); + if (profile.driver === "existing-session") { + await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const page = await openChromeMcpTab(profile.name, url); + const profileState = getProfileState(); + profileState.lastTargetId = page.targetId; + await assertBrowserNavigationResultAllowed({ url: page.url, ...ssrfPolicyOpts }); + return page; + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const createPageViaPlaywright = (mod as Partial | null)?.createPageViaPlaywright; diff --git a/src/browser/server-context.ts b/src/browser/server-context.ts index d75b14c2471..37e182f1e69 100644 --- a/src/browser/server-context.ts +++ b/src/browser/server-context.ts @@ -162,12 +162,22 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon let tabCount = 0; let running = false; + const profileCtx = createProfileContext(opts, profile); - if (profileState?.running) { + if (profile.driver === "existing-session") { + try { + running = await 
profileCtx.isReachable(300); + if (running) { + const tabs = await profileCtx.listTabs(); + tabCount = tabs.filter((t) => t.type === "page").length; + } + } catch { + // Chrome MCP not available + } + } else if (profileState?.running) { running = true; try { - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs(); + const tabs = await profileCtx.listTabs(); tabCount = tabs.filter((t) => t.type === "page").length; } catch { // Browser might not be responsive @@ -178,8 +188,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon const reachable = await isChromeReachable(profile.cdpUrl, 200); if (reachable) { running = true; - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs().catch(() => []); + const tabs = await profileCtx.listTabs().catch(() => []); tabCount = tabs.filter((t) => t.type === "page").length; } } catch { @@ -192,6 +201,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon cdpPort: profile.cdpPort, cdpUrl: profile.cdpUrl, color: profile.color, + driver: profile.driver, running, tabCount, isDefault: name === current.resolved.defaultProfile, diff --git a/src/browser/server-context.types.ts b/src/browser/server-context.types.ts index f05e90e9e77..8f949b96da6 100644 --- a/src/browser/server-context.types.ts +++ b/src/browser/server-context.types.ts @@ -56,6 +56,7 @@ export type ProfileStatus = { cdpPort: number; cdpUrl: string; color: string; + driver: ResolvedBrowserProfile["driver"]; running: boolean; tabCount: number; isDefault: boolean; diff --git a/src/cli/browser-cli-manage.ts b/src/cli/browser-cli-manage.ts index 53b83ca3f97..31d4b02c2aa 100644 --- a/src/cli/browser-cli-manage.ts +++ b/src/cli/browser-cli-manage.ts @@ -407,7 +407,8 @@ export function registerBrowserManageCommands( const def = p.isDefault ? " [default]" : ""; const loc = p.isRemote ? `cdpUrl: ${p.cdpUrl}` : `port: ${p.cdpPort}`; const remote = p.isRemote ? 
" [remote]" : ""; - return `${p.name}: ${status}${tabs}${def}${remote}\n ${loc}, color: ${p.color}`; + const driver = p.driver !== "openclaw" ? ` [${p.driver}]` : ""; + return `${p.name}: ${status}${tabs}${def}${remote}${driver}\n ${loc}, color: ${p.color}`; }) .join("\n"), ); @@ -420,7 +421,10 @@ export function registerBrowserManageCommands( .requiredOption("--name ", "Profile name (lowercase, numbers, hyphens)") .option("--color ", "Profile color (hex format, e.g. #0066CC)") .option("--cdp-url ", "CDP URL for remote Chrome (http/https)") - .option("--driver ", "Profile driver (openclaw|extension). Default: openclaw") + .option( + "--driver ", + "Profile driver (openclaw|extension|existing-session). Default: openclaw", + ) .action( async (opts: { name: string; color?: string; cdpUrl?: string; driver?: string }, cmd) => { const parent = parentOpts(cmd); @@ -434,7 +438,12 @@ export function registerBrowserManageCommands( name: opts.name, color: opts.color, cdpUrl: opts.cdpUrl, - driver: opts.driver === "extension" ? "extension" : undefined, + driver: + opts.driver === "extension" + ? "extension" + : opts.driver === "existing-session" + ? "existing-session" + : undefined, }, }, { timeoutMs: 10_000 }, @@ -446,7 +455,11 @@ export function registerBrowserManageCommands( defaultRuntime.log( info( `🦞 Created profile "${result.profile}"\n${loc}\n color: ${result.color}${ - opts.driver === "extension" ? "\n driver: extension" : "" + opts.driver === "extension" + ? "\n driver: extension" + : opts.driver === "existing-session" + ? 
"\n driver: existing-session" + : "" }`, ), ); diff --git a/src/cli/command-secret-gateway.test.ts b/src/cli/command-secret-gateway.test.ts index 6d0f89f6349..74c47f637e9 100644 --- a/src/cli/command-secret-gateway.test.ts +++ b/src/cli/command-secret-gateway.test.ts @@ -64,6 +64,17 @@ describe("resolveCommandSecretRefsViaGateway", () => { }); } + function expectGatewayUnavailableLocalFallbackDiagnostics( + result: Awaited>, + ) { + expect( + result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), + ).toBe(true); + expect( + result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), + ).toBe(true); + } + it("returns config unchanged when no target SecretRefs are configured", async () => { const config = { talk: { @@ -208,11 +219,8 @@ describe("resolveCommandSecretRefsViaGateway", () => { it("falls back to local resolution for web search SecretRefs when gateway is unavailable", async () => { const envKey = "WEB_SEARCH_GEMINI_API_KEY_LOCAL_FALLBACK"; - const priorValue = process.env[envKey]; - process.env[envKey] = "gemini-local-fallback-key"; - callGateway.mockRejectedValueOnce(new Error("gateway closed")); - - try { + await withEnvValue(envKey, "gemini-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); const result = await resolveCommandSecretRefsViaGateway({ config: { tools: { @@ -234,28 +242,14 @@ describe("resolveCommandSecretRefsViaGateway", () => { "gemini-local-fallback-key", ); expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("resolved_local"); - expect( - result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), - ).toBe(true); - expect( - result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), - ).toBe(true); - } finally { - if (priorValue === undefined) { - delete process.env[envKey]; - } else { - process.env[envKey] = priorValue; - } - } + 
expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); }); it("falls back to local resolution for Firecrawl SecretRefs when gateway is unavailable", async () => { const envKey = "WEB_FETCH_FIRECRAWL_API_KEY_LOCAL_FALLBACK"; - const priorValue = process.env[envKey]; - process.env[envKey] = "firecrawl-local-fallback-key"; - callGateway.mockRejectedValueOnce(new Error("gateway closed")); - - try { + await withEnvValue(envKey, "firecrawl-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); const result = await resolveCommandSecretRefsViaGateway({ config: { tools: { @@ -276,19 +270,8 @@ describe("resolveCommandSecretRefsViaGateway", () => { "firecrawl-local-fallback-key", ); expect(result.targetStatesByPath["tools.web.fetch.firecrawl.apiKey"]).toBe("resolved_local"); - expect( - result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), - ).toBe(true); - expect( - result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), - ).toBe(true); - } finally { - if (priorValue === undefined) { - delete process.env[envKey]; - } else { - process.env[envKey] = priorValue; - } - } + expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); }); it("marks web SecretRefs inactive when the web surface is disabled during local fallback", async () => { diff --git a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts index 188e7090915..7b1526f87c6 100644 --- a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts @@ -1,30 +1,15 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const readConfigFileSnapshotMock = vi.fn(); const loadConfig = vi.fn(() => 
({})); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readConfigFileSnapshot: () => readConfigFileSnapshotMock(), @@ -50,7 +35,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); readConfigFileSnapshotMock.mockResolvedValue({ exists: true, @@ -60,15 +45,8 @@ describe("runServiceRestart config pre-flight (#35862)", () => { }); loadConfig.mockReset(); loadConfig.mockReturnValue({}); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - service.readCommand.mockResolvedValue({ environment: {} }); - service.restart.mockResolvedValue({ outcome: "completed" }); - vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + resetLifecycleServiceMocks(); + stubEmptyGatewayEnv(); }); it("aborts restart when config is invalid", async () => { @@ -152,7 +130,7 @@ describe("runServiceStart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); readConfigFileSnapshotMock.mockResolvedValue({ exists: true, @@ -160,10 +138,7 @@ describe("runServiceStart config pre-flight (#35862)", () => { config: {}, issues: [], }); - service.isLoaded.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - 
service.restart.mockResolvedValue({ outcome: "completed" }); + resetLifecycleServiceMocks(); }); it("aborts start when config is invalid", async () => { diff --git a/src/cli/daemon-cli/lifecycle-core.test.ts b/src/cli/daemon-cli/lifecycle-core.test.ts index ff66bd17653..7503e21ae5e 100644 --- a/src/cli/daemon-cli/lifecycle-core.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.test.ts @@ -1,4 +1,12 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + runtimeLogs, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const loadConfig = vi.fn(() => ({ gateway: { @@ -8,28 +16,6 @@ const loadConfig = vi.fn(() => ({ }, })); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), @@ -49,7 +35,7 @@ describe("runServiceRestart token drift", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); loadConfig.mockReset(); loadConfig.mockReturnValue({ gateway: { @@ -58,19 +44,11 @@ describe("runServiceRestart token drift", () => { }, }, }); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); + resetLifecycleServiceMocks(); service.readCommand.mockResolvedValue({ environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" }, }); - service.restart.mockResolvedValue({ outcome: "completed" }); - 
vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); - vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); + stubEmptyGatewayEnv(); }); it("emits drift warning when enabled", async () => { diff --git a/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts new file mode 100644 index 00000000000..8e91db61664 --- /dev/null +++ b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts @@ -0,0 +1,45 @@ +import { vi } from "vitest"; + +export const runtimeLogs: string[] = []; + +export const defaultRuntime = { + log: (message: string) => runtimeLogs.push(message), + error: vi.fn(), + exit: (code: number) => { + throw new Error(`__exit__:${code}`); + }, +}; + +export const service = { + label: "TestService", + loadedText: "loaded", + notLoadedText: "not loaded", + install: vi.fn(), + uninstall: vi.fn(), + stop: vi.fn(), + isLoaded: vi.fn(), + readCommand: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), +}; + +export function resetLifecycleRuntimeLogs() { + runtimeLogs.length = 0; +} + +export function resetLifecycleServiceMocks() { + service.isLoaded.mockClear(); + service.readCommand.mockClear(); + service.restart.mockClear(); + service.isLoaded.mockResolvedValue(true); + service.readCommand.mockResolvedValue({ environment: {} }); + service.restart.mockResolvedValue({ outcome: "completed" }); +} + +export function stubEmptyGatewayEnv() { + vi.unstubAllEnvs(); + vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); +} diff --git a/src/commands/backup-verify.test.ts b/src/commands/backup-verify.test.ts index 9288d2fb8c1..a5f0384e61b 100644 --- a/src/commands/backup-verify.test.ts +++ b/src/commands/backup-verify.test.ts @@ -8,6 +8,92 @@ import { buildBackupArchiveRoot } from "./backup-shared.js"; import { 
backupVerifyCommand } from "./backup-verify.js"; import { backupCreateCommand } from "./backup.js"; +const TEST_ARCHIVE_ROOT = "2026-03-09T00-00-00.000Z-openclaw-backup"; + +const createBackupVerifyRuntime = () => ({ + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), +}); + +function createBackupManifest(assetArchivePath: string) { + return { + schemaVersion: 1, + createdAt: "2026-03-09T00:00:00.000Z", + archiveRoot: TEST_ARCHIVE_ROOT, + runtimeVersion: "test", + platform: process.platform, + nodeVersion: process.version, + assets: [ + { + kind: "state", + sourcePath: "/tmp/.openclaw", + archivePath: assetArchivePath, + }, + ], + }; +} + +async function withBrokenArchiveFixture( + options: { + tempPrefix: string; + manifestAssetArchivePath: string; + payloads: Array<{ fileName: string; contents: string; archivePath?: string }>; + buildTarEntries?: (paths: { manifestPath: string; payloadPaths: string[] }) => string[]; + }, + run: (archivePath: string) => Promise, +) { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), options.tempPrefix)); + const archivePath = path.join(tempDir, "broken.tar.gz"); + const manifestPath = path.join(tempDir, "manifest.json"); + const payloadSpecs = await Promise.all( + options.payloads.map(async (payload) => { + const payloadPath = path.join(tempDir, payload.fileName); + await fs.writeFile(payloadPath, payload.contents, "utf8"); + return { + path: payloadPath, + archivePath: payload.archivePath ?? 
options.manifestAssetArchivePath, + }; + }), + ); + const payloadEntryPathBySource = new Map( + payloadSpecs.map((payload) => [payload.path, payload.archivePath]), + ); + + try { + await fs.writeFile( + manifestPath, + `${JSON.stringify(createBackupManifest(options.manifestAssetArchivePath), null, 2)}\n`, + "utf8", + ); + await tar.c( + { + file: archivePath, + gzip: true, + portable: true, + preservePaths: true, + onWriteEntry: (entry) => { + if (entry.path === manifestPath) { + entry.path = `${TEST_ARCHIVE_ROOT}/manifest.json`; + return; + } + const payloadEntryPath = payloadEntryPathBySource.get(entry.path); + if (payloadEntryPath) { + entry.path = payloadEntryPath; + } + }, + }, + options.buildTarEntries?.({ + manifestPath, + payloadPaths: payloadSpecs.map((payload) => payload.path), + }) ?? [manifestPath, ...payloadSpecs.map((payload) => payload.path)], + ); + await run(archivePath); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } +} + describe("backupVerifyCommand", () => { let tempHome: TempHomeEnv; @@ -26,12 +112,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "hello\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const created = await backupCreateCommand(runtime, { output: archiveDir, nowMs }); const verified = await backupVerifyCommand(runtime, { archive: created.archivePath }); @@ -53,12 +134,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(root, "payload", "data.txt"), "x\n", "utf8"); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, ["root"]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: 
archivePath })).rejects.toThrow( /expected exactly one backup manifest entry/i, ); @@ -95,12 +171,7 @@ describe("backupVerifyCommand", () => { ); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, [rootName]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( /missing payload for manifest asset/i, ); @@ -110,119 +181,37 @@ describe("backupVerifyCommand", () => { }); it("fails when archive paths contain traversal segments", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-traversal-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const traversalPath = `${rootName}/payload/../escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: traversalPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = traversalPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /path traversal segments/i, - ); - } 
finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-traversal-", + manifestAssetArchivePath: traversalPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /path traversal segments/i, + ); + }, + ); }); it("fails when archive paths contain backslashes", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-backslash-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const invalidPath = `${rootName}/payload\\..\\escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: invalidPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = invalidPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /forward 
slashes/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-backslash-", + manifestAssetArchivePath: invalidPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /forward slashes/i, + ); + }, + ); }); it("ignores payload manifest.json files when locating the backup manifest", async () => { @@ -251,12 +240,7 @@ describe("backupVerifyCommand", () => { "utf8", ); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const created = await backupCreateCommand(runtime, { output: archiveDir, includeWorkspace: true, @@ -274,119 +258,44 @@ describe("backupVerifyCommand", () => { }); it("fails when the archive contains duplicate root manifest entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-manifest-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: `${rootName}/payload/posix/tmp/.openclaw/payload.txt`, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-manifest-", + 
manifestAssetArchivePath: payloadArchivePath, + payloads: [{ fileName: "payload.txt", contents: "payload\n" }], + buildTarEntries: ({ manifestPath, payloadPaths }) => [ + manifestPath, + manifestPath, + ...payloadPaths, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - } - }, - }, - [manifestPath, manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /expected exactly one backup manifest entry, found 2/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /expected exactly one backup manifest entry, found 2/i, + ); + }, + ); }); it("fails when the archive contains duplicate payload entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-payload-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPathA = path.join(tempDir, "payload-a.txt"); - const payloadPathB = path.join(tempDir, "payload-b.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const payloadArchivePath = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - 
runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: payloadArchivePath, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-payload-", + manifestAssetArchivePath: payloadArchivePath, + payloads: [ + { fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath }, + { fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath }, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPathA, "payload-a\n", "utf8"); - await fs.writeFile(payloadPathB, "payload-b\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPathA || entry.path === payloadPathB) { - entry.path = payloadArchivePath; - } - }, - }, - [manifestPath, payloadPathA, payloadPathB], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /duplicate entry path/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /duplicate entry path/i, + ); + }, + ); }); }); diff --git a/src/commands/backup.test.ts b/src/commands/backup.test.ts index 349714e4d15..b774e58bc2d 100644 --- a/src/commands/backup.test.ts +++ b/src/commands/backup.test.ts @@ -41,6 +41,41 @@ describe("backup commands", () => { await tempHome.restore(); }); + async function 
withInvalidWorkspaceBackupConfig( + fn: (runtime: { + log: ReturnType; + error: ReturnType; + exit: ReturnType; + }) => Promise, + ) { + const stateDir = path.join(tempHome.home, ".openclaw"); + const configPath = path.join(tempHome.home, "custom-config.json"); + process.env.OPENCLAW_CONFIG_PATH = configPath; + await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); + await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + }; + + try { + return await fn(runtime); + } finally { + delete process.env.OPENCLAW_CONFIG_PATH; + } + } + + function expectWorkspaceCoveredByState( + plan: Awaited>, + ) { + expect(plan.included).toHaveLength(1); + expect(plan.included[0]?.kind).toBe("state"); + expect(plan.skipped).toEqual( + expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), + ); + } + it("collapses default config, credentials, and workspace into the state backup root", async () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); @@ -50,12 +85,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "workspace", "SOUL.md"), "# soul\n", "utf8"); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); }); it("orders coverage checks by canonical path so symlinked workspaces do not duplicate state", async () => { @@ -84,12 +114,7 @@ describe("backup commands", () => { ); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - 
expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); } finally { await fs.rm(symlinkDir, { recursive: true, force: true }); } @@ -336,41 +361,15 @@ describe("backup commands", () => { }); it("fails fast when config is invalid and workspace backup is enabled", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { await expect(backupCreateCommand(runtime, { dryRun: true })).rejects.toThrow( /--no-include-workspace/i, ); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("allows explicit partial backups when config is invalid", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { const result = await backupCreateCommand(runtime, { dryRun: true, includeWorkspace: false, @@ -378,9 +377,7 @@ describe("backup commands", () => { expect(result.includeWorkspace).toBe(false); expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false); - } finally { - delete 
process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("backs up only the active config file when --only-config is requested", async () => { diff --git a/src/commands/daemon-install-helpers.test.ts b/src/commands/daemon-install-helpers.test.ts index 704c193880c..931a983a8ee 100644 --- a/src/commands/daemon-install-helpers.test.ts +++ b/src/commands/daemon-install-helpers.test.ts @@ -1,6 +1,7 @@ import { afterEach, describe, expect, it, vi } from "vitest"; const mocks = vi.hoisted(() => ({ + loadAuthProfileStoreForSecretsRuntime: vi.fn(), resolvePreferredNodePath: vi.fn(), resolveGatewayProgramArguments: vi.fn(), resolveSystemNodeInfo: vi.fn(), @@ -8,6 +9,10 @@ const mocks = vi.hoisted(() => ({ buildServiceEnvironment: vi.fn(), })); +vi.mock("../agents/auth-profiles.js", () => ({ + loadAuthProfileStoreForSecretsRuntime: mocks.loadAuthProfileStoreForSecretsRuntime, +})); + vi.mock("../daemon/runtime-paths.js", () => ({ resolvePreferredNodePath: mocks.resolvePreferredNodePath, resolveSystemNodeInfo: mocks.resolveSystemNodeInfo, @@ -63,6 +68,10 @@ function mockNodeGatewayPlanFixture( programArguments: ["node", "gateway"], workingDirectory, }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: {}, + }); mocks.resolveSystemNodeInfo.mockResolvedValue({ path: "/opt/node", version, @@ -232,6 +241,67 @@ describe("buildGatewayInstallPlan", () => { expect(plan.environment.HOME).toBe("/Users/service"); expect(plan.environment.OPENCLAW_PORT).toBe("3000"); }); + + it("merges env-backed auth-profile refs into the service environment", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + "anthropic:default": { + type: "token", + provider: "anthropic", + tokenRef: { 
source: "env", provider: "default", id: "ANTHROPIC_TOKEN" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: { + OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret + ANTHROPIC_TOKEN: "ant-test-token", + }, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test"); + expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token"); + }); + + it("skips unresolved auth-profile env refs", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: {}, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBeUndefined(); + }); }); describe("gatewayInstallErrorHint", () => { diff --git a/src/commands/daemon-install-helpers.ts b/src/commands/daemon-install-helpers.ts index 7a3bd42e2fc..91248cb86a7 100644 --- a/src/commands/daemon-install-helpers.ts +++ b/src/commands/daemon-install-helpers.ts @@ -1,3 +1,7 @@ +import { + loadAuthProfileStoreForSecretsRuntime, + type AuthProfileStore, +} from "../agents/auth-profiles.js"; import { formatCliCommand } from "../cli/command-format.js"; import { collectConfigServiceEnvVars } from "../config/env-vars.js"; import type { OpenClawConfig } from "../config/types.js"; @@ -19,6 +23,33 @@ export type GatewayInstallPlan = { environment: Record; }; +function collectAuthProfileServiceEnvVars(params: { + env: Record; + authStore?: AuthProfileStore; +}): Record { + const authStore = params.authStore ?? loadAuthProfileStoreForSecretsRuntime(); + const entries: Record = {}; + + for (const credential of Object.values(authStore.profiles)) { + const ref = + credential.type === "api_key" + ? 
credential.keyRef + : credential.type === "token" + ? credential.tokenRef + : undefined; + if (!ref || ref.source !== "env") { + continue; + } + const value = params.env[ref.id]?.trim(); + if (!value) { + continue; + } + entries[ref.id] = value; + } + + return entries; +} + export async function buildGatewayInstallPlan(params: { env: Record; port: number; @@ -28,6 +59,7 @@ export async function buildGatewayInstallPlan(params: { warn?: DaemonInstallWarnFn; /** Full config to extract env vars from (env vars + inline env keys). */ config?: OpenClawConfig; + authStore?: AuthProfileStore; }): Promise { const { devMode, nodePath } = await resolveDaemonInstallRuntimeInputs({ env: params.env, @@ -61,6 +93,10 @@ export async function buildGatewayInstallPlan(params: { // Config env vars are added first so service-specific vars take precedence. const environment: Record = { ...collectConfigServiceEnvVars(params.config), + ...collectAuthProfileServiceEnvVars({ + env: params.env, + authStore: params.authStore, + }), }; Object.assign(environment, serviceEnvironment); diff --git a/src/commands/doctor-state-migrations.test.ts b/src/commands/doctor-state-migrations.test.ts index 4116a6fca6e..ec465632cfa 100644 --- a/src/commands/doctor-state-migrations.test.ts +++ b/src/commands/doctor-state-migrations.test.ts @@ -26,6 +26,32 @@ async function makeRootWithEmptyCfg() { return { root, cfg }; } +function writeLegacyTelegramAllowFromStore(oauthDir: string) { + fs.writeFileSync( + path.join(oauthDir, "telegram-allowFrom.json"), + JSON.stringify( + { + version: 1, + allowFrom: ["123456"], + }, + null, + 2, + ) + "\n", + "utf-8", + ); +} + +async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenClawConfig }) { + const oauthDir = ensureCredentialsDir(params.root); + writeLegacyTelegramAllowFromStore(oauthDir); + const detected = await detectLegacyStateMigrations({ + cfg: params.cfg, + env: { OPENCLAW_STATE_DIR: params.root } as NodeJS.ProcessEnv, + }); + const result 
= await runLegacyStateMigrations({ detected, now: () => 123 }); + return { oauthDir, detected, result }; +} + afterEach(async () => { resetAutoMigrateLegacyStateForTest(); resetAutoMigrateLegacyStateDirForTest(); @@ -277,30 +303,11 @@ describe("doctor legacy state migrations", () => { it("migrates legacy Telegram pairing allowFrom store to account-scoped default file", async () => { const { root, cfg } = await makeRootWithEmptyCfg(); - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); - - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)), ).toEqual(["telegram-default-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const target = path.join(oauthDir, "telegram-default-allowFrom.json"); @@ -323,30 +330,11 @@ describe("doctor legacy state migrations", () => { }, }, }; - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); - - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)).toSorted(), 
).toEqual(["telegram-bot1-allowFrom.json", "telegram-bot2-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const bot1Target = path.join(oauthDir, "telegram-bot1-allowFrom.json"); diff --git a/src/commands/models/list.auth-overview.test.ts b/src/commands/models/list.auth-overview.test.ts index 69807a5d7a7..65c324d4b42 100644 --- a/src/commands/models/list.auth-overview.test.ts +++ b/src/commands/models/list.auth-overview.test.ts @@ -1,7 +1,28 @@ import { describe, expect, it } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js"; +import { withEnv } from "../../test-utils/env.js"; import { resolveProviderAuthOverview } from "./list.auth-overview.js"; +function resolveOpenAiOverview(apiKey: string) { + return resolveProviderAuthOverview({ + provider: "openai", + cfg: { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey, + models: [], + }, + }, + }, + } as never, + store: { version: 1, profiles: {} } as never, + modelsPath: "/tmp/models.json", + }); +} + describe("resolveProviderAuthOverview", () => { it("does not throw when token profile only has tokenRef", () => { const overview = resolveProviderAuthOverview({ @@ -24,23 +45,9 @@ describe("resolveProviderAuthOverview", () => { }); it("renders marker-backed models.json auth as marker detail", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: NON_ENV_SECRETREF_MARKER, - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview(NON_ENV_SECRETREF_MARKER), + ); expect(overview.effective.kind).toBe("missing"); 
expect(overview.effective.detail).toBe("missing"); @@ -48,23 +55,9 @@ describe("resolveProviderAuthOverview", () => { }); it("keeps env-var-shaped models.json values masked to avoid accidental plaintext exposure", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview("OPENAI_API_KEY"), + ); expect(overview.effective.kind).toBe("missing"); expect(overview.effective.detail).toBe("missing"); @@ -76,23 +69,7 @@ describe("resolveProviderAuthOverview", () => { const prior = process.env.OPENAI_API_KEY; process.env.OPENAI_API_KEY = "sk-openai-from-env"; // pragma: allowlist secret try { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = resolveOpenAiOverview("OPENAI_API_KEY"); expect(overview.effective.kind).toBe("env"); expect(overview.effective.detail).not.toContain("OPENAI_API_KEY"); } finally { diff --git a/src/commands/ollama-setup.test.ts b/src/commands/ollama-setup.test.ts index 124254c53b2..0b9b5d0e414 100644 --- a/src/commands/ollama-setup.test.ts +++ b/src/commands/ollama-setup.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import type { RuntimeEnv } from "../runtime.js"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; import type { 
WizardPrompter } from "../wizard/prompts.js"; import { configureOllamaNonInteractive, @@ -23,27 +24,6 @@ vi.mock("./oauth-env.js", () => ({ isRemoteEnvironment: isRemoteEnvironmentMock, })); -function jsonResponse(body: unknown, status = 200): Response { - return new Response(JSON.stringify(body), { - status, - headers: { "Content-Type": "application/json" }, - }); -} - -function requestUrl(input: string | URL | Request): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - return input.url; -} - -function requestBody(body: BodyInit | null | undefined): string { - return typeof body === "string" ? body : "{}"; -} - function createOllamaFetchMock(params: { tags?: string[]; show?: Record; @@ -61,7 +41,7 @@ function createOllamaFetchMock(params: { return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) }); } if (url.endsWith("/api/show")) { - const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; const contextWindow = body.name ? params.show?.[body.name] : undefined; return contextWindow ? jsonResponse({ model_info: { "llama.context_length": contextWindow } }) @@ -77,6 +57,45 @@ function createOllamaFetchMock(params: { }); } +function createModePrompter( + mode: "local" | "remote", + params?: { confirm?: boolean }, +): WizardPrompter { + return { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce(mode), + ...(params?.confirm !== undefined + ? 
{ confirm: vi.fn().mockResolvedValueOnce(params.confirm) } + : {}), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; +} + +function createSignedOutRemoteFetchMock() { + return createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ + jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), + jsonResponse({ username: "testuser" }), + ], + }); +} + +function createDefaultOllamaConfig(primary: string) { + return { + agents: { defaults: { model: { primary } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }; +} + +function createRuntime() { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; +} + describe("ollama setup", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -86,11 +105,7 @@ describe("ollama setup", () => { }); it("returns suggested default model for local mode", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -101,11 +116,7 @@ describe("ollama setup", () => { }); it("returns suggested default model for remote mode", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("remote"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -116,11 +127,7 @@ describe("ollama setup", () => { }); it("mode selection affects model ordering (local)", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - 
select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] }); vi.stubGlobal("fetch", fetchMock); @@ -134,20 +141,8 @@ describe("ollama setup", () => { }); it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - confirm: vi.fn().mockResolvedValueOnce(true), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; - - const fetchMock = createOllamaFetchMock({ - tags: ["llama3:8b"], - meResponses: [ - jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - jsonResponse({ username: "testuser" }), - ], - }); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -158,20 +153,8 @@ describe("ollama setup", () => { it("cloud+local mode does not open browser in remote environment", async () => { isRemoteEnvironmentMock.mockReturnValue(true); - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - confirm: vi.fn().mockResolvedValueOnce(true), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; - - const fetchMock = createOllamaFetchMock({ - tags: ["llama3:8b"], - meResponses: [ - jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - jsonResponse({ username: "testuser" }), - ], - }); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter 
}); @@ -180,11 +163,7 @@ describe("ollama setup", () => { }); it("local mode does not trigger cloud auth", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -258,10 +237,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), prompter, }); @@ -276,10 +252,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), prompter, }); @@ -292,10 +265,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/kimi-k2.5:cloud" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/kimi-k2.5:cloud"), prompter, }); @@ -324,12 +294,7 @@ describe("ollama setup", () => { pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }), }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: { @@ -362,12 +327,7 @@ 
describe("ollama setup", () => { pullResponse: new Response('{"status":"success"}\n', { status: 200 }), }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: {}, @@ -379,7 +339,7 @@ describe("ollama setup", () => { }); const pullRequest = fetchMock.mock.calls[1]?.[1]; - expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); + expect(JSON.parse(requestBodyText(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); expect(result.agents?.defaults?.model).toEqual( expect.objectContaining({ primary: "ollama/llama3.2:latest" }), ); @@ -388,12 +348,7 @@ describe("ollama setup", () => { it("accepts cloud models in non-interactive mode without pulling", async () => { const fetchMock = createOllamaFetchMock({ tags: [] }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: {}, diff --git a/src/commands/onboard-auth.config-core.ts b/src/commands/onboard-auth.config-core.ts index 4bda29df1bf..8c41bfb939c 100644 --- a/src/commands/onboard-auth.config-core.ts +++ b/src/commands/onboard-auth.config-core.ts @@ -85,6 +85,29 @@ import { MODELSTUDIO_DEFAULT_MODEL_REF, } from "./onboard-auth.models.js"; +function mergeProviderModels( + existingProvider: Record | undefined, + defaultModels: T[], +): T[] { + const existingModels = Array.isArray(existingProvider?.models) + ? 
(existingProvider.models as T[]) + : []; + const mergedModels = [...existingModels]; + const seen = new Set(existingModels.map((model) => model.id)); + for (const model of defaultModels) { + if (!seen.has(model.id)) { + mergedModels.push(model); + seen.add(model.id); + } + } + return mergedModels; +} + +function getNormalizedProviderApiKey(existingProvider: Record | undefined) { + const { apiKey } = (existingProvider ?? {}) as { apiKey?: string }; + return typeof apiKey === "string" ? apiKey.trim() || undefined : undefined; +} + export function applyZaiProviderConfig( cfg: OpenClawConfig, params?: { endpoint?: string; modelId?: string }, @@ -100,7 +123,6 @@ export function applyZaiProviderConfig( const providers = { ...cfg.models?.providers }; const existingProvider = providers.zai; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildZaiModelDefinition({ id: "glm-5" }), @@ -109,21 +131,13 @@ export function applyZaiProviderConfig( buildZaiModelDefinition({ id: "glm-4.7-flashx" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); const baseUrl = params?.endpoint ? 
resolveZaiBaseUrl(params.endpoint) @@ -256,12 +270,11 @@ export function applySyntheticProviderConfig(cfg: OpenClawConfig): OpenClawConfi (model) => !existingModels.some((existing) => existing.id === model.id), ), ]; - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.synthetic = { ...existingProviderRest, baseUrl: SYNTHETIC_BASE_URL, @@ -609,7 +622,6 @@ function applyModelStudioProviderConfigWithBaseUrl( const providers = { ...cfg.models?.providers }; const existingProvider = providers.modelstudio; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildModelStudioModelDefinition({ id: "qwen3.5-plus" }), @@ -622,21 +634,13 @@ function applyModelStudioProviderConfigWithBaseUrl( buildModelStudioModelDefinition({ id: "kimi-k2.5" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? 
existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.modelstudio = { ...existingProviderRest, diff --git a/src/config/config.talk-validation.test.ts b/src/config/config.talk-validation.test.ts index cb948d75c75..d2fb463613c 100644 --- a/src/config/config.talk-validation.test.ts +++ b/src/config/config.talk-validation.test.ts @@ -8,38 +8,42 @@ describe("talk config validation fail-closed behavior", () => { vi.restoreAllMocks(); }); + async function expectInvalidTalkConfig(config: unknown, messagePattern: RegExp) { + await withTempHomeConfig(config, async () => { + const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + + let thrown: unknown; + try { + loadConfig(); + } catch (error) { + thrown = error; + } + + expect(thrown).toBeInstanceOf(Error); + expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); + expect((thrown as Error).message).toMatch(messagePattern); + expect(consoleSpy).toHaveBeenCalled(); + }); + } + it.each([ ["boolean", true], ["string", "1500"], ["float", 1500.5], ])("rejects %s talk.silenceTimeoutMs during config load", async (_label, value) => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { silenceTimeoutMs: value, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/silenceTimeoutMs|talk/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /silenceTimeoutMs|talk/i, ); }); it("rejects talk.provider when it does not match talk.providers during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { 
agents: { list: [{ id: "main" }] }, talk: { @@ -51,26 +55,12 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|talk\.providers|acme/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|talk\.providers|acme/i, ); }); it("rejects multi-provider talk config without talk.provider during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -84,21 +74,7 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|required/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|required/i, ); }); }); diff --git a/src/config/types.browser.ts b/src/config/types.browser.ts index 57d036bd88c..5f8e28a0ebe 100644 --- a/src/config/types.browser.ts +++ b/src/config/types.browser.ts @@ -4,7 +4,7 @@ export type BrowserProfileConfig = { /** CDP URL for this profile (use for remote Chrome). */ cdpUrl?: string; /** Profile driver (default: openclaw). */ - driver?: "openclaw" | "clawd" | "extension"; + driver?: "openclaw" | "clawd" | "extension" | "existing-session"; /** If true, never launch a browser for this profile; only attach. Falls back to browser.attachOnly. 
*/ attachOnly?: boolean; /** Profile color (hex). Auto-assigned at creation. */ diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index 0064afddd20..741b4bcc0c9 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -360,7 +360,12 @@ export const OpenClawSchema = z cdpPort: z.number().int().min(1).max(65535).optional(), cdpUrl: z.string().optional(), driver: z - .union([z.literal("openclaw"), z.literal("clawd"), z.literal("extension")]) + .union([ + z.literal("openclaw"), + z.literal("clawd"), + z.literal("extension"), + z.literal("existing-session"), + ]) .optional(), attachOnly: z.boolean().optional(), color: HexColorSchema, diff --git a/src/cron/isolated-agent.lane.test.ts b/src/cron/isolated-agent.lane.test.ts index 5d26faff327..3790c5e511a 100644 --- a/src/cron/isolated-agent.lane.test.ts +++ b/src/cron/isolated-agent.lane.test.ts @@ -1,6 +1,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -9,27 +10,6 @@ import { writeSessionStoreEntries, } from "./isolated-agent.test-harness.js"; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - function lastEmbeddedLane(): string | undefined { const calls = vi.mocked(runEmbeddedPiAgent).mock.calls; expect(calls.length).toBeGreaterThan(0); @@ -45,11 +25,11 @@ async function runLaneCase(home: string, lane?: string) { 
lastTo: "", }, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob({ kind: "agentTurn", message: "do it", deliver: false }), message: "do it", sessionKey: "cron:job-1", diff --git a/src/cron/isolated-agent.model-formatting.test.ts b/src/cron/isolated-agent.model-formatting.test.ts index e78f251dc8b..f9732a32d31 100644 --- a/src/cron/isolated-agent.model-formatting.test.ts +++ b/src/cron/isolated-agent.model-formatting.test.ts @@ -2,6 +2,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -13,27 +14,6 @@ import type { CronJob } from "./types.js"; const withTempHome = withTempCronHome; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - /** * Extract the provider and model from the last runEmbeddedPiAgent call. */ @@ -62,7 +42,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { }, ...options.storeEntries, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const jobPayload = options.jobPayload ?? 
{ kind: "agentTurn" as const, @@ -72,7 +52,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { const res = await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath, options.cfgOverrides), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob(jobPayload), message: DEFAULT_MESSAGE, sessionKey: options.sessionKey ?? "cron:job-1", @@ -310,7 +290,7 @@ describe("cron model formatting and precedence edge cases", () => { // Step 2: No job model, session store says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, storeEntries: { @@ -327,7 +307,7 @@ describe("cron model formatting and precedence edge cases", () => { // Step 3: Job payload says anthropic, session store still says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step3 = await runTurn(home, { jobPayload: { kind: "agentTurn", @@ -365,7 +345,7 @@ describe("cron model formatting and precedence edge cases", () => { // Run 2: no override — must revert to default anthropic vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const r2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }); diff --git a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts index b9c0fddb3a3..2cdb6ee0048 100644 --- a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts +++ b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts @@ -133,6 +133,16 @@ async function runTelegramDeliveryResult(bestEffort: boolean) { return outcome; } +function 
expectSuccessfulTelegramTextDelivery(params: { + res: Awaited>; + deps: CliDeps; +}): void { + expect(params.res.status).toBe("ok"); + expect(params.res.delivered).toBe(true); + expect(params.res.deliveryAttempted).toBe(true); + expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); +} + async function runSignalDeliveryResult(bestEffort: boolean) { let outcome: | { @@ -379,31 +389,11 @@ describe("runCronIsolatedAgentTurn", () => { }); it("delivers text directly when best-effort is disabled", async () => { - await withTempHome(async (home) => { - const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); - const deps = createCliDeps(); - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expectDirectTelegramDelivery(deps, { - chatId: "123", - text: "hello from cron", - }); + const { res, deps } = await runTelegramDeliveryResult(false); + expectSuccessfulTelegramTextDelivery({ res, deps }); + expectDirectTelegramDelivery(deps, { + chatId: "123", + text: "hello from cron", }); }); @@ -459,10 +449,7 @@ describe("runCronIsolatedAgentTurn", () => { }, }); - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expectSuccessfulTelegramTextDelivery({ res, deps }); expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(2); expect(deps.sendMessageTelegram).toHaveBeenLastCalledWith( "123", @@ -490,10 +477,7 @@ describe("runCronIsolatedAgentTurn", () => { it("delivers text directly when best-effort is enabled", async () => { const { res, deps } = await runTelegramDeliveryResult(true); 
- expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expectSuccessfulTelegramTextDelivery({ res, deps }); expectDirectTelegramDelivery(deps, { chatId: "123", text: "hello from cron", diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index ba43715ba28..4c624cfeec1 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -31,6 +31,25 @@ const launchdRestartHandoffState = vi.hoisted(() => ({ })); const defaultProgramArguments = ["node", "-e", "process.exit(0)"]; +function expectLaunchctlEnableBootstrapOrder(env: Record) { + const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const plistPath = resolveLaunchAgentPlistPath(env); + const serviceId = `${domain}/${label}`; + const enableIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "enable" && c[1] === serviceId, + ); + const bootstrapIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, + ); + + expect(enableIndex).toBeGreaterThanOrEqual(0); + expect(bootstrapIndex).toBeGreaterThanOrEqual(0); + expect(enableIndex).toBeLessThan(bootstrapIndex); + + return { domain, label, serviceId, bootstrapIndex }; +} + function normalizeLaunchctlArgs(file: string, args: string[]): string[] { if (file === "launchctl") { return args; @@ -219,25 +238,12 @@ describe("launchd bootstrap repair", () => { const repair = await repairLaunchAgentBootstrap({ env }); expect(repair.ok).toBe(true); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId, bootstrapIndex } = expectLaunchctlEnableBootstrapOrder(env); const kickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(bootstrapIndex).toBeLessThan(kickstartIndex); }); }); @@ -258,23 +264,10 @@ describe("launchd install", () => { programArguments: defaultProgramArguments, }); - const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); const installKickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(installKickstartIndex).toBe(-1); }); @@ -360,24 +353,13 @@ describe("launchd install", () => { stdout: new PassThrough(), }); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); const kickstartCalls = state.launchctlCalls.filter( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); expect(result).toEqual({ outcome: "completed" }); expect(kickstartCalls).toHaveLength(2); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); }); diff --git a/src/daemon/schtasks.startup-fallback.test.ts b/src/daemon/schtasks.startup-fallback.test.ts index 1a949856a09..efa200c439a 100644 --- a/src/daemon/schtasks.startup-fallback.test.ts +++ b/src/daemon/schtasks.startup-fallback.test.ts @@ -1,34 +1,19 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { PassThrough } from "node:stream"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { quoteCmdScriptArg } from "./cmd-argv.js"; - -const schtasksResponses = vi.hoisted( - () => [] as Array<{ code: number; stdout: string; stderr: string }>, -); -const schtasksCalls = vi.hoisted(() => [] as string[][]); -const inspectPortUsage = vi.hoisted(() => vi.fn()); -const killProcessTree = vi.hoisted(() => vi.fn()); +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksResponses, + withWindowsEnv, +} from "./test-helpers/schtasks-fixtures.js"; const childUnref = vi.hoisted(() => vi.fn()); const spawn = vi.hoisted(() => vi.fn(() => ({ unref: 
 childUnref }))); -vi.mock("./schtasks-exec.js", () => ({ - execSchtasks: async (argv: string[]) => { - schtasksCalls.push(argv); - return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" }; - }, -})); - -vi.mock("../infra/ports.js", () => ({ - inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args), -})); - -vi.mock("../process/kill-tree.js", () => ({ - killProcessTree: (...args: unknown[]) => killProcessTree(...args), -})); - vi.mock("node:child_process", async (importOriginal) => { const actual = await importOriginal(); return { @@ -43,6 +28,7 @@ const { readScheduledTaskRuntime, restartScheduledTask, resolveTaskScriptPath, + stopScheduledTask, } = await import("./schtasks.js"); function resolveStartupEntryPath(env: Record) { @@ -57,28 +43,23 @@ function resolveStartupEntryPath(env: Record) { ); } -async function withWindowsEnv( - run: (params: { tmpDir: string; env: Record }) => Promise, -) { - const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-startup-")); - const env = { - USERPROFILE: tmpDir, - APPDATA: path.join(tmpDir, "AppData", "Roaming"), - OPENCLAW_PROFILE: "default", - OPENCLAW_GATEWAY_PORT: "18789", - }; - try { - await run({ tmpDir, env }); - } finally { - await fs.rm(tmpDir, { recursive: true, force: true }); - } -} - +async function writeGatewayScript(env: Record, port = 18789) { + const scriptPath = resolveTaskScriptPath(env); + await fs.mkdir(path.dirname(scriptPath), { recursive: true }); + await fs.writeFile( + scriptPath, + [ + "@echo off", + `set "OPENCLAW_GATEWAY_PORT=${port}"`, + `"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`, + "", + ].join("\r\n"), + "utf8", + ); +}
 beforeEach(() => { - schtasksResponses.length = 0; - schtasksCalls.length = 0; - inspectPortUsage.mockReset(); - killProcessTree.mockReset(); + resetSchtasksBaseMocks(); spawn.mockClear(); childUnref.mockClear(); }); @@ -89,7 +70,7 @@ afterEach(() => { describe("Windows startup fallback", () => { it("falls back to a Startup-folder launcher when schtasks create is denied", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 5, stdout: "", stderr: "ERROR: Access is denied." }, @@ -124,7 +105,7 @@ describe("Windows startup fallback", () => { }); it("falls back to a Startup-folder launcher when schtasks create hangs", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 124, stdout: "", stderr: "schtasks timed out after 15000ms" }, @@ -148,7 +129,7 @@ describe("Windows startup fallback", () => { }); it("treats an installed Startup-folder launcher as loaded", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, @@ -161,7 +142,7 @@ describe("Windows startup fallback", () => { }); it("reports runtime from the gateway listener when using the Startup fallback", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, @@ -183,7 +164,7 @@ describe("Windows startup fallback", () => { }); it("restarts the Startup fallback by killing the current pid and relaunching
 the entry", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, @@ -211,4 +192,39 @@ describe("Windows startup fallback", () => { ); }); }); + + it("kills the Startup fallback runtime even when the CLI env omits the gateway port", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + schtasksResponses.push({ code: 0, stdout: "", stderr: "" }); + await writeGatewayScript(env); + await fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true }); + await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8"); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + const envWithoutPort = { ...env }; + delete envWithoutPort.OPENCLAW_GATEWAY_PORT; + await stopScheduledTask({ env: envWithoutPort, stdout }); + + expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 }); + }); + }); }); diff --git a/src/daemon/schtasks.stop.test.ts b/src/daemon/schtasks.stop.test.ts index 8142ff0d839..f501c2e4bed 100644 --- a/src/daemon/schtasks.stop.test.ts +++ b/src/daemon/schtasks.stop.test.ts @@ -1,34 +1,20 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { PassThrough } from "node:stream"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; - -const schtasksResponses = vi.hoisted( - () => [] as Array<{ code: number; stdout: string; stderr: string }>, -); -const schtasksCalls = vi.hoisted(() => [] as string[][]); -const inspectPortUsage = 
vi.hoisted(() => vi.fn()); -const killProcessTree = vi.hoisted(() => vi.fn()); +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksCalls, + schtasksResponses, + withWindowsEnv, +} from "./test-helpers/schtasks-fixtures.js"; const findVerifiedGatewayListenerPidsOnPortSync = vi.hoisted(() => vi.fn<(port: number) => number[]>(() => []), ); -vi.mock("./schtasks-exec.js", () => ({ - execSchtasks: async (argv: string[]) => { - schtasksCalls.push(argv); - return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" }; - }, -})); - -vi.mock("../infra/ports.js", () => ({ - inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args), -})); - -vi.mock("../process/kill-tree.js", () => ({ - killProcessTree: (...args: unknown[]) => killProcessTree(...args), -})); - vi.mock("../infra/gateway-processes.js", () => ({ findVerifiedGatewayListenerPidsOnPortSync: (port: number) => findVerifiedGatewayListenerPidsOnPortSync(port), @@ -37,23 +23,6 @@ vi.mock("../infra/gateway-processes.js", () => ({ const { restartScheduledTask, resolveTaskScriptPath, stopScheduledTask } = await import("./schtasks.js"); -async function withWindowsEnv( - run: (params: { tmpDir: string; env: Record }) => Promise, -) { - const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-stop-")); - const env = { - USERPROFILE: tmpDir, - APPDATA: path.join(tmpDir, "AppData", "Roaming"), - OPENCLAW_PROFILE: "default", - OPENCLAW_GATEWAY_PORT: "18789", - }; - try { - await run({ tmpDir, env }); - } finally { - await fs.rm(tmpDir, { recursive: true, force: true }); - } -} - async function writeGatewayScript(env: Record, port = 18789) { const scriptPath = resolveTaskScriptPath(env); await fs.mkdir(path.dirname(scriptPath), { recursive: true }); @@ -70,10 +39,7 @@ async function writeGatewayScript(env: Record, port = 18789) { } beforeEach(() => { - schtasksResponses.length = 0; - schtasksCalls.length = 0; - 
inspectPortUsage.mockReset(); - killProcessTree.mockReset(); + resetSchtasksBaseMocks(); findVerifiedGatewayListenerPidsOnPortSync.mockReset(); findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); inspectPortUsage.mockResolvedValue({ @@ -90,7 +56,7 @@ afterEach(() => { describe("Scheduled Task stop/restart cleanup", () => { it("kills lingering verified gateway listeners after schtasks stop", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { await writeGatewayScript(env); schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, @@ -168,7 +134,7 @@ describe("Scheduled Task stop/restart cleanup", () => { }); it("falls back to inspected gateway listeners when sync verification misses on Windows", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { await writeGatewayScript(env); schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, @@ -206,7 +172,7 @@ describe("Scheduled Task stop/restart cleanup", () => { }); it("kills lingering verified gateway listeners and waits for port release before restart", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { await writeGatewayScript(env); schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, diff --git a/src/daemon/schtasks.ts b/src/daemon/schtasks.ts index 3a92f0944fc..2216e93bfd9 100644 --- a/src/daemon/schtasks.ts +++ b/src/daemon/schtasks.ts @@ -161,6 +161,12 @@ export type ScheduledTaskInfo = { lastRunResult?: string; }; +function hasListenerPid( + listener: T, +): listener is T & { pid: number } { + return typeof listener.pid === "number"; +} + export function parseSchtasksQuery(output: string): ScheduledTaskInfo { const entries = parseKeyValueOutput(output, ":"); const info: ScheduledTaskInfo = {}; @@ -388,7 +394,7 @@ async function resolveScheduledTaskGatewayListenerPids(port: 
number): Promise listener.pid) - .filter((pid): pid is number => Number.isFinite(pid) && pid > 0), + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), ), ); } @@ -472,7 +478,7 @@ async function terminateBusyPortListeners(port: number): Promise { new Set( diagnostics.listeners .map((listener) => listener.pid) - .filter((pid): pid is number => Number.isFinite(pid) && pid > 0), + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), ), ); for (const pid of pids) { @@ -482,7 +488,7 @@ async function terminateBusyPortListeners(port: number): Promise { } async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise { - const port = resolveConfiguredGatewayPort(env); + const port = (await resolveScheduledTaskPort(env)) ?? resolveConfiguredGatewayPort(env); if (!port) { return { status: "unknown", @@ -496,7 +502,7 @@ async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise typeof item.pid === "number"); + const listener = diagnostics.listeners.find(hasListenerPid); return { status: diagnostics.status === "busy" ? "running" : "stopped", ...(listener?.pid ? { pid: listener.pid } : {}), diff --git a/src/daemon/test-helpers/schtasks-base-mocks.ts b/src/daemon/test-helpers/schtasks-base-mocks.ts new file mode 100644 index 00000000000..48933ecdd1c --- /dev/null +++ b/src/daemon/test-helpers/schtasks-base-mocks.ts @@ -0,0 +1,22 @@ +import { vi } from "vitest"; +import { + inspectPortUsage, + killProcessTree, + schtasksCalls, + schtasksResponses, +} from "./schtasks-fixtures.js"; + +vi.mock("../schtasks-exec.js", () => ({ + execSchtasks: async (argv: string[]) => { + schtasksCalls.push(argv); + return schtasksResponses.shift() ?? 
{ code: 0, stdout: "", stderr: "" }; + }, +})); + +vi.mock("../../infra/ports.js", () => ({ + inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args), +})); + +vi.mock("../../process/kill-tree.js", () => ({ + killProcessTree: (...args: unknown[]) => killProcessTree(...args), +})); diff --git a/src/daemon/test-helpers/schtasks-fixtures.ts b/src/daemon/test-helpers/schtasks-fixtures.ts new file mode 100644 index 00000000000..a89d7a0eb2e --- /dev/null +++ b/src/daemon/test-helpers/schtasks-fixtures.ts @@ -0,0 +1,34 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { vi } from "vitest"; + +export const schtasksResponses: Array<{ code: number; stdout: string; stderr: string }> = []; +export const schtasksCalls: string[][] = []; +export const inspectPortUsage = vi.fn(); +export const killProcessTree = vi.fn(); + +export async function withWindowsEnv( + prefix: string, + run: (params: { tmpDir: string; env: Record }) => Promise, +) { + const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const env = { + USERPROFILE: tmpDir, + APPDATA: path.join(tmpDir, "AppData", "Roaming"), + OPENCLAW_PROFILE: "default", + OPENCLAW_GATEWAY_PORT: "18789", + }; + try { + await run({ tmpDir, env }); + } finally { + await fs.rm(tmpDir, { recursive: true, force: true }); + } +} + +export function resetSchtasksBaseMocks() { + schtasksResponses.length = 0; + schtasksCalls.length = 0; + inspectPortUsage.mockReset(); + killProcessTree.mockReset(); +} diff --git a/src/discord/monitor/provider.proxy.test.ts b/src/discord/monitor/provider.proxy.test.ts index 0b45fd2a2e7..9a15dcef94b 100644 --- a/src/discord/monitor/provider.proxy.test.ts +++ b/src/discord/monitor/provider.proxy.test.ts @@ -123,6 +123,30 @@ describe("createDiscordGatewayPlugin", () => { }; } + async function registerGatewayClient(plugin: unknown) { + await ( + plugin as { + registerClient: (client: { options: { token: string } }) => Promise; + } + 
).registerClient({ + options: { token: "token-123" }, + }); + } + + async function expectGatewayRegisterFetchFailure(response: Response) { + const runtime = createRuntime(); + globalFetchMock.mockResolvedValue(response); + const plugin = createDiscordGatewayPlugin({ + discordConfig: {}, + runtime, + }); + + await expect(registerGatewayClient(plugin)).rejects.toThrow( + "Failed to get gateway information from Discord: fetch failed", + ); + expect(baseRegisterClientSpy).not.toHaveBeenCalled(); + } + beforeEach(() => { vi.stubGlobal("fetch", globalFetchMock); baseRegisterClientSpy.mockClear(); @@ -165,28 +189,12 @@ describe("createDiscordGatewayPlugin", () => { }); it("maps plain-text Discord 503 responses to fetch failed", async () => { - const runtime = createRuntime(); - globalFetchMock.mockResolvedValue({ + await expectGatewayRegisterFetchFailure({ ok: false, status: 503, text: async () => "upstream connect error or disconnect/reset before headers. reset reason: overflow", } as Response); - const plugin = createDiscordGatewayPlugin({ - discordConfig: {}, - runtime, - }); - - await expect( - ( - plugin as unknown as { - registerClient: (client: { options: { token: string } }) => Promise; - } - ).registerClient({ - options: { token: "token-123" }, - }), - ).rejects.toThrow("Failed to get gateway information from Discord: fetch failed"); - expect(baseRegisterClientSpy).not.toHaveBeenCalled(); }); it("uses proxy agent for gateway WebSocket when configured", async () => { @@ -257,28 +265,12 @@ describe("createDiscordGatewayPlugin", () => { }); it("maps body read failures to fetch failed", async () => { - const runtime = createRuntime(); - globalFetchMock.mockResolvedValue({ + await expectGatewayRegisterFetchFailure({ ok: true, status: 200, text: async () => { throw new Error("body stream closed"); }, } as unknown as Response); - const plugin = createDiscordGatewayPlugin({ - discordConfig: {}, - runtime, - }); - - await expect( - ( - plugin as unknown as { - 
registerClient: (client: { options: { token: string } }) => Promise; - } - ).registerClient({ - options: { token: "token-123" }, - }), - ).rejects.toThrow("Failed to get gateway information from Discord: fetch failed"); - expect(baseRegisterClientSpy).not.toHaveBeenCalled(); }); }); diff --git a/src/gateway/client.test.ts b/src/gateway/client.test.ts index 04217b96a65..876a6eb7ed1 100644 --- a/src/gateway/client.test.ts +++ b/src/gateway/client.test.ts @@ -344,6 +344,20 @@ describe("GatewayClient connect auth payload", () => { return parsed.params?.auth ?? {}; } + function connectRequestFrom(ws: MockWebSocket) { + const raw = ws.sent.find((frame) => frame.includes('"method":"connect"')); + expect(raw).toBeTruthy(); + return JSON.parse(raw ?? "{}") as { + id?: string; + params?: { + auth?: { + token?: string; + deviceToken?: string; + }; + }; + }; + } + function emitConnectChallenge(ws: MockWebSocket, nonce = "nonce-1") { ws.emitMessage( JSON.stringify({ @@ -354,6 +368,63 @@ describe("GatewayClient connect auth payload", () => { ); } + function startClientAndConnect(params: { client: GatewayClient; nonce?: string }) { + params.client.start(); + const ws = getLatestWs(); + ws.emitOpen(); + emitConnectChallenge(ws, params.nonce); + return { ws, connect: connectRequestFrom(ws) }; + } + + function emitConnectFailure( + ws: MockWebSocket, + connectId: string | undefined, + details: Record, + ) { + ws.emitMessage( + JSON.stringify({ + type: "res", + id: connectId, + ok: false, + error: { + code: "INVALID_REQUEST", + message: "unauthorized", + details, + }, + }), + ); + } + + async function expectRetriedConnectAuth(params: { + firstWs: MockWebSocket; + connectId: string | undefined; + failureDetails: Record; + }) { + emitConnectFailure(params.firstWs, params.connectId, params.failureDetails); + await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 }); + const ws = getLatestWs(); + ws.emitOpen(); + emitConnectChallenge(ws, "nonce-2"); + 
return connectFrameFrom(ws); + } + + async function expectNoReconnectAfterConnectFailure(params: { + client: GatewayClient; + firstWs: MockWebSocket; + connectId: string | undefined; + failureDetails: Record; + }) { + vi.useFakeTimers(); + try { + emitConnectFailure(params.firstWs, params.connectId, params.failureDetails); + await vi.advanceTimersByTimeAsync(30_000); + expect(wsInstances).toHaveLength(1); + } finally { + params.client.stop(); + vi.useRealTimers(); + } + } + it("uses explicit shared token and does not inject stored device token", () => { loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" }); const client = new GatewayClient({ @@ -457,37 +528,16 @@ describe("GatewayClient connect auth payload", () => { token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? 
"{}") as { - id?: string; - params?: { auth?: { token?: string; deviceToken?: string } }; - }; + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); expect(firstConnect.params?.auth?.token).toBe("shared-token"); expect(firstConnect.params?.auth?.deviceToken).toBeUndefined(); - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, - }, - }), - ); - - await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 }); - const ws2 = getLatestWs(); - ws2.emitOpen(); - emitConnectChallenge(ws2, "nonce-2"); - expect(connectFrameFrom(ws2)).toMatchObject({ + const retriedAuth = await expectRetriedConnectAuth({ + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, + }); + expect(retriedAuth).toMatchObject({ token: "shared-token", deviceToken: "stored-device-token", }); @@ -501,32 +551,13 @@ describe("GatewayClient connect auth payload", () => { token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? 
"{}") as { id?: string }; - - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" }, - }, - }), - ); - - await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 }); - const ws2 = getLatestWs(); - ws2.emitOpen(); - emitConnectChallenge(ws2, "nonce-2"); - expect(connectFrameFrom(ws2)).toMatchObject({ + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); + const retriedAuth = await expectRetriedConnectAuth({ + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" }, + }); + expect(retriedAuth).toMatchObject({ token: "shared-token", deviceToken: "stored-device-token", }); @@ -534,71 +565,33 @@ describe("GatewayClient connect auth payload", () => { }); it("does not auto-reconnect on AUTH_TOKEN_MISSING connect failures", async () => { - vi.useFakeTimers(); const client = new GatewayClient({ url: "ws://127.0.0.1:18789", token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? 
"{}") as { id?: string }; - - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_TOKEN_MISSING" }, - }, - }), - ); - - await vi.advanceTimersByTimeAsync(30_000); - expect(wsInstances).toHaveLength(1); - client.stop(); - vi.useRealTimers(); + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); + await expectNoReconnectAfterConnectFailure({ + client, + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_TOKEN_MISSING" }, + }); }); it("does not auto-reconnect on token mismatch when retry is not trusted", async () => { - vi.useFakeTimers(); loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" }); const client = new GatewayClient({ url: "wss://gateway.example.com:18789", token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? 
"{}") as { id?: string }; - - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, - }, - }), - ); - - await vi.advanceTimersByTimeAsync(30_000); - expect(wsInstances).toHaveLength(1); - client.stop(); - vi.useRealTimers(); + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); + await expectNoReconnectAfterConnectFailure({ + client, + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, + }); }); }); diff --git a/src/gateway/server-methods/agent-wait-dedupe.test.ts b/src/gateway/server-methods/agent-wait-dedupe.test.ts index c5204271983..4bbf2a575a0 100644 --- a/src/gateway/server-methods/agent-wait-dedupe.test.ts +++ b/src/gateway/server-methods/agent-wait-dedupe.test.ts @@ -1,4 +1,5 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { DedupeEntry } from "../server-shared.js"; import { __testing, readTerminalSnapshotFromGatewayDedupe, @@ -8,7 +9,7 @@ import { describe("agent wait dedupe helper", () => { function setRunEntry(params: { - dedupe: Map; + dedupe: Map; kind: "agent" | "chat"; runId: string; ts?: number; diff --git a/src/gateway/server-methods/chat.abort-authorization.test.ts b/src/gateway/server-methods/chat.abort-authorization.test.ts index 607e80b58ff..ed8a92e48a0 100644 --- a/src/gateway/server-methods/chat.abort-authorization.test.ts +++ b/src/gateway/server-methods/chat.abort-authorization.test.ts @@ -6,6 +6,30 @@ import { } from "./chat.abort.test-helpers.js"; import { chatHandlers } from "./chat.js"; +async function invokeSingleRunAbort({ + context, + runId = "run-1", + connId, + deviceId, + scopes, +}: { + context: ReturnType; + runId?: string; + connId: string; + deviceId: string; + scopes: string[]; +}) { + return await 
invokeChatAbortHandler({ + handler: chatHandlers["chat.abort"], + context, + request: { sessionKey: "main", runId }, + client: { + connId, + connect: { device: { id: deviceId }, scopes }, + }, + }); +} + describe("chat.abort authorization", () => { it("rejects explicit run aborts from other clients", async () => { const context = createChatAbortContext({ @@ -17,14 +41,11 @@ describe("chat.abort authorization", () => { ]), }); - const respond = await invokeChatAbortHandler({ - handler: chatHandlers["chat.abort"], + const respond = await invokeSingleRunAbort({ context, - request: { sessionKey: "main", runId: "run-1" }, - client: { - connId: "conn-other", - connect: { device: { id: "dev-other" }, scopes: ["operator.write"] }, - }, + connId: "conn-other", + deviceId: "dev-other", + scopes: ["operator.write"], }); const [ok, payload, error] = respond.mock.calls.at(-1) ?? []; @@ -92,14 +113,11 @@ describe("chat.abort authorization", () => { ]), }); - const respond = await invokeChatAbortHandler({ - handler: chatHandlers["chat.abort"], + const respond = await invokeSingleRunAbort({ context, - request: { sessionKey: "main", runId: "run-1" }, - client: { - connId: "conn-admin", - connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] }, - }, + connId: "conn-admin", + deviceId: "dev-admin", + scopes: ["operator.admin"], }); const [ok, payload] = respond.mock.calls.at(-1) ?? 
[]; diff --git a/src/gateway/server-runtime-config.test.ts b/src/gateway/server-runtime-config.test.ts index 205bac8cf3e..5c1354d7cd5 100644 --- a/src/gateway/server-runtime-config.test.ts +++ b/src/gateway/server-runtime-config.test.ts @@ -251,7 +251,7 @@ describe("resolveGatewayRuntimeConfig", () => { }); describe("HTTP security headers", () => { - it.each([ + const cases = [ { name: "resolves strict transport security headers from config", strictTransportSecurity: " max-age=31536000; includeSubDomains ", @@ -267,7 +267,13 @@ describe("resolveGatewayRuntimeConfig", () => { strictTransportSecurity: " ", expected: undefined, }, - ])("$name", async ({ strictTransportSecurity, expected }) => { + ] satisfies ReadonlyArray<{ + name: string; + strictTransportSecurity: string | false; + expected: string | undefined; + }>; + + it.each(cases)("$name", async ({ strictTransportSecurity, expected }) => { const result = await resolveGatewayRuntimeConfig({ cfg: { gateway: { diff --git a/src/gateway/server.auth.compat-baseline.test.ts b/src/gateway/server.auth.compat-baseline.test.ts index 8c6ea06978c..a606feab909 100644 --- a/src/gateway/server.auth.compat-baseline.test.ts +++ b/src/gateway/server.auth.compat-baseline.test.ts @@ -34,6 +34,27 @@ function expectAuthErrorDetails(params: { } } +async function expectSharedOperatorScopesCleared( + port: number, + auth: { token?: string; password?: string }, +) { + const ws = await openWs(port); + try { + const res = await connectReq(ws, { + ...auth, + scopes: ["operator.admin"], + device: null, + }); + expect(res.ok).toBe(true); + + const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false }); + expect(adminRes.ok).toBe(false); + expect(adminRes.error?.message).toBe("missing scope: operator.admin"); + } finally { + ws.close(); + } +} + describe("gateway auth compatibility baseline", () => { describe("token mode", () => { let server: Awaited>; @@ -64,21 +85,7 @@ describe("gateway auth compatibility baseline", () => { }); 
test("clears client-declared scopes for shared-token operator connects", async () => { - const ws = await openWs(port); - try { - const res = await connectReq(ws, { - token: "secret", - scopes: ["operator.admin"], - device: null, - }); - expect(res.ok).toBe(true); - - const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false }); - expect(adminRes.ok).toBe(false); - expect(adminRes.error?.message).toBe("missing scope: operator.admin"); - } finally { - ws.close(); - } + await expectSharedOperatorScopesCleared(port, { token: "secret" }); }); test("returns stable token-missing details for control ui without token", async () => { @@ -184,21 +191,7 @@ describe("gateway auth compatibility baseline", () => { }); test("clears client-declared scopes for shared-password operator connects", async () => { - const ws = await openWs(port); - try { - const res = await connectReq(ws, { - password: "secret", - scopes: ["operator.admin"], - device: null, - }); - expect(res.ok).toBe(true); - - const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false }); - expect(adminRes.ok).toBe(false); - expect(adminRes.error?.message).toBe("missing scope: operator.admin"); - } finally { - ws.close(); - } + await expectSharedOperatorScopesCleared(port, { password: "secret" }); }); }); diff --git a/src/gateway/server.talk-config.test.ts b/src/gateway/server.talk-config.test.ts index 6723f30cd45..a47addbb0e0 100644 --- a/src/gateway/server.talk-config.test.ts +++ b/src/gateway/server.talk-config.test.ts @@ -40,6 +40,7 @@ type TalkConfigPayload = { ui?: { seamColor?: string }; }; }; +type TalkConfig = NonNullable["talk"]>; const TALK_CONFIG_DEVICE_PATH = path.join( os.tmpdir(), `openclaw-talk-config-device-${process.pid}.json`, @@ -95,7 +96,7 @@ async function fetchTalkConfig( } function expectElevenLabsTalkConfig( - talk: TalkConfigPayload["config"] extends { talk?: infer T } ? 
T : never, + talk: TalkConfig | undefined, expected: { voiceId?: string; apiKey?: string | SecretRef; diff --git a/src/infra/agent-events.test.ts b/src/infra/agent-events.test.ts index 7f65ff5f752..0079a443c7b 100644 --- a/src/infra/agent-events.test.ts +++ b/src/infra/agent-events.test.ts @@ -91,13 +91,13 @@ describe("agent-events sequencing", () => { isControlUiVisible: true, }); registerAgentRunContext("run-ctx", { - verboseLevel: "high", + verboseLevel: "full", isHeartbeat: true, }); expect(getAgentRunContext("run-ctx")).toEqual({ sessionKey: "session-main", - verboseLevel: "high", + verboseLevel: "full", isHeartbeat: true, isControlUiVisible: true, }); diff --git a/src/infra/backoff.test.ts b/src/infra/backoff.test.ts new file mode 100644 index 00000000000..9181d832402 --- /dev/null +++ b/src/infra/backoff.test.ts @@ -0,0 +1,36 @@ +import { describe, expect, it, vi } from "vitest"; +import { computeBackoff, sleepWithAbort, type BackoffPolicy } from "./backoff.js"; + +describe("backoff helpers", () => { + const policy: BackoffPolicy = { + initialMs: 100, + maxMs: 250, + factor: 2, + jitter: 0.5, + }; + + it("treats attempts below one as the first backoff step", () => { + const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0); + try { + expect(computeBackoff(policy, 0)).toBe(100); + expect(computeBackoff(policy, 1)).toBe(100); + } finally { + randomSpy.mockRestore(); + } + }); + + it("adds jitter and clamps to maxMs", () => { + const randomSpy = vi.spyOn(Math, "random").mockReturnValue(1); + try { + expect(computeBackoff(policy, 2)).toBe(250); + expect(computeBackoff({ ...policy, maxMs: 450 }, 2)).toBe(300); + } finally { + randomSpy.mockRestore(); + } + }); + + it("returns immediately for non-positive sleep durations", async () => { + await expect(sleepWithAbort(0, AbortSignal.abort())).resolves.toBeUndefined(); + await expect(sleepWithAbort(-5)).resolves.toBeUndefined(); + }); +}); diff --git a/src/infra/backup-create.test.ts 
b/src/infra/backup-create.test.ts new file mode 100644 index 00000000000..a91d30c774a --- /dev/null +++ b/src/infra/backup-create.test.ts @@ -0,0 +1,84 @@ +import { describe, expect, it } from "vitest"; +import { formatBackupCreateSummary, type BackupCreateResult } from "./backup-create.js"; + +function makeResult(overrides: Partial = {}): BackupCreateResult { + return { + createdAt: "2026-01-01T00:00:00.000Z", + archiveRoot: "openclaw-backup-2026-01-01", + archivePath: "/tmp/openclaw-backup.tar.gz", + dryRun: false, + includeWorkspace: true, + onlyConfig: false, + verified: false, + assets: [], + skipped: [], + ...overrides, + }; +} + +describe("formatBackupCreateSummary", () => { + it("formats created archives with included and skipped paths", () => { + const lines = formatBackupCreateSummary( + makeResult({ + verified: true, + assets: [ + { + kind: "state", + sourcePath: "/state", + archivePath: "archive/state", + displayPath: "~/.openclaw", + }, + ], + skipped: [ + { + kind: "workspace", + sourcePath: "/workspace", + displayPath: "~/Projects/openclaw", + reason: "covered", + coveredBy: "~/.openclaw", + }, + ], + }), + ); + + expect(lines).toEqual([ + "Backup archive: /tmp/openclaw-backup.tar.gz", + "Included 1 path:", + "- state: ~/.openclaw", + "Skipped 1 path:", + "- workspace: ~/Projects/openclaw (covered by ~/.openclaw)", + "Created /tmp/openclaw-backup.tar.gz", + "Archive verification: passed", + ]); + }); + + it("formats dry runs and pluralized counts", () => { + const lines = formatBackupCreateSummary( + makeResult({ + dryRun: true, + assets: [ + { + kind: "config", + sourcePath: "/config", + archivePath: "archive/config", + displayPath: "~/.openclaw/config.json", + }, + { + kind: "oauth", + sourcePath: "/oauth", + archivePath: "archive/oauth", + displayPath: "~/.openclaw/oauth", + }, + ], + }), + ); + + expect(lines).toEqual([ + "Backup archive: /tmp/openclaw-backup.tar.gz", + "Included 2 paths:", + "- config: ~/.openclaw/config.json", + "- oauth: 
~/.openclaw/oauth", + "Dry run only; archive was not written.", + ]); + }); +}); diff --git a/src/infra/binaries.test.ts b/src/infra/binaries.test.ts new file mode 100644 index 00000000000..425a2696fbf --- /dev/null +++ b/src/infra/binaries.test.ts @@ -0,0 +1,38 @@ +import { describe, expect, it, vi } from "vitest"; +import type { runExec } from "../process/exec.js"; +import type { RuntimeEnv } from "../runtime.js"; +import { ensureBinary } from "./binaries.js"; + +describe("ensureBinary", () => { + it("passes through when the binary exists", async () => { + const exec: typeof runExec = vi.fn().mockResolvedValue({ + stdout: "", + stderr: "", + }); + const runtime: RuntimeEnv = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + }; + + await ensureBinary("node", exec, runtime); + + expect(exec).toHaveBeenCalledWith("which", ["node"]); + expect(runtime.error).not.toHaveBeenCalled(); + expect(runtime.exit).not.toHaveBeenCalled(); + }); + + it("logs and exits when the binary is missing", async () => { + const exec: typeof runExec = vi.fn().mockRejectedValue(new Error("missing")); + const error = vi.fn(); + const exit = vi.fn(() => { + throw new Error("exit"); + }); + + await expect(ensureBinary("ghost", exec, { log: vi.fn(), error, exit })).rejects.toThrow( + "exit", + ); + expect(error).toHaveBeenCalledWith("Missing required binary: ghost. 
Please install it."); + expect(exit).toHaveBeenCalledWith(1); + }); +}); diff --git a/src/infra/bonjour-ciao.test.ts b/src/infra/bonjour-ciao.test.ts new file mode 100644 index 00000000000..120c46d8dce --- /dev/null +++ b/src/infra/bonjour-ciao.test.ts @@ -0,0 +1,27 @@ +import { describe, expect, it, vi } from "vitest"; + +const logDebugMock = vi.hoisted(() => vi.fn()); + +vi.mock("../logger.js", () => ({ + logDebug: (...args: unknown[]) => logDebugMock(...args), +})); + +const { ignoreCiaoCancellationRejection } = await import("./bonjour-ciao.js"); + +describe("bonjour-ciao", () => { + it("ignores and logs ciao cancellation rejections", () => { + expect( + ignoreCiaoCancellationRejection(new Error("Ciao announcement cancelled by shutdown")), + ).toBe(true); + expect(logDebugMock).toHaveBeenCalledWith( + expect.stringContaining("ignoring unhandled ciao rejection"), + ); + }); + + it("keeps unrelated rejections visible", () => { + logDebugMock.mockReset(); + + expect(ignoreCiaoCancellationRejection(new Error("boom"))).toBe(false); + expect(logDebugMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/bonjour-errors.test.ts b/src/infra/bonjour-errors.test.ts new file mode 100644 index 00000000000..688335856c4 --- /dev/null +++ b/src/infra/bonjour-errors.test.ts @@ -0,0 +1,16 @@ +import { describe, expect, it } from "vitest"; +import { formatBonjourError } from "./bonjour-errors.js"; + +describe("formatBonjourError", () => { + it("formats named errors with their type prefix", () => { + const err = new Error("timed out"); + err.name = "AbortError"; + expect(formatBonjourError(err)).toBe("AbortError: timed out"); + }); + + it("falls back to plain error strings and non-error values", () => { + expect(formatBonjourError(new Error(""))).toBe("Error"); + expect(formatBonjourError("boom")).toBe("boom"); + expect(formatBonjourError(42)).toBe("42"); + }); +}); diff --git a/src/infra/boundary-file-read.test.ts b/src/infra/boundary-file-read.test.ts new file mode 
100644 index 00000000000..6869ace53f0 --- /dev/null +++ b/src/infra/boundary-file-read.test.ts @@ -0,0 +1,204 @@ +import path from "node:path"; +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const resolveBoundaryPathSyncMock = vi.hoisted(() => vi.fn()); +const resolveBoundaryPathMock = vi.hoisted(() => vi.fn()); +const openVerifiedFileSyncMock = vi.hoisted(() => vi.fn()); + +vi.mock("./boundary-path.js", () => ({ + resolveBoundaryPathSync: (...args: unknown[]) => resolveBoundaryPathSyncMock(...args), + resolveBoundaryPath: (...args: unknown[]) => resolveBoundaryPathMock(...args), +})); + +vi.mock("./safe-open-sync.js", () => ({ + openVerifiedFileSync: (...args: unknown[]) => openVerifiedFileSyncMock(...args), +})); + +const { canUseBoundaryFileOpen, openBoundaryFile, openBoundaryFileSync } = + await import("./boundary-file-read.js"); + +describe("boundary-file-read", () => { + beforeEach(() => { + resolveBoundaryPathSyncMock.mockReset(); + resolveBoundaryPathMock.mockReset(); + openVerifiedFileSyncMock.mockReset(); + }); + + it("recognizes the required sync fs surface", () => { + const validFs = { + openSync() {}, + closeSync() {}, + fstatSync() {}, + lstatSync() {}, + realpathSync() {}, + readFileSync() {}, + constants: {}, + } as never; + + expect(canUseBoundaryFileOpen(validFs)).toBe(true); + expect( + canUseBoundaryFileOpen({ + ...validFs, + openSync: undefined, + } as never), + ).toBe(false); + expect( + canUseBoundaryFileOpen({ + ...validFs, + constants: null, + } as never), + ).toBe(false); + }); + + it("maps sync boundary resolution into verified file opens", () => { + const stat = { size: 3 } as never; + const ioFs = { marker: "io" } as never; + const absolutePath = path.resolve("plugin.json"); + + resolveBoundaryPathSyncMock.mockReturnValue({ + canonicalPath: "/real/plugin.json", + rootCanonicalPath: "/real/root", + }); + openVerifiedFileSyncMock.mockReturnValue({ + ok: true, + path: "/real/plugin.json", + fd: 7, + stat, + }); + + 
const opened = openBoundaryFileSync({ + absolutePath: "plugin.json", + rootPath: "/workspace", + boundaryLabel: "plugin root", + ioFs, + }); + + expect(resolveBoundaryPathSyncMock).toHaveBeenCalledWith({ + absolutePath, + rootPath: "/workspace", + rootCanonicalPath: undefined, + boundaryLabel: "plugin root", + skipLexicalRootCheck: undefined, + }); + expect(openVerifiedFileSyncMock).toHaveBeenCalledWith({ + filePath: absolutePath, + resolvedPath: "/real/plugin.json", + rejectHardlinks: true, + maxBytes: undefined, + allowedType: undefined, + ioFs, + }); + expect(opened).toEqual({ + ok: true, + path: "/real/plugin.json", + fd: 7, + stat, + rootRealPath: "/real/root", + }); + }); + + it("returns validation errors when sync boundary resolution throws", () => { + const error = new Error("outside root"); + resolveBoundaryPathSyncMock.mockImplementation(() => { + throw error; + }); + + const opened = openBoundaryFileSync({ + absolutePath: "plugin.json", + rootPath: "/workspace", + boundaryLabel: "plugin root", + }); + + expect(opened).toEqual({ + ok: false, + reason: "validation", + error, + }); + expect(openVerifiedFileSyncMock).not.toHaveBeenCalled(); + }); + + it("guards against unexpected async sync-resolution results", () => { + resolveBoundaryPathSyncMock.mockReturnValue( + Promise.resolve({ + canonicalPath: "/real/plugin.json", + rootCanonicalPath: "/real/root", + }), + ); + + const opened = openBoundaryFileSync({ + absolutePath: "plugin.json", + rootPath: "/workspace", + boundaryLabel: "plugin root", + }); + + expect(opened.ok).toBe(false); + if (opened.ok) { + return; + } + expect(opened.reason).toBe("validation"); + expect(String(opened.error)).toContain("Unexpected async boundary resolution"); + }); + + it("awaits async boundary resolution before verifying the file", async () => { + const ioFs = { marker: "io" } as never; + const absolutePath = path.resolve("notes.txt"); + + resolveBoundaryPathMock.mockResolvedValue({ + canonicalPath: "/real/notes.txt", + 
rootCanonicalPath: "/real/root", + }); + openVerifiedFileSyncMock.mockReturnValue({ + ok: false, + reason: "validation", + error: new Error("blocked"), + }); + + const opened = await openBoundaryFile({ + absolutePath: "notes.txt", + rootPath: "/workspace", + boundaryLabel: "workspace", + aliasPolicy: { allowFinalSymlinkForUnlink: true }, + ioFs, + }); + + expect(resolveBoundaryPathMock).toHaveBeenCalledWith({ + absolutePath, + rootPath: "/workspace", + rootCanonicalPath: undefined, + boundaryLabel: "workspace", + policy: { allowFinalSymlinkForUnlink: true }, + skipLexicalRootCheck: undefined, + }); + expect(openVerifiedFileSyncMock).toHaveBeenCalledWith({ + filePath: absolutePath, + resolvedPath: "/real/notes.txt", + rejectHardlinks: true, + maxBytes: undefined, + allowedType: undefined, + ioFs, + }); + expect(opened).toEqual({ + ok: false, + reason: "validation", + error: expect.any(Error), + }); + }); + + it("maps async boundary resolution failures to validation errors", async () => { + const error = new Error("escaped"); + resolveBoundaryPathMock.mockRejectedValue(error); + + const opened = await openBoundaryFile({ + absolutePath: "notes.txt", + rootPath: "/workspace", + boundaryLabel: "workspace", + }); + + expect(opened).toEqual({ + ok: false, + reason: "validation", + error, + }); + expect(openVerifiedFileSyncMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/canvas-host-url.test.ts b/src/infra/canvas-host-url.test.ts new file mode 100644 index 00000000000..2ca7401a2bb --- /dev/null +++ b/src/infra/canvas-host-url.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, it } from "vitest"; +import { resolveCanvasHostUrl } from "./canvas-host-url.js"; + +describe("resolveCanvasHostUrl", () => { + it("returns undefined when no canvas port or usable host is available", () => { + expect(resolveCanvasHostUrl({})).toBeUndefined(); + expect(resolveCanvasHostUrl({ canvasPort: 3000, hostOverride: "127.0.0.1" })).toBeUndefined(); + }); + + it("prefers 
non-loopback host overrides and preserves explicit ports", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 3000, + hostOverride: " canvas.openclaw.ai ", + requestHost: "gateway.local:9000", + localAddress: "192.168.1.10", + }), + ).toBe("http://canvas.openclaw.ai:3000"); + }); + + it("falls back from rejected loopback overrides to request hosts", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 3000, + hostOverride: "127.0.0.1", + requestHost: "example.com:8443", + }), + ).toBe("http://example.com:3000"); + }); + + it("maps proxied default gateway ports to request-host ports or scheme defaults", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 18789, + requestHost: "gateway.example.com:9443", + forwardedProto: "https", + }), + ).toBe("https://gateway.example.com:9443"); + expect( + resolveCanvasHostUrl({ + canvasPort: 18789, + requestHost: "gateway.example.com", + forwardedProto: ["https", "http"], + }), + ).toBe("https://gateway.example.com:443"); + expect( + resolveCanvasHostUrl({ + canvasPort: 18789, + requestHost: "gateway.example.com", + }), + ).toBe("http://gateway.example.com:80"); + }); + + it("brackets ipv6 hosts and can fall back to local addresses", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 3000, + requestHost: "not a host", + localAddress: "2001:db8::1", + scheme: "https", + }), + ).toBe("https://[2001:db8::1]:3000"); + }); +}); diff --git a/src/infra/channel-activity.test.ts b/src/infra/channel-activity.test.ts new file mode 100644 index 00000000000..17791056f5b --- /dev/null +++ b/src/infra/channel-activity.test.ts @@ -0,0 +1,72 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + getChannelActivity, + recordChannelActivity, + resetChannelActivityForTest, +} from "./channel-activity.js"; + +describe("channel activity", () => { + beforeEach(() => { + resetChannelActivityForTest(); + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-01-08T00:00:00Z")); + }); + + 
afterEach(() => { + vi.useRealTimers(); + }); + + it("uses the default account for blank inputs and falls back to null timestamps", () => { + expect(getChannelActivity({ channel: "telegram" })).toEqual({ + inboundAt: null, + outboundAt: null, + }); + + recordChannelActivity({ + channel: "telegram", + accountId: " ", + direction: "inbound", + }); + + expect(getChannelActivity({ channel: "telegram", accountId: null })).toEqual({ + inboundAt: 1767830400000, + outboundAt: null, + }); + }); + + it("keeps inbound and outbound timestamps independent and trims account ids", () => { + recordChannelActivity({ + channel: "whatsapp", + accountId: " team-a ", + direction: "inbound", + at: 10, + }); + recordChannelActivity({ + channel: "whatsapp", + accountId: "team-a", + direction: "outbound", + at: 20, + }); + recordChannelActivity({ + channel: "whatsapp", + accountId: "team-a", + direction: "inbound", + at: 30, + }); + + expect(getChannelActivity({ channel: "whatsapp", accountId: " team-a " })).toEqual({ + inboundAt: 30, + outboundAt: 20, + }); + }); + + it("reset clears previously recorded activity", () => { + recordChannelActivity({ channel: "line", direction: "outbound", at: 7 }); + resetChannelActivityForTest(); + + expect(getChannelActivity({ channel: "line" })).toEqual({ + inboundAt: null, + outboundAt: null, + }); + }); +}); diff --git a/src/infra/channels-status-issues.test.ts b/src/infra/channels-status-issues.test.ts new file mode 100644 index 00000000000..92b4008707c --- /dev/null +++ b/src/infra/channels-status-issues.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it, vi } from "vitest"; + +const listChannelPluginsMock = vi.hoisted(() => vi.fn()); + +vi.mock("../channels/plugins/index.js", () => ({ + listChannelPlugins: () => listChannelPluginsMock(), +})); + +import { collectChannelStatusIssues } from "./channels-status-issues.js"; + +describe("collectChannelStatusIssues", () => { + it("returns no issues when payload accounts are missing or not arrays", () 
=> { + const collectTelegramIssues = vi.fn(() => [{ code: "telegram" }]); + listChannelPluginsMock.mockReturnValue([ + { id: "telegram", status: { collectStatusIssues: collectTelegramIssues } }, + ]); + + expect(collectChannelStatusIssues({})).toEqual([]); + expect(collectChannelStatusIssues({ channelAccounts: { telegram: { bad: true } } })).toEqual( + [], + ); + expect(collectTelegramIssues).not.toHaveBeenCalled(); + }); + + it("skips plugins without collectors and concatenates collector output in plugin order", () => { + const collectTelegramIssues = vi.fn(() => [{ code: "telegram.down" }]); + const collectSlackIssues = vi.fn(() => [{ code: "slack.warn" }, { code: "slack.auth" }]); + const telegramAccounts = [{ id: "tg-1" }]; + const slackAccounts = [{ id: "sl-1" }]; + listChannelPluginsMock.mockReturnValueOnce([ + { id: "discord" }, + { id: "telegram", status: { collectStatusIssues: collectTelegramIssues } }, + { id: "slack", status: { collectStatusIssues: collectSlackIssues } }, + ]); + + expect( + collectChannelStatusIssues({ + channelAccounts: { + discord: [{ id: "dc-1" }], + telegram: telegramAccounts, + slack: slackAccounts, + }, + }), + ).toEqual([{ code: "telegram.down" }, { code: "slack.warn" }, { code: "slack.auth" }]); + + expect(collectTelegramIssues).toHaveBeenCalledWith(telegramAccounts); + expect(collectSlackIssues).toHaveBeenCalledWith(slackAccounts); + }); +}); diff --git a/src/infra/cli-root-options.test.ts b/src/infra/cli-root-options.test.ts index e6907984ec0..6d7461a39e5 100644 --- a/src/infra/cli-root-options.test.ts +++ b/src/infra/cli-root-options.test.ts @@ -6,8 +6,11 @@ describe("isValueToken", () => { { value: "work", expected: true }, { value: "-1", expected: true }, { value: "-1.5", expected: true }, + { value: "-0.5", expected: true }, { value: "--", expected: false }, { value: "--dev", expected: false }, + { value: "-", expected: false }, + { value: "", expected: false }, { value: undefined, expected: false }, ])("classifies %j", ({ 
value, expected }) => { expect(isValueToken(value)).toBe(expected); @@ -24,6 +27,8 @@ describe("consumeRootOptionToken", () => { { args: ["--log-level", "-1.5"], index: 0, expected: 2 }, { args: ["--profile", "--no-color"], index: 0, expected: 1 }, { args: ["--profile", "--"], index: 0, expected: 1 }, + { args: ["x", "--profile", "work"], index: 1, expected: 2 }, + { args: ["--log-level", ""], index: 0, expected: 1 }, { args: ["--unknown"], index: 0, expected: 0 }, { args: [], index: 0, expected: 0 }, ])("consumes %j at %d", ({ args, index, expected }) => { diff --git a/src/infra/clipboard.test.ts b/src/infra/clipboard.test.ts new file mode 100644 index 00000000000..c511d430c3b --- /dev/null +++ b/src/infra/clipboard.test.ts @@ -0,0 +1,52 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const runCommandWithTimeoutMock = vi.hoisted(() => vi.fn()); + +vi.mock("../process/exec.js", () => ({ + runCommandWithTimeout: (...args: unknown[]) => runCommandWithTimeoutMock(...args), +})); + +const { copyToClipboard } = await import("./clipboard.js"); + +describe("copyToClipboard", () => { + beforeEach(() => { + runCommandWithTimeoutMock.mockReset(); + }); + + it("returns true on the first successful clipboard command", async () => { + runCommandWithTimeoutMock.mockResolvedValueOnce({ code: 0, killed: false }); + + await expect(copyToClipboard("hello")).resolves.toBe(true); + expect(runCommandWithTimeoutMock).toHaveBeenCalledWith(["pbcopy"], { + timeoutMs: 3000, + input: "hello", + }); + expect(runCommandWithTimeoutMock).toHaveBeenCalledTimes(1); + }); + + it("falls through failed attempts until a later command succeeds", async () => { + runCommandWithTimeoutMock + .mockRejectedValueOnce(new Error("missing pbcopy")) + .mockResolvedValueOnce({ code: 1, killed: false }) + .mockResolvedValueOnce({ code: 0, killed: false }); + + await expect(copyToClipboard("hello")).resolves.toBe(true); + expect(runCommandWithTimeoutMock.mock.calls.map((call) => 
call[0])).toEqual([ + ["pbcopy"], + ["xclip", "-selection", "clipboard"], + ["wl-copy"], + ]); + }); + + it("returns false when every clipboard backend fails or is killed", async () => { + runCommandWithTimeoutMock + .mockResolvedValueOnce({ code: 0, killed: true }) + .mockRejectedValueOnce(new Error("missing xclip")) + .mockResolvedValueOnce({ code: 1, killed: false }) + .mockRejectedValueOnce(new Error("missing clip.exe")) + .mockResolvedValueOnce({ code: 2, killed: false }); + + await expect(copyToClipboard("hello")).resolves.toBe(false); + expect(runCommandWithTimeoutMock).toHaveBeenCalledTimes(5); + }); +}); diff --git a/src/infra/dedupe.test.ts b/src/infra/dedupe.test.ts new file mode 100644 index 00000000000..035324e13c9 --- /dev/null +++ b/src/infra/dedupe.test.ts @@ -0,0 +1,57 @@ +import { describe, expect, it } from "vitest"; +import { createDedupeCache } from "./dedupe.js"; + +describe("createDedupeCache", () => { + it("ignores blank cache keys", () => { + const cache = createDedupeCache({ ttlMs: 1_000, maxSize: 10 }); + + expect(cache.check("", 100)).toBe(false); + expect(cache.check(undefined, 100)).toBe(false); + expect(cache.peek(null, 100)).toBe(false); + expect(cache.size()).toBe(0); + }); + + it("keeps entries indefinitely when ttlMs is zero or negative", () => { + const zeroTtlCache = createDedupeCache({ ttlMs: 0, maxSize: 10 }); + expect(zeroTtlCache.check("a", 100)).toBe(false); + expect(zeroTtlCache.check("a", 10_000)).toBe(true); + + const negativeTtlCache = createDedupeCache({ ttlMs: -100, maxSize: 10 }); + expect(negativeTtlCache.check("b", 100)).toBe(false); + expect(negativeTtlCache.peek("b", 10_000)).toBe(true); + }); + + it("touches duplicate reads so the newest key survives max-size pruning", () => { + const cache = createDedupeCache({ ttlMs: 10_000, maxSize: 2 }); + + expect(cache.check("a", 100)).toBe(false); + expect(cache.check("b", 200)).toBe(false); + expect(cache.check("a", 300)).toBe(true); + expect(cache.check("c", 
400)).toBe(false); + + expect(cache.peek("a", 500)).toBe(true); + expect(cache.peek("b", 500)).toBe(false); + expect(cache.peek("c", 500)).toBe(true); + }); + + it("clears itself when maxSize floors to zero", () => { + const cache = createDedupeCache({ ttlMs: 1_000, maxSize: 0.9 }); + + expect(cache.check("a", 100)).toBe(false); + expect(cache.size()).toBe(0); + expect(cache.peek("a", 200)).toBe(false); + }); + + it("supports explicit reset", () => { + const cache = createDedupeCache({ ttlMs: 1_000, maxSize: 10 }); + + expect(cache.check("a", 100)).toBe(false); + expect(cache.check("b", 200)).toBe(false); + expect(cache.size()).toBe(2); + + cache.clear(); + + expect(cache.size()).toBe(0); + expect(cache.peek("a", 300)).toBe(false); + }); +}); diff --git a/src/infra/detect-package-manager.test.ts b/src/infra/detect-package-manager.test.ts new file mode 100644 index 00000000000..57e06cf1a67 --- /dev/null +++ b/src/infra/detect-package-manager.test.ts @@ -0,0 +1,41 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { detectPackageManager } from "./detect-package-manager.js"; + +describe("detectPackageManager", () => { + it("prefers packageManager from package.json when supported", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ packageManager: "pnpm@10.8.1" }), + "utf8", + ); + await fs.writeFile(path.join(root, "package-lock.json"), "", "utf8"); + + await expect(detectPackageManager(root)).resolves.toBe("pnpm"); + }); + + it("falls back to lockfiles when package.json is missing or unsupported", async () => { + const bunRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile(path.join(bunRoot, "bun.lockb"), "", "utf8"); + await expect(detectPackageManager(bunRoot)).resolves.toBe("bun"); + + const npmRoot = await 
fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile( + path.join(npmRoot, "package.json"), + JSON.stringify({ packageManager: "yarn@4.0.0" }), + "utf8", + ); + await fs.writeFile(path.join(npmRoot, "package-lock.json"), "", "utf8"); + await expect(detectPackageManager(npmRoot)).resolves.toBe("npm"); + }); + + it("returns null when no package manager markers exist", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile(path.join(root, "package.json"), "{not-json}", "utf8"); + + await expect(detectPackageManager(root)).resolves.toBeNull(); + }); +}); diff --git a/src/infra/device-auth-store.test.ts b/src/infra/device-auth-store.test.ts new file mode 100644 index 00000000000..82a92492015 --- /dev/null +++ b/src/infra/device-auth-store.test.ts @@ -0,0 +1,109 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it, vi } from "vitest"; +import { withTempDir } from "../test-utils/temp-dir.js"; +import { + clearDeviceAuthToken, + loadDeviceAuthToken, + storeDeviceAuthToken, +} from "./device-auth-store.js"; + +function createEnv(stateDir: string): NodeJS.ProcessEnv { + return { + OPENCLAW_STATE_DIR: stateDir, + OPENCLAW_TEST_FAST: "1", + }; +} + +function deviceAuthFile(stateDir: string): string { + return path.join(stateDir, "identity", "device-auth.json"); +} + +describe("infra/device-auth-store", () => { + it("stores and loads device auth tokens under the configured state dir", async () => { + await withTempDir("openclaw-device-auth-", async (stateDir) => { + vi.spyOn(Date, "now").mockReturnValue(1234); + + const entry = storeDeviceAuthToken({ + deviceId: "device-1", + role: " operator ", + token: "secret", + scopes: [" operator.write ", "operator.read", "operator.read"], + env: createEnv(stateDir), + }); + + expect(entry).toEqual({ + token: "secret", + role: "operator", + scopes: ["operator.read", "operator.write"], + updatedAtMs: 1234, 
+ }); + expect( + loadDeviceAuthToken({ + deviceId: "device-1", + role: "operator", + env: createEnv(stateDir), + }), + ).toEqual(entry); + + const raw = await fs.readFile(deviceAuthFile(stateDir), "utf8"); + expect(raw.endsWith("\n")).toBe(true); + expect(JSON.parse(raw)).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + operator: entry, + }, + }); + }); + }); + + it("returns null for missing, invalid, or mismatched stores", async () => { + await withTempDir("openclaw-device-auth-", async (stateDir) => { + const env = createEnv(stateDir); + + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + + await fs.mkdir(path.dirname(deviceAuthFile(stateDir)), { recursive: true }); + await fs.writeFile(deviceAuthFile(stateDir), '{"version":2,"deviceId":"device-1"}\n', "utf8"); + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + + await fs.writeFile( + deviceAuthFile(stateDir), + '{"version":1,"deviceId":"device-2","tokens":{"operator":{"token":"x","role":"operator","scopes":[],"updatedAtMs":1}}}\n', + "utf8", + ); + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + }); + }); + + it("clears only the requested role and leaves unrelated tokens intact", async () => { + await withTempDir("openclaw-device-auth-", async (stateDir) => { + const env = createEnv(stateDir); + + storeDeviceAuthToken({ + deviceId: "device-1", + role: "operator", + token: "operator-token", + env, + }); + storeDeviceAuthToken({ + deviceId: "device-1", + role: "node", + token: "node-token", + env, + }); + + clearDeviceAuthToken({ + deviceId: "device-1", + role: " operator ", + env, + }); + + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "node", env })).toMatchObject({ + token: "node-token", + }); + }); + }); +}); diff --git a/src/infra/device-identity.state-dir.test.ts 
b/src/infra/device-identity.state-dir.test.ts index 785fa343ec0..00929c26186 100644 --- a/src/infra/device-identity.state-dir.test.ts +++ b/src/infra/device-identity.state-dir.test.ts @@ -49,4 +49,25 @@ describe("device identity state dir defaults", () => { expect(stored.deviceId).toBe(original.deviceId); }); }); + + it("regenerates the identity when the stored file is invalid", async () => { + await withStateDirEnv("openclaw-identity-state-", async ({ stateDir }) => { + const identityPath = path.join(stateDir, "identity", "device.json"); + await fs.mkdir(path.dirname(identityPath), { recursive: true }); + await fs.writeFile(identityPath, '{"version":1,"deviceId":"broken"}\n', "utf8"); + + const regenerated = loadOrCreateDeviceIdentity(); + const stored = JSON.parse(await fs.readFile(identityPath, "utf8")) as { + version?: number; + deviceId?: string; + publicKeyPem?: string; + privateKeyPem?: string; + }; + + expect(stored.version).toBe(1); + expect(stored.deviceId).toBe(regenerated.deviceId); + expect(stored.publicKeyPem).toBe(regenerated.publicKeyPem); + expect(stored.privateKeyPem).toBe(regenerated.privateKeyPem); + }); + }); }); diff --git a/src/infra/diagnostic-events.test.ts b/src/infra/diagnostic-events.test.ts new file mode 100644 index 00000000000..d2b2af1d04a --- /dev/null +++ b/src/infra/diagnostic-events.test.ts @@ -0,0 +1,121 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + emitDiagnosticEvent, + isDiagnosticsEnabled, + onDiagnosticEvent, + resetDiagnosticEventsForTest, +} from "./diagnostic-events.js"; + +describe("diagnostic-events", () => { + beforeEach(() => { + resetDiagnosticEventsForTest(); + }); + + afterEach(() => { + resetDiagnosticEventsForTest(); + vi.restoreAllMocks(); + }); + + it("emits monotonic seq and timestamps to subscribers", () => { + vi.spyOn(Date, "now").mockReturnValueOnce(111).mockReturnValueOnce(222); + const events: Array<{ seq: number; ts: number; type: string }> = []; + const stop 
= onDiagnosticEvent((event) => { + events.push({ seq: event.seq, ts: event.ts, type: event.type }); + }); + + emitDiagnosticEvent({ + type: "model.usage", + usage: { total: 1 }, + }); + emitDiagnosticEvent({ + type: "session.state", + state: "processing", + }); + stop(); + + expect(events).toEqual([ + { seq: 1, ts: 111, type: "model.usage" }, + { seq: 2, ts: 222, type: "session.state" }, + ]); + }); + + it("isolates listener failures and logs them", () => { + const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + const seen: string[] = []; + onDiagnosticEvent(() => { + throw new Error("boom"); + }); + onDiagnosticEvent((event) => { + seen.push(event.type); + }); + + emitDiagnosticEvent({ + type: "message.queued", + source: "telegram", + }); + + expect(seen).toEqual(["message.queued"]); + expect(errorSpy).toHaveBeenCalledWith( + expect.stringContaining("listener error type=message.queued seq=1: Error: boom"), + ); + }); + + it("supports unsubscribe and full reset", () => { + const seen: string[] = []; + const stop = onDiagnosticEvent((event) => { + seen.push(event.type); + }); + + emitDiagnosticEvent({ + type: "webhook.received", + channel: "telegram", + }); + stop(); + emitDiagnosticEvent({ + type: "webhook.processed", + channel: "telegram", + }); + + expect(seen).toEqual(["webhook.received"]); + + resetDiagnosticEventsForTest(); + emitDiagnosticEvent({ + type: "webhook.error", + channel: "telegram", + error: "failed", + }); + expect(seen).toEqual(["webhook.received"]); + }); + + it("drops recursive emissions after the guard threshold", () => { + const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + let calls = 0; + onDiagnosticEvent(() => { + calls += 1; + emitDiagnosticEvent({ + type: "queue.lane.enqueue", + lane: "main", + queueSize: calls, + }); + }); + + emitDiagnosticEvent({ + type: "queue.lane.enqueue", + lane: "main", + queueSize: 0, + }); + + expect(calls).toBe(101); + expect(errorSpy).toHaveBeenCalledWith( + 
expect.stringContaining( + "recursion guard tripped at depth=101, dropping type=queue.lane.enqueue", + ), + ); + }); + + it("requires an explicit true diagnostics flag", () => { + expect(isDiagnosticsEnabled()).toBe(false); + expect(isDiagnosticsEnabled({ diagnostics: { enabled: false } } as never)).toBe(false); + expect(isDiagnosticsEnabled({ diagnostics: { enabled: true } } as never)).toBe(true); + }); +}); diff --git a/src/infra/diagnostic-flags.test.ts b/src/infra/diagnostic-flags.test.ts new file mode 100644 index 00000000000..7c4c3b0a62d --- /dev/null +++ b/src/infra/diagnostic-flags.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { + isDiagnosticFlagEnabled, + matchesDiagnosticFlag, + resolveDiagnosticFlags, +} from "./diagnostic-flags.js"; + +describe("resolveDiagnosticFlags", () => { + it("normalizes and dedupes config and env flags", () => { + const cfg = { + diagnostics: { flags: [" Telegram.Http ", "cache.*", "CACHE.*"] }, + } as OpenClawConfig; + const env = { + OPENCLAW_DIAGNOSTICS: " foo, Cache.* telegram.http ", + } as NodeJS.ProcessEnv; + + expect(resolveDiagnosticFlags(cfg, env)).toEqual(["telegram.http", "cache.*", "foo"]); + }); + + it("treats false-like env values as no extra flags", () => { + const cfg = { + diagnostics: { flags: ["telegram.http"] }, + } as OpenClawConfig; + + for (const raw of ["0", "false", "off", "none", " "]) { + expect( + resolveDiagnosticFlags(cfg, { + OPENCLAW_DIAGNOSTICS: raw, + } as NodeJS.ProcessEnv), + ).toEqual(["telegram.http"]); + } + }); +}); + +describe("matchesDiagnosticFlag", () => { + it("matches exact, namespace, prefix, and wildcard rules", () => { + expect(matchesDiagnosticFlag("telegram.http", ["telegram.http"])).toBe(true); + expect(matchesDiagnosticFlag("cache", ["cache.*"])).toBe(true); + expect(matchesDiagnosticFlag("cache.hit", ["cache.*"])).toBe(true); + expect(matchesDiagnosticFlag("tool.exec.fast", 
["tool.exec*"])).toBe(true); + expect(matchesDiagnosticFlag("anything", ["all"])).toBe(true); + expect(matchesDiagnosticFlag("anything", ["*"])).toBe(true); + }); + + it("rejects blank and non-matching flags", () => { + expect(matchesDiagnosticFlag(" ", ["*"])).toBe(false); + expect(matchesDiagnosticFlag("cache.hit", ["cache.miss", "tool.*"])).toBe(false); + }); +}); + +describe("isDiagnosticFlagEnabled", () => { + it("resolves config and env together before matching", () => { + const cfg = { + diagnostics: { flags: ["gateway.*"] }, + } as OpenClawConfig; + const env = { + OPENCLAW_DIAGNOSTICS: "telegram.http", + } as NodeJS.ProcessEnv; + + expect(isDiagnosticFlagEnabled("gateway.ws", cfg, env)).toBe(true); + expect(isDiagnosticFlagEnabled("telegram.http", cfg, env)).toBe(true); + expect(isDiagnosticFlagEnabled("slack.http", cfg, env)).toBe(false); + }); +}); diff --git a/src/infra/errors.test.ts b/src/infra/errors.test.ts new file mode 100644 index 00000000000..45b6b73e395 --- /dev/null +++ b/src/infra/errors.test.ts @@ -0,0 +1,80 @@ +import { describe, expect, it } from "vitest"; +import { + collectErrorGraphCandidates, + extractErrorCode, + formatErrorMessage, + formatUncaughtError, + hasErrnoCode, + isErrno, + readErrorName, +} from "./errors.js"; + +describe("error helpers", () => { + it("extracts codes and names from string and numeric error metadata", () => { + expect(extractErrorCode({ code: "EADDRINUSE" })).toBe("EADDRINUSE"); + expect(extractErrorCode({ code: 429 })).toBe("429"); + expect(extractErrorCode({ code: false })).toBeUndefined(); + expect(extractErrorCode("boom")).toBeUndefined(); + + expect(readErrorName({ name: "AbortError" })).toBe("AbortError"); + expect(readErrorName({ name: 42 })).toBe(""); + expect(readErrorName(null)).toBe(""); + }); + + it("walks nested error graphs once in breadth-first order", () => { + const leaf = { name: "leaf" }; + const child = { name: "child" } as { + name: string; + cause?: unknown; + errors?: unknown[]; + }; + 
const root = { name: "root", cause: child, errors: [leaf, child] }; + child.cause = root; + + expect( + collectErrorGraphCandidates(root, (current) => [ + current.cause, + ...((current as { errors?: unknown[] }).errors ?? []), + ]), + ).toEqual([root, child, leaf]); + expect(collectErrorGraphCandidates(null)).toEqual([]); + }); + + it("matches errno-shaped errors by code", () => { + const err = Object.assign(new Error("busy"), { code: "EADDRINUSE" }); + expect(isErrno(err)).toBe(true); + expect(hasErrnoCode(err, "EADDRINUSE")).toBe(true); + expect(hasErrnoCode(err, "ENOENT")).toBe(false); + expect(isErrno("busy")).toBe(false); + }); + + it("formats primitives and circular objects without throwing", () => { + const circular: { self?: unknown } = {}; + circular.self = circular; + + expect(formatErrorMessage(123n)).toBe("123"); + expect(formatErrorMessage(false)).toBe("false"); + expect(formatErrorMessage(circular)).toBe("[object Object]"); + }); + + it("redacts sensitive tokens from formatted error messages", () => { + const token = "sk-abcdefghijklmnopqrstuv"; + const formatted = formatErrorMessage(new Error(`Authorization: Bearer ${token}`)); + expect(formatted).toContain("Authorization: Bearer"); + expect(formatted).not.toContain(token); + }); + + it("uses message-only formatting for INVALID_CONFIG and stack formatting otherwise", () => { + const invalidConfig = Object.assign(new Error("TOKEN=sk-abcdefghijklmnopqrstuv"), { + code: "INVALID_CONFIG", + stack: "Error: TOKEN=sk-abcdefghijklmnopqrstuv\n at ignored", + }); + expect(formatUncaughtError(invalidConfig)).not.toContain("at ignored"); + + const uncaught = new Error("boom"); + uncaught.stack = "Error: Authorization: Bearer sk-abcdefghijklmnopqrstuv\n at runTask"; + const formatted = formatUncaughtError(uncaught); + expect(formatted).toContain("at runTask"); + expect(formatted).not.toContain("sk-abcdefghijklmnopqrstuv"); + }); +}); diff --git a/src/infra/exec-allowlist-matching.test.ts 
b/src/infra/exec-allowlist-matching.test.ts new file mode 100644 index 00000000000..4376eefeff1 --- /dev/null +++ b/src/infra/exec-allowlist-matching.test.ts @@ -0,0 +1,60 @@ +import { describe, expect, it } from "vitest"; +import { matchAllowlist, type ExecAllowlistEntry } from "./exec-approvals.js"; + +describe("exec allowlist matching", () => { + const baseResolution = { + rawExecutable: "rg", + resolvedPath: "/opt/homebrew/bin/rg", + executableName: "rg", + }; + + it("handles wildcard and path matching semantics", () => { + const cases: Array<{ entries: ExecAllowlistEntry[]; expectedPattern: string | null }> = [ + { entries: [{ pattern: "RG" }], expectedPattern: null }, + { entries: [{ pattern: "/opt/**/rg" }], expectedPattern: "/opt/**/rg" }, + { entries: [{ pattern: "/opt/*/rg" }], expectedPattern: null }, + ]; + for (const testCase of cases) { + const match = matchAllowlist(testCase.entries, baseResolution); + expect(match?.pattern ?? null).toBe(testCase.expectedPattern); + } + }); + + it("matches bare wildcard patterns against arbitrary resolved executables", () => { + expect(matchAllowlist([{ pattern: "*" }], baseResolution)?.pattern).toBe("*"); + expect( + matchAllowlist([{ pattern: "*" }], { + rawExecutable: "python3", + resolvedPath: "/usr/bin/python3", + executableName: "python3", + })?.pattern, + ).toBe("*"); + }); + + it("matches absolute paths containing regex metacharacters literally", () => { + const plusPathCases = ["/usr/bin/g++", "/usr/bin/clang++"]; + for (const candidatePath of plusPathCases) { + const match = matchAllowlist([{ pattern: candidatePath }], { + rawExecutable: candidatePath, + resolvedPath: candidatePath, + executableName: candidatePath.split("/").at(-1) ?? 
candidatePath, + }); + expect(match?.pattern).toBe(candidatePath); + } + + expect( + matchAllowlist([{ pattern: "/usr/bin/*++" }], { + rawExecutable: "/usr/bin/g++", + resolvedPath: "/usr/bin/g++", + executableName: "g++", + })?.pattern, + ).toBe("/usr/bin/*++"); + expect( + matchAllowlist([{ pattern: "/opt/builds/tool[1](stable)" }], { + rawExecutable: "/opt/builds/tool[1](stable)", + resolvedPath: "/opt/builds/tool[1](stable)", + executableName: "tool[1](stable)", + })?.pattern, + ).toBe("/opt/builds/tool[1](stable)"); + }); +}); diff --git a/src/infra/exec-approval-command-display.test.ts b/src/infra/exec-approval-command-display.test.ts new file mode 100644 index 00000000000..9fefeec1aed --- /dev/null +++ b/src/infra/exec-approval-command-display.test.ts @@ -0,0 +1,66 @@ +import { describe, expect, it } from "vitest"; +import { + resolveExecApprovalCommandDisplay, + sanitizeExecApprovalDisplayText, +} from "./exec-approval-command-display.js"; + +describe("sanitizeExecApprovalDisplayText", () => { + it("escapes unicode format characters but leaves other text intact", () => { + expect(sanitizeExecApprovalDisplayText("echo hi\u200Bthere")).toBe("echo hi\\u{200B}there"); + }); +}); + +describe("resolveExecApprovalCommandDisplay", () => { + it("prefers explicit command fields and drops identical previews after trimming", () => { + expect( + resolveExecApprovalCommandDisplay({ + command: "echo hi", + commandPreview: " echo hi ", + host: "gateway", + }), + ).toEqual({ + commandText: "echo hi", + commandPreview: null, + }); + }); + + it("falls back to node systemRunPlan values and sanitizes preview text", () => { + expect( + resolveExecApprovalCommandDisplay({ + command: "", + host: "node", + systemRunPlan: { + argv: ["python3", "-c", "print(1)"], + cwd: null, + commandText: 'python3 -c "print(1)"', + commandPreview: "print\u200B(1)", + agentId: null, + sessionKey: null, + }, + }), + ).toEqual({ + commandText: 'python3 -c "print(1)"', + commandPreview: 
"print\\u{200B}(1)", + }); + }); + + it("ignores systemRunPlan fallback for non-node hosts", () => { + expect( + resolveExecApprovalCommandDisplay({ + command: "", + host: "sandbox", + systemRunPlan: { + argv: ["echo", "hi"], + cwd: null, + commandText: "echo hi", + commandPreview: "echo hi", + agentId: null, + sessionKey: null, + }, + }), + ).toEqual({ + commandText: "", + commandPreview: null, + }); + }); +}); diff --git a/src/infra/exec-approval-forwarder.test.ts b/src/infra/exec-approval-forwarder.test.ts index ca4d81e012e..d1d72aecd24 100644 --- a/src/infra/exec-approval-forwarder.test.ts +++ b/src/infra/exec-approval-forwarder.test.ts @@ -424,7 +424,7 @@ describe("exec approval forwarder", () => { channel: "whatsapp", to: "+15555550123", accountId: "work", - threadId: "1739201675.123", + threadId: 1739201675, }), ); } finally { diff --git a/src/infra/exec-approval-forwarder.ts b/src/infra/exec-approval-forwarder.ts index ca9abbc80b5..1008531d2f1 100644 --- a/src/infra/exec-approval-forwarder.ts +++ b/src/infra/exec-approval-forwarder.ts @@ -1,7 +1,6 @@ import type { ReplyPayload } from "../auto-reply/types.js"; import type { OpenClawConfig } from "../config/config.js"; import { loadConfig } from "../config/config.js"; -import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; import type { ExecApprovalForwardingConfig, ExecApprovalForwardTarget, @@ -18,13 +17,13 @@ import { } from "../utils/message-channel.js"; import { resolveExecApprovalCommandDisplay } from "./exec-approval-command-display.js"; import { buildExecApprovalPendingReplyPayload } from "./exec-approval-reply.js"; +import { resolveExecApprovalSessionTarget } from "./exec-approval-session-target.js"; import type { ExecApprovalDecision, ExecApprovalRequest, ExecApprovalResolved, } from "./exec-approvals.js"; import { deliverOutboundPayloads } from "./outbound/deliver.js"; -import { resolveSessionDeliveryTarget } from "./outbound/targets.js"; const log = 
createSubsystemLogger("gateway/exec-approvals"); export type { ExecApprovalRequest, ExecApprovalResolved }; @@ -281,21 +280,9 @@ function defaultResolveSessionTarget(params: { cfg: OpenClawConfig; request: ExecApprovalRequest; }): ExecApprovalForwardTarget | null { - const sessionKey = params.request.request.sessionKey?.trim(); - if (!sessionKey) { - return null; - } - const parsed = parseAgentSessionKey(sessionKey); - const agentId = parsed?.agentId ?? params.request.request.agentId ?? "main"; - const storePath = resolveStorePath(params.cfg.session?.store, { agentId }); - const store = loadSessionStore(storePath); - const entry = store[sessionKey]; - if (!entry) { - return null; - } - const target = resolveSessionDeliveryTarget({ - entry, - requestedChannel: "last", + const target = resolveExecApprovalSessionTarget({ + cfg: params.cfg, + request: params.request, turnSourceChannel: normalizeTurnSourceChannel(params.request.request.turnSourceChannel), turnSourceTo: params.request.request.turnSourceTo?.trim() || undefined, turnSourceAccountId: params.request.request.turnSourceAccountId?.trim() || undefined, diff --git a/src/infra/exec-approval-session-target.ts b/src/infra/exec-approval-session-target.ts new file mode 100644 index 00000000000..71535914c38 --- /dev/null +++ b/src/infra/exec-approval-session-target.ts @@ -0,0 +1,69 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; +import { parseAgentSessionKey } from "../routing/session-key.js"; +import type { ExecApprovalRequest } from "./exec-approvals.js"; +import { resolveSessionDeliveryTarget } from "./outbound/targets.js"; + +export type ExecApprovalSessionTarget = { + channel?: string; + to: string; + accountId?: string; + threadId?: number; +}; + +function normalizeOptionalString(value?: string | null): string | undefined { + const normalized = value?.trim(); + return normalized ? 
normalized : undefined; +} + +function normalizeOptionalThreadId(value?: string | number | null): number | undefined { + if (typeof value === "number") { + return Number.isFinite(value) ? value : undefined; + } + if (typeof value !== "string") { + return undefined; + } + const normalized = Number.parseInt(value, 10); + return Number.isFinite(normalized) ? normalized : undefined; +} + +export function resolveExecApprovalSessionTarget(params: { + cfg: OpenClawConfig; + request: ExecApprovalRequest; + turnSourceChannel?: string | null; + turnSourceTo?: string | null; + turnSourceAccountId?: string | null; + turnSourceThreadId?: string | number | null; +}): ExecApprovalSessionTarget | null { + const sessionKey = normalizeOptionalString(params.request.request.sessionKey); + if (!sessionKey) { + return null; + } + const parsed = parseAgentSessionKey(sessionKey); + const agentId = parsed?.agentId ?? params.request.request.agentId ?? "main"; + const storePath = resolveStorePath(params.cfg.session?.store, { agentId }); + const store = loadSessionStore(storePath); + const entry = store[sessionKey]; + if (!entry) { + return null; + } + + const target = resolveSessionDeliveryTarget({ + entry, + requestedChannel: "last", + turnSourceChannel: normalizeOptionalString(params.turnSourceChannel), + turnSourceTo: normalizeOptionalString(params.turnSourceTo), + turnSourceAccountId: normalizeOptionalString(params.turnSourceAccountId), + turnSourceThreadId: normalizeOptionalThreadId(params.turnSourceThreadId), + }); + if (!target.to) { + return null; + } + + return { + channel: normalizeOptionalString(target.channel), + to: target.to, + accountId: normalizeOptionalString(target.accountId), + threadId: normalizeOptionalThreadId(target.threadId), + }; +} diff --git a/src/infra/exec-approval-surface.test.ts b/src/infra/exec-approval-surface.test.ts new file mode 100644 index 00000000000..b263330104a --- /dev/null +++ b/src/infra/exec-approval-surface.test.ts @@ -0,0 +1,196 @@ +import { 
beforeEach, describe, expect, it, vi } from "vitest"; + +const loadConfigMock = vi.hoisted(() => vi.fn()); +const listEnabledDiscordAccountsMock = vi.hoisted(() => vi.fn()); +const isDiscordExecApprovalClientEnabledMock = vi.hoisted(() => vi.fn()); +const listEnabledTelegramAccountsMock = vi.hoisted(() => vi.fn()); +const isTelegramExecApprovalClientEnabledMock = vi.hoisted(() => vi.fn()); +const normalizeMessageChannelMock = vi.hoisted(() => vi.fn()); + +vi.mock("../config/config.js", () => ({ + loadConfig: (...args: unknown[]) => loadConfigMock(...args), +})); + +vi.mock("../discord/accounts.js", () => ({ + listEnabledDiscordAccounts: (...args: unknown[]) => listEnabledDiscordAccountsMock(...args), +})); + +vi.mock("../discord/exec-approvals.js", () => ({ + isDiscordExecApprovalClientEnabled: (...args: unknown[]) => + isDiscordExecApprovalClientEnabledMock(...args), +})); + +vi.mock("../telegram/accounts.js", () => ({ + listEnabledTelegramAccounts: (...args: unknown[]) => listEnabledTelegramAccountsMock(...args), +})); + +vi.mock("../telegram/exec-approvals.js", () => ({ + isTelegramExecApprovalClientEnabled: (...args: unknown[]) => + isTelegramExecApprovalClientEnabledMock(...args), +})); + +vi.mock("../utils/message-channel.js", () => ({ + INTERNAL_MESSAGE_CHANNEL: "web", + normalizeMessageChannel: (...args: unknown[]) => normalizeMessageChannelMock(...args), +})); + +import { + hasConfiguredExecApprovalDmRoute, + resolveExecApprovalInitiatingSurfaceState, +} from "./exec-approval-surface.js"; + +describe("resolveExecApprovalInitiatingSurfaceState", () => { + beforeEach(() => { + loadConfigMock.mockReset(); + listEnabledDiscordAccountsMock.mockReset(); + isDiscordExecApprovalClientEnabledMock.mockReset(); + listEnabledTelegramAccountsMock.mockReset(); + isTelegramExecApprovalClientEnabledMock.mockReset(); + normalizeMessageChannelMock.mockReset(); + normalizeMessageChannelMock.mockImplementation((value?: string | null) => + typeof value === "string" ? 
value.trim().toLowerCase() : undefined, + ); + }); + + it("treats web UI, terminal UI, and missing channels as enabled", () => { + expect(resolveExecApprovalInitiatingSurfaceState({ channel: null })).toEqual({ + kind: "enabled", + channel: undefined, + channelLabel: "this platform", + }); + expect(resolveExecApprovalInitiatingSurfaceState({ channel: "tui" })).toEqual({ + kind: "enabled", + channel: "tui", + channelLabel: "terminal UI", + }); + expect(resolveExecApprovalInitiatingSurfaceState({ channel: "web" })).toEqual({ + kind: "enabled", + channel: "web", + channelLabel: "Web UI", + }); + }); + + it("uses the provided cfg for telegram and discord client enablement", () => { + isTelegramExecApprovalClientEnabledMock.mockReturnValueOnce(true); + isDiscordExecApprovalClientEnabledMock.mockReturnValueOnce(false); + const cfg = { channels: {} }; + + expect( + resolveExecApprovalInitiatingSurfaceState({ + channel: "telegram", + accountId: "main", + cfg: cfg as never, + }), + ).toEqual({ + kind: "enabled", + channel: "telegram", + channelLabel: "Telegram", + }); + expect( + resolveExecApprovalInitiatingSurfaceState({ + channel: "discord", + accountId: "main", + cfg: cfg as never, + }), + ).toEqual({ + kind: "disabled", + channel: "discord", + channelLabel: "Discord", + }); + + expect(loadConfigMock).not.toHaveBeenCalled(); + }); + + it("loads config lazily when cfg is omitted and marks unsupported channels", () => { + loadConfigMock.mockReturnValueOnce({ loaded: true }); + isTelegramExecApprovalClientEnabledMock.mockReturnValueOnce(false); + + expect( + resolveExecApprovalInitiatingSurfaceState({ + channel: "telegram", + accountId: "main", + }), + ).toEqual({ + kind: "disabled", + channel: "telegram", + channelLabel: "Telegram", + }); + expect(loadConfigMock).toHaveBeenCalledOnce(); + + expect(resolveExecApprovalInitiatingSurfaceState({ channel: "signal" })).toEqual({ + kind: "unsupported", + channel: "signal", + channelLabel: "Signal", + }); + }); +}); + 
+describe("hasConfiguredExecApprovalDmRoute", () => { + beforeEach(() => { + listEnabledDiscordAccountsMock.mockReset(); + listEnabledTelegramAccountsMock.mockReset(); + }); + + it("returns true when any enabled account routes approvals to DM or both", () => { + listEnabledDiscordAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: true, + approvers: ["a"], + target: "channel", + }, + }, + }, + ]); + listEnabledTelegramAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: true, + approvers: ["a"], + target: "both", + }, + }, + }, + ]); + + expect(hasConfiguredExecApprovalDmRoute({} as never)).toBe(true); + }); + + it("returns false when exec approvals are disabled or have no DM route", () => { + listEnabledDiscordAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: false, + approvers: ["a"], + target: "dm", + }, + }, + }, + ]); + listEnabledTelegramAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: true, + approvers: [], + target: "dm", + }, + }, + }, + { + config: { + execApprovals: { + enabled: true, + approvers: ["a"], + target: "channel", + }, + }, + }, + ]); + + expect(hasConfiguredExecApprovalDmRoute({} as never)).toBe(false); + }); +}); diff --git a/src/infra/exec-approvals-allow-always.test.ts b/src/infra/exec-approvals-allow-always.test.ts index 72db45a33ea..a0ba77ecb6b 100644 --- a/src/infra/exec-approvals-allow-always.test.ts +++ b/src/infra/exec-approvals-allow-always.test.ts @@ -18,6 +18,31 @@ describe("resolveAllowAlwaysPatterns", () => { return exe; } + function resolvePersistedPatterns(params: { + command: string; + dir: string; + env: Record; + safeBins: ReturnType; + }) { + const analysis = evaluateShellAllowlist({ + command: params.command, + allowlist: [], + safeBins: params.safeBins, + cwd: params.dir, + env: params.env, + platform: process.platform, + }); + return { + analysis, + persisted: resolveAllowAlwaysPatterns({ + segments: 
analysis.segments, + cwd: params.dir, + env: params.env, + platform: process.platform, + }), + }; + } + function expectAllowAlwaysBypassBlocked(params: { dir: string; firstCommand: string; @@ -26,19 +51,11 @@ describe("resolveAllowAlwaysPatterns", () => { persistedPattern: string; }) { const safeBins = resolveSafeBins(undefined); - const first = evaluateShellAllowlist({ + const { persisted } = resolvePersistedPatterns({ command: params.firstCommand, - allowlist: [], + dir: params.dir, + env: params.env, safeBins, - cwd: params.dir, - env: params.env, - platform: process.platform, - }); - const persisted = resolveAllowAlwaysPatterns({ - segments: first.segments, - cwd: params.dir, - env: params.env, - platform: process.platform, }); expect(persisted).toEqual([params.persistedPattern]); @@ -61,6 +78,43 @@ describe("resolveAllowAlwaysPatterns", () => { ).toBe(true); } + function createShellScriptFixture() { + const dir = makeTempDir(); + const scriptsDir = path.join(dir, "scripts"); + fs.mkdirSync(scriptsDir, { recursive: true }); + const script = path.join(scriptsDir, "save_crystal.sh"); + fs.writeFileSync(script, "echo ok\n"); + const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? 
""}` }; + const safeBins = resolveSafeBins(undefined); + return { dir, scriptsDir, script, env, safeBins }; + } + + function expectPersistedShellScriptMatch(params: { + command: string; + script: string; + dir: string; + env: Record; + safeBins: ReturnType; + }) { + const { persisted } = resolvePersistedPatterns({ + command: params.command, + dir: params.dir, + env: params.env, + safeBins: params.safeBins, + }); + expect(persisted).toEqual([params.script]); + + const second = evaluateShellAllowlist({ + command: params.command, + allowlist: [{ pattern: params.script }], + safeBins: params.safeBins, + cwd: params.dir, + env: params.env, + platform: process.platform, + }); + expect(second.allowlistSatisfied).toBe(true); + } + it("returns direct executable paths for non-shell segments", () => { const exe = path.join("/tmp", "openclaw-tool"); const patterns = resolveAllowAlwaysPatterns({ @@ -131,39 +185,14 @@ describe("resolveAllowAlwaysPatterns", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - - const safeBins = resolveSafeBins(undefined); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? 
""}` }; - const first = evaluateShellAllowlist({ + const { dir, scriptsDir, script, env, safeBins } = createShellScriptFixture(); + expectPersistedShellScriptMatch({ command: "bash scripts/save_crystal.sh", - allowlist: [], + script, + dir, + env, safeBins, - cwd: dir, - env, - platform: process.platform, }); - const persisted = resolveAllowAlwaysPatterns({ - segments: first.segments, - cwd: dir, - env, - platform: process.platform, - }); - expect(persisted).toEqual([script]); - - const second = evaluateShellAllowlist({ - command: "bash scripts/save_crystal.sh", - allowlist: [{ pattern: script }], - safeBins, - cwd: dir, - env, - platform: process.platform, - }); - expect(second.allowlistSatisfied).toBe(true); const other = path.join(scriptsDir, "other.sh"); fs.writeFileSync(other, "echo other\n"); @@ -182,51 +211,21 @@ describe("resolveAllowAlwaysPatterns", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - - const safeBins = resolveSafeBins(undefined); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? 
""}` }; - const first = evaluateShellAllowlist({ + const { dir, script, env, safeBins } = createShellScriptFixture(); + expectPersistedShellScriptMatch({ command: "/usr/bin/nice bash scripts/save_crystal.sh", - allowlist: [], + script, + dir, + env, safeBins, - cwd: dir, - env, - platform: process.platform, }); - const persisted = resolveAllowAlwaysPatterns({ - segments: first.segments, - cwd: dir, - env, - platform: process.platform, - }); - expect(persisted).toEqual([script]); - - const second = evaluateShellAllowlist({ - command: "/usr/bin/nice bash scripts/save_crystal.sh", - allowlist: [{ pattern: script }], - safeBins, - cwd: dir, - env, - platform: process.platform, - }); - expect(second.allowlistSatisfied).toBe(true); }); it("does not treat inline shell commands as persisted script paths", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? ""}` }; + const { dir, script, env } = createShellScriptFixture(); expectAllowAlwaysBypassBlocked({ dir, firstCommand: "bash scripts/save_crystal.sh", @@ -240,12 +239,7 @@ describe("resolveAllowAlwaysPatterns", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? 
""}` }; + const { dir, script, env } = createShellScriptFixture(); expectAllowAlwaysBypassBlocked({ dir, firstCommand: "bash scripts/save_crystal.sh", diff --git a/src/infra/exec-approvals.test.ts b/src/infra/exec-approvals.test.ts index 57290c07116..34a265bd4e0 100644 --- a/src/infra/exec-approvals.test.ts +++ b/src/infra/exec-approvals.test.ts @@ -9,127 +9,16 @@ import { buildSafeBinsShellCommand, evaluateExecAllowlist, evaluateShellAllowlist, - matchAllowlist, maxAsk, mergeExecApprovalsSocketDefaults, minSecurity, normalizeExecApprovals, - parseExecArgvToken, normalizeSafeBins, requiresExecApproval, - resolveCommandResolution, - resolveCommandResolutionFromArgv, resolveExecApprovalsPath, resolveExecApprovalsSocketPath, - type ExecAllowlistEntry, } from "./exec-approvals.js"; -function buildNestedEnvShellCommand(params: { - envExecutable: string; - depth: number; - payload: string; -}): string[] { - return [...Array(params.depth).fill(params.envExecutable), "/bin/sh", "-c", params.payload]; -} - -function analyzeEnvWrapperAllowlist(params: { argv: string[]; envPath: string; cwd: string }) { - const analysis = analyzeArgvCommand({ - argv: params.argv, - cwd: params.cwd, - env: makePathEnv(params.envPath), - }); - const allowlistEval = evaluateExecAllowlist({ - analysis, - allowlist: [{ pattern: params.envPath }], - safeBins: normalizeSafeBins([]), - cwd: params.cwd, - }); - return { analysis, allowlistEval }; -} - -function createPathExecutableFixture(params?: { executable?: string }): { - exeName: string; - exePath: string; - binDir: string; -} { - const dir = makeTempDir(); - const binDir = path.join(dir, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - const baseName = params?.executable ?? "rg"; - const exeName = process.platform === "win32" ? 
`${baseName}.exe` : baseName; - const exePath = path.join(binDir, exeName); - fs.writeFileSync(exePath, ""); - fs.chmodSync(exePath, 0o755); - return { exeName, exePath, binDir }; -} - -describe("exec approvals allowlist matching", () => { - const baseResolution = { - rawExecutable: "rg", - resolvedPath: "/opt/homebrew/bin/rg", - executableName: "rg", - }; - - it("handles wildcard/path matching semantics", () => { - const cases: Array<{ entries: ExecAllowlistEntry[]; expectedPattern: string | null }> = [ - { entries: [{ pattern: "RG" }], expectedPattern: null }, - { entries: [{ pattern: "/opt/**/rg" }], expectedPattern: "/opt/**/rg" }, - { entries: [{ pattern: "/opt/*/rg" }], expectedPattern: null }, - ]; - for (const testCase of cases) { - const match = matchAllowlist(testCase.entries, baseResolution); - expect(match?.pattern ?? null).toBe(testCase.expectedPattern); - } - }); - - it("matches bare * wildcard pattern against any resolved path", () => { - const match = matchAllowlist([{ pattern: "*" }], baseResolution); - expect(match).not.toBeNull(); - expect(match?.pattern).toBe("*"); - }); - - it("matches bare * wildcard against arbitrary executables", () => { - const match = matchAllowlist([{ pattern: "*" }], { - rawExecutable: "python3", - resolvedPath: "/usr/bin/python3", - executableName: "python3", - }); - expect(match).not.toBeNull(); - expect(match?.pattern).toBe("*"); - }); - - it("matches absolute paths containing regex metacharacters", () => { - const plusPathCases = ["/usr/bin/g++", "/usr/bin/clang++"]; - for (const candidatePath of plusPathCases) { - const match = matchAllowlist([{ pattern: candidatePath }], { - rawExecutable: candidatePath, - resolvedPath: candidatePath, - executableName: candidatePath.split("/").at(-1) ?? 
candidatePath, - }); - expect(match?.pattern).toBe(candidatePath); - } - }); - - it("does not throw when wildcard globs are mixed with + in path", () => { - const match = matchAllowlist([{ pattern: "/usr/bin/*++" }], { - rawExecutable: "/usr/bin/g++", - resolvedPath: "/usr/bin/g++", - executableName: "g++", - }); - expect(match?.pattern).toBe("/usr/bin/*++"); - }); - - it("matches paths containing []() regex tokens literally", () => { - const literalPattern = "/opt/builds/tool[1](stable)"; - const match = matchAllowlist([{ pattern: literalPattern }], { - rawExecutable: literalPattern, - resolvedPath: literalPattern, - executableName: "tool[1](stable)", - }); - expect(match?.pattern).toBe(literalPattern); - }); -}); - describe("mergeExecApprovalsSocketDefaults", () => { it("prefers normalized socket, then current, then default path", () => { const normalized = normalizeExecApprovals({ @@ -231,161 +120,6 @@ describe("exec approvals safe shell command builder", () => { }); }); -describe("exec approvals command resolution", () => { - it("resolves PATH, relative, and quoted executables", () => { - const cases = [ - { - name: "PATH executable", - setup: () => { - const fixture = createPathExecutableFixture(); - return { - command: "rg -n foo", - cwd: undefined as string | undefined, - envPath: makePathEnv(fixture.binDir), - expectedPath: fixture.exePath, - expectedExecutableName: fixture.exeName, - }; - }, - }, - { - name: "relative executable", - setup: () => { - const dir = makeTempDir(); - const cwd = path.join(dir, "project"); - const script = path.join(cwd, "scripts", "run.sh"); - fs.mkdirSync(path.dirname(script), { recursive: true }); - fs.writeFileSync(script, ""); - fs.chmodSync(script, 0o755); - return { - command: "./scripts/run.sh --flag", - cwd, - envPath: undefined as NodeJS.ProcessEnv | undefined, - expectedPath: script, - expectedExecutableName: undefined, - }; - }, - }, - { - name: "quoted executable", - setup: () => { - const dir = makeTempDir(); - 
const cwd = path.join(dir, "project"); - const script = path.join(cwd, "bin", "tool"); - fs.mkdirSync(path.dirname(script), { recursive: true }); - fs.writeFileSync(script, ""); - fs.chmodSync(script, 0o755); - return { - command: '"./bin/tool" --version', - cwd, - envPath: undefined as NodeJS.ProcessEnv | undefined, - expectedPath: script, - expectedExecutableName: undefined, - }; - }, - }, - ] as const; - - for (const testCase of cases) { - const setup = testCase.setup(); - const res = resolveCommandResolution(setup.command, setup.cwd, setup.envPath); - expect(res?.resolvedPath, testCase.name).toBe(setup.expectedPath); - if (setup.expectedExecutableName) { - expect(res?.executableName, testCase.name).toBe(setup.expectedExecutableName); - } - } - }); - - it("unwraps transparent env wrapper argv to resolve the effective executable", () => { - const fixture = createPathExecutableFixture(); - - const resolution = resolveCommandResolutionFromArgv( - ["/usr/bin/env", "rg", "-n", "needle"], - undefined, - makePathEnv(fixture.binDir), - ); - expect(resolution?.resolvedPath).toBe(fixture.exePath); - expect(resolution?.executableName).toBe(fixture.exeName); - }); - - it("blocks semantic env wrappers from allowlist/safeBins auto-resolution", () => { - const resolution = resolveCommandResolutionFromArgv([ - "/usr/bin/env", - "FOO=bar", - "rg", - "-n", - "needle", - ]); - expect(resolution?.policyBlocked).toBe(true); - expect(resolution?.rawExecutable).toBe("/usr/bin/env"); - }); - - it("fails closed for env -S even when env itself is allowlisted", () => { - const dir = makeTempDir(); - const binDir = path.join(dir, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - const envName = process.platform === "win32" ? "env.exe" : "env"; - const envPath = path.join(binDir, envName); - fs.writeFileSync(envPath, process.platform === "win32" ? 
"" : "#!/bin/sh\n"); - if (process.platform !== "win32") { - fs.chmodSync(envPath, 0o755); - } - const { analysis, allowlistEval } = analyzeEnvWrapperAllowlist({ - argv: [envPath, "-S", 'sh -c "echo pwned"'], - envPath: envPath, - cwd: dir, - }); - - expect(analysis.ok).toBe(true); - expect(analysis.segments[0]?.resolution?.policyBlocked).toBe(true); - expect(allowlistEval.allowlistSatisfied).toBe(false); - expect(allowlistEval.segmentSatisfiedBy).toEqual([null]); - }); - - it("fails closed when transparent env wrappers exceed unwrap depth", () => { - if (process.platform === "win32") { - return; - } - const dir = makeTempDir(); - const binDir = path.join(dir, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - const envPath = path.join(binDir, "env"); - fs.writeFileSync(envPath, "#!/bin/sh\n"); - fs.chmodSync(envPath, 0o755); - const { analysis, allowlistEval } = analyzeEnvWrapperAllowlist({ - argv: buildNestedEnvShellCommand({ - envExecutable: envPath, - depth: 5, - payload: "echo pwned", - }), - envPath, - cwd: dir, - }); - - expect(analysis.ok).toBe(true); - expect(analysis.segments[0]?.resolution?.policyBlocked).toBe(true); - expect(analysis.segments[0]?.resolution?.blockedWrapper).toBe("env"); - expect(allowlistEval.allowlistSatisfied).toBe(false); - expect(allowlistEval.segmentSatisfiedBy).toEqual([null]); - }); - - it("unwraps env wrapper with shell inner executable", () => { - const resolution = resolveCommandResolutionFromArgv(["/usr/bin/env", "bash", "-lc", "echo hi"]); - expect(resolution?.rawExecutable).toBe("bash"); - expect(resolution?.executableName.toLowerCase()).toContain("bash"); - }); - - it("unwraps nice wrapper argv to resolve the effective executable", () => { - const resolution = resolveCommandResolutionFromArgv([ - "/usr/bin/nice", - "bash", - "-lc", - "echo hi", - ]); - expect(resolution?.rawExecutable).toBe("bash"); - expect(resolution?.executableName.toLowerCase()).toContain("bash"); - }); -}); - describe("exec approvals shell 
parsing", () => { it("parses pipelines and chained commands", () => { const cases = [ @@ -532,26 +266,6 @@ describe("exec approvals shell parsing", () => { expect(res.ok).toBe(true); expect(res.segments[0]?.argv).toEqual(["C:\\Program Files\\Tool\\tool.exe", "--version"]); }); - - it("normalizes short option clusters with attached payloads", () => { - const parsed = parseExecArgvToken("-oblocked.txt"); - expect(parsed.kind).toBe("option"); - if (parsed.kind !== "option" || parsed.style !== "short-cluster") { - throw new Error("expected short-cluster option"); - } - expect(parsed.flags[0]).toBe("-o"); - expect(parsed.cluster).toBe("oblocked.txt"); - }); - - it("normalizes long options with inline payloads", () => { - const parsed = parseExecArgvToken("--output=blocked.txt"); - expect(parsed.kind).toBe("option"); - if (parsed.kind !== "option" || parsed.style !== "long") { - throw new Error("expected long option"); - } - expect(parsed.flag).toBe("--output"); - expect(parsed.inlineValue).toBe("blocked.txt"); - }); }); describe("exec approvals shell allowlist (chained commands)", () => { diff --git a/src/infra/exec-command-resolution.test.ts b/src/infra/exec-command-resolution.test.ts new file mode 100644 index 00000000000..1cb003c077e --- /dev/null +++ b/src/infra/exec-command-resolution.test.ts @@ -0,0 +1,242 @@ +import fs from "node:fs"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { makePathEnv, makeTempDir } from "./exec-approvals-test-helpers.js"; +import { + evaluateExecAllowlist, + normalizeSafeBins, + parseExecArgvToken, + resolveAllowlistCandidatePath, + resolveCommandResolution, + resolveCommandResolutionFromArgv, +} from "./exec-approvals.js"; + +function buildNestedEnvShellCommand(params: { + envExecutable: string; + depth: number; + payload: string; +}): string[] { + return [...Array(params.depth).fill(params.envExecutable), "/bin/sh", "-c", params.payload]; +} + +function analyzeEnvWrapperAllowlist(params: { 
argv: string[]; envPath: string; cwd: string }) { + const analysis = { + ok: true as const, + segments: [ + { + argv: params.argv, + resolution: resolveCommandResolutionFromArgv( + params.argv, + params.cwd, + makePathEnv(params.envPath), + ), + }, + ], + }; + const allowlistEval = evaluateExecAllowlist({ + analysis, + allowlist: [{ pattern: params.envPath }], + safeBins: normalizeSafeBins([]), + cwd: params.cwd, + }); + return { analysis, allowlistEval }; +} + +function createPathExecutableFixture(params?: { executable?: string }): { + exeName: string; + exePath: string; + binDir: string; +} { + const dir = makeTempDir(); + const binDir = path.join(dir, "bin"); + fs.mkdirSync(binDir, { recursive: true }); + const baseName = params?.executable ?? "rg"; + const exeName = process.platform === "win32" ? `${baseName}.exe` : baseName; + const exePath = path.join(binDir, exeName); + fs.writeFileSync(exePath, ""); + fs.chmodSync(exePath, 0o755); + return { exeName, exePath, binDir }; +} + +describe("exec-command-resolution", () => { + it("resolves PATH, relative, and quoted executables", () => { + const cases = [ + { + name: "PATH executable", + setup: () => { + const fixture = createPathExecutableFixture(); + return { + command: "rg -n foo", + cwd: undefined as string | undefined, + envPath: makePathEnv(fixture.binDir), + expectedPath: fixture.exePath, + expectedExecutableName: fixture.exeName, + }; + }, + }, + { + name: "relative executable", + setup: () => { + const dir = makeTempDir(); + const cwd = path.join(dir, "project"); + const script = path.join(cwd, "scripts", "run.sh"); + fs.mkdirSync(path.dirname(script), { recursive: true }); + fs.writeFileSync(script, ""); + fs.chmodSync(script, 0o755); + return { + command: "./scripts/run.sh --flag", + cwd, + envPath: undefined as NodeJS.ProcessEnv | undefined, + expectedPath: script, + expectedExecutableName: undefined, + }; + }, + }, + { + name: "quoted executable", + setup: () => { + const dir = makeTempDir(); + const 
cwd = path.join(dir, "project"); + const script = path.join(cwd, "bin", "tool"); + fs.mkdirSync(path.dirname(script), { recursive: true }); + fs.writeFileSync(script, ""); + fs.chmodSync(script, 0o755); + return { + command: '"./bin/tool" --version', + cwd, + envPath: undefined as NodeJS.ProcessEnv | undefined, + expectedPath: script, + expectedExecutableName: undefined, + }; + }, + }, + ] as const; + + for (const testCase of cases) { + const setup = testCase.setup(); + const res = resolveCommandResolution(setup.command, setup.cwd, setup.envPath); + expect(res?.resolvedPath, testCase.name).toBe(setup.expectedPath); + if (setup.expectedExecutableName) { + expect(res?.executableName, testCase.name).toBe(setup.expectedExecutableName); + } + } + }); + + it("unwraps transparent env and nice wrappers to the effective executable", () => { + const fixture = createPathExecutableFixture(); + + const envResolution = resolveCommandResolutionFromArgv( + ["/usr/bin/env", "rg", "-n", "needle"], + undefined, + makePathEnv(fixture.binDir), + ); + expect(envResolution?.resolvedPath).toBe(fixture.exePath); + expect(envResolution?.executableName).toBe(fixture.exeName); + + const niceResolution = resolveCommandResolutionFromArgv([ + "/usr/bin/nice", + "bash", + "-lc", + "echo hi", + ]); + expect(niceResolution?.rawExecutable).toBe("bash"); + expect(niceResolution?.executableName.toLowerCase()).toContain("bash"); + }); + + it("blocks semantic env wrappers, env -S, and deep transparent-wrapper chains", () => { + const blockedEnv = resolveCommandResolutionFromArgv([ + "/usr/bin/env", + "FOO=bar", + "rg", + "-n", + "needle", + ]); + expect(blockedEnv?.policyBlocked).toBe(true); + expect(blockedEnv?.rawExecutable).toBe("/usr/bin/env"); + + if (process.platform === "win32") { + return; + } + + const dir = makeTempDir(); + const binDir = path.join(dir, "bin"); + fs.mkdirSync(binDir, { recursive: true }); + const envPath = path.join(binDir, "env"); + fs.writeFileSync(envPath, "#!/bin/sh\n"); + 
fs.chmodSync(envPath, 0o755); + + const envS = analyzeEnvWrapperAllowlist({ + argv: [envPath, "-S", 'sh -c "echo pwned"'], + envPath, + cwd: dir, + }); + expect(envS.analysis.segments[0]?.resolution?.policyBlocked).toBe(true); + expect(envS.allowlistEval.allowlistSatisfied).toBe(false); + + const deep = analyzeEnvWrapperAllowlist({ + argv: buildNestedEnvShellCommand({ + envExecutable: envPath, + depth: 5, + payload: "echo pwned", + }), + envPath, + cwd: dir, + }); + expect(deep.analysis.segments[0]?.resolution?.policyBlocked).toBe(true); + expect(deep.analysis.segments[0]?.resolution?.blockedWrapper).toBe("env"); + expect(deep.allowlistEval.allowlistSatisfied).toBe(false); + }); + + it("resolves allowlist candidate paths from unresolved raw executables", () => { + expect( + resolveAllowlistCandidatePath( + { + rawExecutable: "~/bin/tool", + executableName: "tool", + }, + "/tmp", + ), + ).toContain("/bin/tool"); + + expect( + resolveAllowlistCandidatePath( + { + rawExecutable: "./scripts/run.sh", + executableName: "run.sh", + }, + "/repo", + ), + ).toBe(path.resolve("/repo", "./scripts/run.sh")); + + expect( + resolveAllowlistCandidatePath( + { + rawExecutable: "rg", + executableName: "rg", + }, + "/repo", + ), + ).toBeUndefined(); + }); + + it("normalizes argv tokens for short clusters, long options, and special sentinels", () => { + expect(parseExecArgvToken("")).toEqual({ kind: "empty", raw: "" }); + expect(parseExecArgvToken("--")).toEqual({ kind: "terminator", raw: "--" }); + expect(parseExecArgvToken("-")).toEqual({ kind: "stdin", raw: "-" }); + expect(parseExecArgvToken("echo")).toEqual({ kind: "positional", raw: "echo" }); + + const short = parseExecArgvToken("-oblocked.txt"); + expect(short.kind).toBe("option"); + if (short.kind === "option" && short.style === "short-cluster") { + expect(short.flags[0]).toBe("-o"); + expect(short.cluster).toBe("oblocked.txt"); + } + + const long = parseExecArgvToken("--output=blocked.txt"); + 
expect(long.kind).toBe("option"); + if (long.kind === "option" && long.style === "long") { + expect(long.flag).toBe("--output"); + expect(long.inlineValue).toBe("blocked.txt"); + } + }); +}); diff --git a/src/infra/exec-host.test.ts b/src/infra/exec-host.test.ts new file mode 100644 index 00000000000..08d3d8af3be --- /dev/null +++ b/src/infra/exec-host.test.ts @@ -0,0 +1,109 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const requestJsonlSocketMock = vi.hoisted(() => vi.fn()); + +vi.mock("./jsonl-socket.js", () => ({ + requestJsonlSocket: (...args: unknown[]) => requestJsonlSocketMock(...args), +})); + +import { requestExecHostViaSocket } from "./exec-host.js"; + +describe("requestExecHostViaSocket", () => { + beforeEach(() => { + requestJsonlSocketMock.mockReset(); + }); + + it("returns null when socket credentials are missing", async () => { + await expect( + requestExecHostViaSocket({ + socketPath: "", + token: "secret", + request: { command: ["echo", "hi"] }, + }), + ).resolves.toBeNull(); + await expect( + requestExecHostViaSocket({ + socketPath: "/tmp/socket", + token: "", + request: { command: ["echo", "hi"] }, + }), + ).resolves.toBeNull(); + expect(requestJsonlSocketMock).not.toHaveBeenCalled(); + }); + + it("builds an exec payload and forwards the default timeout", async () => { + requestJsonlSocketMock.mockResolvedValueOnce({ ok: true, payload: { success: true } }); + + await expect( + requestExecHostViaSocket({ + socketPath: "/tmp/socket", + token: "secret", + request: { + command: ["echo", "hi"], + cwd: "/tmp", + }, + }), + ).resolves.toEqual({ ok: true, payload: { success: true } }); + + const call = requestJsonlSocketMock.mock.calls[0]?.[0] as + | { + socketPath: string; + payload: string; + timeoutMs: number; + accept: (msg: unknown) => unknown; + } + | undefined; + if (!call) { + throw new Error("expected requestJsonlSocket call"); + } + + expect(call.socketPath).toBe("/tmp/socket"); + expect(call.timeoutMs).toBe(20_000); + 
const payload = JSON.parse(call.payload) as { + type: string; + id: string; + nonce: string; + ts: number; + hmac: string; + requestJson: string; + }; + expect(payload.type).toBe("exec"); + expect(payload.id).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i); + expect(payload.nonce).toMatch(/^[0-9a-f]{32}$/); + expect(typeof payload.ts).toBe("number"); + expect(payload.hmac).toMatch(/^[0-9a-f]{64}$/); + expect(JSON.parse(payload.requestJson)).toEqual({ + command: ["echo", "hi"], + cwd: "/tmp", + }); + }); + + it("accepts only exec response messages and maps malformed matches to null", async () => { + requestJsonlSocketMock.mockImplementationOnce(async ({ accept }) => { + expect(accept({ type: "ignore" })).toBeUndefined(); + expect(accept({ type: "exec-res", ok: true, payload: { success: true } })).toEqual({ + ok: true, + payload: { success: true }, + }); + expect(accept({ type: "exec-res", ok: false, error: { code: "DENIED" } })).toEqual({ + ok: false, + error: { code: "DENIED" }, + }); + expect(accept({ type: "exec-res", ok: true })).toBeNull(); + return null; + }); + + await expect( + requestExecHostViaSocket({ + socketPath: "/tmp/socket", + token: "secret", + timeoutMs: 123, + request: { command: ["echo", "hi"] }, + }), + ).resolves.toBeNull(); + + expect( + (requestJsonlSocketMock.mock.calls[0]?.[0] as { timeoutMs?: number } | undefined)?.timeoutMs, + ).toBe(123); + }); +}); diff --git a/src/infra/exec-safety.test.ts b/src/infra/exec-safety.test.ts new file mode 100644 index 00000000000..96dcdba357e --- /dev/null +++ b/src/infra/exec-safety.test.ts @@ -0,0 +1,22 @@ +import { describe, expect, it } from "vitest"; +import { isSafeExecutableValue } from "./exec-safety.js"; + +describe("isSafeExecutableValue", () => { + it("accepts bare executable names and likely paths", () => { + expect(isSafeExecutableValue("node")).toBe(true); + expect(isSafeExecutableValue("/usr/bin/node")).toBe(true); + 
expect(isSafeExecutableValue("./bin/openclaw")).toBe(true); + expect(isSafeExecutableValue("C:\\Tools\\openclaw.exe")).toBe(true); + expect(isSafeExecutableValue(" tool ")).toBe(true); + }); + + it("rejects blanks, flags, shell metacharacters, quotes, and control chars", () => { + expect(isSafeExecutableValue(undefined)).toBe(false); + expect(isSafeExecutableValue(" ")).toBe(false); + expect(isSafeExecutableValue("-rf")).toBe(false); + expect(isSafeExecutableValue("node;rm -rf /")).toBe(false); + expect(isSafeExecutableValue('node "arg"')).toBe(false); + expect(isSafeExecutableValue("node\nnext")).toBe(false); + expect(isSafeExecutableValue("node\0")).toBe(false); + }); +}); diff --git a/src/infra/exec-wrapper-resolution.test.ts b/src/infra/exec-wrapper-resolution.test.ts index 58f1e696c22..001d0ca2514 100644 --- a/src/infra/exec-wrapper-resolution.test.ts +++ b/src/infra/exec-wrapper-resolution.test.ts @@ -1,9 +1,15 @@ import { describe, expect, test } from "vitest"; import { basenameLower, + extractShellWrapperCommand, + extractShellWrapperInlineCommand, + hasEnvManipulationBeforeShellWrapper, isDispatchWrapperExecutable, isShellWrapperExecutable, normalizeExecutableToken, + resolveDispatchWrapperExecutionPlan, + unwrapEnvInvocation, + unwrapKnownDispatchWrapperInvocation, unwrapKnownShellMultiplexerInvocation, } from "./exec-wrapper-resolution.js"; @@ -66,3 +72,157 @@ describe("unwrapKnownShellMultiplexerInvocation", () => { expect(unwrapKnownShellMultiplexerInvocation(argv)).toEqual(expected); }); }); + +describe("unwrapEnvInvocation", () => { + test.each([ + { + argv: ["env", "FOO=bar", "bash", "-lc", "echo hi"], + expected: ["bash", "-lc", "echo hi"], + }, + { + argv: ["env", "-i", "--unset", "PATH", "--", "sh", "-lc", "echo hi"], + expected: ["sh", "-lc", "echo hi"], + }, + { + argv: ["env", "--chdir=/tmp", "pwsh", "-Command", "Get-Date"], + expected: ["pwsh", "-Command", "Get-Date"], + }, + { + argv: ["env", "-", "bash", "-lc", "echo hi"], + expected: 
["bash", "-lc", "echo hi"], + }, + { + argv: ["env", "--bogus", "bash", "-lc", "echo hi"], + expected: null, + }, + { + argv: ["env", "--unset"], + expected: null, + }, + ])("unwraps env invocations for %j", ({ argv, expected }) => { + expect(unwrapEnvInvocation(argv)).toEqual(expected); + }); +}); + +describe("unwrapKnownDispatchWrapperInvocation", () => { + test.each([ + { + argv: ["nice", "-n", "5", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "nice", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["nohup", "--", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "nohup", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["stdbuf", "-o", "L", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "stdbuf", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["timeout", "--signal=TERM", "5s", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "timeout", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["sudo", "bash", "-lc", "echo hi"], + expected: { kind: "blocked", wrapper: "sudo" }, + }, + { + argv: ["timeout", "--bogus", "5s", "bash", "-lc", "echo hi"], + expected: { kind: "blocked", wrapper: "timeout" }, + }, + ])("unwraps known dispatch wrappers for %j", ({ argv, expected }) => { + expect(unwrapKnownDispatchWrapperInvocation(argv)).toEqual(expected); + }); +}); + +describe("resolveDispatchWrapperExecutionPlan", () => { + test("unwraps transparent wrapper chains", () => { + expect( + resolveDispatchWrapperExecutionPlan(["nohup", "nice", "-n", "5", "bash", "-lc", "echo hi"]), + ).toEqual({ + argv: ["bash", "-lc", "echo hi"], + wrappers: ["nohup", "nice"], + policyBlocked: false, + }); + }); + + test("blocks semantic env usage even when it reaches a shell wrapper", () => { + expect( + resolveDispatchWrapperExecutionPlan(["env", "FOO=bar", "bash", "-lc", "echo hi"]), + ).toEqual({ + argv: ["env", "FOO=bar", "bash", "-lc", "echo hi"], + wrappers: ["env"], + policyBlocked: 
true, + blockedWrapper: "env", + }); + }); + + test("blocks wrapper overflow beyond the configured depth", () => { + expect( + resolveDispatchWrapperExecutionPlan(["nohup", "timeout", "5s", "bash", "-lc", "echo hi"], 1), + ).toEqual({ + argv: ["timeout", "5s", "bash", "-lc", "echo hi"], + wrappers: ["nohup"], + policyBlocked: true, + blockedWrapper: "timeout", + }); + }); +}); + +describe("hasEnvManipulationBeforeShellWrapper", () => { + test.each([ + { + argv: ["env", "FOO=bar", "bash", "-lc", "echo hi"], + expected: true, + }, + { + argv: ["timeout", "5s", "env", "--", "bash", "-lc", "echo hi"], + expected: false, + }, + { + argv: ["timeout", "5s", "env", "FOO=bar", "bash", "-lc", "echo hi"], + expected: true, + }, + { + argv: ["sudo", "bash", "-lc", "echo hi"], + expected: false, + }, + ])("detects env manipulation before shell wrappers for %j", ({ argv, expected }) => { + expect(hasEnvManipulationBeforeShellWrapper(argv)).toBe(expected); + }); +}); + +describe("extractShellWrapperCommand", () => { + test.each([ + { + argv: ["bash", "-lc", "echo hi"], + expectedInline: "echo hi", + expectedCommand: { isWrapper: true, command: "echo hi" }, + }, + { + argv: ["busybox", "sh", "-lc", "echo hi"], + expectedInline: "echo hi", + expectedCommand: { isWrapper: true, command: "echo hi" }, + }, + { + argv: ["env", "--", "pwsh", "-Command", "Get-Date"], + expectedInline: "Get-Date", + expectedCommand: { isWrapper: true, command: "Get-Date" }, + }, + { + argv: ["bash", "script.sh"], + expectedInline: null, + expectedCommand: { isWrapper: false, command: null }, + }, + ])("extracts inline commands for %j", ({ argv, expectedInline, expectedCommand }) => { + expect(extractShellWrapperInlineCommand(argv)).toBe(expectedInline); + expect(extractShellWrapperCommand(argv)).toEqual(expectedCommand); + }); + + test("prefers an explicit raw command override when provided", () => { + expect(extractShellWrapperCommand(["bash", "-lc", "echo hi"], " run this instead ")).toEqual({ + 
isWrapper: true, + command: "run this instead", + }); + }); +}); diff --git a/src/infra/executable-path.test.ts b/src/infra/executable-path.test.ts new file mode 100644 index 00000000000..731457ab183 --- /dev/null +++ b/src/infra/executable-path.test.ts @@ -0,0 +1,50 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { + isExecutableFile, + resolveExecutableFromPathEnv, + resolveExecutablePath, +} from "./executable-path.js"; + +describe("executable path helpers", () => { + it("detects executable files and rejects directories or non-executables", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-exec-path-")); + const execPath = path.join(base, "tool"); + const filePath = path.join(base, "plain.txt"); + const dirPath = path.join(base, "dir"); + await fs.writeFile(execPath, "#!/bin/sh\nexit 0\n", "utf8"); + await fs.chmod(execPath, 0o755); + await fs.writeFile(filePath, "nope", "utf8"); + await fs.mkdir(dirPath); + + expect(isExecutableFile(execPath)).toBe(true); + expect(isExecutableFile(filePath)).toBe(false); + expect(isExecutableFile(dirPath)).toBe(false); + expect(isExecutableFile(path.join(base, "missing"))).toBe(false); + }); + + it("resolves executables from PATH entries and cwd-relative paths", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-exec-path-")); + const binDir = path.join(base, "bin"); + const cwd = path.join(base, "cwd"); + await fs.mkdir(binDir, { recursive: true }); + await fs.mkdir(cwd, { recursive: true }); + + const pathTool = path.join(binDir, "runner"); + const cwdTool = path.join(cwd, "local-tool"); + await fs.writeFile(pathTool, "#!/bin/sh\nexit 0\n", "utf8"); + await fs.writeFile(cwdTool, "#!/bin/sh\nexit 0\n", "utf8"); + await fs.chmod(pathTool, 0o755); + await fs.chmod(cwdTool, 0o755); + + expect(resolveExecutableFromPathEnv("runner", 
`${binDir}${path.delimiter}/usr/bin`)).toBe( + pathTool, + ); + expect(resolveExecutableFromPathEnv("missing", binDir)).toBeUndefined(); + expect(resolveExecutablePath("./local-tool", { cwd })).toBe(cwdTool); + expect(resolveExecutablePath("runner", { env: { PATH: binDir } })).toBe(pathTool); + expect(resolveExecutablePath("missing", { env: { PATH: binDir } })).toBeUndefined(); + }); +}); diff --git a/src/infra/format-time/format-datetime.ts b/src/infra/format-time/format-datetime.ts index d7ed13f5c24..37cdf713f8d 100644 --- a/src/infra/format-time/format-datetime.ts +++ b/src/infra/format-time/format-datetime.ts @@ -59,36 +59,40 @@ export function formatZonedTimestamp( date: Date, options?: FormatZonedTimestampOptions, ): string | undefined { - const intlOptions: Intl.DateTimeFormatOptions = { - timeZone: options?.timeZone, - year: "numeric", - month: "2-digit", - day: "2-digit", - hour: "2-digit", - minute: "2-digit", - hourCycle: "h23", - timeZoneName: "short", - }; - if (options?.displaySeconds) { - intlOptions.second = "2-digit"; - } - const parts = new Intl.DateTimeFormat("en-US", intlOptions).formatToParts(date); - const pick = (type: string) => parts.find((part) => part.type === type)?.value; - const yyyy = pick("year"); - const mm = pick("month"); - const dd = pick("day"); - const hh = pick("hour"); - const min = pick("minute"); - const sec = options?.displaySeconds ? 
pick("second") : undefined; - const tz = [...parts] - .toReversed() - .find((part) => part.type === "timeZoneName") - ?.value?.trim(); - if (!yyyy || !mm || !dd || !hh || !min) { + try { + const intlOptions: Intl.DateTimeFormatOptions = { + timeZone: options?.timeZone, + year: "numeric", + month: "2-digit", + day: "2-digit", + hour: "2-digit", + minute: "2-digit", + hourCycle: "h23", + timeZoneName: "short", + }; + if (options?.displaySeconds) { + intlOptions.second = "2-digit"; + } + const parts = new Intl.DateTimeFormat("en-US", intlOptions).formatToParts(date); + const pick = (type: string) => parts.find((part) => part.type === type)?.value; + const yyyy = pick("year"); + const mm = pick("month"); + const dd = pick("day"); + const hh = pick("hour"); + const min = pick("minute"); + const sec = options?.displaySeconds ? pick("second") : undefined; + const tz = [...parts] + .toReversed() + .find((part) => part.type === "timeZoneName") + ?.value?.trim(); + if (!yyyy || !mm || !dd || !hh || !min) { + return undefined; + } + if (options?.displaySeconds && sec) { + return `${yyyy}-${mm}-${dd} ${hh}:${min}:${sec}${tz ? ` ${tz}` : ""}`; + } + return `${yyyy}-${mm}-${dd} ${hh}:${min}${tz ? ` ${tz}` : ""}`; + } catch { return undefined; } - if (options?.displaySeconds && sec) { - return `${yyyy}-${mm}-${dd} ${hh}:${min}:${sec}${tz ? ` ${tz}` : ""}`; - } - return `${yyyy}-${mm}-${dd} ${hh}:${min}${tz ? 
` ${tz}` : ""}`; } diff --git a/src/infra/format-time/format-time.test.ts b/src/infra/format-time/format-time.test.ts index 22ae60dcc6d..f3fddff7f6d 100644 --- a/src/infra/format-time/format-time.test.ts +++ b/src/infra/format-time/format-time.test.ts @@ -8,6 +8,12 @@ import { } from "./format-duration.js"; import { formatTimeAgo, formatRelativeTimestamp } from "./format-relative.js"; +const invalidDurationInputs = [null, undefined, -100] as const; + +afterEach(() => { + vi.restoreAllMocks(); +}); + describe("format-duration", () => { describe("formatDurationCompact", () => { it("returns undefined for null/undefined/non-positive", () => { @@ -55,7 +61,7 @@ describe("format-duration", () => { describe("formatDurationHuman", () => { it("returns fallback for invalid duration input", () => { - for (const value of [null, undefined, -100]) { + for (const value of invalidDurationInputs) { expect(formatDurationHuman(value)).toBe("n/a"); } expect(formatDurationHuman(null, "unknown")).toBe("unknown"); @@ -106,6 +112,12 @@ describe("format-duration", () => { it("supports seconds unit", () => { expect(formatDurationSeconds(2000, { unit: "seconds" })).toBe("2 seconds"); }); + + it("clamps negative values and rejects non-finite input", () => { + expect(formatDurationSeconds(-1500, { decimals: 1 })).toBe("0s"); + expect(formatDurationSeconds(NaN)).toBe("unknown"); + expect(formatDurationSeconds(Infinity)).toBe("unknown"); + }); }); }); @@ -152,13 +164,52 @@ describe("format-datetime", () => { const result = formatZonedTimestamp(date, options); expect(result).toMatch(expected); }); + + it("returns undefined when required Intl parts are missing", () => { + function MissingPartsDateTimeFormat() { + return { + formatToParts: () => [ + { type: "month", value: "01" }, + { type: "day", value: "15" }, + { type: "hour", value: "14" }, + { type: "minute", value: "30" }, + ], + } as Intl.DateTimeFormat; + } + + vi.spyOn(Intl, "DateTimeFormat").mockImplementation( + 
MissingPartsDateTimeFormat as unknown as typeof Intl.DateTimeFormat, + ); + + expect(formatZonedTimestamp(new Date("2024-01-15T14:30:00.000Z"), { timeZone: "UTC" })).toBe( + undefined, + ); + }); + + it("returns undefined when Intl formatting throws", () => { + function ThrowingDateTimeFormat() { + return { + formatToParts: () => { + throw new Error("boom"); + }, + } as unknown as Intl.DateTimeFormat; + } + + vi.spyOn(Intl, "DateTimeFormat").mockImplementation( + ThrowingDateTimeFormat as unknown as typeof Intl.DateTimeFormat, + ); + + expect(formatZonedTimestamp(new Date("2024-01-15T14:30:00.000Z"), { timeZone: "UTC" })).toBe( + undefined, + ); + }); }); }); describe("format-relative", () => { describe("formatTimeAgo", () => { it("returns fallback for invalid elapsed input", () => { - for (const value of [null, undefined, -100]) { + for (const value of invalidDurationInputs) { expect(formatTimeAgo(value)).toBe("unknown"); } expect(formatTimeAgo(null, { fallback: "n/a" })).toBe("n/a"); @@ -240,5 +291,14 @@ describe("format-relative", () => { ])("$name", ({ offsetMs, options, expected }) => { expect(formatRelativeTimestamp(Date.now() + offsetMs, options)).toBe(expected); }); + + it("falls back to relative days when date formatting throws", () => { + expect( + formatRelativeTimestamp(Date.now() - 8 * 24 * 3600000, { + dateFallback: true, + timezone: "Invalid/Timezone", + }), + ).toBe("8d ago"); + }); }); }); diff --git a/src/infra/gateway-process-argv.test.ts b/src/infra/gateway-process-argv.test.ts new file mode 100644 index 00000000000..f3570316860 --- /dev/null +++ b/src/infra/gateway-process-argv.test.ts @@ -0,0 +1,34 @@ +import { describe, expect, it } from "vitest"; +import { isGatewayArgv, parseProcCmdline } from "./gateway-process-argv.js"; + +describe("parseProcCmdline", () => { + it("splits null-delimited argv and trims empty entries", () => { + expect(parseProcCmdline(" node \0 gateway \0\0 --port \0 18789 \0")).toEqual([ + "node", + "gateway", + "--port", 
+ "18789", + ]); + }); +}); + +describe("isGatewayArgv", () => { + it("requires a gateway token", () => { + expect(isGatewayArgv(["node", "dist/index.js", "--port", "18789"])).toBe(false); + }); + + it("matches known entrypoints across slash and case variants", () => { + expect(isGatewayArgv(["NODE", "C:\\OpenClaw\\DIST\\ENTRY.JS", "gateway"])).toBe(true); + expect(isGatewayArgv(["bun", "/srv/openclaw/scripts/run-node.mjs", "gateway"])).toBe(true); + }); + + it("matches the openclaw executable but gates the gateway binary behind the opt-in flag", () => { + expect(isGatewayArgv(["C:\\bin\\openclaw.cmd", "gateway"])).toBe(true); + expect(isGatewayArgv(["/usr/local/bin/openclaw-gateway", "gateway"])).toBe(false); + expect( + isGatewayArgv(["/usr/local/bin/openclaw-gateway", "gateway"], { + allowGatewayBinary: true, + }), + ).toBe(true); + }); +}); diff --git a/src/infra/gateway-processes.test.ts b/src/infra/gateway-processes.test.ts new file mode 100644 index 00000000000..5eb2fbd1113 --- /dev/null +++ b/src/infra/gateway-processes.test.ts @@ -0,0 +1,165 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const spawnSyncMock = vi.hoisted(() => vi.fn()); +const readFileSyncMock = vi.hoisted(() => vi.fn()); +const parseCmdScriptCommandLineMock = vi.hoisted(() => vi.fn()); +const parseProcCmdlineMock = vi.hoisted(() => vi.fn()); +const isGatewayArgvMock = vi.hoisted(() => vi.fn()); +const findGatewayPidsOnPortSyncMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawnSync: (...args: unknown[]) => spawnSyncMock(...args), +})); + +vi.mock("node:fs", () => ({ + default: { + readFileSync: (...args: unknown[]) => readFileSyncMock(...args), + }, +})); + +vi.mock("../daemon/cmd-argv.js", () => ({ + parseCmdScriptCommandLine: (...args: unknown[]) => parseCmdScriptCommandLineMock(...args), +})); + +vi.mock("./gateway-process-argv.js", () => ({ + parseProcCmdline: (...args: unknown[]) => parseProcCmdlineMock(...args), + 
isGatewayArgv: (...args: unknown[]) => isGatewayArgvMock(...args), +})); + +vi.mock("./restart-stale-pids.js", () => ({ + findGatewayPidsOnPortSync: (...args: unknown[]) => findGatewayPidsOnPortSyncMock(...args), +})); + +const { + findVerifiedGatewayListenerPidsOnPortSync, + formatGatewayPidList, + readGatewayProcessArgsSync, + signalVerifiedGatewayPidSync, +} = await import("./gateway-processes.js"); + +const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + +function setPlatform(platform: NodeJS.Platform): void { + Object.defineProperty(process, "platform", { + value: platform, + configurable: true, + }); +} + +describe("gateway-processes", () => { + beforeEach(() => { + spawnSyncMock.mockReset(); + readFileSyncMock.mockReset(); + parseCmdScriptCommandLineMock.mockReset(); + parseProcCmdlineMock.mockReset(); + isGatewayArgvMock.mockReset(); + findGatewayPidsOnPortSyncMock.mockReset(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + + it("reads linux process args from /proc and parses cmdlines", () => { + setPlatform("linux"); + readFileSyncMock.mockReturnValue("node\0dist/index.js\0gateway\0run\0"); + parseProcCmdlineMock.mockReturnValue(["node", "dist/index.js", "gateway", "run"]); + + expect(readGatewayProcessArgsSync(4242)).toEqual(["node", "dist/index.js", "gateway", "run"]); + expect(readFileSyncMock).toHaveBeenCalledWith("/proc/4242/cmdline", "utf8"); + expect(parseProcCmdlineMock).toHaveBeenCalledWith("node\0dist/index.js\0gateway\0run\0"); + }); + + it("reads darwin process args from ps output and returns null on ps failure", () => { + setPlatform("darwin"); + spawnSyncMock + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "node /repo/dist/index.js gateway run\n", + }) + .mockReturnValueOnce({ + error: null, + status: 1, + stdout: "", + }); + + 
expect(readGatewayProcessArgsSync(123)).toEqual([ + "node", + "/repo/dist/index.js", + "gateway", + "run", + ]); + expect(readGatewayProcessArgsSync(124)).toBeNull(); + }); + + it("falls back from powershell to wmic for windows process args", () => { + setPlatform("win32"); + spawnSyncMock + .mockReturnValueOnce({ + error: new Error("powershell missing"), + status: null, + stdout: "", + }) + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "CommandLine=node.exe gateway run\r\n", + }); + parseCmdScriptCommandLineMock.mockReturnValue(["node.exe", "gateway", "run"]); + + expect(readGatewayProcessArgsSync(77)).toEqual(["node.exe", "gateway", "run"]); + expect(parseCmdScriptCommandLineMock).toHaveBeenCalledWith("node.exe gateway run"); + }); + + it("signals only verified gateway processes", () => { + setPlatform("linux"); + readFileSyncMock.mockReturnValue("node\0gateway\0"); + parseProcCmdlineMock.mockReturnValue(["node", "gateway"]); + isGatewayArgvMock.mockReturnValueOnce(true).mockReturnValueOnce(false); + const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); + + signalVerifiedGatewayPidSync(500, "SIGTERM"); + expect(killSpy).toHaveBeenCalledWith(500, "SIGTERM"); + + expect(() => signalVerifiedGatewayPidSync(501, "SIGUSR1")).toThrow( + /refusing to signal non-gateway process pid 501/, + ); + }); + + it("dedupes and filters verified gateway listener pids on unix and windows", () => { + setPlatform("linux"); + findGatewayPidsOnPortSyncMock.mockReturnValue([process.pid, 200, 200, 300, -1]); + readFileSyncMock.mockReturnValueOnce("openclaw-gateway\0gateway\0"); + readFileSyncMock.mockReturnValueOnce("python\0-m\0http.server\0"); + parseProcCmdlineMock + .mockReturnValueOnce(["openclaw-gateway", "gateway"]) + .mockReturnValueOnce(["python", "-m", "http.server"]); + isGatewayArgvMock.mockReturnValueOnce(true).mockReturnValueOnce(false); + + expect(findVerifiedGatewayListenerPidsOnPortSync(18789)).toEqual([200]); + setPlatform("win32"); + 
spawnSyncMock + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "200\r\n200\r\n0\r\n", + }) + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "node.exe gateway run", + }); + parseCmdScriptCommandLineMock.mockReturnValue(["node.exe", "gateway", "run"]); + isGatewayArgvMock.mockReturnValue(true); + + expect(findVerifiedGatewayListenerPidsOnPortSync(18789)).toEqual([200]); + }); + + it("formats pid lists as comma-separated output", () => { + expect(formatGatewayPidList([1, 2, 3])).toBe("1, 2, 3"); + }); +}); diff --git a/src/infra/gemini-auth.test.ts b/src/infra/gemini-auth.test.ts new file mode 100644 index 00000000000..6e496f75ec2 --- /dev/null +++ b/src/infra/gemini-auth.test.ts @@ -0,0 +1,34 @@ +import { describe, expect, it } from "vitest"; +import { parseGeminiAuth } from "./gemini-auth.js"; + +describe("parseGeminiAuth", () => { + it("returns bearer auth for OAuth JSON tokens", () => { + expect(parseGeminiAuth('{"token":"oauth-token","projectId":"demo"}')).toEqual({ + headers: { + Authorization: "Bearer oauth-token", + "Content-Type": "application/json", + }, + }); + }); + + it("falls back to API key auth for invalid or unusable OAuth payloads", () => { + expect(parseGeminiAuth('{"token":"","projectId":"demo"}')).toEqual({ + headers: { + "x-goog-api-key": '{"token":"","projectId":"demo"}', + "Content-Type": "application/json", + }, + }); + expect(parseGeminiAuth("{not-json}")).toEqual({ + headers: { + "x-goog-api-key": "{not-json}", + "Content-Type": "application/json", + }, + }); + expect(parseGeminiAuth(' {"token":"oauth-token"}')).toEqual({ + headers: { + "x-goog-api-key": ' {"token":"oauth-token"}', + "Content-Type": "application/json", + }, + }); + }); +}); diff --git a/src/infra/hardlink-guards.test.ts b/src/infra/hardlink-guards.test.ts new file mode 100644 index 00000000000..e96d826c1d8 --- /dev/null +++ b/src/infra/hardlink-guards.test.ts @@ -0,0 +1,67 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import 
path from "node:path"; +import { describe, expect, it, vi } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { assertNoHardlinkedFinalPath } from "./hardlink-guards.js"; + +describe("assertNoHardlinkedFinalPath", () => { + it("allows missing paths, directories, and explicit unlink opt-in", async () => { + await withTempDir({ prefix: "openclaw-hardlink-guards-" }, async (root) => { + const dirPath = path.join(root, "dir"); + await fs.mkdir(dirPath); + + await expect( + assertNoHardlinkedFinalPath({ + filePath: path.join(root, "missing.txt"), + root, + boundaryLabel: "workspace", + }), + ).resolves.toBeUndefined(); + + await expect( + assertNoHardlinkedFinalPath({ + filePath: dirPath, + root, + boundaryLabel: "workspace", + }), + ).resolves.toBeUndefined(); + + const source = path.join(root, "source.txt"); + const linked = path.join(root, "linked.txt"); + await fs.writeFile(source, "hello", "utf8"); + await fs.link(source, linked); + + await expect( + assertNoHardlinkedFinalPath({ + filePath: linked, + root, + boundaryLabel: "workspace", + allowFinalHardlinkForUnlink: true, + }), + ).resolves.toBeUndefined(); + }); + }); + + it("rejects hardlinked files and shortens home-relative paths in the error", async () => { + await withTempDir({ prefix: "openclaw-hardlink-guards-" }, async (root) => { + const source = path.join(root, "source.txt"); + const linked = path.join(root, "linked.txt"); + await fs.writeFile(source, "hello", "utf8"); + await fs.link(source, linked); + const homedirSpy = vi.spyOn(os, "homedir").mockReturnValue(root); + + try { + await expect( + assertNoHardlinkedFinalPath({ + filePath: linked, + root, + boundaryLabel: "workspace", + }), + ).rejects.toThrow("Hardlinked path is not allowed under workspace (~): ~/linked.txt"); + } finally { + homedirSpy.mockRestore(); + } + }); + }); +}); diff --git a/src/infra/heartbeat-events.test.ts b/src/infra/heartbeat-events.test.ts new file mode 100644 index 00000000000..d1583f8080a 
--- /dev/null +++ b/src/infra/heartbeat-events.test.ts @@ -0,0 +1,59 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + emitHeartbeatEvent, + getLastHeartbeatEvent, + onHeartbeatEvent, + resolveIndicatorType, +} from "./heartbeat-events.js"; + +describe("resolveIndicatorType", () => { + it("maps heartbeat statuses to indicator types", () => { + expect(resolveIndicatorType("ok-empty")).toBe("ok"); + expect(resolveIndicatorType("ok-token")).toBe("ok"); + expect(resolveIndicatorType("sent")).toBe("alert"); + expect(resolveIndicatorType("failed")).toBe("error"); + expect(resolveIndicatorType("skipped")).toBeUndefined(); + }); +}); + +describe("heartbeat events", () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-01-09T12:00:00Z")); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("stores the last event and timestamps emitted payloads", () => { + emitHeartbeatEvent({ status: "sent", to: "+123", preview: "ping" }); + + expect(getLastHeartbeatEvent()).toEqual({ + ts: 1767960000000, + status: "sent", + to: "+123", + preview: "ping", + }); + }); + + it("delivers events to listeners, isolates listener failures, and supports unsubscribe", () => { + const seen: string[] = []; + const unsubscribeFirst = onHeartbeatEvent((evt) => { + seen.push(`first:${evt.status}`); + }); + onHeartbeatEvent(() => { + throw new Error("boom"); + }); + const unsubscribeThird = onHeartbeatEvent((evt) => { + seen.push(`third:${evt.status}`); + }); + + emitHeartbeatEvent({ status: "ok-empty" }); + unsubscribeFirst(); + unsubscribeThird(); + emitHeartbeatEvent({ status: "failed" }); + + expect(seen).toEqual(["first:ok-empty", "third:ok-empty"]); + }); +}); diff --git a/src/infra/host-env-security.test.ts b/src/infra/host-env-security.test.ts index 08f1a3d65fb..acb756b62a2 100644 --- a/src/infra/host-env-security.test.ts +++ b/src/infra/host-env-security.test.ts @@ -12,6 +12,30 @@ import { } from 
"./host-env-security.js"; import { OPENCLAW_CLI_ENV_VALUE } from "./openclaw-exec-env.js"; +function getSystemGitPath() { + if (process.platform === "win32") { + return null; + } + const gitPath = "/usr/bin/git"; + return fs.existsSync(gitPath) ? gitPath : null; +} + +function clearMarker(marker: string) { + try { + fs.unlinkSync(marker); + } catch { + // no-op + } +} + +async function runGitLsRemote(gitPath: string, target: string, env: NodeJS.ProcessEnv) { + await new Promise((resolve) => { + const child = spawn(gitPath, ["ls-remote", target], { env, stdio: "ignore" }); + child.once("error", () => resolve()); + child.once("close", () => resolve()); + }); +} + describe("isDangerousHostEnvVarName", () => { it("matches dangerous keys and prefixes case-insensitively", () => { expect(isDangerousHostEnvVarName("BASH_ENV")).toBe(true); @@ -120,6 +144,39 @@ describe("sanitizeHostExecEnv", () => { expect(env[" BAD KEY"]).toBeUndefined(); expect(env["NOT-PORTABLE"]).toBeUndefined(); }); + + it("can allow PATH overrides when explicitly opted out of blocking", () => { + const env = sanitizeHostExecEnv({ + baseEnv: { + PATH: "/usr/bin:/bin", + }, + overrides: { + PATH: "/custom/bin", + }, + blockPathOverrides: false, + }); + + expect(env.PATH).toBe("/custom/bin"); + expect(env.OPENCLAW_CLI).toBe(OPENCLAW_CLI_ENV_VALUE); + }); + + it("drops non-string inherited values and non-portable inherited keys", () => { + const env = sanitizeHostExecEnv({ + baseEnv: { + PATH: "/usr/bin:/bin", + GOOD: "1", + // oxlint-disable-next-line typescript/no-explicit-any + BAD_NUMBER: 1 as any, + "NOT-PORTABLE": "x", + }, + }); + + expect(env).toEqual({ + OPENCLAW_CLI: OPENCLAW_CLI_ENV_VALUE, + PATH: "/usr/bin:/bin", + GOOD: "1", + }); + }); }); describe("isDangerousHostEnvOverrideVarName", () => { @@ -174,6 +231,33 @@ describe("sanitizeSystemRunEnvOverrides", () => { LC_ALL: "C", }); }); + + it("returns undefined when no shell-wrapper overrides survive", () => { + expect( + 
sanitizeSystemRunEnvOverrides({ + shellWrapper: true, + overrides: { + TOKEN: "abc", + }, + }), + ).toBeUndefined(); + expect(sanitizeSystemRunEnvOverrides({ shellWrapper: true })).toBeUndefined(); + }); + + it("keeps allowlisted shell-wrapper overrides case-insensitively", () => { + expect( + sanitizeSystemRunEnvOverrides({ + shellWrapper: true, + overrides: { + lang: "C", + ColorTerm: "truecolor", + }, + }), + ).toEqual({ + lang: "C", + ColorTerm: "truecolor", + }); + }); }); describe("shell wrapper exploit regression", () => { @@ -215,11 +299,8 @@ describe("shell wrapper exploit regression", () => { describe("git env exploit regression", () => { it("blocks inherited GIT_EXEC_PATH so git cannot execute helper payloads", async () => { - if (process.platform === "win32") { - return; - } - const gitPath = "/usr/bin/git"; - if (!fs.existsSync(gitPath)) { + const gitPath = getSystemGitPath(); + if (!gitPath) { return; } @@ -232,11 +313,7 @@ describe("git env exploit regression", () => { `openclaw-git-exec-path-marker-${process.pid}-${Date.now()}`, ); try { - try { - fs.unlinkSync(marker); - } catch { - // no-op - } + clearMarker(marker); fs.writeFileSync(helperPath, `#!/bin/sh\ntouch ${JSON.stringify(marker)}\nexit 1\n`, "utf8"); fs.chmodSync(helperPath, 0o755); @@ -247,24 +324,16 @@ describe("git env exploit regression", () => { GIT_TERMINAL_PROMPT: "0", }; - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: unsafeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, unsafeEnv); expect(fs.existsSync(marker)).toBe(true); - fs.unlinkSync(marker); + clearMarker(marker); const safeEnv = sanitizeHostExecEnv({ baseEnv: unsafeEnv, }); - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: safeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); 
- }); + await runGitLsRemote(gitPath, target, safeEnv); expect(fs.existsSync(marker)).toBe(false); } finally { @@ -274,20 +343,13 @@ describe("git env exploit regression", () => { }); it("blocks GIT_SSH_COMMAND override so git cannot execute helper payloads", async () => { - if (process.platform === "win32") { - return; - } - const gitPath = "/usr/bin/git"; - if (!fs.existsSync(gitPath)) { + const gitPath = getSystemGitPath(); + if (!gitPath) { return; } const marker = path.join(os.tmpdir(), `openclaw-git-ssh-command-${process.pid}-${Date.now()}`); - try { - fs.unlinkSync(marker); - } catch { - // no-op - } + clearMarker(marker); const target = "ssh://127.0.0.1:1/does-not-matter"; const exploitValue = `touch ${JSON.stringify(marker)}; false`; @@ -301,14 +363,10 @@ describe("git env exploit regression", () => { GIT_SSH_COMMAND: exploitValue, }; - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: unsafeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, unsafeEnv); expect(fs.existsSync(marker)).toBe(true); - fs.unlinkSync(marker); + clearMarker(marker); const safeEnv = sanitizeHostExecEnv({ baseEnv, @@ -317,11 +375,7 @@ describe("git env exploit regression", () => { }, }); - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: safeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, safeEnv); expect(fs.existsSync(marker)).toBe(false); }); diff --git a/src/infra/infra-parsing.test.ts b/src/infra/infra-parsing.test.ts deleted file mode 100644 index 10590c96790..00000000000 --- a/src/infra/infra-parsing.test.ts +++ /dev/null @@ -1,166 +0,0 @@ -import { describe, expect, it } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; -import { isDiagnosticFlagEnabled, 
resolveDiagnosticFlags } from "./diagnostic-flags.js"; -import { isMainModule } from "./is-main.js"; -import { buildNodeShellCommand } from "./node-shell.js"; -import { parseSshTarget } from "./ssh-tunnel.js"; - -describe("infra parsing", () => { - describe("diagnostic flags", () => { - it("merges config + env flags", () => { - const cfg = { - diagnostics: { flags: ["telegram.http", "cache.*"] }, - } as OpenClawConfig; - const env = { - OPENCLAW_DIAGNOSTICS: "foo,bar", - } as NodeJS.ProcessEnv; - - const flags = resolveDiagnosticFlags(cfg, env); - expect(flags).toEqual(expect.arrayContaining(["telegram.http", "cache.*", "foo", "bar"])); - expect(isDiagnosticFlagEnabled("telegram.http", cfg, env)).toBe(true); - expect(isDiagnosticFlagEnabled("cache.hit", cfg, env)).toBe(true); - expect(isDiagnosticFlagEnabled("foo", cfg, env)).toBe(true); - }); - - it("treats env true as wildcard", () => { - const env = { OPENCLAW_DIAGNOSTICS: "1" } as NodeJS.ProcessEnv; - expect(isDiagnosticFlagEnabled("anything.here", undefined, env)).toBe(true); - }); - - it("treats env false as disabled", () => { - const env = { OPENCLAW_DIAGNOSTICS: "0" } as NodeJS.ProcessEnv; - expect(isDiagnosticFlagEnabled("telegram.http", undefined, env)).toBe(false); - }); - }); - - describe("isMainModule", () => { - it("returns true when argv[1] matches current file", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/index.js", - argv: ["node", "/repo/dist/index.js"], - cwd: "/repo", - env: {}, - }), - ).toBe(true); - }); - - it("returns true under PM2 when pm_exec_path matches current file", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/index.js", - argv: ["node", "/pm2/lib/ProcessContainerFork.js"], - cwd: "/repo", - env: { pm_exec_path: "/repo/dist/index.js", pm_id: "0" }, - }), - ).toBe(true); - }); - - it("returns true for dist/entry.js when launched via openclaw.mjs wrapper", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/entry.js", - argv: ["node", 
"/repo/openclaw.mjs"], - cwd: "/repo", - env: {}, - wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], - }), - ).toBe(true); - }); - - it("returns false for wrapper launches when wrapper pair is not configured", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/entry.js", - argv: ["node", "/repo/openclaw.mjs"], - cwd: "/repo", - env: {}, - }), - ).toBe(false); - }); - - it("returns false when wrapper pair targets a different entry basename", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/index.js", - argv: ["node", "/repo/openclaw.mjs"], - cwd: "/repo", - env: {}, - wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], - }), - ).toBe(false); - }); - - it("returns false when running under PM2 but this module is imported", () => { - expect( - isMainModule({ - currentFile: "/repo/node_modules/openclaw/dist/index.js", - argv: ["node", "/repo/app.js"], - cwd: "/repo", - env: { pm_exec_path: "/repo/app.js", pm_id: "0" }, - }), - ).toBe(false); - }); - }); - - describe("buildNodeShellCommand", () => { - it("uses cmd.exe for win32", () => { - expect(buildNodeShellCommand("echo hi", "win32")).toEqual([ - "cmd.exe", - "/d", - "/s", - "/c", - "echo hi", - ]); - }); - - it("uses cmd.exe for windows labels", () => { - expect(buildNodeShellCommand("echo hi", "windows")).toEqual([ - "cmd.exe", - "/d", - "/s", - "/c", - "echo hi", - ]); - expect(buildNodeShellCommand("echo hi", "Windows 11")).toEqual([ - "cmd.exe", - "/d", - "/s", - "/c", - "echo hi", - ]); - }); - - it("uses /bin/sh for darwin", () => { - expect(buildNodeShellCommand("echo hi", "darwin")).toEqual(["/bin/sh", "-lc", "echo hi"]); - }); - - it("uses /bin/sh when platform missing", () => { - expect(buildNodeShellCommand("echo hi")).toEqual(["/bin/sh", "-lc", "echo hi"]); - }); - }); - - describe("parseSshTarget", () => { - it("parses user@host:port targets", () => { - expect(parseSshTarget("me@example.com:2222")).toEqual({ - 
user: "me", - host: "example.com", - port: 2222, - }); - }); - - it("parses host-only targets with default port", () => { - expect(parseSshTarget("example.com")).toEqual({ - user: undefined, - host: "example.com", - port: 22, - }); - }); - - it("rejects hostnames that start with '-'", () => { - expect(parseSshTarget("-V")).toBeNull(); - expect(parseSshTarget("me@-badhost")).toBeNull(); - expect(parseSshTarget("-oProxyCommand=echo")).toBeNull(); - }); - }); -}); diff --git a/src/infra/infra-runtime.test.ts b/src/infra/infra-runtime.test.ts index 1596b73bbe8..2072f8f2da3 100644 --- a/src/infra/infra-runtime.test.ts +++ b/src/infra/infra-runtime.test.ts @@ -1,8 +1,5 @@ import os from "node:os"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import type { runExec } from "../process/exec.js"; -import type { RuntimeEnv } from "../runtime.js"; -import { ensureBinary } from "./binaries.js"; import { __testing, consumeGatewaySigusr1RestartAuthorization, @@ -31,35 +28,6 @@ describe("infra runtime", () => { }); } - describe("ensureBinary", () => { - it("passes through when binary exists", async () => { - const exec: typeof runExec = vi.fn().mockResolvedValue({ - stdout: "", - stderr: "", - }); - const runtime: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - await ensureBinary("node", exec, runtime); - expect(exec).toHaveBeenCalledWith("which", ["node"]); - }); - - it("logs and exits when missing", async () => { - const exec: typeof runExec = vi.fn().mockRejectedValue(new Error("missing")); - const error = vi.fn(); - const exit = vi.fn(() => { - throw new Error("exit"); - }); - await expect(ensureBinary("ghost", exec, { log: vi.fn(), error, exit })).rejects.toThrow( - "exit", - ); - expect(error).toHaveBeenCalledWith("Missing required binary: ghost. 
Please install it."); - expect(exit).toHaveBeenCalledWith(1); - }); - }); - describe("restart authorization", () => { setupRestartSignalSuite(); diff --git a/src/infra/infra-store.test.ts b/src/infra/infra-store.test.ts index 1f65b005652..dfa6b1715c4 100644 --- a/src/infra/infra-store.test.ts +++ b/src/infra/infra-store.test.ts @@ -1,24 +1,8 @@ import fs from "node:fs/promises"; import path from "node:path"; -import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { describe, expect, it } from "vitest"; import { withTempDir } from "../test-utils/temp-dir.js"; -import { - getChannelActivity, - recordChannelActivity, - resetChannelActivityForTest, -} from "./channel-activity.js"; -import { createDedupeCache } from "./dedupe.js"; -import { - emitDiagnosticEvent, - onDiagnosticEvent, - resetDiagnosticEventsForTest, -} from "./diagnostic-events.js"; import { readSessionStoreJson5 } from "./state-migrations.fs.js"; -import { - defaultVoiceWakeTriggers, - loadVoiceWakeConfig, - setVoiceWakeTriggers, -} from "./voicewake.js"; describe("infra store", () => { describe("state migrations fs", () => { @@ -49,182 +33,4 @@ describe("infra store", () => { }); }); }); - - describe("voicewake store", () => { - it("returns defaults when missing", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - const cfg = await loadVoiceWakeConfig(baseDir); - expect(cfg.triggers).toEqual(defaultVoiceWakeTriggers()); - expect(cfg.updatedAtMs).toBe(0); - }); - }); - - it("sanitizes and persists triggers", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - const saved = await setVoiceWakeTriggers([" hi ", "", " there "], baseDir); - expect(saved.triggers).toEqual(["hi", "there"]); - expect(saved.updatedAtMs).toBeGreaterThan(0); - - const loaded = await loadVoiceWakeConfig(baseDir); - expect(loaded.triggers).toEqual(["hi", "there"]); - expect(loaded.updatedAtMs).toBeGreaterThan(0); - }); - }); - - it("falls back to 
defaults when triggers empty", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - const saved = await setVoiceWakeTriggers(["", " "], baseDir); - expect(saved.triggers).toEqual(defaultVoiceWakeTriggers()); - }); - }); - - it("sanitizes malformed persisted config values", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - await fs.mkdir(path.join(baseDir, "settings"), { recursive: true }); - await fs.writeFile( - path.join(baseDir, "settings", "voicewake.json"), - JSON.stringify({ - triggers: [" wake ", "", 42, null], - updatedAtMs: -1, - }), - "utf-8", - ); - - const loaded = await loadVoiceWakeConfig(baseDir); - expect(loaded.triggers).toEqual(["wake"]); - expect(loaded.updatedAtMs).toBe(0); - }); - }); - }); - - describe("diagnostic-events", () => { - it("emits monotonic seq", async () => { - resetDiagnosticEventsForTest(); - const seqs: number[] = []; - const stop = onDiagnosticEvent((evt) => seqs.push(evt.seq)); - - emitDiagnosticEvent({ - type: "model.usage", - usage: { total: 1 }, - }); - emitDiagnosticEvent({ - type: "model.usage", - usage: { total: 2 }, - }); - - stop(); - - expect(seqs).toEqual([1, 2]); - }); - - it("emits message-flow events", async () => { - resetDiagnosticEventsForTest(); - const types: string[] = []; - const stop = onDiagnosticEvent((evt) => types.push(evt.type)); - - emitDiagnosticEvent({ - type: "webhook.received", - channel: "telegram", - updateType: "telegram-post", - }); - emitDiagnosticEvent({ - type: "message.queued", - channel: "telegram", - source: "telegram", - queueDepth: 1, - }); - emitDiagnosticEvent({ - type: "session.state", - state: "processing", - reason: "run_started", - }); - - stop(); - - expect(types).toEqual(["webhook.received", "message.queued", "session.state"]); - }); - }); - - describe("channel activity", () => { - beforeEach(() => { - resetChannelActivityForTest(); - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-01-08T00:00:00Z")); - }); - - 
afterEach(() => { - vi.useRealTimers(); - }); - - it("records inbound/outbound separately", () => { - recordChannelActivity({ channel: "telegram", direction: "inbound" }); - vi.advanceTimersByTime(1000); - recordChannelActivity({ channel: "telegram", direction: "outbound" }); - const res = getChannelActivity({ channel: "telegram" }); - expect(res.inboundAt).toBe(1767830400000); - expect(res.outboundAt).toBe(1767830401000); - }); - - it("isolates accounts", () => { - recordChannelActivity({ - channel: "whatsapp", - accountId: "a", - direction: "inbound", - at: 1, - }); - recordChannelActivity({ - channel: "whatsapp", - accountId: "b", - direction: "inbound", - at: 2, - }); - expect(getChannelActivity({ channel: "whatsapp", accountId: "a" })).toEqual({ - inboundAt: 1, - outboundAt: null, - }); - expect(getChannelActivity({ channel: "whatsapp", accountId: "b" })).toEqual({ - inboundAt: 2, - outboundAt: null, - }); - }); - }); - - describe("createDedupeCache", () => { - it("marks duplicates within TTL", () => { - const cache = createDedupeCache({ ttlMs: 1000, maxSize: 10 }); - expect(cache.check("a", 100)).toBe(false); - expect(cache.check("a", 500)).toBe(true); - }); - - it("expires entries after TTL", () => { - const cache = createDedupeCache({ ttlMs: 1000, maxSize: 10 }); - expect(cache.check("a", 100)).toBe(false); - expect(cache.check("a", 1501)).toBe(false); - }); - - it("evicts oldest entries when over max size", () => { - const cache = createDedupeCache({ ttlMs: 10_000, maxSize: 2 }); - expect(cache.check("a", 100)).toBe(false); - expect(cache.check("b", 200)).toBe(false); - expect(cache.check("c", 300)).toBe(false); - expect(cache.check("a", 400)).toBe(false); - }); - - it("prunes expired entries even when refreshed keys are older in insertion order", () => { - const cache = createDedupeCache({ ttlMs: 100, maxSize: 10 }); - expect(cache.check("a", 0)).toBe(false); - expect(cache.check("b", 50)).toBe(false); - expect(cache.check("a", 120)).toBe(false); - 
expect(cache.check("c", 200)).toBe(false); - expect(cache.size()).toBe(2); - }); - - it("supports non-mutating existence checks via peek()", () => { - const cache = createDedupeCache({ ttlMs: 1000, maxSize: 10 }); - expect(cache.peek("a", 100)).toBe(false); - expect(cache.check("a", 100)).toBe(false); - expect(cache.peek("a", 200)).toBe(true); - expect(cache.peek("a", 1201)).toBe(false); - }); - }); }); diff --git a/src/infra/install-from-npm-spec.test.ts b/src/infra/install-from-npm-spec.test.ts new file mode 100644 index 00000000000..f2e5132f96f --- /dev/null +++ b/src/infra/install-from-npm-spec.test.ts @@ -0,0 +1,77 @@ +import { describe, expect, it, vi } from "vitest"; + +const validateRegistryNpmSpecMock = vi.hoisted(() => vi.fn()); +const installFromNpmSpecArchiveWithInstallerMock = vi.hoisted(() => vi.fn()); +const finalizeNpmSpecArchiveInstallMock = vi.hoisted(() => vi.fn()); + +vi.mock("./npm-registry-spec.js", () => ({ + validateRegistryNpmSpec: (...args: unknown[]) => validateRegistryNpmSpecMock(...args), +})); + +vi.mock("./npm-pack-install.js", () => ({ + installFromNpmSpecArchiveWithInstaller: (...args: unknown[]) => + installFromNpmSpecArchiveWithInstallerMock(...args), + finalizeNpmSpecArchiveInstall: (...args: unknown[]) => finalizeNpmSpecArchiveInstallMock(...args), +})); + +import { installFromValidatedNpmSpecArchive } from "./install-from-npm-spec.js"; + +describe("installFromValidatedNpmSpecArchive", () => { + it("trims the spec and returns validation errors before running the installer", async () => { + validateRegistryNpmSpecMock.mockReturnValueOnce("unsupported npm spec"); + + await expect( + installFromValidatedNpmSpecArchive({ + spec: " nope ", + timeoutMs: 30_000, + tempDirPrefix: "openclaw-npm-", + installFromArchive: vi.fn(), + archiveInstallParams: {}, + }), + ).resolves.toEqual({ ok: false, error: "unsupported npm spec" }); + + expect(validateRegistryNpmSpecMock).toHaveBeenCalledWith("nope"); + 
expect(installFromNpmSpecArchiveWithInstallerMock).not.toHaveBeenCalled(); + expect(finalizeNpmSpecArchiveInstallMock).not.toHaveBeenCalled(); + }); + + it("passes the trimmed spec through the archive installer and finalizer", async () => { + const installFromArchive = vi.fn(); + const warn = vi.fn(); + const onIntegrityDrift = vi.fn(); + const flowResult = { + ok: true, + installResult: { ok: true }, + npmResolution: { version: "1.2.3" }, + }; + const finalized = { ok: true, archivePath: "/tmp/pkg.tgz" }; + validateRegistryNpmSpecMock.mockReturnValueOnce(null); + installFromNpmSpecArchiveWithInstallerMock.mockResolvedValueOnce(flowResult); + finalizeNpmSpecArchiveInstallMock.mockReturnValueOnce(finalized); + + await expect( + installFromValidatedNpmSpecArchive({ + spec: " @openclaw/demo@beta ", + timeoutMs: 45_000, + tempDirPrefix: "openclaw-npm-", + expectedIntegrity: "sha512-demo", + onIntegrityDrift, + warn, + installFromArchive, + archiveInstallParams: { destination: "/tmp/demo" }, + }), + ).resolves.toBe(finalized); + + expect(installFromNpmSpecArchiveWithInstallerMock).toHaveBeenCalledWith({ + tempDirPrefix: "openclaw-npm-", + spec: "@openclaw/demo@beta", + timeoutMs: 45_000, + expectedIntegrity: "sha512-demo", + onIntegrityDrift, + warn, + installFromArchive, + archiveInstallParams: { destination: "/tmp/demo" }, + }); + expect(finalizeNpmSpecArchiveInstallMock).toHaveBeenCalledWith(flowResult); + }); +}); diff --git a/src/infra/install-mode-options.test.ts b/src/infra/install-mode-options.test.ts index 3e3c7297471..6fd450ee370 100644 --- a/src/infra/install-mode-options.test.ts +++ b/src/infra/install-mode-options.test.ts @@ -4,6 +4,8 @@ import { resolveTimedInstallModeOptions, } from "./install-mode-options.js"; +type LoggerKey = "default" | "explicit"; + describe("install mode option helpers", () => { it.each([ { @@ -21,11 +23,15 @@ describe("install mode option helpers", () => { params: { mode: "update" as const, dryRun: false }, expected: { loggerKey: 
"default", mode: "update", dryRun: false }, }, - ])("$name", ({ params, expected }) => { + ] satisfies Array<{ + name: string; + params: { loggerKey?: LoggerKey; mode?: "install" | "update"; dryRun?: boolean }; + expected: { loggerKey: LoggerKey; mode: "install" | "update"; dryRun: boolean }; + }>)("$name", ({ params, expected }) => { const loggers = { default: { warn: (_message: string) => {} }, explicit: { warn: (_message: string) => {} }, - }; + } satisfies Record void }>; expect( resolveInstallModeOptions( diff --git a/src/infra/install-target.test.ts b/src/infra/install-target.test.ts new file mode 100644 index 00000000000..211d5c1a99d --- /dev/null +++ b/src/infra/install-target.test.ts @@ -0,0 +1,129 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; + +const fileExistsMock = vi.hoisted(() => vi.fn()); +const resolveSafeInstallDirMock = vi.hoisted(() => vi.fn()); +const assertCanonicalPathWithinBaseMock = vi.hoisted(() => vi.fn()); + +vi.mock("./archive.js", () => ({ + fileExists: (...args: unknown[]) => fileExistsMock(...args), +})); + +vi.mock("./install-safe-path.js", () => ({ + resolveSafeInstallDir: (...args: unknown[]) => resolveSafeInstallDirMock(...args), + assertCanonicalPathWithinBase: (...args: unknown[]) => assertCanonicalPathWithinBaseMock(...args), +})); + +import { ensureInstallTargetAvailable, resolveCanonicalInstallTarget } from "./install-target.js"; + +beforeEach(() => { + fileExistsMock.mockReset(); + resolveSafeInstallDirMock.mockReset(); + assertCanonicalPathWithinBaseMock.mockReset(); +}); + +describe("resolveCanonicalInstallTarget", () => { + it("creates the base dir and returns early for invalid install ids", async () => { + await withTempDir({ prefix: "openclaw-install-target-" }, async (root) => { + const baseDir = path.join(root, "plugins"); + resolveSafeInstallDirMock.mockReturnValueOnce({ 
+ ok: false, + error: "bad id", + }); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "../oops", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: false, error: "bad id" }); + + await expect(fs.stat(baseDir)).resolves.toMatchObject({ isDirectory: expect.any(Function) }); + expect(assertCanonicalPathWithinBaseMock).not.toHaveBeenCalled(); + }); + }); + + it("returns canonical boundary errors for Error and non-Error throws", async () => { + await withTempDir({ prefix: "openclaw-install-target-" }, async (baseDir) => { + const targetDir = path.join(baseDir, "demo"); + resolveSafeInstallDirMock.mockReturnValue({ + ok: true, + path: targetDir, + }); + assertCanonicalPathWithinBaseMock.mockRejectedValueOnce(new Error("escaped")); + assertCanonicalPathWithinBaseMock.mockRejectedValueOnce("boom"); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "demo", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: false, error: "escaped" }); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "demo", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: false, error: "boom" }); + }); + }); + + it("returns the resolved target path on success", async () => { + await withTempDir({ prefix: "openclaw-install-target-" }, async (baseDir) => { + const targetDir = path.join(baseDir, "demo"); + resolveSafeInstallDirMock.mockReturnValueOnce({ + ok: true, + path: targetDir, + }); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "demo", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: true, targetDir }); + }); + }); +}); + +describe("ensureInstallTargetAvailable", () => { + it("blocks only install mode when the target already exists", async () => { + fileExistsMock.mockResolvedValueOnce(true); + fileExistsMock.mockResolvedValueOnce(false); + + 
await expect( + ensureInstallTargetAvailable({ + mode: "install", + targetDir: "/tmp/demo", + alreadyExistsError: "already there", + }), + ).resolves.toEqual({ ok: false, error: "already there" }); + + await expect( + ensureInstallTargetAvailable({ + mode: "update", + targetDir: "/tmp/demo", + alreadyExistsError: "already there", + }), + ).resolves.toEqual({ ok: true }); + + await expect( + ensureInstallTargetAvailable({ + mode: "install", + targetDir: "/tmp/demo", + alreadyExistsError: "already there", + }), + ).resolves.toEqual({ ok: true }); + }); +}); diff --git a/src/infra/is-main.test.ts b/src/infra/is-main.test.ts new file mode 100644 index 00000000000..b2f6197ad24 --- /dev/null +++ b/src/infra/is-main.test.ts @@ -0,0 +1,80 @@ +import { describe, expect, it } from "vitest"; +import { isMainModule } from "./is-main.js"; + +describe("isMainModule", () => { + it("returns true when argv[1] matches current file", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "/repo/dist/index.js"], + cwd: "/repo", + env: {}, + }), + ).toBe(true); + }); + + it("returns true under PM2 when pm_exec_path matches current file", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "/pm2/lib/ProcessContainerFork.js"], + cwd: "/repo", + env: { pm_exec_path: "/repo/dist/index.js", pm_id: "0" }, + }), + ).toBe(true); + }); + + it("returns true for configured wrapper-to-entry pairs", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/entry.js", + argv: ["node", "/repo/openclaw.mjs"], + cwd: "/repo", + env: {}, + wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], + }), + ).toBe(true); + }); + + it("returns false for unmatched wrapper launches", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/entry.js", + argv: ["node", "/repo/openclaw.mjs"], + cwd: "/repo", + env: {}, + }), + ).toBe(false); + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", 
+ argv: ["node", "/repo/openclaw.mjs"], + cwd: "/repo", + env: {}, + wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], + }), + ).toBe(false); + }); + + it("returns false when this module is only imported under PM2", () => { + expect( + isMainModule({ + currentFile: "/repo/node_modules/openclaw/dist/index.js", + argv: ["node", "/repo/app.js"], + cwd: "/repo", + env: { pm_exec_path: "/repo/app.js", pm_id: "0" }, + }), + ).toBe(false); + }); + + it("falls back to basename matching for relative or symlinked entrypoints", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "../other/index.js"], + cwd: "/repo/dist", + env: {}, + }), + ).toBe(true); + }); +}); diff --git a/src/infra/json-file.test.ts b/src/infra/json-file.test.ts new file mode 100644 index 00000000000..95def5fa54a --- /dev/null +++ b/src/infra/json-file.test.ts @@ -0,0 +1,33 @@ +import fs from "node:fs"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { loadJsonFile, saveJsonFile } from "./json-file.js"; + +describe("json-file helpers", () => { + it("returns undefined for missing and invalid JSON files", async () => { + await withTempDir({ prefix: "openclaw-json-file-" }, async (root) => { + const pathname = path.join(root, "config.json"); + expect(loadJsonFile(pathname)).toBeUndefined(); + + fs.writeFileSync(pathname, "{", "utf8"); + expect(loadJsonFile(pathname)).toBeUndefined(); + }); + }); + + it("creates parent dirs, writes a trailing newline, and loads the saved object", async () => { + await withTempDir({ prefix: "openclaw-json-file-" }, async (root) => { + const pathname = path.join(root, "nested", "config.json"); + saveJsonFile(pathname, { enabled: true, count: 2 }); + + const raw = fs.readFileSync(pathname, "utf8"); + expect(raw.endsWith("\n")).toBe(true); + expect(loadJsonFile(pathname)).toEqual({ enabled: true, count: 2 }); 
+ + const fileMode = fs.statSync(pathname).mode & 0o777; + const dirMode = fs.statSync(path.dirname(pathname)).mode & 0o777; + expect(fileMode).toBe(0o600); + expect(dirMode).toBe(0o700); + }); + }); +}); diff --git a/src/infra/json-files.test.ts b/src/infra/json-files.test.ts new file mode 100644 index 00000000000..d2d0fa600f5 --- /dev/null +++ b/src/infra/json-files.test.ts @@ -0,0 +1,68 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { createAsyncLock, readJsonFile, writeJsonAtomic, writeTextAtomic } from "./json-files.js"; + +describe("json file helpers", () => { + it("reads valid json and returns null for missing or invalid files", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-json-files-")); + const validPath = path.join(base, "valid.json"); + const invalidPath = path.join(base, "invalid.json"); + + await fs.writeFile(validPath, '{"ok":true}', "utf8"); + await fs.writeFile(invalidPath, "{not-json}", "utf8"); + + await expect(readJsonFile<{ ok: boolean }>(validPath)).resolves.toEqual({ ok: true }); + await expect(readJsonFile(invalidPath)).resolves.toBeNull(); + await expect(readJsonFile(path.join(base, "missing.json"))).resolves.toBeNull(); + }); + + it("writes json atomically with pretty formatting and optional trailing newline", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-json-files-")); + const filePath = path.join(base, "nested", "config.json"); + + await writeJsonAtomic( + filePath, + { ok: true, nested: { value: 1 } }, + { trailingNewline: true, ensureDirMode: 0o755 }, + ); + + await expect(fs.readFile(filePath, "utf8")).resolves.toBe( + '{\n "ok": true,\n "nested": {\n "value": 1\n }\n}\n', + ); + }); + + it("writes text atomically and avoids duplicate trailing newlines", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-json-files-")); + const filePath 
= path.join(base, "nested", "note.txt"); + + await writeTextAtomic(filePath, "hello", { appendTrailingNewline: true }); + await expect(fs.readFile(filePath, "utf8")).resolves.toBe("hello\n"); + + await writeTextAtomic(filePath, "hello\n", { appendTrailingNewline: true }); + await expect(fs.readFile(filePath, "utf8")).resolves.toBe("hello\n"); + }); + + it("serializes async lock callers even across rejections", async () => { + const withLock = createAsyncLock(); + const events: string[] = []; + + const first = withLock(async () => { + events.push("first:start"); + await new Promise((resolve) => setTimeout(resolve, 20)); + events.push("first:end"); + throw new Error("boom"); + }); + + const second = withLock(async () => { + events.push("second:start"); + events.push("second:end"); + return "ok"; + }); + + await expect(first).rejects.toThrow("boom"); + await expect(second).resolves.toBe("ok"); + expect(events).toEqual(["first:start", "first:end", "second:start", "second:end"]); + }); +}); diff --git a/src/infra/json-utf8-bytes.test.ts b/src/infra/json-utf8-bytes.test.ts index e2f8e217be0..5009301ffd6 100644 --- a/src/infra/json-utf8-bytes.test.ts +++ b/src/infra/json-utf8-bytes.test.ts @@ -18,6 +18,11 @@ describe("jsonUtf8Bytes", () => { value: undefined, expected: Buffer.byteLength("undefined", "utf8"), }, + { + name: "unicode strings", + value: "🙂", + expected: Buffer.byteLength(JSON.stringify("🙂"), "utf8"), + }, ])("returns utf8 byte length for $name", ({ value, expected }) => { expect(jsonUtf8Bytes(value)).toBe(expected); }); @@ -31,4 +36,8 @@ describe("jsonUtf8Bytes", () => { it("uses string conversion for BigInt serialization failures", () => { expect(jsonUtf8Bytes(12n)).toBe(Buffer.byteLength("12", "utf8")); }); + + it("uses string conversion for symbol serialization failures", () => { + expect(jsonUtf8Bytes(Symbol("token"))).toBe(Buffer.byteLength("Symbol(token)", "utf8")); + }); }); diff --git a/src/infra/jsonl-socket.test.ts b/src/infra/jsonl-socket.test.ts 
new file mode 100644 index 00000000000..af8bf0fdaed --- /dev/null +++ b/src/infra/jsonl-socket.test.ts @@ -0,0 +1,69 @@ +import net from "node:net"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { requestJsonlSocket } from "./jsonl-socket.js"; + +describe.runIf(process.platform !== "win32")("requestJsonlSocket", () => { + it("ignores malformed and non-accepted lines until one is accepted", async () => { + await withTempDir({ prefix: "openclaw-jsonl-socket-" }, async (dir) => { + const socketPath = path.join(dir, "socket.sock"); + const server = net.createServer((socket) => { + socket.on("data", () => { + socket.write("{bad json}\n"); + socket.write('{"type":"ignore"}\n'); + socket.write('{"type":"done","value":42}\n'); + }); + }); + await new Promise((resolve) => server.listen(socketPath, resolve)); + + try { + await expect( + requestJsonlSocket({ + socketPath, + payload: '{"hello":"world"}', + timeoutMs: 500, + accept: (msg) => { + const value = msg as { type?: string; value?: number }; + return value.type === "done" ? (value.value ?? null) : undefined; + }, + }), + ).resolves.toBe(42); + } finally { + server.close(); + } + }); + }); + + it("returns null on timeout and on socket errors", async () => { + await withTempDir({ prefix: "openclaw-jsonl-socket-" }, async (dir) => { + const socketPath = path.join(dir, "socket.sock"); + const server = net.createServer(() => { + // Intentionally never reply. 
+ }); + await new Promise((resolve) => server.listen(socketPath, resolve)); + + try { + await expect( + requestJsonlSocket({ + socketPath, + payload: "{}", + timeoutMs: 50, + accept: () => undefined, + }), + ).resolves.toBeNull(); + } finally { + server.close(); + } + + await expect( + requestJsonlSocket({ + socketPath, + payload: "{}", + timeoutMs: 50, + accept: () => undefined, + }), + ).resolves.toBeNull(); + }); + }); +}); diff --git a/src/infra/machine-name.test.ts b/src/infra/machine-name.test.ts new file mode 100644 index 00000000000..f36efd6ceee --- /dev/null +++ b/src/infra/machine-name.test.ts @@ -0,0 +1,54 @@ +import os from "node:os"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { importFreshModule } from "../../test/helpers/import-fresh.js"; + +const execFileMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + execFile: (...args: unknown[]) => execFileMock(...args), +})); + +const originalVitest = process.env.VITEST; +const originalNodeEnv = process.env.NODE_ENV; + +async function importMachineName(scope: string) { + return await importFreshModule( + import.meta.url, + `./machine-name.js?scope=${scope}`, + ); +} + +afterEach(() => { + execFileMock.mockReset(); + vi.restoreAllMocks(); + if (originalVitest === undefined) { + delete process.env.VITEST; + } else { + process.env.VITEST = originalVitest; + } + if (originalNodeEnv === undefined) { + delete process.env.NODE_ENV; + } else { + process.env.NODE_ENV = originalNodeEnv; + } +}); + +describe("getMachineDisplayName", () => { + it("uses the hostname fallback in test mode and trims .local", async () => { + const hostnameSpy = vi.spyOn(os, "hostname").mockReturnValue(" clawbox.local "); + const machineName = await importMachineName("test-fallback"); + + await expect(machineName.getMachineDisplayName()).resolves.toBe("clawbox.local"); + await expect(machineName.getMachineDisplayName()).resolves.toBe("clawbox.local"); + 
expect(hostnameSpy).toHaveBeenCalledTimes(1); + expect(execFileMock).not.toHaveBeenCalled(); + }); + + it("falls back to the default product name when hostname is blank", async () => { + vi.spyOn(os, "hostname").mockReturnValue(" "); + const machineName = await importMachineName("blank-hostname"); + + await expect(machineName.getMachineDisplayName()).resolves.toBe("openclaw"); + expect(execFileMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/map-size.test.ts b/src/infra/map-size.test.ts new file mode 100644 index 00000000000..82fbe8a52ac --- /dev/null +++ b/src/infra/map-size.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { pruneMapToMaxSize } from "./map-size.js"; + +describe("pruneMapToMaxSize", () => { + it("keeps the newest entries after flooring fractional limits", () => { + const map = new Map([ + ["a", 1], + ["b", 2], + ["c", 3], + ]); + + pruneMapToMaxSize(map, 2.9); + + expect([...map.entries()]).toEqual([ + ["b", 2], + ["c", 3], + ]); + }); + + it("clears maps for zero or negative limits and leaves undersized maps untouched", () => { + const cleared = new Map([ + ["a", 1], + ["b", 2], + ]); + pruneMapToMaxSize(cleared, 0); + expect([...cleared.entries()]).toEqual([]); + + const alsoCleared = new Map([ + ["a", 1], + ["b", 2], + ]); + pruneMapToMaxSize(alsoCleared, -4); + expect([...alsoCleared.entries()]).toEqual([]); + + const unchanged = new Map([["a", 1]]); + pruneMapToMaxSize(unchanged, 5); + expect([...unchanged.entries()]).toEqual([["a", 1]]); + }); +}); diff --git a/src/infra/net/hostname.test.ts b/src/infra/net/hostname.test.ts new file mode 100644 index 00000000000..90e4c939e91 --- /dev/null +++ b/src/infra/net/hostname.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, it } from "vitest"; +import { normalizeHostname } from "./hostname.js"; + +describe("normalizeHostname", () => { + it("trims, lowercases, and strips a trailing dot", () => { + expect(normalizeHostname(" Example.COM. 
")).toBe("example.com"); + }); + + it("unwraps bracketed ipv6 hosts after normalization", () => { + expect(normalizeHostname(" [FD7A:115C:A1E0::1] ")).toBe("fd7a:115c:a1e0::1"); + }); + + it("leaves non-fully-bracketed values otherwise unchanged", () => { + expect(normalizeHostname("[fd7a:115c:a1e0::1")).toBe("[fd7a:115c:a1e0::1"); + expect(normalizeHostname("fd7a:115c:a1e0::1]")).toBe("fd7a:115c:a1e0::1]"); + }); +}); diff --git a/src/infra/net/proxy-env.test.ts b/src/infra/net/proxy-env.test.ts index 37b910f1769..3f3031f028a 100644 --- a/src/infra/net/proxy-env.test.ts +++ b/src/infra/net/proxy-env.test.ts @@ -1,5 +1,31 @@ import { describe, expect, it } from "vitest"; -import { hasEnvHttpProxyConfigured, resolveEnvHttpProxyUrl } from "./proxy-env.js"; +import { + hasEnvHttpProxyConfigured, + hasProxyEnvConfigured, + resolveEnvHttpProxyUrl, +} from "./proxy-env.js"; + +describe("hasProxyEnvConfigured", () => { + it.each([ + { + name: "detects upper-case HTTP proxy values", + env: { HTTP_PROXY: "http://upper-http.test:8080" } as NodeJS.ProcessEnv, + expected: true, + }, + { + name: "detects lower-case all_proxy values", + env: { all_proxy: "socks5://proxy.test:1080" } as NodeJS.ProcessEnv, + expected: true, + }, + { + name: "ignores blank proxy values", + env: { HTTP_PROXY: " ", all_proxy: "" } as NodeJS.ProcessEnv, + expected: false, + }, + ])("$name", ({ env, expected }) => { + expect(hasProxyEnvConfigured(env)).toBe(expected); + }); +}); describe("resolveEnvHttpProxyUrl", () => { it("uses lower-case https_proxy before upper-case HTTPS_PROXY", () => { @@ -39,4 +65,24 @@ describe("resolveEnvHttpProxyUrl", () => { expect(resolveEnvHttpProxyUrl("https", env)).toBe("http://upper-http.test:8080"); expect(hasEnvHttpProxyConfigured("https", env)).toBe(true); }); + + it("does not use ALL_PROXY for EnvHttpProxyAgent-style resolution", () => { + const env = { + ALL_PROXY: "http://all-proxy.test:8080", + all_proxy: "http://lower-all-proxy.test:8080", + } as 
NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("https", env)).toBeUndefined(); + expect(resolveEnvHttpProxyUrl("http", env)).toBeUndefined(); + expect(hasEnvHttpProxyConfigured("https", env)).toBe(false); + }); + + it("returns only HTTP proxies for http requests", () => { + const env = { + https_proxy: "http://lower-https.test:8080", + http_proxy: "http://lower-http.test:8080", + } as NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("http", env)).toBe("http://lower-http.test:8080"); + }); }); diff --git a/src/infra/node-shell.test.ts b/src/infra/node-shell.test.ts new file mode 100644 index 00000000000..8a0dc72bde1 --- /dev/null +++ b/src/infra/node-shell.test.ts @@ -0,0 +1,35 @@ +import { describe, expect, it } from "vitest"; +import { buildNodeShellCommand } from "./node-shell.js"; + +describe("buildNodeShellCommand", () => { + it("uses cmd.exe for win-prefixed platform labels", () => { + expect(buildNodeShellCommand("echo hi", "win32")).toEqual([ + "cmd.exe", + "/d", + "/s", + "/c", + "echo hi", + ]); + expect(buildNodeShellCommand("echo hi", "windows")).toEqual([ + "cmd.exe", + "/d", + "/s", + "/c", + "echo hi", + ]); + expect(buildNodeShellCommand("echo hi", " Windows 11 ")).toEqual([ + "cmd.exe", + "/d", + "/s", + "/c", + "echo hi", + ]); + }); + + it("uses /bin/sh for non-windows and missing platform values", () => { + expect(buildNodeShellCommand("echo hi", "darwin")).toEqual(["/bin/sh", "-lc", "echo hi"]); + expect(buildNodeShellCommand("echo hi", "linux")).toEqual(["/bin/sh", "-lc", "echo hi"]); + expect(buildNodeShellCommand("echo hi")).toEqual(["/bin/sh", "-lc", "echo hi"]); + expect(buildNodeShellCommand("echo hi", null)).toEqual(["/bin/sh", "-lc", "echo hi"]); + }); +}); diff --git a/src/infra/npm-integrity.test.ts b/src/infra/npm-integrity.test.ts index e7e40b46413..aa96da76fab 100644 --- a/src/infra/npm-integrity.test.ts +++ b/src/infra/npm-integrity.test.ts @@ -6,23 +6,34 @@ import { describe("resolveNpmIntegrityDrift", () => { 
it("returns proceed=true when integrity is missing or unchanged", async () => { - await expect( - resolveNpmIntegrityDrift({ - spec: "@openclaw/test@1.0.0", - expectedIntegrity: "sha512-same", + const createPayload = vi.fn(() => "unused"); + const cases = [ + { + expectedIntegrity: undefined, resolution: { integrity: "sha512-same", resolvedAt: "2026-01-01T00:00:00.000Z" }, - createPayload: () => "unused", - }), - ).resolves.toEqual({ proceed: true }); - - await expect( - resolveNpmIntegrityDrift({ - spec: "@openclaw/test@1.0.0", + }, + { expectedIntegrity: "sha512-same", resolution: { resolvedAt: "2026-01-01T00:00:00.000Z" }, - createPayload: () => "unused", - }), - ).resolves.toEqual({ proceed: true }); + }, + { + expectedIntegrity: "sha512-same", + resolution: { integrity: "sha512-same", resolvedAt: "2026-01-01T00:00:00.000Z" }, + }, + ]; + + for (const testCase of cases) { + await expect( + resolveNpmIntegrityDrift({ + spec: "@openclaw/test@1.0.0", + expectedIntegrity: testCase.expectedIntegrity, + resolution: testCase.resolution, + createPayload, + }), + ).resolves.toEqual({ proceed: true }); + } + + expect(createPayload).not.toHaveBeenCalled(); }); it("uses callback on integrity drift", async () => { @@ -52,6 +63,31 @@ describe("resolveNpmIntegrityDrift", () => { }); }); + it("returns payload when the drift callback allows continuing", async () => { + const result = await resolveNpmIntegrityDrift({ + spec: "@openclaw/test@1.0.0", + expectedIntegrity: "sha512-old", + resolution: { + integrity: "sha512-new", + resolvedAt: "2026-01-01T00:00:00.000Z", + }, + createPayload: ({ spec, actualIntegrity }) => ({ spec, actualIntegrity }), + onIntegrityDrift: async () => true, + }); + + expect(result).toEqual({ + integrityDrift: { + expectedIntegrity: "sha512-old", + actualIntegrity: "sha512-new", + }, + payload: { + spec: "@openclaw/test@1.0.0", + actualIntegrity: "sha512-new", + }, + proceed: true, + }); + }); + it("warns by default when no callback is provided", async 
() => { const warn = vi.fn(); const result = await resolveNpmIntegrityDrift({ @@ -100,4 +136,22 @@ describe("resolveNpmIntegrityDrift", () => { "aborted: npm package integrity drift detected for @openclaw/test@1.0.0", ); }); + + it("falls back to the original spec when resolvedSpec is missing", async () => { + const warn = vi.fn(); + + await resolveNpmIntegrityDriftWithDefaultMessage({ + spec: "@openclaw/test@1.0.0", + expectedIntegrity: "sha512-old", + resolution: { + integrity: "sha512-new", + resolvedAt: "2026-01-01T00:00:00.000Z", + }, + warn, + }); + + expect(warn).toHaveBeenCalledWith( + "Integrity drift detected for @openclaw/test@1.0.0: expected sha512-old, got sha512-new", + ); + }); }); diff --git a/src/infra/npm-pack-install.test.ts b/src/infra/npm-pack-install.test.ts index c0428ec03c5..94d732deef6 100644 --- a/src/infra/npm-pack-install.test.ts +++ b/src/infra/npm-pack-install.test.ts @@ -91,6 +91,24 @@ describe("installFromNpmSpecArchive", () => { expect(withTempDir).toHaveBeenCalledWith("openclaw-test-", expect.any(Function)); }); + it("rejects unsupported npm specs before packing", async () => { + const installFromArchive = vi.fn(async () => ({ ok: true as const })); + + const result = await installFromNpmSpecArchive({ + tempDirPrefix: "openclaw-test-", + spec: "file:/tmp/openclaw.tgz", + timeoutMs: 1000, + installFromArchive, + }); + + expect(result).toEqual({ + ok: false, + error: "unsupported npm spec", + }); + expect(packNpmSpecToArchive).not.toHaveBeenCalled(); + expect(installFromArchive).not.toHaveBeenCalled(); + }); + it("returns resolution metadata and installer result on success", async () => { mockPackedSuccess({ name: "@openclaw/test", version: "1.0.0" }); const installFromArchive = vi.fn(async () => ({ ok: true as const, target: "done" })); @@ -176,6 +194,56 @@ describe("installFromNpmSpecArchive", () => { const okResult = expectWrappedOkResult(result, { ok: false, error: "install failed" }); 
expect(okResult.integrityDrift).toBeUndefined(); }); + + it("rejects prerelease resolutions unless explicitly requested", async () => { + vi.mocked(packNpmSpecToArchive).mockResolvedValue({ + ok: true, + archivePath: baseArchivePath, + metadata: { + resolvedSpec: "@openclaw/test@latest", + integrity: "sha512-same", + version: "1.1.0-beta.1", + }, + }); + const installFromArchive = vi.fn(async () => ({ ok: true as const })); + + const result = await installFromNpmSpecArchive({ + tempDirPrefix: "openclaw-test-", + spec: "@openclaw/test@latest", + timeoutMs: 1000, + installFromArchive, + }); + + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("expected prerelease rejection"); + } + expect(result.error).toContain("prerelease version 1.1.0-beta.1"); + expect(installFromArchive).not.toHaveBeenCalled(); + }); + + it("allows prerelease resolutions when explicitly requested by tag", async () => { + vi.mocked(packNpmSpecToArchive).mockResolvedValue({ + ok: true, + archivePath: baseArchivePath, + metadata: { + resolvedSpec: "@openclaw/test@beta", + integrity: "sha512-same", + version: "1.1.0-beta.1", + }, + }); + const installFromArchive = vi.fn(async () => ({ ok: true as const, pluginId: "beta-plugin" })); + + const result = await installFromNpmSpecArchive({ + tempDirPrefix: "openclaw-test-", + spec: "@openclaw/test@beta", + timeoutMs: 1000, + installFromArchive, + }); + + const okResult = expectWrappedOkResult(result, { ok: true, pluginId: "beta-plugin" }); + expect(okResult.npmResolution.version).toBe("1.1.0-beta.1"); + }); }); describe("installFromNpmSpecArchiveWithInstaller", () => { diff --git a/src/infra/openclaw-exec-env.test.ts b/src/infra/openclaw-exec-env.test.ts new file mode 100644 index 00000000000..488fa1dd5ef --- /dev/null +++ b/src/infra/openclaw-exec-env.test.ts @@ -0,0 +1,30 @@ +import { describe, expect, it } from "vitest"; +import { + ensureOpenClawExecMarkerOnProcess, + markOpenClawExecEnv, + OPENCLAW_CLI_ENV_VALUE, + 
OPENCLAW_CLI_ENV_VAR, +} from "./openclaw-exec-env.js"; + +describe("markOpenClawExecEnv", () => { + it("returns a cloned env object with the exec marker set", () => { + const env = { PATH: "/usr/bin", OPENCLAW_CLI: "0" }; + const marked = markOpenClawExecEnv(env); + + expect(marked).toEqual({ + PATH: "/usr/bin", + OPENCLAW_CLI: OPENCLAW_CLI_ENV_VALUE, + }); + expect(marked).not.toBe(env); + expect(env.OPENCLAW_CLI).toBe("0"); + }); +}); + +describe("ensureOpenClawExecMarkerOnProcess", () => { + it("mutates and returns the provided process env", () => { + const env: NodeJS.ProcessEnv = { PATH: "/usr/bin" }; + + expect(ensureOpenClawExecMarkerOnProcess(env)).toBe(env); + expect(env[OPENCLAW_CLI_ENV_VAR]).toBe(OPENCLAW_CLI_ENV_VALUE); + }); +}); diff --git a/src/infra/openclaw-root.test.ts b/src/infra/openclaw-root.test.ts index 85d24512468..e12b2d77f64 100644 --- a/src/infra/openclaw-root.test.ts +++ b/src/infra/openclaw-root.test.ts @@ -141,6 +141,19 @@ describe("resolveOpenClawPackageRoot", () => { expect(resolveOpenClawPackageRootSync({ moduleUrl })).toBe(pkgRoot); }); + it("falls through from a non-openclaw moduleUrl candidate to cwd", async () => { + const wrongPkgRoot = fx("moduleurl-fallthrough", "wrong"); + const cwdPkgRoot = fx("moduleurl-fallthrough", "cwd"); + setFile(path.join(wrongPkgRoot, "package.json"), JSON.stringify({ name: "not-openclaw" })); + setFile(path.join(cwdPkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); + const moduleUrl = pathToFileURL(path.join(wrongPkgRoot, "dist", "index.js")).toString(); + + expect(resolveOpenClawPackageRootSync({ moduleUrl, cwd: cwdPkgRoot })).toBe(cwdPkgRoot); + await expect(resolveOpenClawPackageRoot({ moduleUrl, cwd: cwdPkgRoot })).resolves.toBe( + cwdPkgRoot, + ); + }); + it("ignores invalid moduleUrl values and falls back to cwd", async () => { const pkgRoot = fx("invalid-moduleurl"); setFile(path.join(pkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); @@ -160,6 +173,16 @@ 
describe("resolveOpenClawPackageRoot", () => { expect(resolveOpenClawPackageRootSync({ cwd: pkgRoot })).toBeNull(); }); + it("falls back from a symlinked argv1 to the node_modules package root", () => { + const project = fx("symlink-node-modules-fallback"); + const argv1 = path.join(project, "node_modules", ".bin", "openclaw"); + state.realpaths.set(abs(argv1), abs(path.join(project, "versions", "current", "openclaw.mjs"))); + const pkgRoot = path.join(project, "node_modules", "openclaw"); + setFile(path.join(pkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); + + expect(resolveOpenClawPackageRootSync({ argv1 })).toBe(pkgRoot); + }); + it("async resolver matches sync behavior", async () => { const pkgRoot = fx("async"); setFile(path.join(pkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); diff --git a/src/infra/os-summary.test.ts b/src/infra/os-summary.test.ts new file mode 100644 index 00000000000..533321f8dba --- /dev/null +++ b/src/infra/os-summary.test.ts @@ -0,0 +1,64 @@ +import os from "node:os"; +import { afterEach, describe, expect, it, vi } from "vitest"; + +const spawnSyncMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawnSync: (...args: unknown[]) => spawnSyncMock(...args), +})); + +import { resolveOsSummary } from "./os-summary.js"; + +describe("resolveOsSummary", () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("formats darwin labels from sw_vers output", () => { + vi.spyOn(os, "platform").mockReturnValue("darwin"); + vi.spyOn(os, "release").mockReturnValue("24.0.0"); + vi.spyOn(os, "arch").mockReturnValue("arm64"); + spawnSyncMock.mockReturnValue({ + stdout: " 15.4 \n", + stderr: "", + pid: 1, + output: [], + status: 0, + signal: null, + }); + + expect(resolveOsSummary()).toEqual({ + platform: "darwin", + arch: "arm64", + release: "24.0.0", + label: "macos 15.4 (arm64)", + }); + }); + + it("falls back to os.release when sw_vers output is blank", () => { + vi.spyOn(os, 
"platform").mockReturnValue("darwin"); + vi.spyOn(os, "release").mockReturnValue("24.1.0"); + vi.spyOn(os, "arch").mockReturnValue("x64"); + spawnSyncMock.mockReturnValue({ + stdout: " ", + stderr: "", + pid: 1, + output: [], + status: 0, + signal: null, + }); + + expect(resolveOsSummary().label).toBe("macos 24.1.0 (x64)"); + }); + + it("formats windows and non-darwin labels from os metadata", () => { + vi.spyOn(os, "release").mockReturnValue("10.0.26100"); + vi.spyOn(os, "arch").mockReturnValue("x64"); + + vi.spyOn(os, "platform").mockReturnValue("win32"); + expect(resolveOsSummary().label).toBe("windows 10.0.26100 (x64)"); + + vi.spyOn(os, "platform").mockReturnValue("linux"); + expect(resolveOsSummary().label).toBe("linux 10.0.26100 (x64)"); + }); +}); diff --git a/src/infra/outbound/abort.test.ts b/src/infra/outbound/abort.test.ts new file mode 100644 index 00000000000..794615b2a28 --- /dev/null +++ b/src/infra/outbound/abort.test.ts @@ -0,0 +1,21 @@ +import { describe, expect, it } from "vitest"; +import { throwIfAborted } from "./abort.js"; + +describe("throwIfAborted", () => { + it("does nothing when the signal is missing or not aborted", () => { + expect(() => throwIfAborted()).not.toThrow(); + expect(() => throwIfAborted(new AbortController().signal)).not.toThrow(); + }); + + it("throws a standard AbortError when the signal is aborted", () => { + const controller = new AbortController(); + controller.abort(); + + expect(() => throwIfAborted(controller.signal)).toThrowError( + expect.objectContaining({ + name: "AbortError", + message: "Operation aborted", + }), + ); + }); +}); diff --git a/src/infra/outbound/channel-adapters.test.ts b/src/infra/outbound/channel-adapters.test.ts new file mode 100644 index 00000000000..ee2b5fe6dc8 --- /dev/null +++ b/src/infra/outbound/channel-adapters.test.ts @@ -0,0 +1,48 @@ +import { Separator, TextDisplay } from "@buape/carbon"; +import { describe, expect, it } from "vitest"; +import { DiscordUiContainer } from 
"../../discord/ui.js"; +import { getChannelMessageAdapter } from "./channel-adapters.js"; + +describe("getChannelMessageAdapter", () => { + it("returns the default adapter for non-discord channels", () => { + expect(getChannelMessageAdapter("telegram")).toEqual({ + supportsComponentsV2: false, + }); + }); + + it("returns the discord adapter with a cross-context component builder", () => { + const adapter = getChannelMessageAdapter("discord"); + + expect(adapter.supportsComponentsV2).toBe(true); + expect(adapter.buildCrossContextComponents).toBeTypeOf("function"); + + const components = adapter.buildCrossContextComponents?.({ + originLabel: "Telegram", + message: "Hello from chat", + cfg: {} as never, + accountId: "primary", + }); + const container = components?.[0] as DiscordUiContainer | undefined; + + expect(components).toHaveLength(1); + expect(container).toBeInstanceOf(DiscordUiContainer); + expect(container?.components).toEqual([ + expect.any(TextDisplay), + expect.any(Separator), + expect.any(TextDisplay), + ]); + }); + + it("omits the message body block when the cross-context message is blank", () => { + const adapter = getChannelMessageAdapter("discord"); + const components = adapter.buildCrossContextComponents?.({ + originLabel: "Signal", + message: " ", + cfg: {} as never, + }); + const container = components?.[0] as DiscordUiContainer | undefined; + + expect(components).toHaveLength(1); + expect(container?.components).toEqual([expect.any(TextDisplay)]); + }); +}); diff --git a/src/infra/outbound/channel-resolution.test.ts b/src/infra/outbound/channel-resolution.test.ts new file mode 100644 index 00000000000..407994b152f --- /dev/null +++ b/src/infra/outbound/channel-resolution.test.ts @@ -0,0 +1,156 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const resolveDefaultAgentIdMock = vi.hoisted(() => vi.fn()); +const resolveAgentWorkspaceDirMock = vi.hoisted(() => vi.fn()); +const getChannelPluginMock = vi.hoisted(() => vi.fn()); +const 
applyPluginAutoEnableMock = vi.hoisted(() => vi.fn()); +const loadOpenClawPluginsMock = vi.hoisted(() => vi.fn()); +const getActivePluginRegistryMock = vi.hoisted(() => vi.fn()); +const getActivePluginRegistryKeyMock = vi.hoisted(() => vi.fn()); +const normalizeMessageChannelMock = vi.hoisted(() => vi.fn()); +const isDeliverableMessageChannelMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../agents/agent-scope.js", () => ({ + resolveDefaultAgentId: (...args: unknown[]) => resolveDefaultAgentIdMock(...args), + resolveAgentWorkspaceDir: (...args: unknown[]) => resolveAgentWorkspaceDirMock(...args), +})); + +vi.mock("../../channels/plugins/index.js", () => ({ + getChannelPlugin: (...args: unknown[]) => getChannelPluginMock(...args), +})); + +vi.mock("../../config/plugin-auto-enable.js", () => ({ + applyPluginAutoEnable: (...args: unknown[]) => applyPluginAutoEnableMock(...args), +})); + +vi.mock("../../plugins/loader.js", () => ({ + loadOpenClawPlugins: (...args: unknown[]) => loadOpenClawPluginsMock(...args), +})); + +vi.mock("../../plugins/runtime.js", () => ({ + getActivePluginRegistry: (...args: unknown[]) => getActivePluginRegistryMock(...args), + getActivePluginRegistryKey: (...args: unknown[]) => getActivePluginRegistryKeyMock(...args), +})); + +vi.mock("../../utils/message-channel.js", () => ({ + normalizeMessageChannel: (...args: unknown[]) => normalizeMessageChannelMock(...args), + isDeliverableMessageChannel: (...args: unknown[]) => isDeliverableMessageChannelMock(...args), +})); + +import { importFreshModule } from "../../../test/helpers/import-fresh.js"; + +async function importChannelResolution(scope: string) { + return await importFreshModule( + import.meta.url, + `./channel-resolution.js?scope=${scope}`, + ); +} + +describe("outbound channel resolution", () => { + beforeEach(() => { + resolveDefaultAgentIdMock.mockReset(); + resolveAgentWorkspaceDirMock.mockReset(); + getChannelPluginMock.mockReset(); + applyPluginAutoEnableMock.mockReset(); + 
loadOpenClawPluginsMock.mockReset(); + getActivePluginRegistryMock.mockReset(); + getActivePluginRegistryKeyMock.mockReset(); + normalizeMessageChannelMock.mockReset(); + isDeliverableMessageChannelMock.mockReset(); + + normalizeMessageChannelMock.mockImplementation((value?: string | null) => + typeof value === "string" ? value.trim().toLowerCase() : undefined, + ); + isDeliverableMessageChannelMock.mockImplementation((value?: string) => + ["telegram", "discord", "slack"].includes(String(value)), + ); + getActivePluginRegistryMock.mockReturnValue({ channels: [] }); + getActivePluginRegistryKeyMock.mockReturnValue("registry-key"); + applyPluginAutoEnableMock.mockReturnValue({ config: { autoEnabled: true } }); + resolveDefaultAgentIdMock.mockReturnValue("main"); + resolveAgentWorkspaceDirMock.mockReturnValue("/tmp/workspace"); + }); + + it("normalizes deliverable channels and rejects unknown ones", async () => { + const channelResolution = await importChannelResolution("normalize"); + + expect(channelResolution.normalizeDeliverableOutboundChannel(" Telegram ")).toBe("telegram"); + expect(channelResolution.normalizeDeliverableOutboundChannel("unknown")).toBeUndefined(); + expect(channelResolution.normalizeDeliverableOutboundChannel(null)).toBeUndefined(); + }); + + it("returns the already-registered plugin without bootstrapping", async () => { + const plugin = { id: "telegram" }; + getChannelPluginMock.mockReturnValueOnce(plugin); + const channelResolution = await importChannelResolution("existing-plugin"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: {} as never, + }), + ).toBe(plugin); + expect(loadOpenClawPluginsMock).not.toHaveBeenCalled(); + }); + + it("falls back to the active registry when getChannelPlugin misses", async () => { + const plugin = { id: "telegram" }; + getChannelPluginMock.mockReturnValue(undefined); + getActivePluginRegistryMock.mockReturnValue({ + channels: [{ plugin }], + }); + const 
channelResolution = await importChannelResolution("direct-registry"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: {} as never, + }), + ).toBe(plugin); + }); + + it("bootstraps plugins once per registry key and returns the newly loaded plugin", async () => { + const plugin = { id: "telegram" }; + getChannelPluginMock.mockReturnValueOnce(undefined).mockReturnValueOnce(plugin); + const channelResolution = await importChannelResolution("bootstrap-success"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }), + ).toBe(plugin); + expect(loadOpenClawPluginsMock).toHaveBeenCalledWith({ + config: { autoEnabled: true }, + workspaceDir: "/tmp/workspace", + }); + + getChannelPluginMock.mockReturnValue(undefined); + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }); + expect(loadOpenClawPluginsMock).toHaveBeenCalledTimes(1); + }); + + it("retries bootstrap after a transient load failure", async () => { + getChannelPluginMock.mockReturnValue(undefined); + loadOpenClawPluginsMock.mockImplementationOnce(() => { + throw new Error("transient"); + }); + const channelResolution = await importChannelResolution("bootstrap-retry"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }), + ).toBeUndefined(); + + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }); + expect(loadOpenClawPluginsMock).toHaveBeenCalledTimes(2); + }); +}); diff --git a/src/infra/outbound/channel-target.test.ts b/src/infra/outbound/channel-target.test.ts new file mode 100644 index 00000000000..5d1f290d8f5 --- /dev/null +++ b/src/infra/outbound/channel-target.test.ts @@ -0,0 +1,63 @@ +import { describe, expect, it } from "vitest"; +import { applyTargetToParams } from "./channel-target.js"; + 
+describe("applyTargetToParams", () => { + it("maps trimmed target values into the configured target field", () => { + const toParams = { + action: "send", + args: { target: " channel:C1 " } as Record, + }; + applyTargetToParams(toParams); + expect(toParams.args.to).toBe("channel:C1"); + + const channelIdParams = { + action: "channel-info", + args: { target: " C123 " } as Record, + }; + applyTargetToParams(channelIdParams); + expect(channelIdParams.args.channelId).toBe("C123"); + }); + + it("throws on legacy destination fields when the action has canonical target support", () => { + expect(() => + applyTargetToParams({ + action: "send", + args: { + target: "channel:C1", + to: "legacy", + }, + }), + ).toThrow("Use `target` instead of `to`/`channelId`."); + }); + + it("throws when a no-target action receives target or legacy destination fields", () => { + expect(() => + applyTargetToParams({ + action: "broadcast", + args: { + to: "legacy", + }, + }), + ).toThrow("Use `target` for actions that accept a destination."); + + expect(() => + applyTargetToParams({ + action: "broadcast", + args: { + target: "channel:C1", + }, + }), + ).toThrow("Action broadcast does not accept a target."); + }); + + it("does nothing when target is blank", () => { + const params = { + action: "send", + args: { target: " " } as Record, + }; + + applyTargetToParams(params); + + expect(params.args).toEqual({ target: " " }); + }); +}); diff --git a/src/infra/outbound/conversation-id.test.ts b/src/infra/outbound/conversation-id.test.ts index b35c8e2e4a1..68865219c37 100644 --- a/src/infra/outbound/conversation-id.test.ts +++ b/src/infra/outbound/conversation-id.test.ts @@ -2,39 +2,58 @@ import { describe, expect, it } from "vitest"; import { resolveConversationIdFromTargets } from "./conversation-id.js"; describe("resolveConversationIdFromTargets", () => { - it("prefers explicit thread id when present", () => { - const resolved = resolveConversationIdFromTargets({ - threadId: "123456789", - 
targets: ["channel:987654321"], - }); - expect(resolved).toBe("123456789"); + it.each([ + { + name: "prefers explicit thread id strings", + params: { threadId: "123456789", targets: ["channel:987654321"] }, + expected: "123456789", + }, + { + name: "normalizes numeric thread ids", + params: { threadId: 123456789, targets: ["channel:987654321"] }, + expected: "123456789", + }, + { + name: "falls back when the thread id is blank", + params: { threadId: " ", targets: ["channel:987654321"] }, + expected: "987654321", + }, + ])("$name", ({ params, expected }) => { + expect(resolveConversationIdFromTargets(params)).toBe(expected); }); - it("extracts channel ids from channel: targets", () => { - const resolved = resolveConversationIdFromTargets({ + it.each([ + { + name: "extracts channel ids from channel targets", targets: ["channel:987654321"], - }); - expect(resolved).toBe("987654321"); - }); - - it("extracts ids from Discord channel mentions", () => { - const resolved = resolveConversationIdFromTargets({ + expected: "987654321", + }, + { + name: "trims channel target ids", + targets: ["channel: 987654321 "], + expected: "987654321", + }, + { + name: "extracts ids from Discord channel mentions", targets: ["<#1475250310120214812>"], - }); - expect(resolved).toBe("1475250310120214812"); - }); - - it("accepts raw numeric ids", () => { - const resolved = resolveConversationIdFromTargets({ + expected: "1475250310120214812", + }, + { + name: "accepts raw numeric ids", targets: ["1475250310120214812"], - }); - expect(resolved).toBe("1475250310120214812"); - }); - - it("returns undefined for non-channel targets", () => { - const resolved = resolveConversationIdFromTargets({ + expected: "1475250310120214812", + }, + { + name: "returns undefined for non-channel targets", targets: ["user:alice", "general"], - }); - expect(resolved).toBeUndefined(); + expected: undefined, + }, + { + name: "skips blank and malformed targets", + targets: [undefined, null, " ", "channel: ", 
"<#not-a-number>"], + expected: undefined, + }, + ])("$name", ({ targets, expected }) => { + expect(resolveConversationIdFromTargets({ targets })).toBe(expected); }); }); diff --git a/src/infra/outbound/envelope.test.ts b/src/infra/outbound/envelope.test.ts new file mode 100644 index 00000000000..68b2aa28b96 --- /dev/null +++ b/src/infra/outbound/envelope.test.ts @@ -0,0 +1,55 @@ +import { describe, expect, it } from "vitest"; +import type { ReplyPayload } from "../../auto-reply/types.js"; +import { buildOutboundResultEnvelope } from "./envelope.js"; +import type { OutboundDeliveryJson } from "./format.js"; + +describe("buildOutboundResultEnvelope", () => { + const delivery: OutboundDeliveryJson = { + channel: "telegram", + via: "direct", + to: "123", + messageId: "m1", + mediaUrl: null, + chatId: "c1", + }; + + it("flattens delivery by default when nothing else is present", () => { + expect(buildOutboundResultEnvelope({ delivery })).toEqual(delivery); + }); + + it("keeps pre-normalized payload JSON entries but clones the array", () => { + const payloads = [{ text: "hi", mediaUrl: null, mediaUrls: undefined }]; + + const envelope = buildOutboundResultEnvelope({ + payloads, + meta: { ok: true }, + }); + + expect(envelope).toEqual({ + payloads: [{ text: "hi", mediaUrl: null, mediaUrls: undefined }], + meta: { ok: true }, + }); + expect((envelope as { payloads: unknown[] }).payloads).not.toBe(payloads); + }); + + it("normalizes reply payloads and keeps wrapped delivery when flattening is disabled", () => { + const payloads: ReplyPayload[] = [{ text: "hello" }]; + + expect( + buildOutboundResultEnvelope({ + payloads, + delivery, + flattenDelivery: false, + }), + ).toEqual({ + payloads: [ + { + text: "hello", + mediaUrl: null, + channelData: undefined, + }, + ], + delivery, + }); + }); +}); diff --git a/src/infra/outbound/identity.test.ts b/src/infra/outbound/identity.test.ts new file mode 100644 index 00000000000..ea1c3623fbc --- /dev/null +++ 
b/src/infra/outbound/identity.test.ts @@ -0,0 +1,69 @@ +import { describe, expect, it, vi } from "vitest"; + +const resolveAgentIdentityMock = vi.hoisted(() => vi.fn()); +const resolveAgentAvatarMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../agents/identity.js", () => ({ + resolveAgentIdentity: (...args: unknown[]) => resolveAgentIdentityMock(...args), +})); + +vi.mock("../../agents/identity-avatar.js", () => ({ + resolveAgentAvatar: (...args: unknown[]) => resolveAgentAvatarMock(...args), +})); + +import { normalizeOutboundIdentity, resolveAgentOutboundIdentity } from "./identity.js"; + +describe("normalizeOutboundIdentity", () => { + it("trims fields and drops empty identities", () => { + expect( + normalizeOutboundIdentity({ + name: " Demo Bot ", + avatarUrl: " https://example.com/a.png ", + emoji: " 🤖 ", + }), + ).toEqual({ + name: "Demo Bot", + avatarUrl: "https://example.com/a.png", + emoji: "🤖", + }); + expect( + normalizeOutboundIdentity({ + name: " ", + avatarUrl: "\n", + emoji: "", + }), + ).toBeUndefined(); + }); +}); + +describe("resolveAgentOutboundIdentity", () => { + it("builds normalized identity data and keeps only remote avatars", () => { + resolveAgentIdentityMock.mockReturnValueOnce({ + name: " Agent Smith ", + emoji: " 🕶️ ", + }); + resolveAgentAvatarMock.mockReturnValueOnce({ + kind: "remote", + url: "https://example.com/avatar.png", + }); + + expect(resolveAgentOutboundIdentity({} as never, "main")).toEqual({ + name: "Agent Smith", + emoji: "🕶️", + avatarUrl: "https://example.com/avatar.png", + }); + }); + + it("drops blank and non-remote avatar values after normalization", () => { + resolveAgentIdentityMock.mockReturnValueOnce({ + name: " ", + emoji: "", + }); + resolveAgentAvatarMock.mockReturnValueOnce({ + kind: "data", + dataUrl: "data:image/png;base64,abc", + }); + + expect(resolveAgentOutboundIdentity({} as never, "main")).toBeUndefined(); + }); +}); diff --git a/src/infra/outbound/message-action-params.test.ts 
b/src/infra/outbound/message-action-params.test.ts index 996db9682b0..f72bd2d26aa 100644 --- a/src/infra/outbound/message-action-params.test.ts +++ b/src/infra/outbound/message-action-params.test.ts @@ -2,15 +2,145 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import type { ChannelThreadingToolContext } from "../../channels/plugins/types.js"; import type { OpenClawConfig } from "../../config/config.js"; import { hydrateAttachmentParamsForAction, + normalizeSandboxMediaList, normalizeSandboxMediaParams, + resolveAttachmentMediaPolicy, + resolveSlackAutoThreadId, + resolveTelegramAutoThreadId, } from "./message-action-params.js"; const cfg = {} as OpenClawConfig; const maybeIt = process.platform === "win32" ? it.skip : it; +function createToolContext( + overrides: Partial = {}, +): ChannelThreadingToolContext { + return { + currentChannelId: "C123", + currentThreadTs: "thread-1", + replyToMode: "all", + ...overrides, + }; +} + +describe("message action threading helpers", () => { + it("resolves Slack auto-thread ids only for matching active channels", () => { + expect( + resolveSlackAutoThreadId({ + to: "#c123", + toolContext: createToolContext(), + }), + ).toBe("thread-1"); + expect( + resolveSlackAutoThreadId({ + to: "channel:C999", + toolContext: createToolContext(), + }), + ).toBeUndefined(); + expect( + resolveSlackAutoThreadId({ + to: "user:U123", + toolContext: createToolContext(), + }), + ).toBeUndefined(); + }); + + it("skips Slack auto-thread ids when reply mode or context blocks them", () => { + expect( + resolveSlackAutoThreadId({ + to: "C123", + toolContext: createToolContext({ + replyToMode: "first", + hasRepliedRef: { value: true }, + }), + }), + ).toBeUndefined(); + expect( + resolveSlackAutoThreadId({ + to: "C123", + toolContext: createToolContext({ replyToMode: "off" }), + }), + ).toBeUndefined(); + expect( + resolveSlackAutoThreadId({ + to: "C123", + 
toolContext: createToolContext({ currentThreadTs: undefined }), + }), + ).toBeUndefined(); + }); + + it("resolves Telegram auto-thread ids for matching chats across target formats", () => { + expect( + resolveTelegramAutoThreadId({ + to: "telegram:group:-100123:topic:77", + toolContext: createToolContext({ + currentChannelId: "tg:group:-100123", + }), + }), + ).toBe("thread-1"); + expect( + resolveTelegramAutoThreadId({ + to: "-100999:77", + toolContext: createToolContext({ + currentChannelId: "-100123", + }), + }), + ).toBeUndefined(); + expect( + resolveTelegramAutoThreadId({ + to: "-100123", + toolContext: createToolContext({ currentChannelId: undefined }), + }), + ).toBeUndefined(); + }); +}); + +describe("message action media helpers", () => { + it("prefers sandbox media policy when sandbox roots are non-blank", () => { + expect( + resolveAttachmentMediaPolicy({ + sandboxRoot: " /tmp/workspace ", + mediaLocalRoots: ["/tmp/a"], + }), + ).toEqual({ + mode: "sandbox", + sandboxRoot: "/tmp/workspace", + }); + expect( + resolveAttachmentMediaPolicy({ + sandboxRoot: " ", + mediaLocalRoots: ["/tmp/a"], + }), + ).toEqual({ + mode: "host", + localRoots: ["/tmp/a"], + }); + }); + + maybeIt("normalizes sandbox media lists and dedupes resolved workspace paths", async () => { + const sandboxRoot = await fs.mkdtemp(path.join(os.tmpdir(), "msg-params-list-")); + try { + await expect( + normalizeSandboxMediaList({ + values: [" data:text/plain;base64,QQ== "], + }), + ).rejects.toThrow(/data:/i); + await expect( + normalizeSandboxMediaList({ + values: [" file:///workspace/assets/photo.png ", "/workspace/assets/photo.png", " "], + sandboxRoot: ` ${sandboxRoot} `, + }), + ).resolves.toEqual([path.join(sandboxRoot, "assets", "photo.png")]); + } finally { + await fs.rm(sandboxRoot, { recursive: true, force: true }); + } + }); +}); + describe("message action sandbox media hydration", () => { maybeIt("rejects symlink retarget escapes after sandbox media normalization", async () => { 
const sandboxRoot = await fs.mkdtemp(path.join(os.tmpdir(), "msg-params-sandbox-")); diff --git a/src/infra/outbound/outbound.test.ts b/src/infra/outbound/outbound.test.ts index 72ccf3e3c55..b04c0462e43 100644 --- a/src/infra/outbound/outbound.test.ts +++ b/src/infra/outbound/outbound.test.ts @@ -19,8 +19,6 @@ import { recoverPendingDeliveries, } from "./delivery-queue.js"; import { DirectoryCache } from "./directory-cache.js"; -import { buildOutboundResultEnvelope } from "./envelope.js"; -import type { OutboundDeliveryJson } from "./format.js"; import { buildOutboundDeliveryJson, formatGatewaySummary, @@ -670,73 +668,6 @@ describe("DirectoryCache", () => { }); }); -describe("buildOutboundResultEnvelope", () => { - it("formats envelope variants", () => { - const whatsappDelivery: OutboundDeliveryJson = { - channel: "whatsapp", - via: "gateway", - to: "+1", - messageId: "m1", - mediaUrl: null, - }; - const telegramDelivery: OutboundDeliveryJson = { - channel: "telegram", - via: "direct", - to: "123", - messageId: "m2", - mediaUrl: null, - chatId: "c1", - }; - const discordDelivery: OutboundDeliveryJson = { - channel: "discord", - via: "gateway", - to: "channel:C1", - messageId: "m3", - mediaUrl: null, - channelId: "C1", - }; - const cases = typedCases<{ - name: string; - input: Parameters[0]; - expected: unknown; - }>([ - { - name: "flatten delivery by default", - input: { delivery: whatsappDelivery }, - expected: whatsappDelivery, - }, - { - name: "keep payloads + meta", - input: { - payloads: [{ text: "hi", mediaUrl: null, mediaUrls: undefined }], - meta: { foo: "bar" }, - }, - expected: { - payloads: [{ text: "hi", mediaUrl: null, mediaUrls: undefined }], - meta: { foo: "bar" }, - }, - }, - { - name: "include delivery when payloads exist", - input: { payloads: [], delivery: telegramDelivery, meta: { ok: true } }, - expected: { - payloads: [], - meta: { ok: true }, - delivery: telegramDelivery, - }, - }, - { - name: "keep wrapped delivery when flatten disabled", - 
input: { delivery: discordDelivery, flattenDelivery: false }, - expected: { delivery: discordDelivery }, - }, - ]); - for (const testCase of cases) { - expect(buildOutboundResultEnvelope(testCase.input), testCase.name).toEqual(testCase.expected); - } - }); -}); - describe("formatOutboundDeliverySummary", () => { it("formats fallback and channel-specific detail variants", () => { const cases = [ diff --git a/src/infra/outbound/session-context.test.ts b/src/infra/outbound/session-context.test.ts new file mode 100644 index 00000000000..c24ede1f3e8 --- /dev/null +++ b/src/infra/outbound/session-context.test.ts @@ -0,0 +1,55 @@ +import { describe, expect, it, vi } from "vitest"; + +const resolveSessionAgentIdMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../agents/agent-scope.js", () => ({ + resolveSessionAgentId: (...args: unknown[]) => resolveSessionAgentIdMock(...args), +})); + +import { buildOutboundSessionContext } from "./session-context.js"; + +describe("buildOutboundSessionContext", () => { + it("returns undefined when both session key and agent id are blank", () => { + expect( + buildOutboundSessionContext({ + cfg: {} as never, + sessionKey: " ", + agentId: null, + }), + ).toBeUndefined(); + expect(resolveSessionAgentIdMock).not.toHaveBeenCalled(); + }); + + it("derives the agent id from the trimmed session key when no explicit agent is given", () => { + resolveSessionAgentIdMock.mockReturnValueOnce("derived-agent"); + + expect( + buildOutboundSessionContext({ + cfg: { agents: {} } as never, + sessionKey: " session:main:123 ", + }), + ).toEqual({ + key: "session:main:123", + agentId: "derived-agent", + }); + expect(resolveSessionAgentIdMock).toHaveBeenCalledWith({ + sessionKey: "session:main:123", + config: { agents: {} }, + }); + }); + + it("prefers an explicit trimmed agent id over the derived one", () => { + resolveSessionAgentIdMock.mockReturnValueOnce("derived-agent"); + + expect( + buildOutboundSessionContext({ + cfg: {} as never, + sessionKey: 
"session:main:123", + agentId: " explicit-agent ", + }), + ).toEqual({ + key: "session:main:123", + agentId: "explicit-agent", + }); + }); +}); diff --git a/src/infra/outbound/target-errors.test.ts b/src/infra/outbound/target-errors.test.ts new file mode 100644 index 00000000000..fb43f5279bf --- /dev/null +++ b/src/infra/outbound/target-errors.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { + ambiguousTargetError, + ambiguousTargetMessage, + missingTargetError, + missingTargetMessage, + unknownTargetError, + unknownTargetMessage, +} from "./target-errors.js"; + +describe("target error helpers", () => { + it("formats missing-target messages with and without hints", () => { + expect(missingTargetMessage("Slack")).toBe("Delivering to Slack requires target"); + expect(missingTargetMessage("Slack", "Use channel:C123")).toBe( + "Delivering to Slack requires target Use channel:C123", + ); + expect(missingTargetError("Slack", "Use channel:C123").message).toBe( + "Delivering to Slack requires target Use channel:C123", + ); + }); + + it("formats ambiguous and unknown target messages with labeled hints", () => { + expect(ambiguousTargetMessage("Discord", "general")).toBe( + 'Ambiguous target "general" for Discord. Provide a unique name or an explicit id.', + ); + expect(ambiguousTargetMessage("Discord", "general", "Use channel:123")).toBe( + 'Ambiguous target "general" for Discord. Provide a unique name or an explicit id. Hint: Use channel:123', + ); + expect(unknownTargetMessage("Discord", "general", "Use channel:123")).toBe( + 'Unknown target "general" for Discord. 
Hint: Use channel:123', + ); + expect(ambiguousTargetError("Discord", "general", "Use channel:123").message).toContain( + "Hint: Use channel:123", + ); + expect(unknownTargetError("Discord", "general").message).toBe( + 'Unknown target "general" for Discord.', + ); + }); +}); diff --git a/src/infra/outbound/target-normalization.test.ts b/src/infra/outbound/target-normalization.test.ts new file mode 100644 index 00000000000..c8e6ea7e124 --- /dev/null +++ b/src/infra/outbound/target-normalization.test.ts @@ -0,0 +1,142 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const normalizeChannelIdMock = vi.hoisted(() => vi.fn()); +const getChannelPluginMock = vi.hoisted(() => vi.fn()); +const getActivePluginRegistryVersionMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../channels/plugins/index.js", () => ({ + normalizeChannelId: (...args: unknown[]) => normalizeChannelIdMock(...args), + getChannelPlugin: (...args: unknown[]) => getChannelPluginMock(...args), +})); + +vi.mock("../../plugins/runtime.js", () => ({ + getActivePluginRegistryVersion: (...args: unknown[]) => + getActivePluginRegistryVersionMock(...args), +})); + +import { + buildTargetResolverSignature, + normalizeChannelTargetInput, + normalizeTargetForProvider, +} from "./target-normalization.js"; + +describe("normalizeChannelTargetInput", () => { + it("trims raw target input", () => { + expect(normalizeChannelTargetInput(" channel:C1 ")).toBe("channel:C1"); + }); +}); + +describe("normalizeTargetForProvider", () => { + beforeEach(() => { + normalizeChannelIdMock.mockReset(); + getChannelPluginMock.mockReset(); + getActivePluginRegistryVersionMock.mockReset(); + }); + + it("returns undefined for missing or blank raw input", () => { + expect(normalizeTargetForProvider("telegram")).toBeUndefined(); + expect(normalizeTargetForProvider("telegram", " ")).toBeUndefined(); + }); + + it("falls back to trimmed input when the provider is unknown or has no normalizer", () => { + 
normalizeChannelIdMock.mockReturnValueOnce(null); + expect(normalizeTargetForProvider("unknown", " raw-id ")).toBe("raw-id"); + + normalizeChannelIdMock.mockReturnValueOnce("telegram"); + getActivePluginRegistryVersionMock.mockReturnValueOnce(1); + getChannelPluginMock.mockReturnValueOnce(undefined); + expect(normalizeTargetForProvider("telegram", " raw-id ")).toBe("raw-id"); + }); + + it("uses the cached target normalizer until the plugin registry version changes", () => { + const firstNormalizer = vi.fn((raw: string) => raw.trim().toUpperCase()); + const secondNormalizer = vi.fn((raw: string) => `next:${raw.trim()}`); + normalizeChannelIdMock.mockReturnValue("telegram"); + getActivePluginRegistryVersionMock + .mockReturnValueOnce(10) + .mockReturnValueOnce(10) + .mockReturnValueOnce(11); + getChannelPluginMock + .mockReturnValueOnce({ + messaging: { normalizeTarget: firstNormalizer }, + }) + .mockReturnValueOnce({ + messaging: { normalizeTarget: secondNormalizer }, + }); + + expect(normalizeTargetForProvider("telegram", " abc ")).toBe("ABC"); + expect(normalizeTargetForProvider("telegram", " def ")).toBe("DEF"); + expect(normalizeTargetForProvider("telegram", " ghi ")).toBe("next:ghi"); + + expect(getChannelPluginMock).toHaveBeenCalledTimes(2); + expect(firstNormalizer).toHaveBeenCalledTimes(2); + expect(secondNormalizer).toHaveBeenCalledTimes(1); + }); + + it("returns undefined when the provider normalizer resolves to an empty value", () => { + normalizeChannelIdMock.mockReturnValueOnce("telegram"); + getActivePluginRegistryVersionMock.mockReturnValueOnce(20); + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + normalizeTarget: () => "", + }, + }); + + expect(normalizeTargetForProvider("telegram", " raw-id ")).toBeUndefined(); + }); +}); + +describe("buildTargetResolverSignature", () => { + beforeEach(() => { + getChannelPluginMock.mockReset(); + }); + + it("builds stable signatures from resolver hint and looksLikeId source", () => { + const 
looksLikeId = (value: string) => value.startsWith("C"); + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use channel id", + looksLikeId, + }, + }, + }); + + const first = buildTargetResolverSignature("slack"); + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use channel id", + looksLikeId, + }, + }, + }); + const second = buildTargetResolverSignature("slack"); + + expect(first).toBe(second); + }); + + it("changes when resolver metadata changes", () => { + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use channel id", + looksLikeId: (value: string) => value.startsWith("C"), + }, + }, + }); + const first = buildTargetResolverSignature("slack"); + + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use user id", + looksLikeId: (value: string) => value.startsWith("U"), + }, + }, + }); + const second = buildTargetResolverSignature("slack"); + + expect(first).not.toBe(second); + }); +}); diff --git a/src/infra/outbound/targets.test.ts b/src/infra/outbound/targets.test.ts index e0b669040a6..b9c795f532e 100644 --- a/src/infra/outbound/targets.test.ts +++ b/src/infra/outbound/targets.test.ts @@ -462,7 +462,14 @@ describe("resolveSessionDeliveryTarget", () => { expectedChannel: "none", expectedReason: "dm-blocked", }, - ])("$name", ({ name, entry, directPolicy, expectedChannel, expectedTo, expectedReason }) => { + ] satisfies Array<{ + name: string; + entry: NonNullable[0]["entry"]>; + directPolicy?: "allow" | "block"; + expectedChannel: string; + expectedTo?: string; + expectedReason?: string; + }>)("$name", ({ name, entry, directPolicy, expectedChannel, expectedTo, expectedReason }) => { expectHeartbeatTarget({ name, entry, diff --git a/src/infra/outbound/tool-payload.test.ts b/src/infra/outbound/tool-payload.test.ts new file mode 100644 index 00000000000..08629089618 --- /dev/null +++ 
b/src/infra/outbound/tool-payload.test.ts @@ -0,0 +1,42 @@ +import { describe, expect, it } from "vitest"; +import { extractToolPayload } from "./tool-payload.js"; + +describe("extractToolPayload", () => { + it("prefers explicit details payloads", () => { + expect( + extractToolPayload({ + details: { ok: true }, + content: [{ type: "text", text: '{"ignored":true}' }], + } as never), + ).toEqual({ ok: true }); + }); + + it("parses JSON text blocks from tool content", () => { + expect( + extractToolPayload({ + content: [ + { type: "image", url: "https://example.com/a.png" }, + { type: "text", text: '{"ok":true,"count":2}' }, + ], + } as never), + ).toEqual({ ok: true, count: 2 }); + }); + + it("falls back to raw text, then content, then the whole result", () => { + expect( + extractToolPayload({ + content: [{ type: "text", text: "not json" }], + } as never), + ).toBe("not json"); + + const content = [{ type: "image", url: "https://example.com/a.png" }]; + expect( + extractToolPayload({ + content, + } as never), + ).toBe(content); + + const result = { status: "ok" }; + expect(extractToolPayload(result as never)).toBe(result); + }); +}); diff --git a/src/infra/package-json.test.ts b/src/infra/package-json.test.ts new file mode 100644 index 00000000000..664fcaa4f14 --- /dev/null +++ b/src/infra/package-json.test.ts @@ -0,0 +1,39 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { readPackageName, readPackageVersion } from "./package-json.js"; + +describe("package-json helpers", () => { + it("reads package version and trims package name", async () => { + await withTempDir({ prefix: "openclaw-package-json-" }, async (root) => { + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ version: "1.2.3", name: " @openclaw/demo " }), + "utf8", + ); + + await expect(readPackageVersion(root)).resolves.toBe("1.2.3"); + await 
expect(readPackageName(root)).resolves.toBe("@openclaw/demo"); + }); + }); + + it("returns null for missing or invalid package.json data", async () => { + await withTempDir({ prefix: "openclaw-package-json-" }, async (root) => { + await expect(readPackageVersion(root)).resolves.toBeNull(); + await expect(readPackageName(root)).resolves.toBeNull(); + + await fs.writeFile(path.join(root, "package.json"), "{", "utf8"); + await expect(readPackageVersion(root)).resolves.toBeNull(); + await expect(readPackageName(root)).resolves.toBeNull(); + + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ version: 123, name: " " }), + "utf8", + ); + await expect(readPackageVersion(root)).resolves.toBeNull(); + await expect(readPackageName(root)).resolves.toBeNull(); + }); + }); +}); diff --git a/src/infra/package-tag.test.ts b/src/infra/package-tag.test.ts new file mode 100644 index 00000000000..794acf63093 --- /dev/null +++ b/src/infra/package-tag.test.ts @@ -0,0 +1,21 @@ +import { describe, expect, it } from "vitest"; +import { normalizePackageTagInput } from "./package-tag.js"; + +describe("normalizePackageTagInput", () => { + const packageNames = ["openclaw", "@openclaw/plugin"] as const; + + it("returns null for blank inputs", () => { + expect(normalizePackageTagInput(undefined, packageNames)).toBeNull(); + expect(normalizePackageTagInput(" ", packageNames)).toBeNull(); + }); + + it("strips known package-name prefixes before returning the tag", () => { + expect(normalizePackageTagInput("openclaw@beta", packageNames)).toBe("beta"); + expect(normalizePackageTagInput("@openclaw/plugin@2026.2.24", packageNames)).toBe("2026.2.24"); + }); + + it("returns trimmed raw values when no package prefix matches", () => { + expect(normalizePackageTagInput(" latest ", packageNames)).toBe("latest"); + expect(normalizePackageTagInput("@other/plugin@beta", packageNames)).toBe("@other/plugin@beta"); + }); +}); diff --git a/src/infra/pairing-files.test.ts 
b/src/infra/pairing-files.test.ts new file mode 100644 index 00000000000..8f891036956 --- /dev/null +++ b/src/infra/pairing-files.test.ts @@ -0,0 +1,85 @@ +import { describe, expect, it, vi } from "vitest"; +import { + pruneExpiredPending, + resolvePairingPaths, + upsertPendingPairingRequest, +} from "./pairing-files.js"; + +describe("pairing file helpers", () => { + it("resolves pairing file paths from explicit base dirs", () => { + expect(resolvePairingPaths("/tmp/openclaw-state", "devices")).toEqual({ + dir: "/tmp/openclaw-state/devices", + pendingPath: "/tmp/openclaw-state/devices/pending.json", + pairedPath: "/tmp/openclaw-state/devices/paired.json", + }); + }); + + it("prunes only entries older than the ttl", () => { + const pendingById = { + stale: { ts: 10, requestId: "stale" }, + edge: { ts: 50, requestId: "edge" }, + fresh: { ts: 70, requestId: "fresh" }, + }; + + pruneExpiredPending(pendingById, 100, 50); + + expect(pendingById).toEqual({ + edge: { ts: 50, requestId: "edge" }, + fresh: { ts: 70, requestId: "fresh" }, + }); + }); + + it("reuses existing pending requests without persisting again", async () => { + const persist = vi.fn(async () => undefined); + const existing = { requestId: "req-1", deviceId: "device-1", ts: 1 }; + const pendingById = { "req-1": existing }; + + await expect( + upsertPendingPairingRequest({ + pendingById, + isExisting: (pending) => pending.deviceId === "device-1", + createRequest: vi.fn(() => ({ requestId: "req-2", deviceId: "device-1", ts: 2 })), + isRepair: false, + persist, + }), + ).resolves.toEqual({ + status: "pending", + request: existing, + created: false, + }); + expect(persist).not.toHaveBeenCalled(); + }); + + it("creates and persists new pending requests with the repair flag", async () => { + const persist = vi.fn(async () => undefined); + const createRequest = vi.fn((isRepair: boolean) => ({ + requestId: "req-2", + deviceId: "device-2", + ts: 2, + isRepair, + })); + const pendingById: Record< + string, + { 
requestId: string; deviceId: string; ts: number; isRepair: boolean } + > = {}; + + await expect( + upsertPendingPairingRequest({ + pendingById, + isExisting: (pending) => pending.deviceId === "device-2", + createRequest, + isRepair: true, + persist, + }), + ).resolves.toEqual({ + status: "pending", + request: { requestId: "req-2", deviceId: "device-2", ts: 2, isRepair: true }, + created: true, + }); + expect(createRequest).toHaveBeenCalledWith(true); + expect(persist).toHaveBeenCalledOnce(); + expect(pendingById).toEqual({ + "req-2": { requestId: "req-2", deviceId: "device-2", ts: 2, isRepair: true }, + }); + }); +}); diff --git a/src/infra/pairing-pending.test.ts b/src/infra/pairing-pending.test.ts new file mode 100644 index 00000000000..30c2551176b --- /dev/null +++ b/src/infra/pairing-pending.test.ts @@ -0,0 +1,48 @@ +import { describe, expect, it, vi } from "vitest"; +import { rejectPendingPairingRequest } from "./pairing-pending.js"; + +describe("rejectPendingPairingRequest", () => { + it("returns null and skips persistence when the request is missing", async () => { + const persistState = vi.fn(); + + await expect( + rejectPendingPairingRequest({ + requestId: "missing", + idKey: "deviceId", + loadState: async () => ({ pendingById: {} }), + persistState, + getId: (pending: { id: string }) => pending.id, + }), + ).resolves.toBeNull(); + + expect(persistState).not.toHaveBeenCalled(); + }); + + it("removes the request, persists, and returns the dynamic id key", async () => { + const state = { + pendingById: { + keep: { accountId: "keep-me" }, + reject: { accountId: "acct-42" }, + }, + }; + const persistState = vi.fn(async () => undefined); + + await expect( + rejectPendingPairingRequest({ + requestId: "reject", + idKey: "accountId", + loadState: async () => state, + persistState, + getId: (pending) => pending.accountId, + }), + ).resolves.toEqual({ + requestId: "reject", + accountId: "acct-42", + }); + + expect(state.pendingById).toEqual({ + keep: { accountId: 
"keep-me" }, + }); + expect(persistState).toHaveBeenCalledWith(state); + }); +}); diff --git a/src/infra/pairing-token.test.ts b/src/infra/pairing-token.test.ts new file mode 100644 index 00000000000..2d6a5964396 --- /dev/null +++ b/src/infra/pairing-token.test.ts @@ -0,0 +1,30 @@ +import { Buffer } from "node:buffer"; +import { describe, expect, it, vi } from "vitest"; + +const randomBytesMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:crypto", async () => { + const actual = await vi.importActual("node:crypto"); + return { + ...actual, + randomBytes: (...args: unknown[]) => randomBytesMock(...args), + }; +}); + +import { generatePairingToken, PAIRING_TOKEN_BYTES, verifyPairingToken } from "./pairing-token.js"; + +describe("generatePairingToken", () => { + it("uses the configured byte count and returns a base64url token", () => { + randomBytesMock.mockReturnValueOnce(Buffer.from([0xfb, 0xff, 0x00])); + + expect(generatePairingToken()).toBe("-_8A"); + expect(randomBytesMock).toHaveBeenCalledWith(PAIRING_TOKEN_BYTES); + }); +}); + +describe("verifyPairingToken", () => { + it("uses constant-time comparison semantics", () => { + expect(verifyPairingToken("secret-token", "secret-token")).toBe(true); + expect(verifyPairingToken("secret-token", "secret-tokEn")).toBe(false); + }); +}); diff --git a/src/infra/parse-finite-number.test.ts b/src/infra/parse-finite-number.test.ts index d3c838cf61a..46329f3001b 100644 --- a/src/infra/parse-finite-number.test.ts +++ b/src/infra/parse-finite-number.test.ts @@ -11,11 +11,13 @@ describe("parseFiniteNumber", () => { { value: 42, expected: 42 }, { value: "3.14", expected: 3.14 }, { value: " 3.14ms", expected: 3.14 }, + { value: "+7", expected: 7 }, + { value: "1e3", expected: 1000 }, ])("parses %j", ({ value, expected }) => { expect(parseFiniteNumber(value)).toBe(expected); }); - it.each([Number.NaN, Number.POSITIVE_INFINITY, "not-a-number", " ", null])( + it.each([Number.NaN, Number.POSITIVE_INFINITY, "not-a-number", " ", "", 
null])( "returns undefined for %j", (value) => { expect(parseFiniteNumber(value)).toBeUndefined(); @@ -28,13 +30,17 @@ describe("parseStrictInteger", () => { { value: "42", expected: 42 }, { value: " -7 ", expected: -7 }, { value: 12, expected: 12 }, + { value: "+9", expected: 9 }, ])("parses %j", ({ value, expected }) => { expect(parseStrictInteger(value)).toBe(expected); }); - it.each(["42ms", "0abc", "1.5", " ", Number.MAX_SAFE_INTEGER + 1])("rejects %j", (value) => { - expect(parseStrictInteger(value)).toBeUndefined(); - }); + it.each(["42ms", "0abc", "1.5", "1e3", " ", Number.MAX_SAFE_INTEGER + 1])( + "rejects %j", + (value) => { + expect(parseStrictInteger(value)).toBeUndefined(); + }, + ); }); describe("parseStrictPositiveInteger", () => { diff --git a/src/infra/path-guards.test.ts b/src/infra/path-guards.test.ts new file mode 100644 index 00000000000..28bf3d7c3b8 --- /dev/null +++ b/src/infra/path-guards.test.ts @@ -0,0 +1,50 @@ +import { describe, expect, it } from "vitest"; +import { + hasNodeErrorCode, + isNodeError, + isNotFoundPathError, + isPathInside, + isSymlinkOpenError, + normalizeWindowsPathForComparison, +} from "./path-guards.js"; + +describe("normalizeWindowsPathForComparison", () => { + it("normalizes extended-length and UNC windows paths", () => { + expect(normalizeWindowsPathForComparison("\\\\?\\C:\\Users\\Peter/Repo")).toBe( + "c:\\users\\peter\\repo", + ); + expect(normalizeWindowsPathForComparison("\\\\?\\UNC\\Server\\Share\\Folder")).toBe( + "\\\\server\\share\\folder", + ); + }); +}); + +describe("node path error helpers", () => { + it("recognizes node-style error objects and exact codes", () => { + const enoent = { code: "ENOENT" }; + + expect(isNodeError(enoent)).toBe(true); + expect(isNodeError({ message: "nope" })).toBe(false); + expect(hasNodeErrorCode(enoent, "ENOENT")).toBe(true); + expect(hasNodeErrorCode(enoent, "EACCES")).toBe(false); + }); + + it("classifies not-found and symlink-open error codes", () => { + 
expect(isNotFoundPathError({ code: "ENOENT" })).toBe(true); + expect(isNotFoundPathError({ code: "ENOTDIR" })).toBe(true); + expect(isNotFoundPathError({ code: "EACCES" })).toBe(false); + + expect(isSymlinkOpenError({ code: "ELOOP" })).toBe(true); + expect(isSymlinkOpenError({ code: "EINVAL" })).toBe(true); + expect(isSymlinkOpenError({ code: "ENOTSUP" })).toBe(true); + expect(isSymlinkOpenError({ code: "ENOENT" })).toBe(false); + }); +}); + +describe("isPathInside", () => { + it("accepts identical and nested paths but rejects escapes", () => { + expect(isPathInside("/workspace/root", "/workspace/root")).toBe(true); + expect(isPathInside("/workspace/root", "/workspace/root/nested/file.txt")).toBe(true); + expect(isPathInside("/workspace/root", "/workspace/root/../escape.txt")).toBe(false); + }); +}); diff --git a/src/infra/path-prepend.test.ts b/src/infra/path-prepend.test.ts new file mode 100644 index 00000000000..29dfb504cfb --- /dev/null +++ b/src/infra/path-prepend.test.ts @@ -0,0 +1,33 @@ +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { mergePathPrepend, normalizePathPrepend } from "./path-prepend.js"; + +describe("path prepend helpers", () => { + it("normalizes prepend lists by trimming, skipping blanks, and deduping", () => { + expect( + normalizePathPrepend([ + " /custom/bin ", + "", + " /custom/bin ", + "/opt/bin", + // oxlint-disable-next-line typescript/no-explicit-any + 42 as any, + ]), + ).toEqual(["/custom/bin", "/opt/bin"]); + expect(normalizePathPrepend()).toEqual([]); + }); + + it("merges prepended paths ahead of existing values without duplicates", () => { + expect(mergePathPrepend(`/usr/bin${path.delimiter}/opt/bin`, ["/custom/bin", "/usr/bin"])).toBe( + ["/custom/bin", "/usr/bin", "/opt/bin"].join(path.delimiter), + ); + expect(mergePathPrepend(undefined, ["/custom/bin"])).toBe("/custom/bin"); + expect(mergePathPrepend("/usr/bin", [])).toBe("/usr/bin"); + }); + + it("trims existing path entries while 
preserving order", () => { + expect( + mergePathPrepend(` /usr/bin ${path.delimiter} ${path.delimiter} /opt/bin `, ["/custom/bin"]), + ).toBe(["/custom/bin", "/usr/bin", "/opt/bin"].join(path.delimiter)); + }); +}); diff --git a/src/infra/path-safety.test.ts b/src/infra/path-safety.test.ts index 9c85fbac63e..4e89c1c35fb 100644 --- a/src/infra/path-safety.test.ts +++ b/src/infra/path-safety.test.ts @@ -6,6 +6,7 @@ describe("path-safety", () => { it.each([ { rootDir: "/tmp/demo", expected: `${path.resolve("/tmp/demo")}${path.sep}` }, { rootDir: `/tmp/demo${path.sep}`, expected: `${path.resolve("/tmp/demo")}${path.sep}` }, + { rootDir: "/tmp/demo/..", expected: `${path.resolve("/tmp")}${path.sep}` }, ])("resolves safe base dir for %j", ({ rootDir, expected }) => { expect(resolveSafeBaseDir(rootDir)).toBe(expected); }); @@ -13,8 +14,11 @@ describe("path-safety", () => { it.each([ { rootDir: "/tmp/demo", targetPath: "/tmp/demo", expected: true }, { rootDir: "/tmp/demo", targetPath: "/tmp/demo/sub/file.txt", expected: true }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo/./nested/../file.txt", expected: true }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo-two/../demo/file.txt", expected: true }, { rootDir: "/tmp/demo", targetPath: "/tmp/demo/../escape.txt", expected: false }, { rootDir: "/tmp/demo", targetPath: "/tmp/demo-sibling/file.txt", expected: false }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo/../../escape.txt", expected: false }, { rootDir: "/tmp/demo", targetPath: "sub/file.txt", expected: false }, ])("checks containment for %j", ({ rootDir, targetPath, expected }) => { expect(isWithinDir(rootDir, targetPath)).toBe(expected); diff --git a/src/infra/plain-object.test.ts b/src/infra/plain-object.test.ts index 892e5c89fab..272c7c94f9d 100644 --- a/src/infra/plain-object.test.ts +++ b/src/infra/plain-object.test.ts @@ -9,10 +9,21 @@ describe("isPlainObject", () => { }, ); - it.each([null, [], new Date(), /re/, "x", 42, () => null, new Map()])( - 
"rejects non-plain values: %j", - (value) => { - expect(isPlainObject(value)).toBe(false); - }, - ); + it.each([ + null, + [], + new Date(), + /re/, + "x", + 42, + () => null, + new Map(), + { [Symbol.toStringTag]: "Array" }, + ])("rejects non-plain values: %j", (value) => { + expect(isPlainObject(value)).toBe(false); + }); + + it("accepts object-tag values with an explicit Object toStringTag", () => { + expect(isPlainObject({ [Symbol.toStringTag]: "Object" })).toBe(true); + }); }); diff --git a/src/infra/ports-format.test.ts b/src/infra/ports-format.test.ts new file mode 100644 index 00000000000..c532de63970 --- /dev/null +++ b/src/infra/ports-format.test.ts @@ -0,0 +1,87 @@ +import { describe, expect, it } from "vitest"; +import { + buildPortHints, + classifyPortListener, + formatPortDiagnostics, + formatPortListener, +} from "./ports-format.js"; + +describe("ports-format", () => { + it("classifies listeners across gateway, ssh, and unknown command lines", () => { + const cases = [ + { + listener: { commandLine: "ssh -N -L 18789:127.0.0.1:18789 user@host" }, + expected: "ssh", + }, + { + listener: { command: "ssh" }, + expected: "ssh", + }, + { + listener: { commandLine: "node /Users/me/Projects/openclaw/dist/entry.js gateway" }, + expected: "gateway", + }, + { + listener: { commandLine: "python -m http.server 18789" }, + expected: "unknown", + }, + ] as const; + + for (const testCase of cases) { + expect( + classifyPortListener(testCase.listener, 18789), + JSON.stringify(testCase.listener), + ).toBe(testCase.expected); + } + }); + + it("builds ordered hints for mixed listener kinds and multiplicity", () => { + expect( + buildPortHints( + [ + { commandLine: "node dist/index.js openclaw gateway" }, + { commandLine: "ssh -N -L 18789:127.0.0.1:18789" }, + { commandLine: "python -m http.server 18789" }, + ], + 18789, + ), + ).toEqual([ + expect.stringContaining("Gateway already running locally."), + "SSH tunnel already bound to this port. 
Close the tunnel or use a different local port in -L.", + "Another process is listening on this port.", + expect.stringContaining("Multiple listeners detected"), + ]); + expect(buildPortHints([], 18789)).toEqual([]); + }); + + it("formats listeners with pid, user, command, and address fallbacks", () => { + expect( + formatPortListener({ pid: 123, user: "alice", commandLine: "ssh -N", address: "::1" }), + ).toBe("pid 123 alice: ssh -N (::1)"); + expect(formatPortListener({ command: "ssh", address: "127.0.0.1:18789" })).toBe( + "pid ?: ssh (127.0.0.1:18789)", + ); + expect(formatPortListener({})).toBe("pid ?: unknown"); + }); + + it("formats free and busy port diagnostics", () => { + expect( + formatPortDiagnostics({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }), + ).toEqual(["Port 18789 is free."]); + + const lines = formatPortDiagnostics({ + port: 18789, + status: "busy", + listeners: [{ pid: 123, user: "alice", commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], + hints: buildPortHints([{ pid: 123, commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], 18789), + }); + expect(lines[0]).toContain("Port 18789 is already in use"); + expect(lines).toContain("- pid 123 alice: ssh -N -L 18789:127.0.0.1:18789"); + expect(lines.some((line) => line.includes("SSH tunnel"))).toBe(true); + }); +}); diff --git a/src/infra/ports-lsof.test.ts b/src/infra/ports-lsof.test.ts new file mode 100644 index 00000000000..eb599112a5a --- /dev/null +++ b/src/infra/ports-lsof.test.ts @@ -0,0 +1,67 @@ +import fs from "node:fs"; +import fsPromises from "node:fs/promises"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { resolveLsofCommand, resolveLsofCommandSync } from "./ports-lsof.js"; + +const LSOF_CANDIDATES = + process.platform === "darwin" + ? 
["/usr/sbin/lsof", "/usr/bin/lsof"] + : ["/usr/bin/lsof", "/usr/sbin/lsof"]; + +describe("lsof command resolution", () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("prefers the first executable async candidate", async () => { + const accessSpy = vi.spyOn(fsPromises, "access").mockImplementation(async (target) => { + if (target === LSOF_CANDIDATES[0]) { + return; + } + throw new Error("unexpected"); + }); + + await expect(resolveLsofCommand()).resolves.toBe(LSOF_CANDIDATES[0]); + expect(accessSpy).toHaveBeenCalledTimes(1); + }); + + it("falls through async candidates before using the shell fallback", async () => { + const accessSpy = vi.spyOn(fsPromises, "access").mockImplementation(async (target) => { + if (target === LSOF_CANDIDATES[0]) { + throw new Error("missing"); + } + if (target === LSOF_CANDIDATES[1]) { + return; + } + throw new Error("unexpected"); + }); + + await expect(resolveLsofCommand()).resolves.toBe(LSOF_CANDIDATES[1]); + expect(accessSpy).toHaveBeenCalledTimes(2); + + accessSpy.mockImplementation(async () => { + throw new Error("missing"); + }); + await expect(resolveLsofCommand()).resolves.toBe("lsof"); + }); + + it("mirrors candidate resolution for the sync helper", () => { + const accessSpy = vi.spyOn(fs, "accessSync").mockImplementation((target) => { + if (target === LSOF_CANDIDATES[0]) { + throw new Error("missing"); + } + if (target === LSOF_CANDIDATES[1]) { + return undefined; + } + throw new Error("unexpected"); + }); + + expect(resolveLsofCommandSync()).toBe(LSOF_CANDIDATES[1]); + expect(accessSpy).toHaveBeenCalledTimes(2); + + accessSpy.mockImplementation(() => { + throw new Error("missing"); + }); + expect(resolveLsofCommandSync()).toBe("lsof"); + }); +}); diff --git a/src/infra/ports-probe.test.ts b/src/infra/ports-probe.test.ts new file mode 100644 index 00000000000..ce127970cce --- /dev/null +++ b/src/infra/ports-probe.test.ts @@ -0,0 +1,30 @@ +import net from "node:net"; +import { describe, expect, it } from 
"vitest"; +import { tryListenOnPort } from "./ports-probe.js"; + +describe("tryListenOnPort", () => { + it("can bind and release an ephemeral loopback port", async () => { + await expect(tryListenOnPort({ port: 0, host: "127.0.0.1", exclusive: true })).resolves.toBe( + undefined, + ); + }); + + it("rejects when the port is already in use", async () => { + const server = net.createServer(); + await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("expected tcp address"); + } + + try { + await expect( + tryListenOnPort({ port: address.port, host: "127.0.0.1" }), + ).rejects.toMatchObject({ + code: "EADDRINUSE", + }); + } finally { + await new Promise((resolve) => server.close(() => resolve())); + } + }); +}); diff --git a/src/infra/ports.test.ts b/src/infra/ports.test.ts index f809662f1ac..090ccb128b9 100644 --- a/src/infra/ports.test.ts +++ b/src/infra/ports.test.ts @@ -8,14 +8,7 @@ vi.mock("../process/exec.js", () => ({ runCommandWithTimeout: (...args: unknown[]) => runCommandWithTimeoutMock(...args), })); import { inspectPortUsage } from "./ports-inspect.js"; -import { - buildPortHints, - classifyPortListener, - ensurePortAvailable, - formatPortDiagnostics, - handlePortError, - PortInUseError, -} from "./ports.js"; +import { ensurePortAvailable, handlePortError, PortInUseError } from "./ports.js"; const describeUnix = process.platform === "win32" ? describe.skip : describe; @@ -61,32 +54,6 @@ describe("ports helpers", () => { const messages = runtime.error.mock.calls.map((call) => stripAnsi(String(call[0] ?? 
""))); expect(messages.join("\n")).toContain("another OpenClaw instance is already running"); }); - - it("classifies ssh and gateway listeners", () => { - expect( - classifyPortListener({ commandLine: "ssh -N -L 18789:127.0.0.1:18789 user@host" }, 18789), - ).toBe("ssh"); - expect( - classifyPortListener( - { - commandLine: "node /Users/me/Projects/openclaw/dist/entry.js gateway", - }, - 18789, - ), - ).toBe("gateway"); - }); - - it("formats port diagnostics with hints", () => { - const diagnostics = { - port: 18789, - status: "busy" as const, - listeners: [{ pid: 123, commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], - hints: buildPortHints([{ pid: 123, commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], 18789), - }; - const lines = formatPortDiagnostics(diagnostics); - expect(lines[0]).toContain("Port 18789 is already in use"); - expect(lines.some((line) => line.includes("SSH tunnel"))).toBe(true); - }); }); describeUnix("inspectPortUsage", () => { diff --git a/src/infra/prototype-keys.test.ts b/src/infra/prototype-keys.test.ts new file mode 100644 index 00000000000..f2bd8287226 --- /dev/null +++ b/src/infra/prototype-keys.test.ts @@ -0,0 +1,14 @@ +import { describe, expect, it } from "vitest"; +import { isBlockedObjectKey } from "./prototype-keys.js"; + +describe("isBlockedObjectKey", () => { + it("blocks prototype-pollution keys and allows ordinary keys", () => { + for (const key of ["__proto__", "prototype", "constructor"]) { + expect(isBlockedObjectKey(key)).toBe(true); + } + + for (const key of ["toString", "value", "constructorName"]) { + expect(isBlockedObjectKey(key)).toBe(false); + } + }); +}); diff --git a/src/infra/provider-usage.auth.normalizes-keys.test.ts b/src/infra/provider-usage.auth.normalizes-keys.test.ts index 851c789941d..baf96781c27 100644 --- a/src/infra/provider-usage.auth.normalizes-keys.test.ts +++ b/src/infra/provider-usage.auth.normalizes-keys.test.ts @@ -3,7 +3,7 @@ import os from "node:os"; import path from "node:path"; import { 
afterAll, beforeAll, describe, expect, it } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "../agents/model-auth-markers.js"; -import { resolveProviderAuths } from "./provider-usage.auth.js"; +import { resolveProviderAuths, type ProviderAuth } from "./provider-usage.auth.js"; describe("resolveProviderAuths key normalization", () => { let suiteRoot = ""; @@ -214,7 +214,12 @@ describe("resolveProviderAuths key normalization", () => { }, expected: [{ provider: "minimax", token: "code-plan-key" }], }, - ])("$name", async ({ providers, env, expected }) => { + ] satisfies Array<{ + name: string; + providers: readonly Parameters[0]["providers"][number][]; + env: Record; + expected: ProviderAuth[]; + }>)("$name", async ({ providers, env, expected }) => { await expectResolvedAuthsFromSuiteHome({ providers: [...providers], env, expected }); }); diff --git a/src/infra/provider-usage.fetch.claude.test.ts b/src/infra/provider-usage.fetch.claude.test.ts index 59b8542558a..e9b82c9ad4f 100644 --- a/src/infra/provider-usage.fetch.claude.test.ts +++ b/src/infra/provider-usage.fetch.claude.test.ts @@ -77,6 +77,25 @@ describe("fetchClaudeUsage", () => { ]); }); + it("clamps oauth usage windows and prefers sonnet over opus when both exist", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + five_hour: { utilization: -5 }, + seven_day: { utilization: 140 }, + seven_day_sonnet: { utilization: 40 }, + seven_day_opus: { utilization: 90 }, + }), + ); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + + expect(result.windows).toEqual([ + { label: "5h", usedPercent: 0, resetAt: undefined }, + { label: "Week", usedPercent: 100, resetAt: undefined }, + { label: "Sonnet", usedPercent: 40 }, + ]); + }); + it("returns HTTP errors with provider message suffix", async () => { const mockFetch = createProviderUsageFetch(async () => makeResponse(403, { @@ -89,6 +108,26 @@ describe("fetchClaudeUsage", () => { 
expect(result.windows).toHaveLength(0); }); + it("omits blank error message suffixes on oauth failures", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(403, { + error: { message: " " }, + }), + ); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + expect(result.error).toBe("HTTP 403"); + expect(result.windows).toHaveLength(0); + }); + + it("keeps HTTP status errors when oauth error bodies are not JSON", async () => { + const mockFetch = createProviderUsageFetch(async () => makeResponse(502, "bad gateway")); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + expect(result.error).toBe("HTTP 502"); + expect(result.windows).toHaveLength(0); + }); + it("falls back to claude web usage when oauth scope is missing", async () => { vi.stubEnv("CLAUDE_AI_SESSION_KEY", "sk-ant-session-key"); @@ -119,6 +158,25 @@ describe("fetchClaudeUsage", () => { expect(result.windows).toEqual([{ label: "5h", usedPercent: 12, resetAt: undefined }]); }); + it("parses sessionKey from Cookie-prefixed CLAUDE_WEB_COOKIE headers", async () => { + vi.stubEnv("CLAUDE_WEB_COOKIE", "Cookie: foo=bar; sessionKey=sk-ant-cookie-header"); + + const mockFetch = createScopeFallbackFetch(async (url) => { + if (url.endsWith("/api/organizations")) { + return makeResponse(200, [{ uuid: "org-header" }]); + } + if (url.endsWith("/api/organizations/org-header/usage")) { + return makeResponse(200, { five_hour: { utilization: 9 } }); + } + return makeResponse(404, "not found"); + }); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + expect(result.error).toBeUndefined(); + expect(result.windows).toEqual([{ label: "5h", usedPercent: 9, resetAt: undefined }]); + expect(mockFetch).toHaveBeenCalledTimes(3); + }); + it("parses sessionKey from CLAUDE_WEB_COOKIE for web fallback", async () => { vi.stubEnv("CLAUDE_WEB_COOKIE", "sessionKey=sk-ant-cookie-session"); diff --git a/src/infra/provider-usage.fetch.codex.test.ts 
b/src/infra/provider-usage.fetch.codex.test.ts index e74d0f25f65..428199c40fe 100644 --- a/src/infra/provider-usage.fetch.codex.test.ts +++ b/src/infra/provider-usage.fetch.codex.test.ts @@ -107,4 +107,44 @@ describe("fetchCodexUsage", () => { { label: "Week", usedPercent: 20, resetAt: weeklyLikeSecondaryReset * 1000 }, ]); }); + + it("labels short secondary windows in hours", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + rate_limit: { + secondary_window: { + limit_window_seconds: 21_600, + used_percent: 11, + }, + }, + }), + ); + + const result = await fetchCodexUsage("token", undefined, 5000, mockFetch); + expect(result.windows).toEqual([{ label: "6h", usedPercent: 11, resetAt: undefined }]); + }); + + it("builds a balance-only plan when credits exist without a plan type", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + credits: { balance: "7.5" }, + }), + ); + + const result = await fetchCodexUsage("token", undefined, 5000, mockFetch); + expect(result.plan).toBe("$7.50"); + expect(result.windows).toEqual([]); + }); + + it("falls back invalid credit strings to a zero balance", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + plan_type: "Plus", + credits: { balance: "not-a-number" }, + }), + ); + + const result = await fetchCodexUsage("token", undefined, 5000, mockFetch); + expect(result.plan).toBe("Plus ($0.00)"); + }); }); diff --git a/src/infra/provider-usage.fetch.copilot.test.ts b/src/infra/provider-usage.fetch.copilot.test.ts index 7df17118159..0abfd5f782f 100644 --- a/src/infra/provider-usage.fetch.copilot.test.ts +++ b/src/infra/provider-usage.fetch.copilot.test.ts @@ -34,4 +34,40 @@ describe("fetchCopilotUsage", () => { { label: "Chat", usedPercent: 25 }, ]); }); + + it("defaults missing snapshot values and clamps invalid remaining percentages", async () => { + const mockFetch = createProviderUsageFetch(async () => 
+ makeResponse(200, { + quota_snapshots: { + premium_interactions: { percent_remaining: null }, + chat: { percent_remaining: 140 }, + }, + }), + ); + + const result = await fetchCopilotUsage("token", 5000, mockFetch); + + expect(result.windows).toEqual([ + { label: "Premium", usedPercent: 100 }, + { label: "Chat", usedPercent: 0 }, + ]); + expect(result.plan).toBeUndefined(); + }); + + it("returns an empty window list when quota snapshots are missing", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + copilot_plan: "free", + }), + ); + + const result = await fetchCopilotUsage("token", 5000, mockFetch); + + expect(result).toEqual({ + provider: "github-copilot", + displayName: "Copilot", + windows: [], + plan: "free", + }); + }); }); diff --git a/src/infra/provider-usage.fetch.gemini.test.ts b/src/infra/provider-usage.fetch.gemini.test.ts index ea713478011..c21292ebf97 100644 --- a/src/infra/provider-usage.fetch.gemini.test.ts +++ b/src/infra/provider-usage.fetch.gemini.test.ts @@ -36,4 +36,39 @@ describe("fetchGeminiUsage", () => { expect(result.windows[1]?.label).toBe("Flash"); expect(result.windows[1]?.usedPercent).toBeCloseTo(30, 6); }); + + it("returns no windows when the response has no recognized model families", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + buckets: [{ modelId: "gemini-unknown", remainingFraction: 0.5 }], + }), + ); + + const result = await fetchGeminiUsage("token", 5000, mockFetch, "google-gemini-cli"); + + expect(result).toEqual({ + provider: "google-gemini-cli", + displayName: "Gemini", + windows: [], + }); + }); + + it("defaults missing fractions to fully available and clamps invalid fractions", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + buckets: [ + { modelId: "gemini-pro" }, + { modelId: "gemini-pro-latest", remainingFraction: -0.5 }, + { modelId: "gemini-flash", remainingFraction: 1.2 }, + ], 
+ }), + ); + + const result = await fetchGeminiUsage("token", 5000, mockFetch, "google-gemini-cli"); + + expect(result.windows).toEqual([ + { label: "Pro", usedPercent: 100 }, + { label: "Flash", usedPercent: 0 }, + ]); + }); }); diff --git a/src/infra/provider-usage.fetch.shared.test.ts b/src/infra/provider-usage.fetch.shared.test.ts index 213a5a3eb2d..692a57705db 100644 --- a/src/infra/provider-usage.fetch.shared.test.ts +++ b/src/infra/provider-usage.fetch.shared.test.ts @@ -1,11 +1,17 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { buildUsageErrorSnapshot, buildUsageHttpErrorSnapshot, + fetchJson, parseFiniteNumber, } from "./provider-usage.fetch.shared.js"; describe("provider usage fetch shared helpers", () => { + afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); + }); + it("builds a provider error snapshot", () => { expect(buildUsageErrorSnapshot("zai", "API error")).toEqual({ provider: "zai", @@ -23,6 +29,58 @@ describe("provider usage fetch shared helpers", () => { expect(parseFiniteNumber(value)).toBe(expected); }); + it("forwards request init and clears the timeout on success", async () => { + vi.useFakeTimers(); + const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout"); + const fetchFnMock = vi.fn( + async (_input: URL | RequestInfo, init?: RequestInit) => + new Response(JSON.stringify({ aborted: init?.signal?.aborted ?? 
false }), { status: 200 }), + ); + const fetchFn = fetchFnMock as typeof fetch; + + const response = await fetchJson( + "https://example.com/usage", + { + method: "POST", + headers: { authorization: "Bearer test" }, + }, + 1_000, + fetchFn, + ); + + expect(fetchFnMock).toHaveBeenCalledWith( + "https://example.com/usage", + expect.objectContaining({ + method: "POST", + headers: { authorization: "Bearer test" }, + signal: expect.any(AbortSignal), + }), + ); + await expect(response.json()).resolves.toEqual({ aborted: false }); + expect(clearTimeoutSpy).toHaveBeenCalledTimes(1); + }); + + it("aborts timed out requests and clears the timer on rejection", async () => { + vi.useFakeTimers(); + const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout"); + const fetchFnMock = vi.fn( + (_input: URL | RequestInfo, init?: RequestInit) => + new Promise((_, reject) => { + init?.signal?.addEventListener("abort", () => reject(new Error("aborted by timeout")), { + once: true, + }); + }), + ); + const fetchFn = fetchFnMock as typeof fetch; + + const request = fetchJson("https://example.com/usage", {}, 50, fetchFn); + const rejection = expect(request).rejects.toThrow("aborted by timeout"); + await vi.advanceTimersByTimeAsync(50); + + await rejection; + expect(clearTimeoutSpy).toHaveBeenCalledTimes(1); + }); + it("maps configured status codes to token expired", () => { const snapshot = buildUsageHttpErrorSnapshot({ provider: "openai-codex", diff --git a/src/infra/provider-usage.fetch.zai.test.ts b/src/infra/provider-usage.fetch.zai.test.ts index 2dafaccca9f..d952495e90f 100644 --- a/src/infra/provider-usage.fetch.zai.test.ts +++ b/src/infra/provider-usage.fetch.zai.test.ts @@ -25,6 +25,20 @@ describe("fetchZaiUsage", () => { expect(result.windows).toHaveLength(0); }); + it("falls back to a generic API error for blank unsuccessful messages", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + success: false, + code: 500, + msg: " ", + }), + 
); + + const result = await fetchZaiUsage("key", 5000, mockFetch); + expect(result.error).toBe("API error"); + expect(result.windows).toHaveLength(0); + }); + it("parses token and monthly windows with reset times", async () => { const tokenReset = "2026-01-08T00:00:00Z"; const minuteReset = "2026-01-08T00:30:00Z"; @@ -83,4 +97,47 @@ describe("fetchZaiUsage", () => { }, ]); }); + + it("clamps invalid percentages and falls back to alternate plan fields", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + success: true, + code: 200, + data: { + plan: "Pro", + limits: [ + { + type: "TOKENS_LIMIT", + percentage: -5, + unit: 99, + }, + { + type: "TIME_LIMIT", + percentage: 140, + }, + { + type: "OTHER_LIMIT", + percentage: 50, + }, + ], + }, + }), + ); + + const result = await fetchZaiUsage("key", 5000, mockFetch); + + expect(result.plan).toBe("Pro"); + expect(result.windows).toEqual([ + { + label: "Tokens (Limit)", + usedPercent: 0, + resetAt: undefined, + }, + { + label: "Monthly", + usedPercent: 100, + resetAt: undefined, + }, + ]); + }); }); diff --git a/src/infra/provider-usage.fetch.zai.ts b/src/infra/provider-usage.fetch.zai.ts index 1ab1fd14764..d6f4970f0b7 100644 --- a/src/infra/provider-usage.fetch.zai.ts +++ b/src/infra/provider-usage.fetch.zai.ts @@ -46,11 +46,12 @@ export async function fetchZaiUsage( const data = (await res.json()) as ZaiUsageResponse; if (!data.success || data.code !== 200) { + const errorMessage = typeof data.msg === "string" ? 
data.msg.trim() : ""; return { provider: "zai", displayName: PROVIDER_LABELS.zai, windows: [], - error: data.msg || "API error", + error: errorMessage || "API error", }; } diff --git a/src/infra/provider-usage.format.test.ts b/src/infra/provider-usage.format.test.ts index 3063a571a24..d87d6a73c17 100644 --- a/src/infra/provider-usage.format.test.ts +++ b/src/infra/provider-usage.format.test.ts @@ -54,6 +54,18 @@ describe("provider-usage.format", () => { expect(summary).toBe("A 90% left · B 80% left"); }); + it("treats non-positive max windows as all windows and clamps overused percentages", () => { + const summary = formatUsageWindowSummary( + makeSnapshot([ + { label: "Over", usedPercent: 120, resetAt: now + 60_000 }, + { label: "Under", usedPercent: -10 }, + ]), + { now, maxWindows: 0, includeResets: true }, + ); + + expect(summary).toBe("Over 0% left ⏱1m · Under 100% left"); + }); + it("formats summary line from highest-usage window and provider cap", () => { const summary: UsageSummary = { updatedAt: now, @@ -79,6 +91,27 @@ describe("provider-usage.format", () => { ); }); + it("returns null summary line when providers are errored or have no windows", () => { + expect( + formatUsageSummaryLine({ + updatedAt: now, + providers: [ + { + provider: "anthropic", + displayName: "Claude", + windows: [], + error: "HTTP 401", + }, + { + provider: "zai", + displayName: "z.ai", + windows: [], + }, + ], + }), + ).toBeNull(); + }); + it("formats report output for empty, error, no-data, and plan entries", () => { expect(formatUsageReportLines({ updatedAt: now, providers: [] })).toEqual([ "Usage: no provider usage available.", @@ -107,4 +140,24 @@ describe("provider-usage.format", () => { " Xiaomi: no data", ]); }); + + it("formats detailed report lines with reset windows", () => { + const summary: UsageSummary = { + updatedAt: now, + providers: [ + { + provider: "anthropic", + displayName: "Claude", + plan: "Pro", + windows: [{ label: "Daily", usedPercent: 25, resetAt: now + 2 
* 60 * 60_000 }], + }, + ], + }; + + expect(formatUsageReportLines(summary, { now })).toEqual([ + "Usage:", + " Claude (Pro)", + " Daily: 75% left · resets 2h", + ]); + }); }); diff --git a/src/infra/provider-usage.shared.test.ts b/src/infra/provider-usage.shared.test.ts index 578cb876a4f..048352a183d 100644 --- a/src/infra/provider-usage.shared.test.ts +++ b/src/infra/provider-usage.shared.test.ts @@ -1,7 +1,12 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { clampPercent, resolveUsageProviderId, withTimeout } from "./provider-usage.shared.js"; describe("provider-usage.shared", () => { + afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); + }); + it.each([ { value: "z-ai", expected: "zai" }, { value: " GOOGLE-GEMINI-CLI ", expected: "google-gemini-cli" }, @@ -33,7 +38,18 @@ describe("provider-usage.shared", () => { }); it("returns fallback when timeout wins", async () => { + vi.useFakeTimers(); const late = new Promise((resolve) => setTimeout(() => resolve("late"), 50)); - await expect(withTimeout(late, 1, "fallback")).resolves.toBe("fallback"); + const result = withTimeout(late, 1, "fallback"); + await vi.advanceTimersByTimeAsync(1); + await expect(result).resolves.toBe("fallback"); + }); + + it("clears the timeout after successful work", async () => { + const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout"); + + await expect(withTimeout(Promise.resolve("ok"), 100, "fallback")).resolves.toBe("ok"); + + expect(clearTimeoutSpy).toHaveBeenCalledTimes(1); }); }); diff --git a/src/infra/restart-sentinel.test.ts b/src/infra/restart-sentinel.test.ts index 76b9e53b59e..c28504685bb 100644 --- a/src/infra/restart-sentinel.test.ts +++ b/src/infra/restart-sentinel.test.ts @@ -5,9 +5,11 @@ import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { captureEnv } from "../test-utils/env.js"; import { consumeRestartSentinel, + formatDoctorNonInteractiveHint, 
formatRestartSentinelMessage, readRestartSentinel, resolveRestartSentinelPath, + summarizeRestartSentinel, trimLogTail, writeRestartSentinel, } from "./restart-sentinel.js"; @@ -59,6 +61,15 @@ describe("restart sentinel", () => { await expect(fs.stat(filePath)).rejects.toThrow(); }); + it("drops structurally invalid sentinel payloads", async () => { + const filePath = resolveRestartSentinelPath(); + await fs.mkdir(path.dirname(filePath), { recursive: true }); + await fs.writeFile(filePath, JSON.stringify({ version: 2, payload: null }), "utf-8"); + + await expect(readRestartSentinel()).resolves.toBeNull(); + await expect(fs.stat(filePath)).rejects.toThrow(); + }); + it("formatRestartSentinelMessage uses custom message when present", () => { const payload = { kind: "config-apply" as const, @@ -93,6 +104,26 @@ describe("restart sentinel", () => { expect(result).toContain("Gateway restart"); }); + it("formats summary, distinct reason, and doctor hint together", () => { + const payload = { + kind: "config-patch" as const, + status: "error" as const, + ts: Date.now(), + message: "Patch failed", + doctorHint: "Run openclaw doctor", + stats: { mode: "patch", reason: "validation failed" }, + }; + + expect(formatRestartSentinelMessage(payload)).toBe( + [ + "Gateway restart config-patch error (patch)", + "Patch failed", + "Reason: validation failed", + "Run openclaw doctor", + ].join("\n"), + ); + }); + it("trims log tails", () => { const text = "a".repeat(9000); const trimmed = trimLogTail(text, 8000); @@ -115,6 +146,18 @@ describe("restart sentinel", () => { expect(textA).toContain("Gateway restart restart ok"); expect(textA).not.toContain('"ts"'); }); + + it("summarizes restart payloads and trims log tails without trailing whitespace", () => { + expect( + summarizeRestartSentinel({ + kind: "update", + status: "skipped", + ts: 1, + }), + ).toBe("Gateway restart update skipped"); + expect(trimLogTail("hello\n")).toBe("hello"); + expect(trimLogTail(undefined)).toBeNull(); + 
}); }); describe("restart sentinel message dedup", () => { @@ -145,4 +188,10 @@ describe("restart sentinel message dedup", () => { expect(result).toContain("Restart requested by /restart"); expect(result).toContain("Reason: /restart"); }); + + it("formats the non-interactive doctor command", () => { + expect(formatDoctorNonInteractiveHint({ PATH: "/usr/bin:/bin" })).toContain( + "openclaw doctor --non-interactive", + ); + }); }); diff --git a/src/infra/run-node.test.ts b/src/infra/run-node.test.ts index fab1d7e771a..1007b2c6141 100644 --- a/src/infra/run-node.test.ts +++ b/src/infra/run-node.test.ts @@ -13,6 +13,17 @@ async function withTempDir(run: (dir: string) => Promise): Promise { } } +function createExitedProcess(code: number | null, signal: string | null = null) { + return { + on: (event: string, cb: (code: number | null, signal: string | null) => void) => { + if (event === "exit") { + queueMicrotask(() => cb(code, signal)); + } + return undefined; + }, + }; +} + describe("run-node script", () => { it.runIf(process.platform !== "win32")( "preserves control-ui assets by building with tsdown --no-clean", @@ -66,4 +77,88 @@ describe("run-node script", () => { }); }, ); + + it("skips rebuilding when dist is current and the source tree is clean", async () => { + await withTempDir(async (tmp) => { + const srcPath = path.join(tmp, "src", "index.ts"); + const distEntryPath = path.join(tmp, "dist", "entry.js"); + const buildStampPath = path.join(tmp, "dist", ".buildstamp"); + const tsconfigPath = path.join(tmp, "tsconfig.json"); + const packageJsonPath = path.join(tmp, "package.json"); + await fs.mkdir(path.dirname(srcPath), { recursive: true }); + await fs.mkdir(path.dirname(distEntryPath), { recursive: true }); + await fs.writeFile(srcPath, "export const value = 1;\n", "utf-8"); + await fs.writeFile(tsconfigPath, "{}\n", "utf-8"); + await fs.writeFile(packageJsonPath, '{"name":"openclaw-test"}\n', "utf-8"); + await fs.writeFile(distEntryPath, 
"console.log('built');\n", "utf-8"); + await fs.writeFile(buildStampPath, '{"head":"abc123"}\n', "utf-8"); + + const oldTime = new Date("2026-03-13T10:00:00.000Z"); + const stampTime = new Date("2026-03-13T12:00:00.000Z"); + await fs.utimes(srcPath, oldTime, oldTime); + await fs.utimes(tsconfigPath, oldTime, oldTime); + await fs.utimes(packageJsonPath, oldTime, oldTime); + await fs.utimes(distEntryPath, stampTime, stampTime); + await fs.utimes(buildStampPath, stampTime, stampTime); + + const spawnCalls: string[][] = []; + const spawn = (cmd: string, args: string[]) => { + spawnCalls.push([cmd, ...args]); + return createExitedProcess(0); + }; + const spawnSync = (cmd: string, args: string[]) => { + if (cmd === "git" && args[0] === "rev-parse") { + return { status: 0, stdout: "abc123\n" }; + } + if (cmd === "git" && args[0] === "status") { + return { status: 0, stdout: "" }; + } + return { status: 1, stdout: "" }; + }; + + const { runNodeMain } = await import("../../scripts/run-node.mjs"); + const exitCode = await runNodeMain({ + cwd: tmp, + args: ["status"], + env: { + ...process.env, + OPENCLAW_RUNNER_LOG: "0", + }, + spawn, + spawnSync, + execPath: process.execPath, + platform: process.platform, + }); + + expect(exitCode).toBe(0); + expect(spawnCalls).toEqual([[process.execPath, "openclaw.mjs", "status"]]); + }); + }); + + it("returns the build exit code when the compiler step fails", async () => { + await withTempDir(async (tmp) => { + const spawn = (cmd: string) => { + if (cmd === "pnpm") { + return createExitedProcess(23); + } + return createExitedProcess(0); + }; + + const { runNodeMain } = await import("../../scripts/run-node.mjs"); + const exitCode = await runNodeMain({ + cwd: tmp, + args: ["status"], + env: { + ...process.env, + OPENCLAW_FORCE_BUILD: "1", + OPENCLAW_RUNNER_LOG: "0", + }, + spawn, + execPath: process.execPath, + platform: process.platform, + }); + + expect(exitCode).toBe(23); + }); + }); }); diff --git a/src/infra/runtime-guard.test.ts 
b/src/infra/runtime-guard.test.ts index 410fe5d4a2d..ca1080b84bc 100644 --- a/src/infra/runtime-guard.test.ts +++ b/src/infra/runtime-guard.test.ts @@ -4,6 +4,7 @@ import { detectRuntime, isAtLeast, parseSemver, + isSupportedNodeVersion, type RuntimeDetails, runtimeSatisfies, } from "./runtime-guard.js"; @@ -12,6 +13,7 @@ describe("runtime-guard", () => { it("parses semver with or without leading v", () => { expect(parseSemver("v22.1.3")).toEqual({ major: 22, minor: 1, patch: 3 }); expect(parseSemver("1.3.0")).toEqual({ major: 1, minor: 3, patch: 0 }); + expect(parseSemver("22.16.0-beta.1")).toEqual({ major: 22, minor: 16, patch: 0 }); expect(parseSemver("invalid")).toBeNull(); }); @@ -49,6 +51,9 @@ describe("runtime-guard", () => { expect(runtimeSatisfies(nodeOld)).toBe(false); expect(runtimeSatisfies(nodeTooOld)).toBe(false); expect(runtimeSatisfies(unknown)).toBe(false); + expect(isSupportedNodeVersion("22.16.0")).toBe(true); + expect(isSupportedNodeVersion("22.15.9")).toBe(false); + expect(isSupportedNodeVersion(null)).toBe(false); }); it("throws via exit when runtime is too old", () => { @@ -67,6 +72,7 @@ describe("runtime-guard", () => { }; expect(() => assertSupportedRuntime(runtime, details)).toThrow("exit"); expect(runtime.error).toHaveBeenCalledWith(expect.stringContaining("requires Node")); + expect(runtime.error).toHaveBeenCalledWith(expect.stringContaining("Detected: node 20.0.0")); }); it("returns silently when runtime meets requirements", () => { @@ -84,4 +90,25 @@ describe("runtime-guard", () => { expect(() => assertSupportedRuntime(runtime, details)).not.toThrow(); expect(runtime.exit).not.toHaveBeenCalled(); }); + + it("reports unknown runtimes with fallback labels", () => { + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(() => { + throw new Error("exit"); + }), + }; + const details: RuntimeDetails = { + kind: "unknown", + version: null, + execPath: null, + pathEnv: "(not set)", + }; + + expect(() => 
assertSupportedRuntime(runtime, details)).toThrow("exit"); + expect(runtime.error).toHaveBeenCalledWith( + expect.stringContaining("Detected: unknown runtime (exec: unknown)."), + ); + }); }); diff --git a/src/infra/runtime-status.test.ts b/src/infra/runtime-status.test.ts new file mode 100644 index 00000000000..fc79afe5bee --- /dev/null +++ b/src/infra/runtime-status.test.ts @@ -0,0 +1,30 @@ +import { describe, expect, it } from "vitest"; +import { formatRuntimeStatusWithDetails } from "./runtime-status.js"; + +describe("formatRuntimeStatusWithDetails", () => { + it("falls back to unknown when status is missing", () => { + expect(formatRuntimeStatusWithDetails({})).toBe("unknown"); + }); + + it("includes pid, distinct state, and non-empty details", () => { + expect( + formatRuntimeStatusWithDetails({ + status: "running", + pid: 1234, + state: "sleeping", + details: ["healthy", "", "port 18789"], + }), + ).toBe("running (pid 1234, state sleeping, healthy, port 18789)"); + }); + + it("omits duplicate state text and falsy pid values", () => { + expect( + formatRuntimeStatusWithDetails({ + status: "running", + pid: 0, + state: "RUNNING", + details: [], + }), + ).toBe("running"); + }); +}); diff --git a/src/infra/safe-open-sync.test.ts b/src/infra/safe-open-sync.test.ts index 4848752a66e..726aa9195f1 100644 --- a/src/infra/safe-open-sync.test.ts +++ b/src/infra/safe-open-sync.test.ts @@ -5,6 +5,11 @@ import path from "node:path"; import { describe, expect, it } from "vitest"; import { openVerifiedFileSync } from "./safe-open-sync.js"; +type SafeOpenSyncFs = NonNullable[0]["ioFs"]>; +type SafeOpenSyncLstatSync = SafeOpenSyncFs["lstatSync"]; +type SafeOpenSyncRealpathSync = SafeOpenSyncFs["realpathSync"]; +type SafeOpenSyncFstatSync = SafeOpenSyncFs["fstatSync"]; + async function withTempDir(prefix: string, run: (dir: string) => Promise): Promise { const dir = await fsp.mkdtemp(path.join(os.tmpdir(), prefix)); try { @@ -33,6 +38,20 @@ function mockStat(params: { } as 
unknown as fs.Stats; } +function mockRealpathSync(result: string): SafeOpenSyncRealpathSync { + const resolvePath = ((_: fs.PathLike) => result) as SafeOpenSyncRealpathSync; + resolvePath.native = ((_: fs.PathLike) => result) as typeof resolvePath.native; + return resolvePath; +} + +function mockLstatSync(read: (filePath: fs.PathLike) => fs.Stats): SafeOpenSyncLstatSync { + return ((filePath: fs.PathLike) => read(filePath)) as unknown as SafeOpenSyncLstatSync; +} + +function mockFstatSync(stat: fs.Stats): SafeOpenSyncFstatSync { + return ((_: number) => stat) as unknown as SafeOpenSyncFstatSync; +} + describe("openVerifiedFileSync", () => { it("returns a path error for missing files", async () => { await withTempDir("openclaw-safe-open-", async (root) => { @@ -115,15 +134,16 @@ describe("openVerifiedFileSync", () => { closed.push(fd); }; const closed: number[] = []; - const ioFs = { + const ioFs: SafeOpenSyncFs = { constants: fs.constants, - lstatSync: (filePath: string) => - filePath === "/real/file.txt" + lstatSync: mockLstatSync((filePath) => + String(filePath) === "/real/file.txt" ? 
mockStat({ isFile: true, size: 1, dev: 1, ino: 1 }) : mockStat({ isFile: false }), - realpathSync: () => "/real/file.txt", + ), + realpathSync: mockRealpathSync("/real/file.txt"), openSync: () => 42, - fstatSync: () => mockStat({ isFile: true, size: 1, dev: 2, ino: 1 }), + fstatSync: mockFstatSync(mockStat({ isFile: true, size: 1, dev: 2, ino: 1 })), closeSync, }; @@ -139,16 +159,16 @@ describe("openVerifiedFileSync", () => { }); it("reports non-path filesystem failures as io errors", () => { - const ioFs = { + const ioFs: SafeOpenSyncFs = { constants: fs.constants, lstatSync: () => { const err = new Error("permission denied") as NodeJS.ErrnoException; err.code = "EACCES"; throw err; }, - realpathSync: () => "/real/file.txt", + realpathSync: mockRealpathSync("/real/file.txt"), openSync: () => 42, - fstatSync: () => mockStat({ isFile: true }), + fstatSync: mockFstatSync(mockStat({ isFile: true })), closeSync: () => {}, }; diff --git a/src/infra/secure-random.test.ts b/src/infra/secure-random.test.ts index 6b2ea728ebd..2a595900c7b 100644 --- a/src/infra/secure-random.test.ts +++ b/src/infra/secure-random.test.ts @@ -40,4 +40,13 @@ describe("secure-random", () => { expect(cryptoMocks.randomBytes).toHaveBeenCalledWith(18); expect(token18).toBe(Buffer.alloc(18, 0xab).toString("base64url")); }); + + it("supports zero-byte tokens without rewriting the requested size", () => { + cryptoMocks.randomBytes.mockClear(); + + const token = generateSecureToken(0); + + expect(cryptoMocks.randomBytes).toHaveBeenCalledWith(0); + expect(token).toBe(""); + }); }); diff --git a/src/infra/session-maintenance-warning.test.ts b/src/infra/session-maintenance-warning.test.ts index f0e9590c572..f4c2e0757a1 100644 --- a/src/infra/session-maintenance-warning.test.ts +++ b/src/infra/session-maintenance-warning.test.ts @@ -1,3 +1,4 @@ +import { randomUUID } from "node:crypto"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; const mocks = vi.hoisted(() => ({ @@ -37,6 
+38,26 @@ vi.mock("./system-events.js", () => ({ const { deliverSessionMaintenanceWarning } = await import("./session-maintenance-warning.js"); +function createParams( + overrides: Partial[0]> = {}, +): Parameters[0] { + const sessionKey = overrides.sessionKey ?? `agent:${randomUUID()}:main`; + return { + cfg: {}, + sessionKey, + entry: {} as never, + warning: { + activeSessionKey: sessionKey, + pruneAfterMs: 1_000, + maxEntries: 100, + wouldPrune: true, + wouldCap: false, + ...(overrides.warning as object), + } as never, + ...overrides, + }; +} + describe("deliverSessionMaintenanceWarning", () => { let prevVitest: string | undefined; let prevNodeEnv: string | undefined; @@ -68,18 +89,9 @@ describe("deliverSessionMaintenanceWarning", () => { }); it("forwards session context to outbound delivery", async () => { - await deliverSessionMaintenanceWarning({ - cfg: {}, - sessionKey: "agent:main:main", - entry: {} as never, - warning: { - activeSessionKey: "agent:main:main", - pruneAfterMs: 1_000, - maxEntries: 100, - wouldPrune: true, - wouldCap: false, - } as never, - }); + const params = createParams({ sessionKey: "agent:main:main" }); + + await deliverSessionMaintenanceWarning(params); expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( expect.objectContaining({ @@ -90,4 +102,61 @@ describe("deliverSessionMaintenanceWarning", () => { ); expect(mocks.enqueueSystemEvent).not.toHaveBeenCalled(); }); + + it("suppresses duplicate warning contexts for the same session", async () => { + const params = createParams(); + + await deliverSessionMaintenanceWarning(params); + await deliverSessionMaintenanceWarning(params); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledTimes(1); + }); + + it("falls back to a system event when the last target is not deliverable", async () => { + mocks.resolveSessionDeliveryTarget.mockReturnValueOnce({ + channel: "debug", + to: "+15550001", + accountId: "acct-1", + threadId: "thread-1", + }); + 
mocks.isDeliverableMessageChannel.mockReturnValueOnce(false); + + await deliverSessionMaintenanceWarning( + createParams({ + warning: { + pruneAfterMs: 3_600_000, + maxEntries: 10, + wouldPrune: false, + wouldCap: true, + } as never, + }), + ); + + expect(mocks.deliverOutboundPayloads).not.toHaveBeenCalled(); + expect(mocks.enqueueSystemEvent).toHaveBeenCalledWith( + expect.stringContaining("most recent 10 sessions"), + expect.objectContaining({ sessionKey: expect.stringContaining("agent:") }), + ); + }); + + it("skips warning delivery in test mode", async () => { + process.env.NODE_ENV = "test"; + + await deliverSessionMaintenanceWarning(createParams()); + + expect(mocks.resolveSessionDeliveryTarget).not.toHaveBeenCalled(); + expect(mocks.deliverOutboundPayloads).not.toHaveBeenCalled(); + expect(mocks.enqueueSystemEvent).not.toHaveBeenCalled(); + }); + + it("enqueues a system event when outbound delivery fails", async () => { + mocks.deliverOutboundPayloads.mockRejectedValueOnce(new Error("boom")); + + await deliverSessionMaintenanceWarning(createParams()); + + expect(mocks.enqueueSystemEvent).toHaveBeenCalledWith( + expect.stringContaining("older than 1 second"), + expect.objectContaining({ sessionKey: expect.stringContaining("agent:") }), + ); + }); }); diff --git a/src/infra/shell-inline-command.test.ts b/src/infra/shell-inline-command.test.ts new file mode 100644 index 00000000000..8ac552f9fee --- /dev/null +++ b/src/infra/shell-inline-command.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it } from "vitest"; +import { + POSIX_INLINE_COMMAND_FLAGS, + POWERSHELL_INLINE_COMMAND_FLAGS, + resolveInlineCommandMatch, +} from "./shell-inline-command.js"; + +describe("resolveInlineCommandMatch", () => { + it("extracts the next token for exact inline-command flags", () => { + expect( + resolveInlineCommandMatch(["bash", "-lc", "echo hi"], POSIX_INLINE_COMMAND_FLAGS), + ).toEqual({ + command: "echo hi", + valueTokenIndex: 2, + }); + expect( + 
resolveInlineCommandMatch( + ["pwsh", "-Command", "Get-ChildItem"], + POWERSHELL_INLINE_COMMAND_FLAGS, + ), + ).toEqual({ + command: "Get-ChildItem", + valueTokenIndex: 2, + }); + }); + + it("supports combined -c forms only when enabled", () => { + expect( + resolveInlineCommandMatch(["sh", "-cecho hi"], POSIX_INLINE_COMMAND_FLAGS, { + allowCombinedC: true, + }), + ).toEqual({ + command: "echo hi", + valueTokenIndex: 1, + }); + expect( + resolveInlineCommandMatch(["sh", "-cecho hi"], POSIX_INLINE_COMMAND_FLAGS, { + allowCombinedC: false, + }), + ).toEqual({ + command: null, + valueTokenIndex: null, + }); + }); + + it("returns a value index even when the flag is present without a usable command", () => { + expect(resolveInlineCommandMatch(["bash", "-lc", " "], POSIX_INLINE_COMMAND_FLAGS)).toEqual({ + command: null, + valueTokenIndex: 2, + }); + expect(resolveInlineCommandMatch(["bash", "-lc"], POSIX_INLINE_COMMAND_FLAGS)).toEqual({ + command: null, + valueTokenIndex: null, + }); + }); + + it("stops parsing after --", () => { + expect( + resolveInlineCommandMatch(["bash", "--", "-lc", "echo hi"], POSIX_INLINE_COMMAND_FLAGS), + ).toEqual({ + command: null, + valueTokenIndex: null, + }); + }); +}); diff --git a/src/infra/ssh-config.test.ts b/src/infra/ssh-config.test.ts index 318f2dab973..cd722f51203 100644 --- a/src/infra/ssh-config.test.ts +++ b/src/infra/ssh-config.test.ts @@ -58,6 +58,17 @@ describe("ssh-config", () => { expect(parsed.identityFiles).toEqual(["/tmp/id"]); }); + it("ignores invalid ports and blank lines in ssh -G output", () => { + const parsed = parseSshConfigOutput( + "user bob\nhostname example.com\nport not-a-number\nidentityfile none\nidentityfile \n", + ); + + expect(parsed.user).toBe("bob"); + expect(parsed.host).toBe("example.com"); + expect(parsed.port).toBeUndefined(); + expect(parsed.identityFiles).toEqual([]); + }); + it("resolves ssh config via ssh -G", async () => { const config = await resolveSshConfig({ user: "me", host: "alias", 
port: 22 }); expect(config?.user).toBe("steipete"); @@ -68,6 +79,16 @@ describe("ssh-config", () => { expect(args?.slice(-2)).toEqual(["--", "me@alias"]); }); + it("adds non-default port and trimmed identity arguments", async () => { + await resolveSshConfig( + { user: "me", host: "alias", port: 2022 }, + { identity: " /tmp/custom_id " }, + ); + + const args = spawnMock.mock.calls.at(-1)?.[1] as string[] | undefined; + expect(args).toEqual(["-G", "-p", "2022", "-i", "/tmp/custom_id", "--", "me@alias"]); + }); + it("returns null when ssh -G fails", async () => { spawnMock.mockImplementationOnce( (_command: string, _args: readonly string[], _options: SpawnOptions): ChildProcess => { @@ -82,4 +103,18 @@ describe("ssh-config", () => { const config = await resolveSshConfig({ user: "me", host: "bad-host", port: 22 }); expect(config).toBeNull(); }); + + it("returns null when the ssh process emits an error", async () => { + spawnMock.mockImplementationOnce( + (_command: string, _args: readonly string[], _options: SpawnOptions): ChildProcess => { + const { child } = createMockSpawnChild(); + process.nextTick(() => { + child.emit("error", new Error("spawn boom")); + }); + return child as unknown as ChildProcess; + }, + ); + + await expect(resolveSshConfig({ user: "me", host: "bad-host", port: 22 })).resolves.toBeNull(); + }); }); diff --git a/src/infra/ssh-tunnel.test.ts b/src/infra/ssh-tunnel.test.ts new file mode 100644 index 00000000000..da450d1c029 --- /dev/null +++ b/src/infra/ssh-tunnel.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { parseSshTarget } from "./ssh-tunnel.js"; + +describe("parseSshTarget", () => { + it("parses user@host:port targets", () => { + expect(parseSshTarget("me@example.com:2222")).toEqual({ + user: "me", + host: "example.com", + port: 2222, + }); + }); + + it("strips an ssh prefix and keeps the default port when missing", () => { + expect(parseSshTarget(" ssh alice@example.com ")).toEqual({ + user: "alice", + 
host: "example.com", + port: 22, + }); + }); + + it("rejects invalid hosts and ports", () => { + expect(parseSshTarget("")).toBeNull(); + expect(parseSshTarget("me@example.com:0")).toBeNull(); + expect(parseSshTarget("me@example.com:not-a-port")).toBeNull(); + expect(parseSshTarget("-V")).toBeNull(); + expect(parseSshTarget("me@-badhost")).toBeNull(); + expect(parseSshTarget("-oProxyCommand=echo")).toBeNull(); + }); +}); diff --git a/src/infra/stable-node-path.test.ts b/src/infra/stable-node-path.test.ts new file mode 100644 index 00000000000..75121ba91b2 --- /dev/null +++ b/src/infra/stable-node-path.test.ts @@ -0,0 +1,40 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { resolveStableNodePath } from "./stable-node-path.js"; + +describe("resolveStableNodePath", () => { + it("returns non-cellar paths unchanged", async () => { + await expect(resolveStableNodePath("/usr/local/bin/node")).resolves.toBe("/usr/local/bin/node"); + }); + + it("prefers the Homebrew opt symlink for default and versioned formulas", async () => { + const prefix = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-stable-node-")); + const defaultNode = path.join(prefix, "Cellar", "node", "25.7.0", "bin", "node"); + const versionedNode = path.join(prefix, "Cellar", "node@22", "22.17.0", "bin", "node"); + const optDefault = path.join(prefix, "opt", "node", "bin", "node"); + const optVersioned = path.join(prefix, "opt", "node@22", "bin", "node"); + + await fs.mkdir(path.dirname(optDefault), { recursive: true }); + await fs.mkdir(path.dirname(optVersioned), { recursive: true }); + await fs.writeFile(optDefault, "", "utf8"); + await fs.writeFile(optVersioned, "", "utf8"); + + await expect(resolveStableNodePath(defaultNode)).resolves.toBe(optDefault); + await expect(resolveStableNodePath(versionedNode)).resolves.toBe(optVersioned); + }); + + it("falls back to the bin symlink for the default 
formula, otherwise original path", async () => { + const prefix = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-stable-node-")); + const defaultNode = path.join(prefix, "Cellar", "node", "25.7.0", "bin", "node"); + const versionedNode = path.join(prefix, "Cellar", "node@22", "22.17.0", "bin", "node"); + const binNode = path.join(prefix, "bin", "node"); + + await fs.mkdir(path.dirname(binNode), { recursive: true }); + await fs.writeFile(binNode, "", "utf8"); + + await expect(resolveStableNodePath(defaultNode)).resolves.toBe(binNode); + await expect(resolveStableNodePath(versionedNode)).resolves.toBe(versionedNode); + }); +}); diff --git a/src/infra/state-migrations.fs.test.ts b/src/infra/state-migrations.fs.test.ts new file mode 100644 index 00000000000..143572ca303 --- /dev/null +++ b/src/infra/state-migrations.fs.test.ts @@ -0,0 +1,71 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { + ensureDir, + existsDir, + fileExists, + isLegacyWhatsAppAuthFile, + readSessionStoreJson5, + safeReadDir, +} from "./state-migrations.fs.js"; + +describe("state migration fs helpers", () => { + it("reads directories safely and creates missing directories", () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-state-migrations-fs-")); + const nested = path.join(base, "nested"); + + expect(safeReadDir(nested)).toEqual([]); + ensureDir(nested); + fs.writeFileSync(path.join(nested, "file.txt"), "ok", "utf8"); + + expect(safeReadDir(nested).map((entry) => entry.name)).toEqual(["file.txt"]); + expect(existsDir(nested)).toBe(true); + expect(existsDir(path.join(nested, "file.txt"))).toBe(false); + }); + + it("distinguishes files from directories", () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-state-migrations-fs-")); + const filePath = path.join(base, "store.json"); + const dirPath = path.join(base, "dir"); + fs.writeFileSync(filePath, "{}", "utf8"); 
+ fs.mkdirSync(dirPath); + + expect(fileExists(filePath)).toBe(true); + expect(fileExists(dirPath)).toBe(false); + expect(fileExists(path.join(base, "missing.json"))).toBe(false); + }); + + it("recognizes legacy whatsapp auth file names", () => { + expect(isLegacyWhatsAppAuthFile("creds.json")).toBe(true); + expect(isLegacyWhatsAppAuthFile("creds.json.bak")).toBe(true); + expect(isLegacyWhatsAppAuthFile("session-123.json")).toBe(true); + expect(isLegacyWhatsAppAuthFile("pre-key-1.json")).toBe(true); + expect(isLegacyWhatsAppAuthFile("sender-key-1.txt")).toBe(false); + expect(isLegacyWhatsAppAuthFile("other.json")).toBe(false); + }); + + it("parses json5 session stores and rejects invalid shapes", () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-state-migrations-fs-")); + const okPath = path.join(base, "store.json"); + const badPath = path.join(base, "bad.json"); + const listPath = path.join(base, "list.json"); + + fs.writeFileSync(okPath, "{session: {sessionId: 'abc', updatedAt: 1}}", "utf8"); + fs.writeFileSync(badPath, "{not valid", "utf8"); + fs.writeFileSync(listPath, "[]", "utf8"); + + expect(readSessionStoreJson5(okPath)).toEqual({ + ok: true, + store: { + session: { + sessionId: "abc", + updatedAt: 1, + }, + }, + }); + expect(readSessionStoreJson5(badPath)).toEqual({ ok: false, store: {} }); + expect(readSessionStoreJson5(listPath)).toEqual({ ok: false, store: {} }); + }); +}); diff --git a/src/infra/state-migrations.state-dir.test.ts b/src/infra/state-migrations.state-dir.test.ts index 8c46fe398e0..c270e30475f 100644 --- a/src/infra/state-migrations.state-dir.test.ts +++ b/src/infra/state-migrations.state-dir.test.ts @@ -49,4 +49,47 @@ describe("legacy state dir auto-migration", () => { expect(fs.readFileSync(path.join(root, ".moltbot", "marker.txt"), "utf-8")).toBe("ok"); expect(fs.readFileSync(path.join(root, ".clawdbot", "marker.txt"), "utf-8")).toBe("ok"); }); + + it("skips state-dir migration when OPENCLAW_STATE_DIR is explicitly 
set", async () => { + const root = await makeTempRoot(); + const legacyDir = path.join(root, ".clawdbot"); + fs.mkdirSync(legacyDir, { recursive: true }); + + const result = await autoMigrateLegacyStateDir({ + env: { OPENCLAW_STATE_DIR: path.join(root, "custom-state") } as NodeJS.ProcessEnv, + homedir: () => root, + }); + + expect(result).toEqual({ + migrated: false, + skipped: true, + changes: [], + warnings: [], + }); + expect(fs.existsSync(legacyDir)).toBe(true); + }); + + it("only runs once per process until reset", async () => { + const root = await makeTempRoot(); + const legacyDir = path.join(root, ".clawdbot"); + fs.mkdirSync(legacyDir, { recursive: true }); + fs.writeFileSync(path.join(legacyDir, "marker.txt"), "ok", "utf-8"); + + const first = await autoMigrateLegacyStateDir({ + env: {} as NodeJS.ProcessEnv, + homedir: () => root, + }); + const second = await autoMigrateLegacyStateDir({ + env: {} as NodeJS.ProcessEnv, + homedir: () => root, + }); + + expect(first.migrated).toBe(true); + expect(second).toEqual({ + migrated: false, + skipped: true, + changes: [], + warnings: [], + }); + }); }); diff --git a/src/infra/supervisor-markers.test.ts b/src/infra/supervisor-markers.test.ts new file mode 100644 index 00000000000..fb49ec6eaf5 --- /dev/null +++ b/src/infra/supervisor-markers.test.ts @@ -0,0 +1,67 @@ +import { describe, expect, it } from "vitest"; +import { detectRespawnSupervisor, SUPERVISOR_HINT_ENV_VARS } from "./supervisor-markers.js"; + +describe("SUPERVISOR_HINT_ENV_VARS", () => { + it("includes the cross-platform supervisor hint env vars", () => { + expect(SUPERVISOR_HINT_ENV_VARS).toEqual( + expect.arrayContaining([ + "LAUNCH_JOB_LABEL", + "INVOCATION_ID", + "OPENCLAW_WINDOWS_TASK_NAME", + "OPENCLAW_SERVICE_MARKER", + "OPENCLAW_SERVICE_KIND", + ]), + ); + }); +}); + +describe("detectRespawnSupervisor", () => { + it("detects launchd and systemd only from non-blank platform-specific hints", () => { + expect(detectRespawnSupervisor({ 
LAUNCH_JOB_LABEL: " ai.openclaw.gateway " }, "darwin")).toBe( + "launchd", + ); + expect(detectRespawnSupervisor({ LAUNCH_JOB_LABEL: " " }, "darwin")).toBeNull(); + + expect(detectRespawnSupervisor({ INVOCATION_ID: "abc123" }, "linux")).toBe("systemd"); + expect(detectRespawnSupervisor({ JOURNAL_STREAM: "" }, "linux")).toBeNull(); + }); + + it("detects scheduled-task supervision on Windows from either hint family", () => { + expect( + detectRespawnSupervisor({ OPENCLAW_WINDOWS_TASK_NAME: "OpenClaw Gateway" }, "win32"), + ).toBe("schtasks"); + expect( + detectRespawnSupervisor( + { + OPENCLAW_SERVICE_MARKER: "openclaw", + OPENCLAW_SERVICE_KIND: "gateway", + }, + "win32", + ), + ).toBe("schtasks"); + expect( + detectRespawnSupervisor( + { + OPENCLAW_SERVICE_MARKER: "openclaw", + OPENCLAW_SERVICE_KIND: "worker", + }, + "win32", + ), + ).toBeNull(); + }); + + it("ignores service markers on non-Windows platforms and unknown platforms", () => { + expect( + detectRespawnSupervisor( + { + OPENCLAW_SERVICE_MARKER: "openclaw", + OPENCLAW_SERVICE_KIND: "gateway", + }, + "linux", + ), + ).toBeNull(); + expect( + detectRespawnSupervisor({ LAUNCH_JOB_LABEL: "ai.openclaw.gateway" }, "freebsd"), + ).toBeNull(); + }); +}); diff --git a/src/infra/system-events.test.ts b/src/infra/system-events.test.ts index 0b92aa36568..cf16416e210 100644 --- a/src/infra/system-events.test.ts +++ b/src/infra/system-events.test.ts @@ -3,7 +3,15 @@ import { drainFormattedSystemEvents } from "../auto-reply/reply/session-updates. 
import type { OpenClawConfig } from "../config/config.js"; import { resolveMainSessionKey } from "../config/sessions.js"; import { isCronSystemEvent } from "./heartbeat-runner.js"; -import { enqueueSystemEvent, peekSystemEvents, resetSystemEventsForTest } from "./system-events.js"; +import { + drainSystemEventEntries, + enqueueSystemEvent, + hasSystemEvents, + isSystemEventContextChanged, + peekSystemEventEntries, + peekSystemEvents, + resetSystemEventsForTest, +} from "./system-events.js"; const cfg = {} as unknown as OpenClawConfig; const mainKey = resolveMainSessionKey(cfg); @@ -56,6 +64,50 @@ describe("system events (session routing)", () => { expect(second).toBe(false); }); + it("normalizes context keys when checking for context changes", () => { + const key = "agent:main:test-context"; + expect(isSystemEventContextChanged(key, " build:123 ")).toBe(true); + + enqueueSystemEvent("Node connected", { + sessionKey: key, + contextKey: " BUILD:123 ", + }); + + expect(isSystemEventContextChanged(key, "build:123")).toBe(false); + expect(isSystemEventContextChanged(key, "build:456")).toBe(true); + expect(isSystemEventContextChanged(key)).toBe(true); + }); + + it("returns cloned event entries and resets duplicate suppression after drain", () => { + const key = "agent:main:test-entry-clone"; + enqueueSystemEvent("Node connected", { + sessionKey: key, + contextKey: "build:123", + }); + + const peeked = peekSystemEventEntries(key); + expect(hasSystemEvents(key)).toBe(true); + expect(peeked).toHaveLength(1); + peeked[0].text = "mutated"; + expect(peekSystemEvents(key)).toEqual(["Node connected"]); + + expect(drainSystemEventEntries(key).map((entry) => entry.text)).toEqual(["Node connected"]); + expect(hasSystemEvents(key)).toBe(false); + + expect(enqueueSystemEvent("Node connected", { sessionKey: key })).toBe(true); + }); + + it("keeps only the newest 20 queued events", () => { + const key = "agent:main:test-max-events"; + for (let index = 1; index <= 22; index += 1) { + 
enqueueSystemEvent(`event ${index}`, { sessionKey: key }); + } + + expect(peekSystemEvents(key)).toEqual( + Array.from({ length: 20 }, (_, index) => `event ${index + 3}`), + ); + }); + it("filters heartbeat/noise lines, returning undefined", async () => { const key = "agent:main:test-heartbeat-filter"; enqueueSystemEvent("Read HEARTBEAT.md before continuing", { sessionKey: key }); diff --git a/src/infra/system-message.test.ts b/src/infra/system-message.test.ts index 5cb1d4be87f..980c852eeb4 100644 --- a/src/infra/system-message.test.ts +++ b/src/infra/system-message.test.ts @@ -13,6 +13,7 @@ describe("system-message", () => { it.each([ { input: `${SYSTEM_MARK} already prefixed`, expected: true }, { input: ` ${SYSTEM_MARK} hello`, expected: true }, + { input: SYSTEM_MARK, expected: true }, { input: "", expected: false }, { input: "hello", expected: false }, ])("detects marks for %j", ({ input, expected }) => { @@ -24,4 +25,8 @@ describe("system-message", () => { `${SYSTEM_MARK} already prefixed`, ); }); + + it("preserves mark-only messages after trimming", () => { + expect(prefixSystemMessage(` ${SYSTEM_MARK} `)).toBe(SYSTEM_MARK); + }); }); diff --git a/src/infra/system-presence.test.ts b/src/infra/system-presence.test.ts index 10929115605..02369a18355 100644 --- a/src/infra/system-presence.test.ts +++ b/src/infra/system-presence.test.ts @@ -61,6 +61,42 @@ describe("system-presence", () => { expect(entry?.scopes).toEqual(expect.arrayContaining(["operator.admin", "system.run"])); }); + it("parses node presence text and normalizes the update key", () => { + const update = updateSystemPresence({ + text: "Node: Relay-Host (10.0.0.9) · app 2.1.0 · last input 7s ago · mode ui · reason beacon", + instanceId: " Mixed-Case-Node ", + }); + + expect(update.key).toBe("mixed-case-node"); + expect(update.changedKeys).toEqual(["host", "ip", "version", "mode", "reason"]); + expect(update.next).toMatchObject({ + host: "Relay-Host", + ip: "10.0.0.9", + version: "2.1.0", + 
lastInputSeconds: 7, + mode: "ui", + reason: "beacon", + text: "Node: Relay-Host (10.0.0.9) · app 2.1.0 · last input 7s ago · mode ui · reason beacon", + }); + }); + + it("drops blank role and scope entries while keeping fallback text", () => { + const deviceId = randomUUID(); + + upsertPresence(deviceId, { + deviceId, + host: "relay-host", + mode: "operator", + roles: [" operator ", "", " "], + scopes: ["operator.admin", "", " "], + }); + + const entry = listSystemPresence().find((candidate) => candidate.deviceId === deviceId); + expect(entry?.roles).toEqual(["operator"]); + expect(entry?.scopes).toEqual(["operator.admin"]); + expect(entry?.text).toBe("Node: relay-host · mode operator"); + }); + it("prunes stale non-self entries after TTL", () => { vi.useFakeTimers(); vi.setSystemTime(Date.now()); diff --git a/src/infra/system-run-approval-binding.test.ts b/src/infra/system-run-approval-binding.test.ts new file mode 100644 index 00000000000..468956dba7b --- /dev/null +++ b/src/infra/system-run-approval-binding.test.ts @@ -0,0 +1,245 @@ +import { describe, expect, it } from "vitest"; +import { + buildSystemRunApprovalBinding, + buildSystemRunApprovalEnvBinding, + matchSystemRunApprovalBinding, + matchSystemRunApprovalEnvHash, + missingSystemRunApprovalBinding, + normalizeSystemRunApprovalPlan, +} from "./system-run-approval-binding.js"; + +describe("normalizeSystemRunApprovalPlan", () => { + it("accepts commandText and normalized mutable file operands", () => { + expect( + normalizeSystemRunApprovalPlan({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + commandPreview: "echo hi", + cwd: " /tmp ", + agentId: " main ", + sessionKey: " agent:main:main ", + mutableFileOperand: { + argvIndex: 2, + path: " /tmp/payload.txt ", + sha256: " abc123 ", + }, + }), + ).toEqual({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + commandPreview: "echo hi", + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + 
mutableFileOperand: { + argvIndex: 2, + path: "/tmp/payload.txt", + sha256: "abc123", + }, + }); + }); + + it("falls back to rawCommand and rejects invalid file operands", () => { + expect( + normalizeSystemRunApprovalPlan({ + argv: ["bash", "-lc", "echo hi"], + rawCommand: 'bash -lc "echo hi"', + }), + ).toEqual({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + commandPreview: null, + cwd: null, + agentId: null, + sessionKey: null, + mutableFileOperand: undefined, + }); + + expect( + normalizeSystemRunApprovalPlan({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + mutableFileOperand: { + argvIndex: -1, + path: "/tmp/payload.txt", + sha256: "abc123", + }, + }), + ).toBeNull(); + }); +}); + +describe("buildSystemRunApprovalEnvBinding", () => { + it("normalizes, filters, and sorts env keys before hashing", () => { + const normalized = buildSystemRunApprovalEnvBinding({ + z_key: "b", + " bad key ": "ignored", + alpha: "a", + EMPTY: 1, + }); + const reordered = buildSystemRunApprovalEnvBinding({ + alpha: "a", + z_key: "b", + }); + + expect(normalized).toEqual({ + envHash: reordered.envHash, + envKeys: ["alpha", "z_key"], + }); + expect(normalized.envHash).toBeTypeOf("string"); + expect(normalized.envHash).toHaveLength(64); + }); + + it("returns a null hash when no usable env entries remain", () => { + expect(buildSystemRunApprovalEnvBinding(null)).toEqual({ + envHash: null, + envKeys: [], + }); + expect( + buildSystemRunApprovalEnvBinding({ + bad: 1, + }), + ).toEqual({ + envHash: null, + envKeys: [], + }); + }); +}); + +describe("buildSystemRunApprovalBinding", () => { + it("normalizes argv and metadata into a binding", () => { + const envBinding = buildSystemRunApprovalEnvBinding({ + beta: "2", + alpha: "1", + }); + + expect( + buildSystemRunApprovalBinding({ + argv: ["bash", "-lc", 12], + cwd: " /tmp ", + agentId: " main ", + sessionKey: " agent:main:main ", + env: { + beta: "2", + alpha: "1", + }, + }), + 
).toEqual({ + binding: { + argv: ["bash", "-lc", "12"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + envHash: envBinding.envHash, + }, + envKeys: ["alpha", "beta"], + }); + }); +}); + +describe("matchSystemRunApprovalEnvHash", () => { + it("handles matching, missing, and mismatched env bindings", () => { + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: null, + actualEnvHash: null, + actualEnvKeys: [], + }), + ).toEqual({ ok: true }); + + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: null, + actualEnvHash: "abc", + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ + ok: false, + code: "APPROVAL_ENV_BINDING_MISSING", + message: "approval id missing env binding for requested env overrides", + details: { envKeys: ["ALPHA"] }, + }); + + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: "abc", + actualEnvHash: "def", + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ + ok: false, + code: "APPROVAL_ENV_MISMATCH", + message: "approval id env binding mismatch", + details: { + envKeys: ["ALPHA"], + expectedEnvHash: "abc", + actualEnvHash: "def", + }, + }); + }); +}); + +describe("matchSystemRunApprovalBinding", () => { + const expected = { + argv: ["bash", "-lc", "echo hi"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + envHash: "abc", + }; + + it("accepts exact matches", () => { + expect( + matchSystemRunApprovalBinding({ + expected, + actual: { ...expected }, + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ ok: true }); + }); + + it.each([ + { + name: "argv mismatch", + actual: { ...expected, argv: ["bash", "-lc", "echo bye"] }, + }, + { + name: "cwd mismatch", + actual: { ...expected, cwd: "/var/tmp" }, + }, + { + name: "agent mismatch", + actual: { ...expected, agentId: "other" }, + }, + { + name: "session mismatch", + actual: { ...expected, sessionKey: "agent:main:other" }, + }, + ])("rejects $name", ({ actual }) => { + expect( + matchSystemRunApprovalBinding({ + expected, + actual, + 
actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ + ok: false, + code: "APPROVAL_REQUEST_MISMATCH", + message: "approval id does not match request", + details: undefined, + }); + }); +}); + +describe("missingSystemRunApprovalBinding", () => { + it("reports env keys with request mismatches", () => { + expect(missingSystemRunApprovalBinding({ actualEnvKeys: ["ALPHA", "BETA"] })).toEqual({ + ok: false, + code: "APPROVAL_REQUEST_MISMATCH", + message: "approval id does not match request", + details: { + envKeys: ["ALPHA", "BETA"], + }, + }); + }); +}); diff --git a/src/infra/system-run-approval-context.test.ts b/src/infra/system-run-approval-context.test.ts index fbd0e805a22..1dc98eea200 100644 --- a/src/infra/system-run-approval-context.test.ts +++ b/src/infra/system-run-approval-context.test.ts @@ -1,5 +1,9 @@ import { describe, expect, test } from "vitest"; -import { resolveSystemRunApprovalRequestContext } from "./system-run-approval-context.js"; +import { + parsePreparedSystemRunPayload, + resolveSystemRunApprovalRequestContext, + resolveSystemRunApprovalRuntimeContext, +} from "./system-run-approval-context.js"; describe("resolveSystemRunApprovalRequestContext", () => { test("uses full approval text and separate preview for node system.run plans", () => { @@ -37,4 +41,127 @@ describe("resolveSystemRunApprovalRequestContext", () => { expect(context.commandText).toBe('./env sh -c "jq --version"'); expect(context.commandPreview).toBe("jq --version"); }); + + test("falls back to explicit request params for non-node hosts", () => { + const context = resolveSystemRunApprovalRequestContext({ + host: "gateway", + command: "jq --version", + commandArgv: ["jq", "--version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + systemRunPlan: { + argv: ["ignored"], + commandText: "ignored", + }, + }); + + expect(context.plan).toBeNull(); + expect(context.commandArgv).toEqual(["jq", "--version"]); + expect(context.commandText).toBe("jq --version"); + 
expect(context.commandPreview).toBeNull(); + expect(context.cwd).toBe("/tmp"); + expect(context.agentId).toBe("main"); + expect(context.sessionKey).toBe("agent:main:main"); + }); +}); + +describe("parsePreparedSystemRunPayload", () => { + test("parses legacy prepared payloads via top-level fallback command text", () => { + expect( + parsePreparedSystemRunPayload({ + plan: { + argv: ["bash", "-lc", "jq --version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + }, + commandText: 'bash -lc "jq --version"', + }), + ).toEqual({ + plan: { + argv: ["bash", "-lc", "jq --version"], + cwd: "/tmp", + commandText: 'bash -lc "jq --version"', + commandPreview: null, + agentId: "main", + sessionKey: "agent:main:main", + }, + }); + }); + + test("rejects legacy payloads missing argv or command text", () => { + expect(parsePreparedSystemRunPayload({ plan: { argv: [] }, commandText: "jq --version" })).toBe( + null, + ); + expect( + parsePreparedSystemRunPayload({ + plan: { argv: ["jq", "--version"] }, + }), + ).toBeNull(); + }); +}); + +describe("resolveSystemRunApprovalRuntimeContext", () => { + test("uses normalized plan runtime metadata when available", () => { + expect( + resolveSystemRunApprovalRuntimeContext({ + plan: { + argv: ["jq", "--version"], + cwd: "/tmp", + commandText: "jq --version", + commandPreview: "jq --version", + agentId: "main", + sessionKey: "agent:main:main", + }, + }), + ).toEqual({ + ok: true, + plan: { + argv: ["jq", "--version"], + cwd: "/tmp", + commandText: "jq --version", + commandPreview: "jq --version", + agentId: "main", + sessionKey: "agent:main:main", + }, + argv: ["jq", "--version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + commandText: "jq --version", + }); + }); + + test("falls back to command/rawCommand validation without a plan", () => { + expect( + resolveSystemRunApprovalRuntimeContext({ + command: ["bash", "-lc", "jq --version"], + rawCommand: 'bash -lc "jq --version"', + cwd: "/tmp", + 
agentId: "main", + sessionKey: "agent:main:main", + }), + ).toEqual({ + ok: true, + plan: null, + argv: ["bash", "-lc", "jq --version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + commandText: 'bash -lc "jq --version"', + }); + }); + + test("returns request validation errors from command fallback", () => { + expect( + resolveSystemRunApprovalRuntimeContext({ + rawCommand: "jq --version", + }), + ).toEqual({ + ok: false, + message: "rawCommand requires params.command", + details: { code: "MISSING_COMMAND" }, + }); + }); }); diff --git a/src/infra/system-run-normalize.test.ts b/src/infra/system-run-normalize.test.ts new file mode 100644 index 00000000000..6bf2f56d4e9 --- /dev/null +++ b/src/infra/system-run-normalize.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, it } from "vitest"; +import { normalizeNonEmptyString, normalizeStringArray } from "./system-run-normalize.js"; + +describe("system run normalization helpers", () => { + it("normalizes only non-empty trimmed strings", () => { + expect(normalizeNonEmptyString(" hello ")).toBe("hello"); + expect(normalizeNonEmptyString(" \n\t ")).toBeNull(); + expect(normalizeNonEmptyString(42)).toBeNull(); + expect(normalizeNonEmptyString(null)).toBeNull(); + }); + + it("normalizes array entries and rejects non-arrays", () => { + expect(normalizeStringArray([" alpha ", 42, false])).toEqual([" alpha ", "42", "false"]); + expect(normalizeStringArray(undefined)).toEqual([]); + expect(normalizeStringArray("alpha")).toEqual([]); + }); +}); diff --git a/src/infra/tailnet.test.ts b/src/infra/tailnet.test.ts new file mode 100644 index 00000000000..eeb259cbeb4 --- /dev/null +++ b/src/infra/tailnet.test.ts @@ -0,0 +1,54 @@ +import os from "node:os"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + isTailnetIPv4, + listTailnetAddresses, + pickPrimaryTailnetIPv4, + pickPrimaryTailnetIPv6, +} from "./tailnet.js"; + +describe("tailnet helpers", () => { + afterEach(() => { + 
vi.restoreAllMocks(); + }); + + it("detects tailscale ipv4 ranges", () => { + expect(isTailnetIPv4("100.64.0.1")).toBe(true); + expect(isTailnetIPv4("100.127.255.254")).toBe(true); + expect(isTailnetIPv4("100.63.255.255")).toBe(false); + expect(isTailnetIPv4("192.168.1.10")).toBe(false); + }); + + it("lists unique non-internal tailnet addresses only", () => { + vi.spyOn(os, "networkInterfaces").mockReturnValue({ + lo0: [{ address: "127.0.0.1", family: "IPv4", internal: true, netmask: "" }], + en0: [ + { address: " 100.88.1.5 ", family: "IPv4", internal: false, netmask: "" }, + { address: "100.88.1.5", family: "IPv4", internal: false, netmask: "" }, + { address: "fd7a:115c:a1e0::1", family: "IPv6", internal: false, netmask: "" }, + { address: " ", family: "IPv6", internal: false, netmask: "" }, + { address: "fe80::1", family: "IPv6", internal: false, netmask: "" }, + ], + // oxlint-disable-next-line typescript/no-explicit-any + } as any); + + expect(listTailnetAddresses()).toEqual({ + ipv4: ["100.88.1.5"], + ipv6: ["fd7a:115c:a1e0::1"], + }); + }); + + it("picks the first available tailnet addresses", () => { + vi.spyOn(os, "networkInterfaces").mockReturnValue({ + utun1: [ + { address: "100.99.1.1", family: "IPv4", internal: false, netmask: "" }, + { address: "100.99.1.2", family: "IPv4", internal: false, netmask: "" }, + { address: "fd7a:115c:a1e0::9", family: "IPv6", internal: false, netmask: "" }, + ], + // oxlint-disable-next-line typescript/no-explicit-any + } as any); + + expect(pickPrimaryTailnetIPv4()).toBe("100.99.1.1"); + expect(pickPrimaryTailnetIPv6()).toBe("fd7a:115c:a1e0::9"); + }); +}); diff --git a/src/infra/update-channels.test.ts b/src/infra/update-channels.test.ts index c13476f356a..2738cc0ddad 100644 --- a/src/infra/update-channels.test.ts +++ b/src/infra/update-channels.test.ts @@ -7,6 +7,8 @@ import { normalizeUpdateChannel, resolveEffectiveUpdateChannel, resolveUpdateChannelDisplay, + type UpdateChannel, + type UpdateChannelSource, } from 
"./update-channels.js"; describe("update-channels tag detection", () => { @@ -32,9 +34,12 @@ describe("normalizeUpdateChannel", () => { { value: " nightly ", expected: null }, { value: null, expected: null }, { value: undefined, expected: null }, - ])("normalizes %j", ({ value, expected }) => { - expect(normalizeUpdateChannel(value)).toBe(expected); - }); + ] satisfies Array<{ value: string | null | undefined; expected: UpdateChannel | null }>)( + "normalizes %j", + ({ value, expected }) => { + expect(normalizeUpdateChannel(value)).toBe(expected); + }, + ); }); describe("channelToNpmTag", () => { @@ -42,9 +47,12 @@ describe("channelToNpmTag", () => { { channel: "stable", expected: "latest" }, { channel: "beta", expected: "beta" }, { channel: "dev", expected: "dev" }, - ])("maps $channel to $expected", ({ channel, expected }) => { - expect(channelToNpmTag(channel)).toBe(expected); - }); + ] satisfies Array<{ channel: UpdateChannel; expected: string }>)( + "maps $channel to $expected", + ({ channel, expected }) => { + expect(channelToNpmTag(channel)).toBe(expected); + }, + ); }); describe("resolveEffectiveUpdateChannel", () => { @@ -100,7 +108,11 @@ describe("resolveEffectiveUpdateChannel", () => { params: { installKind: "unknown" as const }, expected: { channel: "stable", source: "default" }, }, - ])("$name", ({ params, expected }) => { + ] satisfies Array<{ + name: string; + params: Parameters[0]; + expected: { channel: UpdateChannel; source: UpdateChannelSource }; + }>)("$name", ({ params, expected }) => { expect(resolveEffectiveUpdateChannel(params)).toEqual(expected); }); }); @@ -145,7 +157,11 @@ describe("formatUpdateChannelLabel", () => { params: { channel: "stable", source: "default" as const }, expected: "stable (default)", }, - ])("$name", ({ params, expected }) => { + ] satisfies Array<{ + name: string; + params: Parameters[0]; + expected: string; + }>)("$name", ({ params, expected }) => { expect(formatUpdateChannelLabel(params)).toBe(expected); }); }); 
diff --git a/src/infra/update-check.test.ts b/src/infra/update-check.test.ts index 560902aee83..610ca1957ec 100644 --- a/src/infra/update-check.test.ts +++ b/src/infra/update-check.test.ts @@ -1,5 +1,16 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import { compareSemverStrings, resolveNpmChannelTag } from "./update-check.js"; +import { + checkDepsStatus, + checkUpdateStatus, + compareSemverStrings, + fetchNpmLatestVersion, + fetchNpmTagVersion, + formatGitInstallLabel, + resolveNpmChannelTag, +} from "./update-check.js"; describe("compareSemverStrings", () => { it("handles stable and prerelease precedence for both legacy and beta formats", () => { @@ -72,4 +83,160 @@ describe("resolveNpmChannelTag", () => { expect(resolved).toEqual({ tag: "latest", version: "1.0.1" }); }); + + it("keeps non-beta channels unchanged", async () => { + versionByTag.latest = "1.0.3"; + + await expect(resolveNpmChannelTag({ channel: "stable", timeoutMs: 1000 })).resolves.toEqual({ + tag: "latest", + version: "1.0.3", + }); + }); + + it("exposes tag fetch helpers for success and http failures", async () => { + versionByTag.latest = "1.0.4"; + + await expect(fetchNpmTagVersion({ tag: "latest", timeoutMs: 1000 })).resolves.toEqual({ + tag: "latest", + version: "1.0.4", + }); + await expect(fetchNpmLatestVersion({ timeoutMs: 1000 })).resolves.toEqual({ + latestVersion: "1.0.4", + error: undefined, + }); + await expect(fetchNpmTagVersion({ tag: "beta", timeoutMs: 1000 })).resolves.toEqual({ + tag: "beta", + version: null, + error: "HTTP 404", + }); + }); +}); + +describe("formatGitInstallLabel", () => { + it("formats branch, detached tag, and non-git installs", () => { + expect( + formatGitInstallLabel({ + root: "/repo", + installKind: "git", + packageManager: "pnpm", + git: { + root: "/repo", + sha: "1234567890abcdef", + tag: null, + branch: "main", + upstream: 
"origin/main", + dirty: false, + ahead: 0, + behind: 0, + fetchOk: true, + }, + }), + ).toBe("main · @ 12345678"); + + expect( + formatGitInstallLabel({ + root: "/repo", + installKind: "git", + packageManager: "pnpm", + git: { + root: "/repo", + sha: "abcdef1234567890", + tag: "v1.2.3", + branch: "HEAD", + upstream: null, + dirty: false, + ahead: 0, + behind: 0, + fetchOk: null, + }, + }), + ).toBe("detached · tag v1.2.3 · @ abcdef12"); + + expect( + formatGitInstallLabel({ + root: null, + installKind: "package", + packageManager: "pnpm", + }), + ).toBeNull(); + }); +}); + +describe("checkDepsStatus", () => { + it("reports unknown, missing, stale, and ok states from lockfile markers", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-check-")); + + await expect(checkDepsStatus({ root: base, manager: "unknown" })).resolves.toEqual({ + manager: "unknown", + status: "unknown", + lockfilePath: null, + markerPath: null, + reason: "unknown package manager", + }); + + await fs.writeFile(path.join(base, "pnpm-lock.yaml"), "lock", "utf8"); + await expect(checkDepsStatus({ root: base, manager: "pnpm" })).resolves.toMatchObject({ + manager: "pnpm", + status: "missing", + reason: "node_modules marker missing", + }); + + const markerPath = path.join(base, "node_modules", ".modules.yaml"); + await fs.mkdir(path.dirname(markerPath), { recursive: true }); + await fs.writeFile(markerPath, "marker", "utf8"); + const staleDate = new Date(Date.now() - 10_000); + const freshDate = new Date(); + await fs.utimes(markerPath, staleDate, staleDate); + await fs.utimes(path.join(base, "pnpm-lock.yaml"), freshDate, freshDate); + + await expect(checkDepsStatus({ root: base, manager: "pnpm" })).resolves.toMatchObject({ + manager: "pnpm", + status: "stale", + reason: "lockfile newer than install marker", + }); + + const newerMarker = new Date(Date.now() + 2_000); + await fs.utimes(markerPath, newerMarker, newerMarker); + await expect(checkDepsStatus({ root: 
base, manager: "pnpm" })).resolves.toMatchObject({ + manager: "pnpm", + status: "ok", + }); + }); +}); + +describe("checkUpdateStatus", () => { + it("returns unknown install status when root is missing", async () => { + await expect( + checkUpdateStatus({ root: null, includeRegistry: false, timeoutMs: 1000 }), + ).resolves.toEqual({ + root: null, + installKind: "unknown", + packageManager: "unknown", + registry: undefined, + }); + }); + + it("detects package installs for non-git roots", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-check-")); + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ packageManager: "npm@10.0.0" }), + "utf8", + ); + await fs.writeFile(path.join(root, "package-lock.json"), "lock", "utf8"); + await fs.mkdir(path.join(root, "node_modules"), { recursive: true }); + + await expect( + checkUpdateStatus({ root, includeRegistry: false, fetchGit: false, timeoutMs: 1000 }), + ).resolves.toMatchObject({ + root, + installKind: "package", + packageManager: "npm", + git: undefined, + registry: undefined, + deps: { + manager: "npm", + }, + }); + }); }); diff --git a/src/infra/update-global.test.ts b/src/infra/update-global.test.ts new file mode 100644 index 00000000000..b95727febbf --- /dev/null +++ b/src/infra/update-global.test.ts @@ -0,0 +1,150 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { captureEnv } from "../test-utils/env.js"; +import { + cleanupGlobalRenameDirs, + detectGlobalInstallManagerByPresence, + detectGlobalInstallManagerForRoot, + globalInstallArgs, + globalInstallFallbackArgs, + resolveGlobalPackageRoot, + resolveGlobalInstallSpec, + resolveGlobalRoot, + type CommandRunner, +} from "./update-global.js"; + +describe("update global helpers", () => { + let envSnapshot: ReturnType | undefined; + + afterEach(() => { + envSnapshot?.restore(); + envSnapshot = 
undefined; + }); + + it("prefers explicit package spec overrides", () => { + envSnapshot = captureEnv(["OPENCLAW_UPDATE_PACKAGE_SPEC"]); + process.env.OPENCLAW_UPDATE_PACKAGE_SPEC = "file:/tmp/openclaw.tgz"; + + expect(resolveGlobalInstallSpec({ packageName: "openclaw", tag: "latest" })).toBe( + "file:/tmp/openclaw.tgz", + ); + expect( + resolveGlobalInstallSpec({ + packageName: "openclaw", + tag: "beta", + env: { OPENCLAW_UPDATE_PACKAGE_SPEC: "openclaw@next" }, + }), + ).toBe("openclaw@next"); + }); + + it("resolves global roots and package roots from runner output", async () => { + const runCommand: CommandRunner = async (argv) => { + if (argv[0] === "npm") { + return { stdout: "/tmp/npm-root\n", stderr: "", code: 0 }; + } + if (argv[0] === "pnpm") { + return { stdout: "", stderr: "", code: 1 }; + } + throw new Error(`unexpected command: ${argv.join(" ")}`); + }; + + await expect(resolveGlobalRoot("npm", runCommand, 1000)).resolves.toBe("/tmp/npm-root"); + await expect(resolveGlobalRoot("pnpm", runCommand, 1000)).resolves.toBeNull(); + await expect(resolveGlobalRoot("bun", runCommand, 1000)).resolves.toContain( + path.join(".bun", "install", "global", "node_modules"), + ); + await expect(resolveGlobalPackageRoot("npm", runCommand, 1000)).resolves.toBe( + "/tmp/npm-root/openclaw", + ); + }); + + it("detects install managers from resolved roots and on-disk presence", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-global-")); + const npmRoot = path.join(base, "npm-root"); + const pnpmRoot = path.join(base, "pnpm-root"); + const bunRoot = path.join(base, ".bun", "install", "global", "node_modules"); + const pkgRoot = path.join(pnpmRoot, "openclaw"); + await fs.mkdir(pkgRoot, { recursive: true }); + await fs.mkdir(path.join(npmRoot, "openclaw"), { recursive: true }); + await fs.mkdir(path.join(bunRoot, "openclaw"), { recursive: true }); + + envSnapshot = captureEnv(["BUN_INSTALL"]); + process.env.BUN_INSTALL = path.join(base, 
".bun"); + + const runCommand: CommandRunner = async (argv) => { + if (argv[0] === "npm") { + return { stdout: `${npmRoot}\n`, stderr: "", code: 0 }; + } + if (argv[0] === "pnpm") { + return { stdout: `${pnpmRoot}\n`, stderr: "", code: 0 }; + } + throw new Error(`unexpected command: ${argv.join(" ")}`); + }; + + await expect(detectGlobalInstallManagerForRoot(runCommand, pkgRoot, 1000)).resolves.toBe( + "pnpm", + ); + await expect(detectGlobalInstallManagerByPresence(runCommand, 1000)).resolves.toBe("npm"); + + await fs.rm(path.join(npmRoot, "openclaw"), { recursive: true, force: true }); + await fs.rm(path.join(pnpmRoot, "openclaw"), { recursive: true, force: true }); + await expect(detectGlobalInstallManagerByPresence(runCommand, 1000)).resolves.toBe("bun"); + }); + + it("builds install argv and npm fallback argv", () => { + expect(globalInstallArgs("npm", "openclaw@latest")).toEqual([ + "npm", + "i", + "-g", + "openclaw@latest", + "--no-fund", + "--no-audit", + "--loglevel=error", + ]); + expect(globalInstallArgs("pnpm", "openclaw@latest")).toEqual([ + "pnpm", + "add", + "-g", + "openclaw@latest", + ]); + expect(globalInstallArgs("bun", "openclaw@latest")).toEqual([ + "bun", + "add", + "-g", + "openclaw@latest", + ]); + + expect(globalInstallFallbackArgs("npm", "openclaw@latest")).toEqual([ + "npm", + "i", + "-g", + "openclaw@latest", + "--omit=optional", + "--no-fund", + "--no-audit", + "--loglevel=error", + ]); + expect(globalInstallFallbackArgs("pnpm", "openclaw@latest")).toBeNull(); + }); + + it("cleans only renamed package directories", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-cleanup-")); + await fs.mkdir(path.join(root, ".openclaw-123"), { recursive: true }); + await fs.mkdir(path.join(root, ".openclaw-456"), { recursive: true }); + await fs.writeFile(path.join(root, ".openclaw-file"), "nope", "utf8"); + await fs.mkdir(path.join(root, "openclaw"), { recursive: true }); + + await expect( + 
cleanupGlobalRenameDirs({ + globalRoot: root, + packageName: "openclaw", + }), + ).resolves.toEqual({ + removed: [".openclaw-123", ".openclaw-456"], + }); + await expect(fs.stat(path.join(root, "openclaw"))).resolves.toBeDefined(); + await expect(fs.stat(path.join(root, ".openclaw-file"))).resolves.toBeDefined(); + }); +}); diff --git a/src/infra/voicewake.test.ts b/src/infra/voicewake.test.ts new file mode 100644 index 00000000000..d719a496e81 --- /dev/null +++ b/src/infra/voicewake.test.ts @@ -0,0 +1,55 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-utils/temp-dir.js"; +import { + defaultVoiceWakeTriggers, + loadVoiceWakeConfig, + setVoiceWakeTriggers, +} from "./voicewake.js"; + +describe("voicewake config", () => { + it("returns defaults when missing", async () => { + await withTempDir("openclaw-voicewake-", async (baseDir) => { + await expect(loadVoiceWakeConfig(baseDir)).resolves.toEqual({ + triggers: defaultVoiceWakeTriggers(), + updatedAtMs: 0, + }); + }); + }); + + it("sanitizes and persists triggers", async () => { + await withTempDir("openclaw-voicewake-", async (baseDir) => { + const saved = await setVoiceWakeTriggers([" hi ", "", " there "], baseDir); + expect(saved.triggers).toEqual(["hi", "there"]); + expect(saved.updatedAtMs).toBeGreaterThan(0); + + await expect(loadVoiceWakeConfig(baseDir)).resolves.toEqual({ + triggers: ["hi", "there"], + updatedAtMs: saved.updatedAtMs, + }); + }); + }); + + it("falls back to defaults for empty or malformed persisted values", async () => { + await withTempDir("openclaw-voicewake-", async (baseDir) => { + const emptySaved = await setVoiceWakeTriggers(["", " "], baseDir); + expect(emptySaved.triggers).toEqual(defaultVoiceWakeTriggers()); + + await fs.mkdir(path.join(baseDir, "settings"), { recursive: true }); + await fs.writeFile( + path.join(baseDir, "settings", "voicewake.json"), + JSON.stringify({ + 
triggers: [" wake ", "", 42, null], + updatedAtMs: -1, + }), + "utf8", + ); + + await expect(loadVoiceWakeConfig(baseDir)).resolves.toEqual({ + triggers: ["wake"], + updatedAtMs: 0, + }); + }); + }); +}); diff --git a/src/infra/warning-filter.test.ts b/src/infra/warning-filter.test.ts index 9333d23da0c..1eb3b1372b5 100644 --- a/src/infra/warning-filter.test.ts +++ b/src/infra/warning-filter.test.ts @@ -12,6 +12,10 @@ function resetWarningFilterInstallState(): void { process.emitWarning = baseEmitWarning; } +async function flushWarnings(): Promise { + await new Promise((resolve) => setImmediate(resolve)); +} + describe("warning filter", () => { beforeEach(() => { resetWarningFilterInstallState(); @@ -23,36 +27,49 @@ describe("warning filter", () => { }); it("suppresses known deprecation and experimental warning signatures", () => { - expect( - shouldIgnoreWarning({ + const ignoredWarnings = [ + { name: "DeprecationWarning", code: "DEP0040", message: "The punycode module is deprecated.", - }), - ).toBe(true); - expect( - shouldIgnoreWarning({ + }, + { name: "DeprecationWarning", code: "DEP0060", message: "The `util._extend` API is deprecated.", - }), - ).toBe(true); - expect( - shouldIgnoreWarning({ + }, + { name: "ExperimentalWarning", message: "SQLite is an experimental feature and might change at any time", - }), - ).toBe(true); + }, + ]; + + for (const warning of ignoredWarnings) { + expect(shouldIgnoreWarning(warning)).toBe(true); + } }); it("keeps unknown warnings visible", () => { - expect( - shouldIgnoreWarning({ + const visibleWarnings = [ + { name: "DeprecationWarning", code: "DEP9999", message: "Totally new warning", - }), - ).toBe(false); + }, + { + name: "ExperimentalWarning", + message: "Different experimental warning", + }, + { + name: "DeprecationWarning", + code: "DEP0040", + message: "Different deprecated module", + }, + ]; + + for (const warning of visibleWarnings) { + expect(shouldIgnoreWarning(warning)).toBe(false); + } }); it("installs once and 
suppresses known warnings at emit time", async () => { @@ -82,11 +99,18 @@ describe("warning filter", () => { type: "DeprecationWarning", code: "DEP0060", }); - await new Promise((resolve) => setImmediate(resolve)); + emitWarning( + Object.assign(new Error("The punycode module is deprecated."), { + name: "DeprecationWarning", + code: "DEP0040", + }), + ); + await flushWarnings(); expect(seenWarnings.find((warning) => warning.code === "DEP0060")).toBeUndefined(); + expect(seenWarnings.find((warning) => warning.code === "DEP0040")).toBeUndefined(); emitWarning("Visible warning", { type: "Warning", code: "OPENCLAW_TEST_WARNING" }); - await new Promise((resolve) => setImmediate(resolve)); + await flushWarnings(); expect( seenWarnings.find((warning) => warning.code === "OPENCLAW_TEST_WARNING"), ).toBeDefined(); diff --git a/src/infra/widearea-dns.test.ts b/src/infra/widearea-dns.test.ts index 7c8a894858d..f2ab0c0f54f 100644 --- a/src/infra/widearea-dns.test.ts +++ b/src/infra/widearea-dns.test.ts @@ -1,10 +1,34 @@ -import { describe, expect, it } from "vitest"; +import fs from "node:fs"; +import path from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import * as utils from "../utils.js"; import { + getWideAreaZonePath, normalizeWideAreaDomain, renderWideAreaGatewayZoneText, resolveWideAreaDiscoveryDomain, + type WideAreaGatewayZoneOpts, + writeWideAreaGatewayZone, } from "./widearea-dns.js"; +const baseZoneOpts: WideAreaGatewayZoneOpts = { + domain: "openclaw.internal.", + gatewayPort: 18789, + displayName: "Mac Studio (OpenClaw)", + tailnetIPv4: "100.123.224.76", + hostLabel: "studio-london", + instanceLabel: "studio-london", +}; + +function makeZoneOpts(overrides: Partial = {}): WideAreaGatewayZoneOpts { + return { ...baseZoneOpts, ...overrides }; +} + +afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); +}); + describe("wide-area DNS discovery domain helpers", () => { it.each([ { value: "openclaw.internal", expected: 
"openclaw.internal." }, @@ -45,6 +69,12 @@ describe("wide-area DNS discovery domain helpers", () => { ])("$name", ({ params, expected }) => { expect(resolveWideAreaDiscoveryDomain(params)).toBe(expected); }); + + it("builds the default zone path from the normalized domain", () => { + expect(getWideAreaZonePath("openclaw.internal.")).toBe( + path.join(utils.CONFIG_DIR, "dns", "openclaw.internal.db"), + ); + }); }); describe("wide-area DNS-SD zone rendering", () => { @@ -113,3 +143,50 @@ describe("wide-area DNS-SD zone rendering", () => { expect(txt).toContain(`cliPath=/opt/homebrew/bin/openclaw`); }); }); + +describe("wide-area DNS zone writes", () => { + it("rejects blank domains", async () => { + await expect(writeWideAreaGatewayZone(makeZoneOpts({ domain: " " }))).rejects.toThrow( + "wide-area discovery domain is required", + ); + }); + + it("skips rewriting unchanged content", async () => { + vi.spyOn(utils, "ensureDir").mockResolvedValue(undefined); + const existing = renderWideAreaGatewayZoneText({ ...makeZoneOpts(), serial: 2026031301 }); + vi.spyOn(fs, "readFileSync").mockReturnValue(existing); + const writeSpy = vi.spyOn(fs, "writeFileSync").mockImplementation(() => undefined); + + const result = await writeWideAreaGatewayZone(makeZoneOpts()); + + expect(result).toEqual({ + zonePath: getWideAreaZonePath("openclaw.internal."), + changed: false, + }); + expect(writeSpy).not.toHaveBeenCalled(); + }); + + it("increments same-day serials when content changes", async () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-13T12:00:00.000Z")); + vi.spyOn(utils, "ensureDir").mockResolvedValue(undefined); + vi.spyOn(fs, "readFileSync").mockReturnValue( + renderWideAreaGatewayZoneText({ ...makeZoneOpts(), serial: 2026031304 }), + ); + const writeSpy = vi.spyOn(fs, "writeFileSync").mockImplementation(() => undefined); + + const result = await writeWideAreaGatewayZone( + makeZoneOpts({ gatewayTlsEnabled: true, gatewayTlsFingerprintSha256: "abc123" }), + ); + 
+ expect(result).toEqual({ + zonePath: getWideAreaZonePath("openclaw.internal."), + changed: true, + }); + expect(writeSpy).toHaveBeenCalledWith( + getWideAreaZonePath("openclaw.internal."), + expect.stringContaining("@ IN SOA ns1 hostmaster 2026031305 7200 3600 1209600 60"), + "utf-8", + ); + }); +}); diff --git a/src/infra/ws.test.ts b/src/infra/ws.test.ts new file mode 100644 index 00000000000..be7a98c2133 --- /dev/null +++ b/src/infra/ws.test.ts @@ -0,0 +1,19 @@ +import { Buffer } from "node:buffer"; +import { describe, expect, it } from "vitest"; +import { rawDataToString } from "./ws.js"; + +describe("rawDataToString", () => { + it("returns string input unchanged", () => { + expect(rawDataToString("hello")).toBe("hello"); + }); + + it("decodes Buffer, Buffer[] and ArrayBuffer inputs", () => { + expect(rawDataToString(Buffer.from("hello"))).toBe("hello"); + expect(rawDataToString([Buffer.from("he"), Buffer.from("llo")])).toBe("hello"); + expect(rawDataToString(Uint8Array.from([104, 101, 108, 108, 111]).buffer)).toBe("hello"); + }); + + it("falls back to string coercion for other raw data shapes", () => { + expect(rawDataToString(Uint8Array.from([1, 2, 3]) as never)).toBe("1,2,3"); + }); +}); diff --git a/src/line/bot-handlers.test.ts b/src/line/bot-handlers.test.ts index a2d012a32bb..7b3638f072b 100644 --- a/src/line/bot-handlers.test.ts +++ b/src/line/bot-handlers.test.ts @@ -678,7 +678,7 @@ describe("handleLineWebhookEvents", () => { it("skips group messages by default when requireMention is not configured", async () => { const processMessage = vi.fn(); const event = createTestMessageEvent({ - message: { id: "m-default-skip", type: "text", text: "hi there" }, + message: { id: "m-default-skip", type: "text", text: "hi there", quoteToken: "q-default" }, source: { type: "group", groupId: "group-default", userId: "user-default" }, webhookEventId: "evt-default-skip", }); @@ -702,7 +702,7 @@ describe("handleLineWebhookEvents", () => { 
import("../auto-reply/reply/history.js").HistoryEntry[] >(); const event = createTestMessageEvent({ - message: { id: "m-hist-1", type: "text", text: "hello history" }, + message: { id: "m-hist-1", type: "text", text: "hello history", quoteToken: "q-hist-1" }, timestamp: 1700000000000, source: { type: "group", groupId: "group-hist-1", userId: "user-hist" }, webhookEventId: "evt-hist-1", @@ -730,7 +730,7 @@ describe("handleLineWebhookEvents", () => { it("skips group messages without mention when requireMention is set", async () => { const processMessage = vi.fn(); const event = createTestMessageEvent({ - message: { id: "m-mention-1", type: "text", text: "hi there" }, + message: { id: "m-mention-1", type: "text", text: "hi there", quoteToken: "q-mention-1" }, source: { type: "group", groupId: "group-mention", userId: "user-mention" }, webhookEventId: "evt-mention-1", }); @@ -808,7 +808,7 @@ describe("handleLineWebhookEvents", () => { it("does not apply requireMention gating to DM messages", async () => { const processMessage = vi.fn(); const event = createTestMessageEvent({ - message: { id: "m-mention-dm", type: "text", text: "hi" }, + message: { id: "m-mention-dm", type: "text", text: "hi", quoteToken: "q-mention-dm" }, source: { type: "user", userId: "user-dm" }, webhookEventId: "evt-mention-dm", }); @@ -830,7 +830,12 @@ describe("handleLineWebhookEvents", () => { const processMessage = vi.fn(); // Image message -- LINE only carries mention metadata on text messages. 
const event = createTestMessageEvent({ - message: { id: "m-mention-img", type: "image", contentProvider: { type: "line" } }, + message: { + id: "m-mention-img", + type: "image", + contentProvider: { type: "line" }, + quoteToken: "q-mention-img", + }, source: { type: "group", groupId: "group-1", userId: "user-img" }, webhookEventId: "evt-mention-img", }); diff --git a/src/logging/subsystem.test.ts b/src/logging/subsystem.test.ts index 06f504f47de..15c5dbb9754 100644 --- a/src/logging/subsystem.test.ts +++ b/src/logging/subsystem.test.ts @@ -4,6 +4,17 @@ import { resetLogger, setLoggerOverride } from "./logger.js"; import { loggingState } from "./state.js"; import { createSubsystemLogger } from "./subsystem.js"; +function installConsoleMethodSpy(method: "warn" | "error") { + const spy = vi.fn(); + loggingState.rawConsole = { + log: vi.fn(), + info: vi.fn(), + warn: method === "warn" ? spy : vi.fn(), + error: method === "error" ? spy : vi.fn(), + }; + return spy; +} + afterEach(() => { setConsoleSubsystemFilter(null); setLoggerOverride(null); @@ -58,13 +69,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("suppresses probe warnings for embedded subsystems based on structured run metadata", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("agent/embedded").child("failover"); log.warn("embedded run failover decision", { @@ -77,13 +82,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("does not suppress probe errors for embedded subsystems", () => { setLoggerOverride({ level: "silent", consoleLevel: "error" }); - const error = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn: vi.fn(), - error, - }; + const error = installConsoleMethodSpy("error"); const log = 
createSubsystemLogger("agent/embedded").child("failover"); log.error("embedded run failover decision", { @@ -96,13 +95,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("suppresses probe warnings for model-fallback child subsystems based on structured run metadata", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("model-fallback").child("decision"); log.warn("model fallback decision", { @@ -115,13 +108,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("does not suppress probe errors for model-fallback child subsystems", () => { setLoggerOverride({ level: "silent", consoleLevel: "error" }); - const error = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn: vi.fn(), - error, - }; + const error = installConsoleMethodSpy("error"); const log = createSubsystemLogger("model-fallback").child("decision"); log.error("model fallback decision", { @@ -134,13 +121,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("still emits non-probe warnings for embedded subsystems", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("agent/embedded").child("auth-profiles"); log.warn("auth profile failure state updated", { @@ -153,13 +134,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("still emits non-probe model-fallback child warnings", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = 
createSubsystemLogger("model-fallback").child("decision"); log.warn("model fallback decision", { diff --git a/src/media/fetch.telegram-network.test.ts b/src/media/fetch.telegram-network.test.ts index c9989867f0b..cb4cb1ab5b1 100644 --- a/src/media/fetch.telegram-network.test.ts +++ b/src/media/fetch.telegram-network.test.ts @@ -2,47 +2,35 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { resolveTelegramTransport } from "../telegram/fetch.js"; import { fetchRemoteMedia } from "./fetch.js"; -const undiciFetch = vi.hoisted(() => vi.fn()); -const AgentCtor = vi.hoisted(() => - vi.fn(function MockAgent( - this: { options?: Record }, - options?: Record, - ) { - this.options = options; - }), -); -const EnvHttpProxyAgentCtor = vi.hoisted(() => - vi.fn(function MockEnvHttpProxyAgent( - this: { options?: Record }, - options?: Record, - ) { - this.options = options; - }), -); -const ProxyAgentCtor = vi.hoisted(() => - vi.fn(function MockProxyAgent( - this: { options?: Record | string }, - options?: Record | string, - ) { - this.options = options; - }), -); +const undiciMocks = vi.hoisted(() => { + const createDispatcherCtor = | string>() => + vi.fn(function MockDispatcher(this: { options?: T }, options?: T) { + this.options = options; + }); + + return { + fetch: vi.fn(), + agentCtor: createDispatcherCtor>(), + envHttpProxyAgentCtor: createDispatcherCtor>(), + proxyAgentCtor: createDispatcherCtor | string>(), + }; +}); vi.mock("undici", () => ({ - Agent: AgentCtor, - EnvHttpProxyAgent: EnvHttpProxyAgentCtor, - ProxyAgent: ProxyAgentCtor, - fetch: undiciFetch, + Agent: undiciMocks.agentCtor, + EnvHttpProxyAgent: undiciMocks.envHttpProxyAgentCtor, + ProxyAgent: undiciMocks.proxyAgentCtor, + fetch: undiciMocks.fetch, })); describe("fetchRemoteMedia telegram network policy", () => { type LookupFn = NonNullable[0]["lookupFn"]>; afterEach(() => { - undiciFetch.mockReset(); - AgentCtor.mockClear(); - EnvHttpProxyAgentCtor.mockClear(); - 
ProxyAgentCtor.mockClear(); + undiciMocks.fetch.mockReset(); + undiciMocks.agentCtor.mockClear(); + undiciMocks.envHttpProxyAgentCtor.mockClear(); + undiciMocks.proxyAgentCtor.mockClear(); vi.unstubAllEnvs(); }); @@ -50,7 +38,7 @@ describe("fetchRemoteMedia telegram network policy", () => { const lookupFn = vi.fn(async () => [ { address: "149.154.167.220", family: 4 }, ]) as unknown as LookupFn; - undiciFetch.mockResolvedValueOnce( + undiciMocks.fetch.mockResolvedValueOnce( new Response(new Uint8Array([0xff, 0xd8, 0xff, 0x00]), { status: 200, headers: { "content-type": "image/jpeg" }, @@ -76,7 +64,7 @@ describe("fetchRemoteMedia telegram network policy", () => { }, }); - const init = undiciFetch.mock.calls[0]?.[1] as + const init = undiciMocks.fetch.mock.calls[0]?.[1] as | (RequestInit & { dispatcher?: { options?: { @@ -100,7 +88,7 @@ describe("fetchRemoteMedia telegram network policy", () => { const lookupFn = vi.fn(async () => [ { address: "149.154.167.220", family: 4 }, ]) as unknown as LookupFn; - undiciFetch.mockResolvedValueOnce( + undiciMocks.fetch.mockResolvedValueOnce( new Response(new Uint8Array([0x25, 0x50, 0x44, 0x46]), { status: 200, headers: { "content-type": "application/pdf" }, @@ -126,7 +114,7 @@ describe("fetchRemoteMedia telegram network policy", () => { }, }); - const init = undiciFetch.mock.calls[0]?.[1] as + const init = undiciMocks.fetch.mock.calls[0]?.[1] as | (RequestInit & { dispatcher?: { options?: { @@ -137,6 +125,6 @@ describe("fetchRemoteMedia telegram network policy", () => { | undefined; expect(init?.dispatcher?.options?.uri).toBe("http://127.0.0.1:7890"); - expect(ProxyAgentCtor).toHaveBeenCalled(); + expect(undiciMocks.proxyAgentCtor).toHaveBeenCalled(); }); }); diff --git a/src/memory/embeddings-voyage.test.ts b/src/memory/embeddings-voyage.test.ts index 2f4bedc87c3..28314017a6f 100644 --- a/src/memory/embeddings-voyage.test.ts +++ b/src/memory/embeddings-voyage.test.ts @@ -1,8 +1,8 @@ import { afterEach, describe, expect, it, vi 
} from "vitest"; import * as authModule from "../agents/model-auth.js"; -import * as ssrf from "../infra/net/ssrf.js"; import { type FetchMock, withFetchPreconnect } from "../test-utils/fetch-mock.js"; import { createVoyageEmbeddingProvider, normalizeVoyageModel } from "./embeddings-voyage.js"; +import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js"; vi.mock("../agents/model-auth.js", async () => { const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js"); @@ -28,18 +28,6 @@ function mockVoyageApiKey() { }); } -function mockPublicPinnedHostname() { - return vi.spyOn(ssrf, "resolvePinnedHostnameWithPolicy").mockImplementation(async (hostname) => { - const normalized = hostname.trim().toLowerCase().replace(/\.$/, ""); - const addresses = ["93.184.216.34"]; - return { - hostname: normalized, - addresses, - lookup: ssrf.createPinnedLookup({ hostname: normalized, addresses }), - }; - }); -} - async function createDefaultVoyageProvider( model: string, fetchMock: ReturnType, diff --git a/src/memory/embeddings.test.ts b/src/memory/embeddings.test.ts index 206eb53326f..6f489ecc0c1 100644 --- a/src/memory/embeddings.test.ts +++ b/src/memory/embeddings.test.ts @@ -1,8 +1,8 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import * as authModule from "../agents/model-auth.js"; -import * as ssrf from "../infra/net/ssrf.js"; import { DEFAULT_GEMINI_EMBEDDING_MODEL } from "./embeddings-gemini.js"; import { createEmbeddingProvider, DEFAULT_LOCAL_MODEL } from "./embeddings.js"; +import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js"; vi.mock("../agents/model-auth.js", async () => { const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js"); @@ -33,18 +33,6 @@ function readFirstFetchRequest(fetchMock: { mock: { calls: unknown[][] } }) { return { url, init: init as RequestInit | undefined }; } -function mockPublicPinnedHostname() { - return vi.spyOn(ssrf, 
"resolvePinnedHostnameWithPolicy").mockImplementation(async (hostname) => { - const normalized = hostname.trim().toLowerCase().replace(/\.$/, ""); - const addresses = ["93.184.216.34"]; - return { - hostname: normalized, - addresses, - lookup: ssrf.createPinnedLookup({ hostname: normalized, addresses }), - }; - }); -} - afterEach(() => { vi.resetAllMocks(); vi.unstubAllGlobals(); diff --git a/src/memory/manager-embedding-ops.ts b/src/memory/manager-embedding-ops.ts index 49171d809cb..fe9b27acd32 100644 --- a/src/memory/manager-embedding-ops.ts +++ b/src/memory/manager-embedding-ops.ts @@ -548,12 +548,7 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { if (!this.isRetryableEmbeddingError(message) || attempt >= EMBEDDING_RETRY_MAX_ATTEMPTS) { throw err; } - const waitMs = Math.min( - EMBEDDING_RETRY_MAX_DELAY_MS, - Math.round(delayMs * (1 + Math.random() * 0.2)), - ); - log.warn(`memory embeddings rate limited; retrying in ${waitMs}ms`); - await new Promise((resolve) => setTimeout(resolve, waitMs)); + await this.waitForEmbeddingRetry(delayMs, "retrying"); delayMs *= 2; attempt += 1; } @@ -587,18 +582,22 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { if (!this.isRetryableEmbeddingError(message) || attempt >= EMBEDDING_RETRY_MAX_ATTEMPTS) { throw err; } - const waitMs = Math.min( - EMBEDDING_RETRY_MAX_DELAY_MS, - Math.round(delayMs * (1 + Math.random() * 0.2)), - ); - log.warn(`memory embeddings rate limited; retrying structured batch in ${waitMs}ms`); - await new Promise((resolve) => setTimeout(resolve, waitMs)); + await this.waitForEmbeddingRetry(delayMs, "retrying structured batch"); delayMs *= 2; attempt += 1; } } } + private async waitForEmbeddingRetry(delayMs: number, action: string): Promise { + const waitMs = Math.min( + EMBEDDING_RETRY_MAX_DELAY_MS, + Math.round(delayMs * (1 + Math.random() * 0.2)), + ); + log.warn(`memory embeddings rate limited; ${action} in ${waitMs}ms`); + await new 
Promise((resolve) => setTimeout(resolve, waitMs)); + } + private isRetryableEmbeddingError(message: string): boolean { return /(rate[_ ]limit|too many requests|429|resource has been exhausted|5\d\d|cloudflare|tokens per day)/i.test( message, diff --git a/src/memory/manager.embedding-batches.test.ts b/src/memory/manager.embedding-batches.test.ts index 1d81744f280..e2af1ed97f2 100644 --- a/src/memory/manager.embedding-batches.test.ts +++ b/src/memory/manager.embedding-batches.test.ts @@ -28,6 +28,17 @@ const fx = installEmbeddingManagerFixture({ const { embedBatch } = fx; describe("memory embedding batches", () => { + async function expectSyncWithFastTimeouts(manager: { + sync: (params: { reason: string }) => Promise; + }) { + const restoreFastTimeouts = useFastShortTimeouts(); + try { + await manager.sync({ reason: "test" }); + } finally { + restoreFastTimeouts(); + } + } + it("splits large files across multiple embedding batches", async () => { const memoryDir = fx.getMemoryDir(); const managerLarge = fx.getManagerLarge(); @@ -93,12 +104,7 @@ describe("memory embedding batches", () => { return texts.map(() => [0, 1, 0]); }); - const restoreFastTimeouts = useFastShortTimeouts(); - try { - await managerSmall.sync({ reason: "test" }); - } finally { - restoreFastTimeouts(); - } + await expectSyncWithFastTimeouts(managerSmall); expect(calls).toBe(3); }, 10000); @@ -119,12 +125,7 @@ describe("memory embedding batches", () => { return texts.map(() => [0, 1, 0]); }); - const restoreFastTimeouts = useFastShortTimeouts(); - try { - await managerSmall.sync({ reason: "test" }); - } finally { - restoreFastTimeouts(); - } + await expectSyncWithFastTimeouts(managerSmall); expect(calls).toBe(2); }, 10000); diff --git a/src/memory/manager.get-concurrency.test.ts b/src/memory/manager.get-concurrency.test.ts index 67b10768fc3..515a9d8226d 100644 --- a/src/memory/manager.get-concurrency.test.ts +++ b/src/memory/manager.get-concurrency.test.ts @@ -49,9 +49,8 @@ describe("memory manager 
cache hydration", () => { await fs.rm(workspaceDir, { recursive: true, force: true }); }); - it("deduplicates concurrent manager creation for the same cache key", async () => { - const indexPath = path.join(workspaceDir, "index.sqlite"); - const cfg = { + function createMemoryConcurrencyConfig(indexPath: string): OpenClawConfig { + return { agents: { defaults: { workspace: workspaceDir, @@ -65,6 +64,11 @@ describe("memory manager cache hydration", () => { list: [{ id: "main", default: true }], }, } as OpenClawConfig; + } + + it("deduplicates concurrent manager creation for the same cache key", async () => { + const indexPath = path.join(workspaceDir, "index.sqlite"); + const cfg = createMemoryConcurrencyConfig(indexPath); const results = await Promise.all( Array.from( @@ -85,20 +89,7 @@ describe("memory manager cache hydration", () => { it("drains in-flight manager creation during global teardown", async () => { const indexPath = path.join(workspaceDir, "index.sqlite"); - const cfg = { - agents: { - defaults: { - workspace: workspaceDir, - memorySearch: { - provider: "openai", - model: "mock-embed", - store: { path: indexPath, vector: { enabled: false } }, - sync: { watch: false, onSessionStart: false, onSearch: false }, - }, - }, - list: [{ id: "main", default: true }], - }, - } as OpenClawConfig; + const cfg = createMemoryConcurrencyConfig(indexPath); hoisted.providerDelayMs = 100; diff --git a/src/memory/manager.watcher-config.test.ts b/src/memory/manager.watcher-config.test.ts index 43682183676..9bb55a8a0bc 100644 --- a/src/memory/manager.watcher-config.test.ts +++ b/src/memory/manager.watcher-config.test.ts @@ -51,14 +51,18 @@ describe("memory watcher config", () => { } }); - it("watches markdown globs and ignores dependency directories", async () => { + async function setupWatcherWorkspace(seedFile: { name: string; contents: string }) { workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-watch-")); extraDir = path.join(workspaceDir, 
"extra"); await fs.mkdir(path.join(workspaceDir, "memory"), { recursive: true }); await fs.mkdir(extraDir, { recursive: true }); - await fs.writeFile(path.join(extraDir, "notes.md"), "hello"); + await fs.writeFile(path.join(extraDir, seedFile.name), seedFile.contents); + } - const cfg = { + function createWatcherConfig( + overrides?: Partial["defaults"]["memorySearch"]>, + ): OpenClawConfig { + return { agents: { defaults: { workspace: workspaceDir, @@ -69,18 +73,28 @@ describe("memory watcher config", () => { sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, query: { minScore: 0, hybrid: { enabled: false } }, extraPaths: [extraDir], + ...overrides, }, }, list: [{ id: "main", default: true }], }, } as OpenClawConfig; + } + async function expectWatcherManager(cfg: OpenClawConfig) { const result = await getMemorySearchManager({ cfg, agentId: "main" }); expect(result.manager).not.toBeNull(); if (!result.manager) { throw new Error("manager missing"); } manager = result.manager as unknown as MemoryIndexManager; + } + + it("watches markdown globs and ignores dependency directories", async () => { + await setupWatcherWorkspace({ name: "notes.md", contents: "hello" }); + const cfg = createWatcherConfig(); + + await expectWatcherManager(cfg); expect(watchMock).toHaveBeenCalledTimes(1); const [watchedPaths, options] = watchMock.mock.calls[0] as unknown as [ @@ -108,37 +122,15 @@ describe("memory watcher config", () => { }); it("watches multimodal extensions with case-insensitive globs", async () => { - workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-watch-")); - extraDir = path.join(workspaceDir, "extra"); - await fs.mkdir(path.join(workspaceDir, "memory"), { recursive: true }); - await fs.mkdir(extraDir, { recursive: true }); - await fs.writeFile(path.join(extraDir, "PHOTO.PNG"), "png"); + await setupWatcherWorkspace({ name: "PHOTO.PNG", contents: "png" }); + const cfg = createWatcherConfig({ + provider: "gemini", + 
model: "gemini-embedding-2-preview", + fallback: "none", + multimodal: { enabled: true, modalities: ["image", "audio"] }, + }); - const cfg = { - agents: { - defaults: { - workspace: workspaceDir, - memorySearch: { - provider: "gemini", - model: "gemini-embedding-2-preview", - fallback: "none", - store: { path: path.join(workspaceDir, "index.sqlite"), vector: { enabled: false } }, - sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, - query: { minScore: 0, hybrid: { enabled: false } }, - extraPaths: [extraDir], - multimodal: { enabled: true, modalities: ["image", "audio"] }, - }, - }, - list: [{ id: "main", default: true }], - }, - } as OpenClawConfig; - - const result = await getMemorySearchManager({ cfg, agentId: "main" }); - expect(result.manager).not.toBeNull(); - if (!result.manager) { - throw new Error("manager missing"); - } - manager = result.manager as unknown as MemoryIndexManager; + await expectWatcherManager(cfg); expect(watchMock).toHaveBeenCalledTimes(1); const [watchedPaths] = watchMock.mock.calls[0] as unknown as [ diff --git a/src/memory/test-helpers/ssrf.ts b/src/memory/test-helpers/ssrf.ts new file mode 100644 index 00000000000..c90ef0c4502 --- /dev/null +++ b/src/memory/test-helpers/ssrf.ts @@ -0,0 +1,14 @@ +import { vi } from "vitest"; +import * as ssrf from "../../infra/net/ssrf.js"; + +export function mockPublicPinnedHostname() { + return vi.spyOn(ssrf, "resolvePinnedHostnameWithPolicy").mockImplementation(async (hostname) => { + const normalized = hostname.trim().toLowerCase().replace(/\.$/, ""); + const addresses = ["93.184.216.34"]; + return { + hostname: normalized, + addresses, + lookup: ssrf.createPinnedLookup({ hostname: normalized, addresses }), + }; + }); +} diff --git a/src/node-host/invoke-system-run-plan.test.ts b/src/node-host/invoke-system-run-plan.test.ts index 442d2cad96b..8a957335422 100644 --- a/src/node-host/invoke-system-run-plan.test.ts +++ b/src/node-host/invoke-system-run-plan.test.ts @@ 
-68,20 +68,36 @@ function createScriptOperandFixture(tmp: string, fixture?: RuntimeFixture): Scri }; } -function withFakeRuntimeBin(params: { binName: string; run: () => T }): T { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), `openclaw-${params.binName}-bin-`)); - const binDir = path.join(tmp, "bin"); - fs.mkdirSync(binDir, { recursive: true }); +function writeFakeRuntimeBin(binDir: string, binName: string) { const runtimePath = - process.platform === "win32" - ? path.join(binDir, `${params.binName}.cmd`) - : path.join(binDir, params.binName); + process.platform === "win32" ? path.join(binDir, `${binName}.cmd`) : path.join(binDir, binName); const runtimeBody = process.platform === "win32" ? "@echo off\r\nexit /b 0\r\n" : "#!/bin/sh\nexit 0\n"; fs.writeFileSync(runtimePath, runtimeBody, { mode: 0o755 }); if (process.platform !== "win32") { fs.chmodSync(runtimePath, 0o755); } +} + +function withFakeRuntimeBin(params: { binName: string; run: () => T }): T { + return withFakeRuntimeBins({ + binNames: [params.binName], + tmpPrefix: `openclaw-${params.binName}-bin-`, + run: params.run, + }); +} + +function withFakeRuntimeBins(params: { + binNames: string[]; + tmpPrefix?: string; + run: () => T; +}): T { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), params.tmpPrefix ?? "openclaw-runtime-bins-")); + const binDir = path.join(tmp, "bin"); + fs.mkdirSync(binDir, { recursive: true }); + for (const binName of params.binNames) { + writeFakeRuntimeBin(binDir, binName); + } const oldPath = process.env.PATH; process.env.PATH = `${binDir}${path.delimiter}${oldPath ?? 
""}`; try { @@ -96,32 +112,44 @@ function withFakeRuntimeBin(params: { binName: string; run: () => T }): T { } } -function withFakeRuntimeBins(params: { binNames: string[]; run: () => T }): T { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-runtime-bins-")); - const binDir = path.join(tmp, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - for (const binName of params.binNames) { - const runtimePath = - process.platform === "win32" - ? path.join(binDir, `${binName}.cmd`) - : path.join(binDir, binName); - const runtimeBody = - process.platform === "win32" ? "@echo off\r\nexit /b 0\r\n" : "#!/bin/sh\nexit 0\n"; - fs.writeFileSync(runtimePath, runtimeBody, { mode: 0o755 }); - if (process.platform !== "win32") { - fs.chmodSync(runtimePath, 0o755); - } +function expectMutableFileOperandApprovalPlan(fixture: ScriptOperandFixture, cwd: string) { + const prepared = buildSystemRunApprovalPlan({ + command: fixture.command, + cwd, + }); + expect(prepared.ok).toBe(true); + if (!prepared.ok) { + throw new Error("unreachable"); } - const oldPath = process.env.PATH; - process.env.PATH = `${binDir}${path.delimiter}${oldPath ?? 
""}`; + expect(prepared.plan.mutableFileOperand).toEqual({ + argvIndex: fixture.expectedArgvIndex, + path: fs.realpathSync(fixture.scriptPath), + sha256: expect.any(String), + }); +} + +function writeScriptOperandFixture(fixture: ScriptOperandFixture) { + fs.writeFileSync(fixture.scriptPath, fixture.initialBody); + if (process.platform !== "win32") { + fs.chmodSync(fixture.scriptPath, 0o755); + } +} + +function withScriptOperandPlanFixture( + params: { + tmpPrefix: string; + fixture?: RuntimeFixture; + afterWrite?: (fixture: ScriptOperandFixture, tmp: string) => void; + }, + run: (fixture: ScriptOperandFixture, tmp: string) => T, +) { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), params.tmpPrefix)); + const fixture = createScriptOperandFixture(tmp, params.fixture); + writeScriptOperandFixture(fixture); + params.afterWrite?.(fixture, tmp); try { - return params.run(); + return run(fixture, tmp); } finally { - if (oldPath === undefined) { - delete process.env.PATH; - } else { - process.env.PATH = oldPath; - } fs.rmSync(tmp, { recursive: true, force: true }); } } @@ -432,61 +460,37 @@ describe("hardenApprovedExecutionPaths", () => { withFakeRuntimeBins({ binNames, run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-script-plan-")); - const fixture = createScriptOperandFixture(tmp, runtimeCase); - fs.writeFileSync(fixture.scriptPath, fixture.initialBody); - const executablePath = fixture.command[0]; - if (executablePath?.endsWith("pnpm.js")) { - const shimPath = path.join(tmp, "pnpm.js"); - fs.writeFileSync(shimPath, "#!/usr/bin/env node\nconsole.log('shim')\n"); - fs.chmodSync(shimPath, 0o755); - } - try { - const prepared = buildSystemRunApprovalPlan({ - command: fixture.command, - cwd: tmp, - }); - expect(prepared.ok).toBe(true); - if (!prepared.ok) { - throw new Error("unreachable"); - } - expect(prepared.plan.mutableFileOperand).toEqual({ - argvIndex: fixture.expectedArgvIndex, - path: fs.realpathSync(fixture.scriptPath), - 
sha256: expect.any(String), - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } + withScriptOperandPlanFixture( + { + tmpPrefix: "openclaw-approval-script-plan-", + fixture: runtimeCase, + afterWrite: (fixture, tmp) => { + const executablePath = fixture.command[0]; + if (executablePath?.endsWith("pnpm.js")) { + const shimPath = path.join(tmp, "pnpm.js"); + fs.writeFileSync(shimPath, "#!/usr/bin/env node\nconsole.log('shim')\n"); + fs.chmodSync(shimPath, 0o755); + } + }, + }, + (fixture, tmp) => { + expectMutableFileOperandApprovalPlan(fixture, tmp); + }, + ); }, }); }); } it("captures mutable shell script operands in approval plans", () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-script-plan-")); - const fixture = createScriptOperandFixture(tmp); - fs.writeFileSync(fixture.scriptPath, fixture.initialBody); - if (process.platform !== "win32") { - fs.chmodSync(fixture.scriptPath, 0o755); - } - try { - const prepared = buildSystemRunApprovalPlan({ - command: fixture.command, - cwd: tmp, - }); - expect(prepared.ok).toBe(true); - if (!prepared.ok) { - throw new Error("unreachable"); - } - expect(prepared.plan.mutableFileOperand).toEqual({ - argvIndex: fixture.expectedArgvIndex, - path: fs.realpathSync(fixture.scriptPath), - sha256: expect.any(String), - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } + withScriptOperandPlanFixture( + { + tmpPrefix: "openclaw-approval-script-plan-", + }, + (fixture, tmp) => { + expectMutableFileOperandApprovalPlan(fixture, tmp); + }, + ); }); it("rejects bun package script names that do not bind a concrete file", () => { diff --git a/src/plugins/discovery.test.ts b/src/plugins/discovery.test.ts index 400094c1fef..3b10146d28f 100644 --- a/src/plugins/discovery.test.ts +++ b/src/plugins/discovery.test.ts @@ -1,32 +1,22 @@ -import { randomUUID } from "node:crypto"; import fs from "node:fs"; -import os from "node:os"; import path from "node:path"; import { 
afterAll, afterEach, describe, expect, it } from "vitest"; import { clearPluginDiscoveryCache, discoverOpenClawPlugins } from "./discovery.js"; +import { + cleanupTrackedTempDirs, + makeTrackedTempDir, + mkdirSafeDir, +} from "./test-helpers/fs-fixtures.js"; const tempDirs: string[] = []; const previousUmask = process.umask(0o022); -function chmodSafeDir(dir: string) { - if (process.platform === "win32") { - return; - } - fs.chmodSync(dir, 0o755); -} - -function mkdirSafe(dir: string) { - fs.mkdirSync(dir, { recursive: true }); - chmodSafeDir(dir); -} - function makeTempDir() { - const dir = path.join(os.tmpdir(), `openclaw-plugins-${randomUUID()}`); - mkdirSafe(dir); - tempDirs.push(dir); - return dir; + return makeTrackedTempDir("openclaw-plugins", tempDirs); } +const mkdirSafe = mkdirSafeDir; + function buildDiscoveryEnv(stateDir: string): NodeJS.ProcessEnv { return { OPENCLAW_STATE_DIR: stateDir, @@ -66,13 +56,7 @@ function expectEscapesPackageDiagnostic(diagnostics: Array<{ message: string }>) afterEach(() => { clearPluginDiscoveryCache(); - for (const dir of tempDirs.splice(0)) { - try { - fs.rmSync(dir, { recursive: true, force: true }); - } catch { - // ignore cleanup failures - } - } + cleanupTrackedTempDirs(tempDirs); }); afterAll(() => { diff --git a/src/plugins/loader.test.ts b/src/plugins/loader.test.ts index d2ecfab18be..20d3fb22287 100644 --- a/src/plugins/loader.test.ts +++ b/src/plugins/loader.test.ts @@ -1488,7 +1488,7 @@ describe("loadOpenClawPlugins", () => { load: { paths: [plugin.file] }, }, }, - } as const; + }; loadOpenClawPlugins(options); loadOpenClawPlugins(options); diff --git a/src/plugins/manifest-registry.test.ts b/src/plugins/manifest-registry.test.ts index bbf65d14e41..a948344cba8 100644 --- a/src/plugins/manifest-registry.test.ts +++ b/src/plugins/manifest-registry.test.ts @@ -1,6 +1,4 @@ -import { randomUUID } from "node:crypto"; import fs from "node:fs"; -import os from "node:os"; import path from "node:path"; import { afterAll, 
afterEach, describe, expect, it } from "vitest"; import type { PluginCandidate } from "./discovery.js"; @@ -8,6 +6,7 @@ import { clearPluginManifestRegistryCache, loadPluginManifestRegistry, } from "./manifest-registry.js"; +import { cleanupTrackedTempDirs, makeTrackedTempDir } from "./test-helpers/fs-fixtures.js"; const tempDirs: string[] = []; const previousUmask = process.umask(0o022); @@ -25,10 +24,7 @@ function mkdirSafe(dir: string) { } function makeTempDir() { - const dir = path.join(os.tmpdir(), `openclaw-manifest-registry-${randomUUID()}`); - mkdirSafe(dir); - tempDirs.push(dir); - return dir; + return makeTrackedTempDir("openclaw-manifest-registry", tempDirs); } function writeManifest(dir: string, manifest: Record) { @@ -133,17 +129,7 @@ function expectUnsafeWorkspaceManifestRejected(params: { afterEach(() => { clearPluginManifestRegistryCache(); - while (tempDirs.length > 0) { - const dir = tempDirs.pop(); - if (!dir) { - break; - } - try { - fs.rmSync(dir, { recursive: true, force: true }); - } catch { - // ignore cleanup failures - } - } + cleanupTrackedTempDirs(tempDirs); }); afterAll(() => { diff --git a/src/plugins/source-display.test.ts b/src/plugins/source-display.test.ts index 3c85cca88b7..6d1b3da7719 100644 --- a/src/plugins/source-display.test.ts +++ b/src/plugins/source-display.test.ts @@ -3,83 +3,60 @@ import { describe, expect, it } from "vitest"; import { withEnv } from "../test-utils/env.js"; import { formatPluginSourceForTable, resolvePluginSourceRoots } from "./source-display.js"; +function createPluginSourceRoots() { + const stockRoot = path.resolve( + path.sep, + "opt", + "homebrew", + "lib", + "node_modules", + "openclaw", + "extensions", + ); + const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); + const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + return { + stock: stockRoot, + global: globalRoot, + workspace: workspaceRoot, + }; +} + 
describe("formatPluginSourceForTable", () => { it("shortens bundled plugin sources under the stock root", () => { - const stockRoot = path.resolve( - path.sep, - "opt", - "homebrew", - "lib", - "node_modules", - "openclaw", - "extensions", - ); - const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); - const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + const roots = createPluginSourceRoots(); const out = formatPluginSourceForTable( { origin: "bundled", - source: path.join(stockRoot, "bluebubbles", "index.ts"), - }, - { - stock: stockRoot, - global: globalRoot, - workspace: workspaceRoot, + source: path.join(roots.stock, "bluebubbles", "index.ts"), }, + roots, ); expect(out.value).toBe("stock:bluebubbles/index.ts"); expect(out.rootKey).toBe("stock"); }); it("shortens workspace plugin sources under the workspace root", () => { - const stockRoot = path.resolve( - path.sep, - "opt", - "homebrew", - "lib", - "node_modules", - "openclaw", - "extensions", - ); - const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); - const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + const roots = createPluginSourceRoots(); const out = formatPluginSourceForTable( { origin: "workspace", - source: path.join(workspaceRoot, "matrix", "index.ts"), - }, - { - stock: stockRoot, - global: globalRoot, - workspace: workspaceRoot, + source: path.join(roots.workspace, "matrix", "index.ts"), }, + roots, ); expect(out.value).toBe("workspace:matrix/index.ts"); expect(out.rootKey).toBe("workspace"); }); it("shortens global plugin sources under the global root", () => { - const stockRoot = path.resolve( - path.sep, - "opt", - "homebrew", - "lib", - "node_modules", - "openclaw", - "extensions", - ); - const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); - const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", 
"extensions"); + const roots = createPluginSourceRoots(); const out = formatPluginSourceForTable( { origin: "global", - source: path.join(globalRoot, "zalo", "index.js"), - }, - { - stock: stockRoot, - global: globalRoot, - workspace: workspaceRoot, + source: path.join(roots.global, "zalo", "index.js"), }, + roots, ); expect(out.value).toBe("global:zalo/index.js"); expect(out.rootKey).toBe("global"); diff --git a/src/plugins/test-helpers/fs-fixtures.ts b/src/plugins/test-helpers/fs-fixtures.ts new file mode 100644 index 00000000000..ec6b88fa4e4 --- /dev/null +++ b/src/plugins/test-helpers/fs-fixtures.ts @@ -0,0 +1,33 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; + +function chmodSafeDir(dir: string) { + if (process.platform === "win32") { + return; + } + fs.chmodSync(dir, 0o755); +} + +export function mkdirSafeDir(dir: string) { + fs.mkdirSync(dir, { recursive: true }); + chmodSafeDir(dir); +} + +export function makeTrackedTempDir(prefix: string, trackedDirs: string[]) { + const dir = path.join(os.tmpdir(), `${prefix}-${randomUUID()}`); + mkdirSafeDir(dir); + trackedDirs.push(dir); + return dir; +} + +export function cleanupTrackedTempDirs(trackedDirs: string[]) { + for (const dir of trackedDirs.splice(0)) { + try { + fs.rmSync(dir, { recursive: true, force: true }); + } catch { + // ignore cleanup failures + } + } +} diff --git a/src/plugins/wired-hooks-compaction.test.ts b/src/plugins/wired-hooks-compaction.test.ts index 5081922ec1d..1e3f0021e29 100644 --- a/src/plugins/wired-hooks-compaction.test.ts +++ b/src/plugins/wired-hooks-compaction.test.ts @@ -40,6 +40,28 @@ describe("compaction hook wiring", () => { vi.mocked(emitAgentEvent).mockClear(); }); + function createCompactionEndCtx(params: { + runId: string; + messages?: unknown[]; + compactionCount?: number; + withRetryHooks?: boolean; + }) { + return { + params: { runId: params.runId, session: { messages: params.messages ?? 
[] } }, + state: { compactionInFlight: true }, + log: { debug: vi.fn(), warn: vi.fn() }, + maybeResolveCompactionWait: vi.fn(), + incrementCompactionCount: vi.fn(), + getCompactionCount: () => params.compactionCount ?? 0, + ...(params.withRetryHooks + ? { + noteCompactionRetry: vi.fn(), + resetForCompactionRetry: vi.fn(), + } + : {}), + }; + } + it("calls runBeforeCompaction in handleAutoCompactionStart", () => { hookMocks.runner.hasHooks.mockReturnValue(true); @@ -86,14 +108,11 @@ describe("compaction hook wiring", () => { it("calls runAfterCompaction when willRetry is false", () => { hookMocks.runner.hasHooks.mockReturnValue(true); - const ctx = { - params: { runId: "r2", session: { messages: [1, 2] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 1, - }; + const ctx = createCompactionEndCtx({ + runId: "r2", + messages: [1, 2], + compactionCount: 1, + }); handleAutoCompactionEnd( ctx as never, @@ -126,16 +145,11 @@ describe("compaction hook wiring", () => { it("does not call runAfterCompaction when willRetry is true but still increments counter", () => { hookMocks.runner.hasHooks.mockReturnValue(true); - const ctx = { - params: { runId: "r3", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - noteCompactionRetry: vi.fn(), - resetForCompactionRetry: vi.fn(), - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 1, - }; + const ctx = createCompactionEndCtx({ + runId: "r3", + compactionCount: 1, + withRetryHooks: true, + }); handleAutoCompactionEnd( ctx as never, @@ -160,14 +174,7 @@ describe("compaction hook wiring", () => { }); it("does not increment counter when compaction was aborted", () => { - const ctx = { - params: { runId: "r3b", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { 
debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 0, - }; + const ctx = createCompactionEndCtx({ runId: "r3b" }); handleAutoCompactionEnd( ctx as never, @@ -183,14 +190,7 @@ describe("compaction hook wiring", () => { }); it("does not increment counter when compaction has result but was aborted", () => { - const ctx = { - params: { runId: "r3b2", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 0, - }; + const ctx = createCompactionEndCtx({ runId: "r3b2" }); handleAutoCompactionEnd( ctx as never, @@ -206,14 +206,7 @@ describe("compaction hook wiring", () => { }); it("does not increment counter when result is undefined", () => { - const ctx = { - params: { runId: "r3c", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 0, - }; + const ctx = createCompactionEndCtx({ runId: "r3c" }); handleAutoCompactionEnd( ctx as never, diff --git a/src/scripts/ci-changed-scope.test.ts b/src/scripts/ci-changed-scope.test.ts index 358dbfc472c..682cfb8d9b3 100644 --- a/src/scripts/ci-changed-scope.test.ts +++ b/src/scripts/ci-changed-scope.test.ts @@ -124,6 +124,16 @@ describe("detectChangedScope", () => { }); }); + it("runs platform lanes when the CI workflow changes", () => { + expect(detectChangedScope([".github/workflows/ci.yml"])).toEqual({ + runNode: true, + runMacos: true, + runAndroid: true, + runWindows: true, + runSkillsPython: true, + }); + }); + it("treats base and head as literal git args", () => { const markerPath = path.join( os.tmpdir(), diff --git a/src/secrets/runtime-web-tools.test.ts b/src/secrets/runtime-web-tools.test.ts index b4484095188..57e3e955066 
100644 --- a/src/secrets/runtime-web-tools.test.ts +++ b/src/secrets/runtime-web-tools.test.ts @@ -65,6 +65,24 @@ function readProviderKey(config: OpenClawConfig, provider: ProviderUnderTest): u return config.tools?.web?.search?.perplexity?.apiKey; } +function expectInactiveFirecrawlSecretRef(params: { + resolveSpy: ReturnType; + metadata: Awaited>["metadata"]; + context: Awaited>["context"]; +}) { + expect(params.resolveSpy).not.toHaveBeenCalled(); + expect(params.metadata.fetch.firecrawl.active).toBe(false); + expect(params.metadata.fetch.firecrawl.apiKeySource).toBe("secretRef"); + expect(params.context.warnings).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: "SECRETS_REF_IGNORED_INACTIVE_SURFACE", + path: "tools.web.fetch.firecrawl.apiKey", + }), + ]), + ); +} + describe("runtime web tools resolution", () => { afterEach(() => { vi.restoreAllMocks(); @@ -339,17 +357,7 @@ describe("runtime web tools resolution", () => { }), }); - expect(resolveSpy).not.toHaveBeenCalled(); - expect(metadata.fetch.firecrawl.active).toBe(false); - expect(metadata.fetch.firecrawl.apiKeySource).toBe("secretRef"); - expect(context.warnings).toEqual( - expect.arrayContaining([ - expect.objectContaining({ - code: "SECRETS_REF_IGNORED_INACTIVE_SURFACE", - path: "tools.web.fetch.firecrawl.apiKey", - }), - ]), - ); + expectInactiveFirecrawlSecretRef({ resolveSpy, metadata, context }); }); it("does not resolve Firecrawl SecretRef when Firecrawl is disabled", async () => { @@ -370,17 +378,7 @@ describe("runtime web tools resolution", () => { }), }); - expect(resolveSpy).not.toHaveBeenCalled(); - expect(metadata.fetch.firecrawl.active).toBe(false); - expect(metadata.fetch.firecrawl.apiKeySource).toBe("secretRef"); - expect(context.warnings).toEqual( - expect.arrayContaining([ - expect.objectContaining({ - code: "SECRETS_REF_IGNORED_INACTIVE_SURFACE", - path: "tools.web.fetch.firecrawl.apiKey", - }), - ]), - ); + expectInactiveFirecrawlSecretRef({ resolveSpy, metadata, 
context }); }); it("uses env fallback for unresolved Firecrawl SecretRef when active", async () => { diff --git a/src/secrets/runtime-web-tools.ts b/src/secrets/runtime-web-tools.ts index d888b36e8ab..883aac6bd02 100644 --- a/src/secrets/runtime-web-tools.ts +++ b/src/secrets/runtime-web-tools.ts @@ -471,39 +471,30 @@ export async function resolveRuntimeWebTools(params: { } } + const failUnresolvedSearchNoFallback = (unresolved: { path: string; reason: string }) => { + const diagnostic: RuntimeWebDiagnostic = { + code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", + message: unresolved.reason, + path: unresolved.path, + }; + diagnostics.push(diagnostic); + searchMetadata.diagnostics.push(diagnostic); + pushWarning(params.context, { + code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", + path: unresolved.path, + message: unresolved.reason, + }); + throw new Error(`[WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK] ${unresolved.reason}`); + }; + if (configuredProvider) { const unresolved = unresolvedWithoutFallback[0]; if (unresolved) { - const diagnostic: RuntimeWebDiagnostic = { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - message: unresolved.reason, - path: unresolved.path, - }; - diagnostics.push(diagnostic); - searchMetadata.diagnostics.push(diagnostic); - pushWarning(params.context, { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - path: unresolved.path, - message: unresolved.reason, - }); - throw new Error(`[WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK] ${unresolved.reason}`); + failUnresolvedSearchNoFallback(unresolved); } } else { if (!selectedProvider && unresolvedWithoutFallback.length > 0) { - const unresolved = unresolvedWithoutFallback[0]; - const diagnostic: RuntimeWebDiagnostic = { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - message: unresolved.reason, - path: unresolved.path, - }; - diagnostics.push(diagnostic); - searchMetadata.diagnostics.push(diagnostic); - pushWarning(params.context, { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - path: unresolved.path, 
- message: unresolved.reason, - }); - throw new Error(`[WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK] ${unresolved.reason}`); + failUnresolvedSearchNoFallback(unresolvedWithoutFallback[0]); } if (selectedProvider) { diff --git a/src/security/external-content.test.ts b/src/security/external-content.test.ts index b943bdacf72..bdf8af0de46 100644 --- a/src/security/external-content.test.ts +++ b/src/security/external-content.test.ts @@ -236,6 +236,27 @@ describe("external-content security", () => { expect(result).not.toContain(endMarker); } }); + + it.each([ + ["U+200B zero width space", "\u200B"], + ["U+200C zero width non-joiner", "\u200C"], + ["U+200D zero width joiner", "\u200D"], + ["U+2060 word joiner", "\u2060"], + ["U+FEFF zero width no-break space", "\uFEFF"], + ["U+00AD soft hyphen", "\u00AD"], + ])("sanitizes boundary markers split by %s", (_name, ignorable) => { + const startMarker = `<<>>`; + const endMarker = `<<>>`; + const result = wrapWebContent( + `Before ${startMarker} middle ${endMarker} after`, + "web_search", + ); + + expect(result).toContain("[[MARKER_SANITIZED]]"); + expect(result).toContain("[[END_MARKER_SANITIZED]]"); + expect(result).not.toContain(startMarker); + expect(result).not.toContain(endMarker); + }); }); describe("buildSafeExternalPrompt", () => { diff --git a/src/security/external-content.ts b/src/security/external-content.ts index ff571871b5e..1c8a3dfb1b9 100644 --- a/src/security/external-content.ts +++ b/src/security/external-content.ts @@ -151,10 +151,18 @@ function foldMarkerChar(char: string): string { return char; } +const MARKER_IGNORABLE_CHAR_RE = /\u200B|\u200C|\u200D|\u2060|\uFEFF|\u00AD/g; + function foldMarkerText(input: string): string { - return input.replace( - /[\uFF21-\uFF3A\uFF41-\uFF5A\uFF1C\uFF1E\u2329\u232A\u3008\u3009\u2039\u203A\u27E8\u27E9\uFE64\uFE65\u00AB\u00BB\u300A\u300B\u27EA\u27EB\u27EC\u27ED\u27EE\u27EF\u276C\u276D\u276E\u276F\u02C2\u02C3]/g, - (char) => foldMarkerChar(char), + return ( + input + // Strip 
invisible format characters that can split marker tokens without changing + // how downstream models interpret the apparent boundary text. + .replace(MARKER_IGNORABLE_CHAR_RE, "") + .replace( + /[\uFF21-\uFF3A\uFF41-\uFF5A\uFF1C\uFF1E\u2329\u232A\u3008\u3009\u2039\u203A\u27E8\u27E9\uFE64\uFE65\u00AB\u00BB\u300A\u300B\u27EA\u27EB\u27EC\u27ED\u27EE\u27EF\u276C\u276D\u276E\u276F\u02C2\u02C3]/g, + (char) => foldMarkerChar(char), + ) ); } diff --git a/src/shared/assistant-identity-values.test.ts b/src/shared/assistant-identity-values.test.ts new file mode 100644 index 00000000000..59792dc7fd9 --- /dev/null +++ b/src/shared/assistant-identity-values.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, it } from "vitest"; +import { coerceIdentityValue } from "./assistant-identity-values.js"; + +describe("shared/assistant-identity-values", () => { + it("returns undefined for missing or blank values", () => { + expect(coerceIdentityValue(undefined, 10)).toBeUndefined(); + expect(coerceIdentityValue(" ", 10)).toBeUndefined(); + }); + + it("trims values and preserves strings within the limit", () => { + expect(coerceIdentityValue(" OpenClaw ", 20)).toBe("OpenClaw"); + }); + + it("truncates overlong trimmed values at the exact limit", () => { + expect(coerceIdentityValue(" OpenClaw Assistant ", 8)).toBe("OpenClaw"); + }); +}); diff --git a/src/shared/chat-content.test.ts b/src/shared/chat-content.test.ts new file mode 100644 index 00000000000..d66ec203c72 --- /dev/null +++ b/src/shared/chat-content.test.ts @@ -0,0 +1,51 @@ +import { describe, expect, it } from "vitest"; +import { extractTextFromChatContent } from "./chat-content.js"; + +describe("shared/chat-content", () => { + it("normalizes plain string content", () => { + expect(extractTextFromChatContent(" hello\nworld ")).toBe("hello world"); + }); + + it("extracts only text blocks from array content", () => { + expect( + extractTextFromChatContent([ + { type: "text", text: " hello " }, + { type: "image_url", image_url: 
"https://example.com" }, + { type: "text", text: "world" }, + null, + ]), + ).toBe("hello world"); + }); + + it("applies sanitizers and custom join/normalization hooks", () => { + expect( + extractTextFromChatContent("Here [Tool Call: foo (ID: 1)] ok", { + sanitizeText: (text) => text.replace(/\[Tool Call:[^\]]+\]\s*/g, ""), + }), + ).toBe("Here ok"); + + expect( + extractTextFromChatContent( + [ + { type: "text", text: " hello " }, + { type: "text", text: "world " }, + ], + { + sanitizeText: (text) => text.trim(), + joinWith: "\n", + normalizeText: (text) => text.trim(), + }, + ), + ).toBe("hello\nworld"); + }); + + it("returns null for unsupported or empty content", () => { + expect(extractTextFromChatContent(123)).toBeNull(); + expect(extractTextFromChatContent([{ type: "text", text: " " }])).toBeNull(); + expect( + extractTextFromChatContent(" ", { + sanitizeText: () => "", + }), + ).toBeNull(); + }); +}); diff --git a/src/shared/chat-envelope.test.ts b/src/shared/chat-envelope.test.ts new file mode 100644 index 00000000000..d04bc014e44 --- /dev/null +++ b/src/shared/chat-envelope.test.ts @@ -0,0 +1,23 @@ +import { describe, expect, it } from "vitest"; +import { stripEnvelope, stripMessageIdHints } from "./chat-envelope.js"; + +describe("shared/chat-envelope", () => { + it("strips recognized channel and timestamp envelope prefixes only", () => { + expect(stripEnvelope("[WhatsApp 2026-01-24 13:36] hello")).toBe("hello"); + expect(stripEnvelope("[2026-01-24T13:36Z] hello")).toBe("hello"); + expect(stripEnvelope("[Custom Sender] hello")).toBe("[Custom Sender] hello"); + }); + + it("keeps non-envelope headers and preserves unmatched text", () => { + expect(stripEnvelope("hello")).toBe("hello"); + expect(stripEnvelope("[note] hello")).toBe("[note] hello"); + }); + + it("removes standalone message id hint lines but keeps inline mentions", () => { + expect(stripMessageIdHints("hello\n[message_id: abc123]")).toBe("hello"); + expect(stripMessageIdHints("hello\n 
[message_id: abc123] \nworld")).toBe("hello\nworld"); + expect(stripMessageIdHints("I typed [message_id: abc123] inline")).toBe( + "I typed [message_id: abc123] inline", + ); + }); +}); diff --git a/src/shared/chat-message-content.test.ts b/src/shared/chat-message-content.test.ts new file mode 100644 index 00000000000..db2d533cebd --- /dev/null +++ b/src/shared/chat-message-content.test.ts @@ -0,0 +1,19 @@ +import { describe, expect, it } from "vitest"; +import { extractFirstTextBlock } from "./chat-message-content.js"; + +describe("shared/chat-message-content", () => { + it("extracts the first text block from array content", () => { + expect( + extractFirstTextBlock({ + content: [{ text: "hello" }, { text: "world" }], + }), + ).toBe("hello"); + }); + + it("returns undefined for missing, empty, or non-text content", () => { + expect(extractFirstTextBlock(null)).toBeUndefined(); + expect(extractFirstTextBlock({ content: [] })).toBeUndefined(); + expect(extractFirstTextBlock({ content: [{ type: "image" }] })).toBeUndefined(); + expect(extractFirstTextBlock({ content: ["hello"] })).toBeUndefined(); + }); +}); diff --git a/src/shared/device-auth-store.test.ts b/src/shared/device-auth-store.test.ts new file mode 100644 index 00000000000..be070ee79cd --- /dev/null +++ b/src/shared/device-auth-store.test.ts @@ -0,0 +1,206 @@ +import { describe, expect, it, vi } from "vitest"; +import { + clearDeviceAuthTokenFromStore, + loadDeviceAuthTokenFromStore, + storeDeviceAuthTokenInStore, + type DeviceAuthStoreAdapter, +} from "./device-auth-store.js"; + +function createAdapter(initialStore: ReturnType = null) { + let store = initialStore; + const writes: unknown[] = []; + const adapter: DeviceAuthStoreAdapter = { + readStore: () => store, + writeStore: (next) => { + store = next; + writes.push(next); + }, + }; + return { adapter, writes, readStore: () => store }; +} + +describe("device-auth-store", () => { + it("loads only matching device ids and normalized roles", () => { + 
const { adapter } = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: { + operator: { + token: "secret", + role: "operator", + scopes: ["operator.read"], + updatedAtMs: 1, + }, + }, + }); + + expect( + loadDeviceAuthTokenFromStore({ + adapter, + deviceId: "device-1", + role: " operator ", + }), + ).toMatchObject({ token: "secret" }); + expect( + loadDeviceAuthTokenFromStore({ + adapter, + deviceId: "device-2", + role: "operator", + }), + ).toBeNull(); + }); + + it("stores normalized roles and deduped sorted scopes while preserving same-device tokens", () => { + vi.spyOn(Date, "now").mockReturnValue(1234); + const { adapter, writes, readStore } = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: ["node.invoke"], + updatedAtMs: 10, + }, + }, + }); + + const entry = storeDeviceAuthTokenInStore({ + adapter, + deviceId: "device-1", + role: " operator ", + token: "operator-token", + scopes: [" operator.write ", "operator.read", "operator.read", ""], + }); + + expect(entry).toEqual({ + token: "operator-token", + role: "operator", + scopes: ["operator.read", "operator.write"], + updatedAtMs: 1234, + }); + expect(writes).toHaveLength(1); + expect(readStore()).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: ["node.invoke"], + updatedAtMs: 10, + }, + operator: entry, + }, + }); + }); + + it("replaces stale stores from other devices instead of merging them", () => { + const { adapter, readStore } = createAdapter({ + version: 1, + deviceId: "device-2", + tokens: { + operator: { + token: "old-token", + role: "operator", + scopes: [], + updatedAtMs: 1, + }, + }, + }); + + storeDeviceAuthTokenInStore({ + adapter, + deviceId: "device-1", + role: "node", + token: "node-token", + }); + + expect(readStore()).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: 
[], + updatedAtMs: expect.any(Number), + }, + }, + }); + }); + + it("avoids writes when clearing missing roles or mismatched devices", () => { + const missingRole = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: {}, + }); + clearDeviceAuthTokenFromStore({ + adapter: missingRole.adapter, + deviceId: "device-1", + role: "operator", + }); + expect(missingRole.writes).toHaveLength(0); + + const otherDevice = createAdapter({ + version: 1, + deviceId: "device-2", + tokens: { + operator: { + token: "secret", + role: "operator", + scopes: [], + updatedAtMs: 1, + }, + }, + }); + clearDeviceAuthTokenFromStore({ + adapter: otherDevice.adapter, + deviceId: "device-1", + role: "operator", + }); + expect(otherDevice.writes).toHaveLength(0); + }); + + it("removes normalized roles when clearing stored tokens", () => { + const { adapter, writes, readStore } = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: { + operator: { + token: "secret", + role: "operator", + scopes: ["operator.read"], + updatedAtMs: 1, + }, + node: { + token: "node-token", + role: "node", + scopes: [], + updatedAtMs: 2, + }, + }, + }); + + clearDeviceAuthTokenFromStore({ + adapter, + deviceId: "device-1", + role: " operator ", + }); + + expect(writes).toHaveLength(1); + expect(readStore()).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: [], + updatedAtMs: 2, + }, + }, + }); + }); +}); diff --git a/src/shared/device-auth.test.ts b/src/shared/device-auth.test.ts new file mode 100644 index 00000000000..4675f866e54 --- /dev/null +++ b/src/shared/device-auth.test.ts @@ -0,0 +1,16 @@ +import { describe, expect, it } from "vitest"; +import { normalizeDeviceAuthRole, normalizeDeviceAuthScopes } from "./device-auth.js"; + +describe("shared/device-auth", () => { + it("trims device auth roles without further rewriting", () => { + expect(normalizeDeviceAuthRole(" operator ")).toBe("operator"); + 
expect(normalizeDeviceAuthRole("")).toBe(""); + }); + + it("dedupes, trims, sorts, and filters auth scopes", () => { + expect( + normalizeDeviceAuthScopes([" node.invoke ", "operator.read", "", "node.invoke", "a.scope"]), + ).toEqual(["a.scope", "node.invoke", "operator.read"]); + expect(normalizeDeviceAuthScopes(undefined)).toEqual([]); + }); +}); diff --git a/src/shared/entry-metadata.test.ts b/src/shared/entry-metadata.test.ts new file mode 100644 index 00000000000..64afb728a14 --- /dev/null +++ b/src/shared/entry-metadata.test.ts @@ -0,0 +1,33 @@ +import { describe, expect, it } from "vitest"; +import { resolveEmojiAndHomepage } from "./entry-metadata.js"; + +describe("shared/entry-metadata", () => { + it("prefers metadata emoji and homepage when present", () => { + expect( + resolveEmojiAndHomepage({ + metadata: { emoji: "🦀", homepage: " https://openclaw.ai " }, + frontmatter: { emoji: "🙂", homepage: "https://example.com" }, + }), + ).toEqual({ + emoji: "🦀", + homepage: "https://openclaw.ai", + }); + }); + + it("falls back through frontmatter homepage aliases and drops blanks", () => { + expect( + resolveEmojiAndHomepage({ + frontmatter: { emoji: "🙂", website: " https://docs.openclaw.ai " }, + }), + ).toEqual({ + emoji: "🙂", + homepage: "https://docs.openclaw.ai", + }); + expect( + resolveEmojiAndHomepage({ + metadata: { homepage: " " }, + frontmatter: { url: " " }, + }), + ).toEqual({}); + }); +}); diff --git a/src/shared/entry-status.test.ts b/src/shared/entry-status.test.ts new file mode 100644 index 00000000000..88913913011 --- /dev/null +++ b/src/shared/entry-status.test.ts @@ -0,0 +1,132 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { + evaluateEntryMetadataRequirements, + evaluateEntryMetadataRequirementsForCurrentPlatform, + evaluateEntryRequirementsForCurrentPlatform, +} from "./entry-status.js"; + +const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + +function setPlatform(platform: 
NodeJS.Platform): void { + Object.defineProperty(process, "platform", { + value: platform, + configurable: true, + }); +} + +afterEach(() => { + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } +}); + +describe("shared/entry-status", () => { + it("combines metadata presentation fields with evaluated requirements", () => { + const result = evaluateEntryMetadataRequirements({ + always: false, + metadata: { + emoji: "🦀", + homepage: "https://openclaw.ai", + requires: { + bins: ["bun"], + anyBins: ["ffmpeg", "sox"], + env: ["OPENCLAW_TOKEN"], + config: ["gateway.bind"], + }, + os: ["darwin"], + }, + frontmatter: { + emoji: "🙂", + homepage: "https://docs.openclaw.ai", + }, + hasLocalBin: (bin) => bin === "bun", + localPlatform: "linux", + remote: { + hasAnyBin: (bins) => bins.includes("sox"), + }, + isEnvSatisfied: () => false, + isConfigSatisfied: (path) => path === "gateway.bind", + }); + + expect(result).toEqual({ + emoji: "🦀", + homepage: "https://openclaw.ai", + required: { + bins: ["bun"], + anyBins: ["ffmpeg", "sox"], + env: ["OPENCLAW_TOKEN"], + config: ["gateway.bind"], + os: ["darwin"], + }, + missing: { + bins: [], + anyBins: [], + env: ["OPENCLAW_TOKEN"], + config: [], + os: ["darwin"], + }, + requirementsSatisfied: false, + configChecks: [{ path: "gateway.bind", satisfied: true }], + }); + }); + + it("uses process.platform in the current-platform wrapper", () => { + setPlatform("darwin"); + + const result = evaluateEntryMetadataRequirementsForCurrentPlatform({ + always: false, + metadata: { + os: ["darwin"], + }, + hasLocalBin: () => false, + isEnvSatisfied: () => true, + isConfigSatisfied: () => true, + }); + + expect(result.requirementsSatisfied).toBe(true); + expect(result.missing.os).toEqual([]); + }); + + it("pulls metadata and frontmatter from entry objects in the entry wrapper", () => { + setPlatform("linux"); + + const result = evaluateEntryRequirementsForCurrentPlatform({ + always: 
true, + entry: { + metadata: { + requires: { + bins: ["missing-bin"], + }, + }, + frontmatter: { + website: " https://docs.openclaw.ai ", + emoji: "🙂", + }, + }, + hasLocalBin: () => false, + isEnvSatisfied: () => false, + isConfigSatisfied: () => false, + }); + + expect(result).toEqual({ + emoji: "🙂", + homepage: "https://docs.openclaw.ai", + required: { + bins: ["missing-bin"], + anyBins: [], + env: [], + config: [], + os: [], + }, + missing: { + bins: [], + anyBins: [], + env: [], + config: [], + os: [], + }, + requirementsSatisfied: true, + configChecks: [], + }); + }); +}); diff --git a/src/shared/frontmatter.test.ts b/src/shared/frontmatter.test.ts new file mode 100644 index 00000000000..606114b9f56 --- /dev/null +++ b/src/shared/frontmatter.test.ts @@ -0,0 +1,135 @@ +import { describe, expect, it, test } from "vitest"; +import { + applyOpenClawManifestInstallCommonFields, + getFrontmatterString, + normalizeStringList, + parseFrontmatterBool, + parseOpenClawManifestInstallBase, + resolveOpenClawManifestBlock, + resolveOpenClawManifestInstall, + resolveOpenClawManifestOs, + resolveOpenClawManifestRequires, +} from "./frontmatter.js"; + +describe("shared/frontmatter", () => { + test("normalizeStringList handles strings, arrays, and non-list values", () => { + expect(normalizeStringList("a, b,,c")).toEqual(["a", "b", "c"]); + expect(normalizeStringList([" a ", "", "b", 42])).toEqual(["a", "b", "42"]); + expect(normalizeStringList(null)).toEqual([]); + }); + + test("getFrontmatterString extracts strings only", () => { + expect(getFrontmatterString({ a: "b" }, "a")).toBe("b"); + expect(getFrontmatterString({ a: 1 }, "a")).toBeUndefined(); + }); + + test("parseFrontmatterBool respects explicit values and fallback", () => { + expect(parseFrontmatterBool("true", false)).toBe(true); + expect(parseFrontmatterBool("false", true)).toBe(false); + expect(parseFrontmatterBool(undefined, true)).toBe(true); + }); + + test("resolveOpenClawManifestBlock reads current manifest 
keys and custom metadata fields", () => { + expect( + resolveOpenClawManifestBlock({ + frontmatter: { + metadata: "{ openclaw: { foo: 1, bar: 'baz' } }", + }, + }), + ).toEqual({ foo: 1, bar: "baz" }); + + expect( + resolveOpenClawManifestBlock({ + frontmatter: { + pluginMeta: "{ openclaw: { foo: 2 } }", + }, + key: "pluginMeta", + }), + ).toEqual({ foo: 2 }); + }); + + test("resolveOpenClawManifestBlock returns undefined for invalid input", () => { + expect(resolveOpenClawManifestBlock({ frontmatter: {} })).toBeUndefined(); + expect( + resolveOpenClawManifestBlock({ frontmatter: { metadata: "not-json5" } }), + ).toBeUndefined(); + expect( + resolveOpenClawManifestBlock({ frontmatter: { metadata: "{ nope: { a: 1 } }" } }), + ).toBeUndefined(); + }); + + it("normalizes manifest requirement and os lists", () => { + expect( + resolveOpenClawManifestRequires({ + requires: { + bins: "bun, node", + anyBins: [" ffmpeg ", ""], + env: ["OPENCLAW_TOKEN", " OPENCLAW_URL "], + config: null, + }, + }), + ).toEqual({ + bins: ["bun", "node"], + anyBins: ["ffmpeg"], + env: ["OPENCLAW_TOKEN", "OPENCLAW_URL"], + config: [], + }); + expect(resolveOpenClawManifestRequires({})).toBeUndefined(); + expect(resolveOpenClawManifestOs({ os: [" darwin ", "linux", ""] })).toEqual([ + "darwin", + "linux", + ]); + }); + + it("parses and applies install common fields", () => { + const parsed = parseOpenClawManifestInstallBase( + { + type: " Brew ", + id: "brew.git", + label: "Git", + bins: [" git ", "git"], + }, + ["brew", "npm"], + ); + + expect(parsed).toEqual({ + raw: { + type: " Brew ", + id: "brew.git", + label: "Git", + bins: [" git ", "git"], + }, + kind: "brew", + id: "brew.git", + label: "Git", + bins: ["git", "git"], + }); + expect(parseOpenClawManifestInstallBase({ kind: "bad" }, ["brew"])).toBeUndefined(); + expect(applyOpenClawManifestInstallCommonFields({ extra: true }, parsed!)).toEqual({ + extra: true, + id: "brew.git", + label: "Git", + bins: ["git", "git"], + }); + }); + + 
it("maps install entries through the parser and filters rejected specs", () => { + expect( + resolveOpenClawManifestInstall( + { + install: [{ id: "keep" }, { id: "drop" }, "bad"], + }, + (entry) => { + if ( + typeof entry === "object" && + entry !== null && + (entry as { id?: string }).id === "keep" + ) { + return { id: "keep" }; + } + return undefined; + }, + ), + ).toEqual([{ id: "keep" }]); + }); +}); diff --git a/src/shared/gateway-bind-url.test.ts b/src/shared/gateway-bind-url.test.ts new file mode 100644 index 00000000000..23dd855c4e6 --- /dev/null +++ b/src/shared/gateway-bind-url.test.ts @@ -0,0 +1,94 @@ +import { describe, expect, it, vi } from "vitest"; +import { resolveGatewayBindUrl } from "./gateway-bind-url.js"; + +describe("shared/gateway-bind-url", () => { + it("returns null for loopback/default binds", () => { + expect( + resolveGatewayBindUrl({ + scheme: "ws", + port: 18789, + pickTailnetHost: () => "100.64.0.1", + pickLanHost: () => "192.168.1.2", + }), + ).toBeNull(); + }); + + it("resolves custom binds only when custom host is present after trimming", () => { + expect( + resolveGatewayBindUrl({ + bind: "custom", + customBindHost: " gateway.local ", + scheme: "wss", + port: 443, + pickTailnetHost: vi.fn(), + pickLanHost: vi.fn(), + }), + ).toEqual({ + url: "wss://gateway.local:443", + source: "gateway.bind=custom", + }); + + expect( + resolveGatewayBindUrl({ + bind: "custom", + customBindHost: " ", + scheme: "ws", + port: 18789, + pickTailnetHost: vi.fn(), + pickLanHost: vi.fn(), + }), + ).toEqual({ + error: "gateway.bind=custom requires gateway.customBindHost.", + }); + }); + + it("resolves tailnet and lan binds or returns clear errors", () => { + expect( + resolveGatewayBindUrl({ + bind: "tailnet", + scheme: "ws", + port: 18789, + pickTailnetHost: () => "100.64.0.1", + pickLanHost: vi.fn(), + }), + ).toEqual({ + url: "ws://100.64.0.1:18789", + source: "gateway.bind=tailnet", + }); + expect( + resolveGatewayBindUrl({ + bind: "tailnet", + 
scheme: "ws", + port: 18789, + pickTailnetHost: () => null, + pickLanHost: vi.fn(), + }), + ).toEqual({ + error: "gateway.bind=tailnet set, but no tailnet IP was found.", + }); + + expect( + resolveGatewayBindUrl({ + bind: "lan", + scheme: "wss", + port: 8443, + pickTailnetHost: vi.fn(), + pickLanHost: () => "192.168.1.2", + }), + ).toEqual({ + url: "wss://192.168.1.2:8443", + source: "gateway.bind=lan", + }); + expect( + resolveGatewayBindUrl({ + bind: "lan", + scheme: "ws", + port: 18789, + pickTailnetHost: vi.fn(), + pickLanHost: () => null, + }), + ).toEqual({ + error: "gateway.bind=lan set, but no private LAN IP was found.", + }); + }); +}); diff --git a/src/shared/model-param-b.test.ts b/src/shared/model-param-b.test.ts new file mode 100644 index 00000000000..21c3dce79c8 --- /dev/null +++ b/src/shared/model-param-b.test.ts @@ -0,0 +1,14 @@ +import { describe, expect, it } from "vitest"; +import { inferParamBFromIdOrName } from "./model-param-b.js"; + +describe("shared/model-param-b", () => { + it("extracts the largest valid b-sized parameter token", () => { + expect(inferParamBFromIdOrName("llama-8b mixtral-22b")).toBe(22); + expect(inferParamBFromIdOrName("Qwen 0.5B Instruct")).toBe(0.5); + }); + + it("ignores malformed, zero, and non-delimited matches", () => { + expect(inferParamBFromIdOrName("abc70beta 0b x70b2")).toBeNull(); + expect(inferParamBFromIdOrName("model 0b")).toBeNull(); + }); +}); diff --git a/src/shared/net/ipv4.test.ts b/src/shared/net/ipv4.test.ts new file mode 100644 index 00000000000..a6d7ab2f84e --- /dev/null +++ b/src/shared/net/ipv4.test.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from "vitest"; +import { validateDottedDecimalIPv4Input, validateIPv4AddressInput } from "./ipv4.js"; + +describe("shared/net/ipv4", () => { + it("requires a value for custom bind mode", () => { + expect(validateDottedDecimalIPv4Input(undefined)).toBe( + "IP address is required for custom bind mode", + ); + 
expect(validateDottedDecimalIPv4Input("")).toBe("IP address is required for custom bind mode"); + }); + + it("accepts canonical dotted-decimal ipv4 only", () => { + expect(validateDottedDecimalIPv4Input("192.168.1.100")).toBeUndefined(); + expect(validateDottedDecimalIPv4Input("0177.0.0.1")).toBe( + "Invalid IPv4 address (e.g., 192.168.1.100)", + ); + expect(validateDottedDecimalIPv4Input("example.com")).toBe( + "Invalid IPv4 address (e.g., 192.168.1.100)", + ); + }); + + it("keeps the backward-compatible alias wired to the same validation", () => { + expect(validateIPv4AddressInput("192.168.1.100")).toBeUndefined(); + expect(validateIPv4AddressInput("bad-ip")).toBe("Invalid IPv4 address (e.g., 192.168.1.100)"); + }); +}); diff --git a/src/shared/node-match.test.ts b/src/shared/node-match.test.ts new file mode 100644 index 00000000000..2ddc3663d3f --- /dev/null +++ b/src/shared/node-match.test.ts @@ -0,0 +1,59 @@ +import { describe, expect, it } from "vitest"; +import { normalizeNodeKey, resolveNodeIdFromCandidates, resolveNodeMatches } from "./node-match.js"; + +describe("shared/node-match", () => { + it("normalizes node keys by lowercasing and collapsing separators", () => { + expect(normalizeNodeKey(" Mac Studio! 
")).toBe("mac-studio"); + expect(normalizeNodeKey("---PI__Node---")).toBe("pi-node"); + }); + + it("matches candidates by node id, remote ip, normalized name, and long prefix", () => { + const nodes = [ + { nodeId: "mac-abcdef", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, + { nodeId: "pi-456789", displayName: "Raspberry Pi", remoteIp: "100.0.0.2" }, + ]; + + expect(resolveNodeMatches(nodes, "mac-abcdef")).toEqual([nodes[0]]); + expect(resolveNodeMatches(nodes, "100.0.0.2")).toEqual([nodes[1]]); + expect(resolveNodeMatches(nodes, "mac studio")).toEqual([nodes[0]]); + expect(resolveNodeMatches(nodes, "pi-456")).toEqual([nodes[1]]); + expect(resolveNodeMatches(nodes, "pi")).toEqual([]); + expect(resolveNodeMatches(nodes, " ")).toEqual([]); + }); + + it("resolves unique matches and prefers a unique connected node", () => { + expect( + resolveNodeIdFromCandidates( + [ + { nodeId: "ios-old", displayName: "iPhone", connected: false }, + { nodeId: "ios-live", displayName: "iPhone", connected: true }, + ], + "iphone", + ), + ).toBe("ios-live"); + }); + + it("throws clear unknown and ambiguous node errors", () => { + expect(() => + resolveNodeIdFromCandidates( + [ + { nodeId: "mac-123", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, + { nodeId: "pi-456" }, + ], + "nope", + ), + ).toThrow(/unknown node: nope.*known: Mac Studio, pi-456/); + + expect(() => + resolveNodeIdFromCandidates( + [ + { nodeId: "ios-a", displayName: "iPhone", connected: true }, + { nodeId: "ios-b", displayName: "iPhone", connected: true }, + ], + "iphone", + ), + ).toThrow(/ambiguous node: iphone.*matches: iPhone, iPhone/); + + expect(() => resolveNodeIdFromCandidates([], "")).toThrow(/node required/); + }); +}); diff --git a/src/shared/node-resolve.test.ts b/src/shared/node-resolve.test.ts new file mode 100644 index 00000000000..4af0c5a8a9b --- /dev/null +++ b/src/shared/node-resolve.test.ts @@ -0,0 +1,42 @@ +import { describe, expect, it } from "vitest"; +import { resolveNodeFromNodeList, 
resolveNodeIdFromNodeList } from "./node-resolve.js"; + +describe("shared/node-resolve", () => { + const nodes = [ + { nodeId: "mac-123", displayName: "Mac Studio", connected: true }, + { nodeId: "pi-456", displayName: "Raspberry Pi", connected: false }, + ]; + + it("resolves node ids through candidate matching", () => { + expect(resolveNodeIdFromNodeList(nodes, "Mac Studio")).toBe("mac-123"); + }); + + it("supports optional default-node selection when query is blank", () => { + expect( + resolveNodeIdFromNodeList(nodes, " ", { + allowDefault: true, + pickDefaultNode: (entries) => entries.find((entry) => entry.connected) ?? null, + }), + ).toBe("mac-123"); + }); + + it("still throws when default selection is disabled or returns null", () => { + expect(() => resolveNodeIdFromNodeList(nodes, " ")).toThrow(/node required/); + expect(() => + resolveNodeIdFromNodeList(nodes, "", { + allowDefault: true, + pickDefaultNode: () => null, + }), + ).toThrow(/node required/); + }); + + it("returns the full node object and falls back to a synthetic entry when needed", () => { + expect(resolveNodeFromNodeList(nodes, "pi-456")).toEqual(nodes[1]); + expect( + resolveNodeFromNodeList([], "", { + allowDefault: true, + pickDefaultNode: () => ({ nodeId: "synthetic-1" }), + }), + ).toEqual({ nodeId: "synthetic-1" }); + }); +}); diff --git a/src/shared/process-scoped-map.test.ts b/src/shared/process-scoped-map.test.ts new file mode 100644 index 00000000000..dd4e9d492c8 --- /dev/null +++ b/src/shared/process-scoped-map.test.ts @@ -0,0 +1,29 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { resolveProcessScopedMap } from "./process-scoped-map.js"; + +const MAP_KEY = Symbol("process-scoped-map:test"); +const OTHER_MAP_KEY = Symbol("process-scoped-map:other"); + +afterEach(() => { + delete (process as Record)[MAP_KEY]; + delete (process as Record)[OTHER_MAP_KEY]; +}); + +describe("shared/process-scoped-map", () => { + it("reuses the same map for the same symbol", () => 
{ + const first = resolveProcessScopedMap(MAP_KEY); + first.set("a", 1); + + const second = resolveProcessScopedMap(MAP_KEY); + + expect(second).toBe(first); + expect(second.get("a")).toBe(1); + }); + + it("keeps distinct maps for distinct symbols", () => { + const first = resolveProcessScopedMap(MAP_KEY); + const second = resolveProcessScopedMap(OTHER_MAP_KEY); + + expect(second).not.toBe(first); + }); +}); diff --git a/src/shared/shared-misc.test.ts b/src/shared/shared-misc.test.ts deleted file mode 100644 index 8a729109513..00000000000 --- a/src/shared/shared-misc.test.ts +++ /dev/null @@ -1,151 +0,0 @@ -import { describe, expect, it, test } from "vitest"; -import { extractTextFromChatContent } from "./chat-content.js"; -import { - getFrontmatterString, - normalizeStringList, - parseFrontmatterBool, - resolveOpenClawManifestBlock, -} from "./frontmatter.js"; -import { resolveNodeIdFromCandidates } from "./node-match.js"; - -describe("extractTextFromChatContent", () => { - it("normalizes string content", () => { - expect(extractTextFromChatContent(" hello\nworld ")).toBe("hello world"); - }); - - it("extracts text blocks from array content", () => { - expect( - extractTextFromChatContent([ - { type: "text", text: " hello " }, - { type: "image_url", image_url: "https://example.com" }, - { type: "text", text: "world" }, - ]), - ).toBe("hello world"); - }); - - it("applies sanitizer when provided", () => { - expect( - extractTextFromChatContent("Here [Tool Call: foo (ID: 1)] ok", { - sanitizeText: (text) => text.replace(/\[Tool Call:[^\]]+\]\s*/g, ""), - }), - ).toBe("Here ok"); - }); - - it("supports custom join and normalization", () => { - expect( - extractTextFromChatContent( - [ - { type: "text", text: " hello " }, - { type: "text", text: "world " }, - ], - { - sanitizeText: (text) => text.trim(), - joinWith: "\n", - normalizeText: (text) => text.trim(), - }, - ), - ).toBe("hello\nworld"); - }); -}); - -describe("shared/frontmatter", () => { - 
test("normalizeStringList handles strings and arrays", () => { - expect(normalizeStringList("a, b,,c")).toEqual(["a", "b", "c"]); - expect(normalizeStringList([" a ", "", "b"])).toEqual(["a", "b"]); - expect(normalizeStringList(null)).toEqual([]); - }); - - test("getFrontmatterString extracts strings only", () => { - expect(getFrontmatterString({ a: "b" }, "a")).toBe("b"); - expect(getFrontmatterString({ a: 1 }, "a")).toBeUndefined(); - }); - - test("parseFrontmatterBool respects fallback", () => { - expect(parseFrontmatterBool("true", false)).toBe(true); - expect(parseFrontmatterBool("false", true)).toBe(false); - expect(parseFrontmatterBool(undefined, true)).toBe(true); - }); - - test("resolveOpenClawManifestBlock parses JSON5 metadata and picks openclaw block", () => { - const frontmatter = { - metadata: "{ openclaw: { foo: 1, bar: 'baz' } }", - }; - expect(resolveOpenClawManifestBlock({ frontmatter })).toEqual({ foo: 1, bar: "baz" }); - }); - - test("resolveOpenClawManifestBlock returns undefined for invalid input", () => { - expect(resolveOpenClawManifestBlock({ frontmatter: {} })).toBeUndefined(); - expect( - resolveOpenClawManifestBlock({ frontmatter: { metadata: "not-json5" } }), - ).toBeUndefined(); - expect( - resolveOpenClawManifestBlock({ frontmatter: { metadata: "{ nope: { a: 1 } }" } }), - ).toBeUndefined(); - }); -}); - -describe("resolveNodeIdFromCandidates", () => { - it("matches nodeId", () => { - expect( - resolveNodeIdFromCandidates( - [ - { nodeId: "mac-123", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, - { nodeId: "pi-456", displayName: "Raspberry Pi", remoteIp: "100.0.0.2" }, - ], - "pi-456", - ), - ).toBe("pi-456"); - }); - - it("matches displayName using normalization", () => { - expect( - resolveNodeIdFromCandidates([{ nodeId: "mac-123", displayName: "Mac Studio" }], "mac studio"), - ).toBe("mac-123"); - }); - - it("matches nodeId prefix (>=6 chars)", () => { - expect(resolveNodeIdFromCandidates([{ nodeId: "mac-abcdef" }], 
"mac-ab")).toBe("mac-abcdef"); - }); - - it("throws unknown node with known list", () => { - expect(() => - resolveNodeIdFromCandidates( - [ - { nodeId: "mac-123", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, - { nodeId: "pi-456" }, - ], - "nope", - ), - ).toThrow(/unknown node: nope.*known: /); - }); - - it("throws ambiguous node with matches list", () => { - expect(() => - resolveNodeIdFromCandidates([{ nodeId: "mac-abcdef" }, { nodeId: "mac-abc999" }], "mac-abc"), - ).toThrow(/ambiguous node: mac-abc.*matches:/); - }); - - it("prefers a unique connected node when names are duplicated", () => { - expect( - resolveNodeIdFromCandidates( - [ - { nodeId: "ios-old", displayName: "iPhone", connected: false }, - { nodeId: "ios-live", displayName: "iPhone", connected: true }, - ], - "iphone", - ), - ).toBe("ios-live"); - }); - - it("stays ambiguous when multiple connected nodes match", () => { - expect(() => - resolveNodeIdFromCandidates( - [ - { nodeId: "ios-a", displayName: "iPhone", connected: true }, - { nodeId: "ios-b", displayName: "iPhone", connected: true }, - ], - "iphone", - ), - ).toThrow(/ambiguous node: iphone.*matches:/); - }); -}); diff --git a/src/shared/subagents-format.test.ts b/src/shared/subagents-format.test.ts new file mode 100644 index 00000000000..34d1f9a8d5d --- /dev/null +++ b/src/shared/subagents-format.test.ts @@ -0,0 +1,58 @@ +import { describe, expect, it } from "vitest"; +import { + formatDurationCompact, + formatTokenShort, + formatTokenUsageDisplay, + resolveIoTokens, + resolveTotalTokens, + truncateLine, +} from "./subagents-format.js"; + +describe("shared/subagents-format", () => { + it("formats compact durations across minute, hour, and day buckets", () => { + expect(formatDurationCompact()).toBe("n/a"); + expect(formatDurationCompact(30_000)).toBe("1m"); + expect(formatDurationCompact(61 * 60_000)).toBe("1h1m"); + expect(formatDurationCompact(25 * 60 * 60_000)).toBe("1d1h"); + }); + + it("formats token counts with integer, 
kilo, and million branches", () => { + expect(formatTokenShort()).toBeUndefined(); + expect(formatTokenShort(999.9)).toBe("999"); + expect(formatTokenShort(1_500)).toBe("1.5k"); + expect(formatTokenShort(15_400)).toBe("15k"); + expect(formatTokenShort(1_250_000)).toBe("1.3m"); + }); + + it("truncates lines only when needed", () => { + expect(truncateLine("short", 10)).toBe("short"); + expect(truncateLine("trim me ", 7)).toBe("trim me..."); + }); + + it("resolves token totals and io breakdowns from valid numeric fields only", () => { + expect(resolveTotalTokens()).toBeUndefined(); + expect(resolveTotalTokens({ totalTokens: 42 })).toBe(42); + expect(resolveTotalTokens({ inputTokens: 10, outputTokens: 5 })).toBe(15); + expect(resolveTotalTokens({ inputTokens: Number.NaN, outputTokens: 5 })).toBeUndefined(); + + expect(resolveIoTokens({ inputTokens: 10, outputTokens: 5 })).toEqual({ + input: 10, + output: 5, + total: 15, + }); + expect(resolveIoTokens({ inputTokens: Number.NaN, outputTokens: 0 })).toBeUndefined(); + }); + + it("formats io and prompt-cache usage displays with fallback branches", () => { + expect( + formatTokenUsageDisplay({ + inputTokens: 1_200, + outputTokens: 300, + totalTokens: 2_100, + }), + ).toBe("tokens 1.5k (in 1.2k / out 300), prompt/cache 2.1k"); + + expect(formatTokenUsageDisplay({ totalTokens: 500 })).toBe("tokens 500 prompt/cache"); + expect(formatTokenUsageDisplay({ inputTokens: 0, outputTokens: 0, totalTokens: 0 })).toBe(""); + }); +}); diff --git a/src/shared/tailscale-status.test.ts b/src/shared/tailscale-status.test.ts new file mode 100644 index 00000000000..5826e4b00b3 --- /dev/null +++ b/src/shared/tailscale-status.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it, vi } from "vitest"; +import { resolveTailnetHostWithRunner } from "./tailscale-status.js"; + +describe("shared/tailscale-status", () => { + it("returns null when no runner is provided", async () => { + await expect(resolveTailnetHostWithRunner()).resolves.toBeNull(); 
+ }); + + it("prefers DNS names and trims trailing dots from status json", async () => { + const run = vi.fn().mockResolvedValue({ + code: 0, + stdout: 'noise\n{"Self":{"DNSName":"mac.tail123.ts.net.","TailscaleIPs":["100.64.0.8"]}}', + }); + + await expect(resolveTailnetHostWithRunner(run)).resolves.toBe("mac.tail123.ts.net"); + expect(run).toHaveBeenCalledWith(["tailscale", "status", "--json"], { timeoutMs: 5000 }); + }); + + it("falls back across command candidates and then to the first tailscale ip", async () => { + const run = vi.fn().mockRejectedValueOnce(new Error("missing binary")).mockResolvedValueOnce({ + code: 0, + stdout: '{"Self":{"TailscaleIPs":["100.64.0.9","fd7a::1"]}}', + }); + + await expect(resolveTailnetHostWithRunner(run)).resolves.toBe("100.64.0.9"); + expect(run).toHaveBeenNthCalledWith( + 2, + ["/Applications/Tailscale.app/Contents/MacOS/Tailscale", "status", "--json"], + { + timeoutMs: 5000, + }, + ); + }); + + it("returns null for non-zero exits, blank output, or invalid json", async () => { + const run = vi + .fn() + .mockResolvedValueOnce({ code: 1, stdout: "boom" }) + .mockResolvedValueOnce({ code: 0, stdout: " " }); + + await expect(resolveTailnetHostWithRunner(run)).resolves.toBeNull(); + + const invalid = vi.fn().mockResolvedValue({ + code: 0, + stdout: "not-json", + }); + await expect(resolveTailnetHostWithRunner(invalid)).resolves.toBeNull(); + }); +}); diff --git a/src/shared/text-chunking.test.ts b/src/shared/text-chunking.test.ts new file mode 100644 index 00000000000..be1fb518750 --- /dev/null +++ b/src/shared/text-chunking.test.ts @@ -0,0 +1,31 @@ +import { describe, expect, it } from "vitest"; +import { chunkTextByBreakResolver } from "./text-chunking.js"; + +describe("shared/text-chunking", () => { + it("returns empty for blank input and the full text when under limit", () => { + expect(chunkTextByBreakResolver("", 10, () => 5)).toEqual([]); + expect(chunkTextByBreakResolver("hello", 10, () => 2)).toEqual(["hello"]); + }); + 
+ it("splits at resolver-provided breakpoints and trims separator boundaries", () => { + expect( + chunkTextByBreakResolver("alpha beta gamma", 10, (window) => window.lastIndexOf(" ")), + ).toEqual(["alpha", "beta gamma"]); + }); + + it("falls back to hard limits for invalid break indexes", () => { + expect(chunkTextByBreakResolver("abcdefghij", 4, () => Number.NaN)).toEqual([ + "abcd", + "efgh", + "ij", + ]); + expect(chunkTextByBreakResolver("abcdefghij", 4, () => 99)).toEqual(["abcd", "efgh", "ij"]); + expect(chunkTextByBreakResolver("abcdefghij", 4, () => 0)).toEqual(["abcd", "efgh", "ij"]); + }); + + it("skips empty chunks created by whitespace-only segments", () => { + expect( + chunkTextByBreakResolver("word next", 5, (window) => window.lastIndexOf(" ")), + ).toEqual(["word", "next"]); + }); +}); diff --git a/src/shared/text/code-regions.test.ts b/src/shared/text/code-regions.test.ts new file mode 100644 index 00000000000..05934383bd2 --- /dev/null +++ b/src/shared/text/code-regions.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { findCodeRegions, isInsideCode } from "./code-regions.js"; + +describe("shared/text/code-regions", () => { + it("finds fenced and inline code regions without double-counting inline code inside fences", () => { + const text = [ + "before `inline` after", + "```ts", + "const a = `inside fence`;", + "```", + "tail", + ].join("\n"); + + const regions = findCodeRegions(text); + + expect(regions).toHaveLength(2); + expect(text.slice(regions[0].start, regions[0].end)).toBe("`inline`"); + expect(text.slice(regions[1].start, regions[1].end)).toContain("```ts"); + }); + + it("accepts alternate fence markers and unterminated trailing fences", () => { + const text = "~~~js\nconsole.log(1)\n~~~\nplain\n```\nunterminated"; + const regions = findCodeRegions(text); + + expect(regions).toHaveLength(2); + expect(text.slice(regions[0].start, regions[0].end)).toContain("~~~js"); + expect(text.slice(regions[1].start, 
regions[1].end)).toBe("```\nunterminated"); + }); + + it("reports whether positions are inside discovered regions", () => { + const text = "plain `code` done"; + const regions = findCodeRegions(text); + const codeStart = text.indexOf("code"); + const plainStart = text.indexOf("plain"); + + expect(isInsideCode(codeStart, regions)).toBe(true); + expect(isInsideCode(plainStart, regions)).toBe(false); + }); +}); diff --git a/src/shared/usage-aggregates.test.ts b/src/shared/usage-aggregates.test.ts new file mode 100644 index 00000000000..e5ba960ad95 --- /dev/null +++ b/src/shared/usage-aggregates.test.ts @@ -0,0 +1,117 @@ +import { describe, expect, it } from "vitest"; +import { + buildUsageAggregateTail, + mergeUsageDailyLatency, + mergeUsageLatency, +} from "./usage-aggregates.js"; + +describe("shared/usage-aggregates", () => { + it("merges latency totals and ignores empty inputs", () => { + const totals = { + count: 1, + sum: 100, + min: 100, + max: 100, + p95Max: 100, + }; + + mergeUsageLatency(totals, undefined); + mergeUsageLatency(totals, { + count: 2, + avgMs: 50, + minMs: 20, + maxMs: 90, + p95Ms: 80, + }); + + expect(totals).toEqual({ + count: 3, + sum: 200, + min: 20, + max: 100, + p95Max: 100, + }); + }); + + it("merges daily latency by date and computes aggregate tail sorting", () => { + const dailyLatencyMap = new Map< + string, + { + date: string; + count: number; + sum: number; + min: number; + max: number; + p95Max: number; + } + >(); + + mergeUsageDailyLatency(dailyLatencyMap, [ + { date: "2026-03-12", count: 2, avgMs: 50, minMs: 20, maxMs: 90, p95Ms: 80 }, + { date: "2026-03-12", count: 1, avgMs: 120, minMs: 120, maxMs: 120, p95Ms: 120 }, + { date: "2026-03-11", count: 1, avgMs: 30, minMs: 30, maxMs: 30, p95Ms: 30 }, + ]); + + const tail = buildUsageAggregateTail({ + byChannelMap: new Map([ + ["discord", { totalCost: 4 }], + ["telegram", { totalCost: 8 }], + ]), + latencyTotals: { + count: 3, + sum: 200, + min: 20, + max: 120, + p95Max: 120, + }, + 
dailyLatencyMap, + modelDailyMap: new Map([ + ["b", { date: "2026-03-12", cost: 1 }], + ["a", { date: "2026-03-12", cost: 2 }], + ["c", { date: "2026-03-11", cost: 9 }], + ]), + dailyMap: new Map([ + ["b", { date: "2026-03-12" }], + ["a", { date: "2026-03-11" }], + ]), + }); + + expect(tail.byChannel.map((entry) => entry.channel)).toEqual(["telegram", "discord"]); + expect(tail.latency).toEqual({ + count: 3, + avgMs: 200 / 3, + minMs: 20, + maxMs: 120, + p95Ms: 120, + }); + expect(tail.dailyLatency).toEqual([ + { date: "2026-03-11", count: 1, avgMs: 30, minMs: 30, maxMs: 30, p95Ms: 30 }, + { date: "2026-03-12", count: 3, avgMs: 220 / 3, minMs: 20, maxMs: 120, p95Ms: 120 }, + ]); + expect(tail.modelDaily).toEqual([ + { date: "2026-03-11", cost: 9 }, + { date: "2026-03-12", cost: 2 }, + { date: "2026-03-12", cost: 1 }, + ]); + expect(tail.daily).toEqual([{ date: "2026-03-11" }, { date: "2026-03-12" }]); + }); + + it("omits latency when no requests were counted", () => { + const tail = buildUsageAggregateTail({ + byChannelMap: new Map(), + latencyTotals: { + count: 0, + sum: 0, + min: Number.POSITIVE_INFINITY, + max: 0, + p95Max: 0, + }, + dailyLatencyMap: new Map(), + modelDailyMap: new Map(), + dailyMap: new Map(), + }); + + expect(tail.latency).toBeUndefined(); + expect(tail.dailyLatency).toEqual([]); + }); +}); diff --git a/src/telegram/bot-message-context.named-account-dm.test.ts b/src/telegram/bot-message-context.named-account-dm.test.ts index c48fb17fe76..50a24b38f8a 100644 --- a/src/telegram/bot-message-context.named-account-dm.test.ts +++ b/src/telegram/bot-message-context.named-account-dm.test.ts @@ -26,6 +26,25 @@ describe("buildTelegramMessageContext named-account DM fallback", () => { return callArgs?.updateLastRoute; } + function buildNamedAccountDmMessage(messageId = 1) { + return { + message_id: messageId, + chat: { id: 814912386, type: "private" as const }, + date: 1700000000 + messageId - 1, + text: "hello", + from: { id: 814912386, first_name: 
"Alice" }, + }; + } + + async function buildNamedAccountDmContext(accountId = "atlas", messageId = 1) { + setRuntimeConfigSnapshot(baseCfg); + return await buildTelegramMessageContextForTest({ + cfg: baseCfg, + accountId, + message: buildNamedAccountDmMessage(messageId), + }); + } + it("allows DM through for a named account with no explicit binding", async () => { setRuntimeConfigSnapshot(baseCfg); @@ -47,67 +66,21 @@ describe("buildTelegramMessageContext named-account DM fallback", () => { }); it("uses a per-account session key for named-account DMs", async () => { - setRuntimeConfigSnapshot(baseCfg); - - const ctx = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "atlas", - message: { - message_id: 1, - chat: { id: 814912386, type: "private" }, - date: 1700000000, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); + const ctx = await buildNamedAccountDmContext(); expect(ctx?.ctxPayload?.SessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); }); it("keeps named-account fallback lastRoute on the isolated DM session", async () => { - setRuntimeConfigSnapshot(baseCfg); - - const ctx = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "atlas", - message: { - message_id: 1, - chat: { id: 814912386, type: "private" }, - date: 1700000000, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); + const ctx = await buildNamedAccountDmContext(); expect(ctx?.ctxPayload?.SessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); expect(getLastUpdateLastRoute()?.sessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); }); it("isolates sessions between named accounts that share the default agent", async () => { - setRuntimeConfigSnapshot(baseCfg); - - const atlas = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "atlas", - message: { - message_id: 1, - chat: { id: 814912386, type: "private" }, - date: 1700000000, - text: "hello", - from: { id: 
814912386, first_name: "Alice" }, - }, - }); - const skynet = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "skynet", - message: { - message_id: 2, - chat: { id: 814912386, type: "private" }, - date: 1700000001, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); + const atlas = await buildNamedAccountDmContext("atlas", 1); + const skynet = await buildNamedAccountDmContext("skynet", 2); expect(atlas?.ctxPayload?.SessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); expect(skynet?.ctxPayload?.SessionKey).toBe("agent:main:telegram:skynet:direct:814912386"); diff --git a/src/telegram/bot-message.test.ts b/src/telegram/bot-message.test.ts index 4a745cbbe47..14f3ea37594 100644 --- a/src/telegram/bot-message.test.ts +++ b/src/telegram/bot-message.test.ts @@ -57,6 +57,21 @@ describe("telegram bot message processor", () => { ); } + function createDispatchFailureHarness( + context: Record, + sendMessage: ReturnType, + ) { + const runtimeError = vi.fn(); + buildTelegramMessageContext.mockResolvedValue(context); + dispatchTelegramMessage.mockRejectedValue(new Error("dispatch exploded")); + const processMessage = createTelegramMessageProcessor({ + ...baseDeps, + bot: { api: { sendMessage } }, + runtime: { error: runtimeError }, + } as unknown as Parameters[0]); + return { processMessage, runtimeError }; + } + it("dispatches when context is available", async () => { buildTelegramMessageContext.mockResolvedValue({ route: { sessionKey: "agent:main:main" } }); @@ -75,19 +90,14 @@ describe("telegram bot message processor", () => { it("sends user-visible fallback when dispatch throws", async () => { const sendMessage = vi.fn().mockResolvedValue(undefined); - const runtimeError = vi.fn(); - buildTelegramMessageContext.mockResolvedValue({ - chatId: 123, - threadSpec: { id: 456 }, - route: { sessionKey: "agent:main:main" }, - }); - dispatchTelegramMessage.mockRejectedValue(new Error("dispatch exploded")); - - const 
processMessage = createTelegramMessageProcessor({ - ...baseDeps, - bot: { api: { sendMessage } }, - runtime: { error: runtimeError }, - } as unknown as Parameters[0]); + const { processMessage, runtimeError } = createDispatchFailureHarness( + { + chatId: 123, + threadSpec: { id: 456 }, + route: { sessionKey: "agent:main:main" }, + }, + sendMessage, + ); await expect(processSampleMessage(processMessage)).resolves.toBeUndefined(); expect(sendMessage).toHaveBeenCalledWith( @@ -100,18 +110,13 @@ describe("telegram bot message processor", () => { it("swallows fallback delivery failures after dispatch throws", async () => { const sendMessage = vi.fn().mockRejectedValue(new Error("blocked by user")); - const runtimeError = vi.fn(); - buildTelegramMessageContext.mockResolvedValue({ - chatId: 123, - route: { sessionKey: "agent:main:main" }, - }); - dispatchTelegramMessage.mockRejectedValue(new Error("dispatch exploded")); - - const processMessage = createTelegramMessageProcessor({ - ...baseDeps, - bot: { api: { sendMessage } }, - runtime: { error: runtimeError }, - } as unknown as Parameters[0]); + const { processMessage, runtimeError } = createDispatchFailureHarness( + { + chatId: 123, + route: { sessionKey: "agent:main:main" }, + }, + sendMessage, + ); await expect(processSampleMessage(processMessage)).resolves.toBeUndefined(); expect(sendMessage).toHaveBeenCalledWith( diff --git a/src/telegram/bot-native-commands.group-auth.test.ts b/src/telegram/bot-native-commands.group-auth.test.ts index 77d73497c26..cca25aedc2c 100644 --- a/src/telegram/bot-native-commands.group-auth.test.ts +++ b/src/telegram/bot-native-commands.group-auth.test.ts @@ -1,26 +1,12 @@ -import { describe, expect, it, vi } from "vitest"; +import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import type { ChannelGroupPolicy } from "../config/group-policy.js"; import type { TelegramAccountConfig } from "../config/types.js"; -import type { RuntimeEnv } 
from "../runtime.js"; -import { registerTelegramNativeCommands } from "./bot-native-commands.js"; - -const getPluginCommandSpecs = vi.hoisted(() => vi.fn(() => [])); -const matchPluginCommand = vi.hoisted(() => vi.fn(() => null)); -const executePluginCommand = vi.hoisted(() => vi.fn(async () => ({ text: "ok" }))); - -vi.mock("../plugins/commands.js", () => ({ - getPluginCommandSpecs, - matchPluginCommand, - executePluginCommand, -})); - -const deliverReplies = vi.hoisted(() => vi.fn(async () => {})); -vi.mock("./bot/delivery.js", () => ({ deliverReplies })); - -vi.mock("../pairing/pairing-store.js", () => ({ - readChannelAllowFromStore: vi.fn(async () => []), -})); +import { + createNativeCommandsHarness, + createTelegramGroupCommandContext, + findNotAuthorizedCalls, +} from "./bot-native-commands.test-helpers.js"; describe("native command auth in groups", () => { function setup(params: { @@ -32,32 +18,12 @@ describe("native command auth in groups", () => { groupConfig?: Record; resolveGroupPolicy?: () => ChannelGroupPolicy; }) { - const handlers: Record Promise> = {}; - const sendMessage = vi.fn().mockResolvedValue(undefined); - const bot = { - api: { - setMyCommands: vi.fn().mockResolvedValue(undefined), - sendMessage, - }, - command: (name: string, handler: (ctx: unknown) => Promise) => { - handlers[name] = handler; - }, - } as const; - - registerTelegramNativeCommands({ - bot: bot as unknown as Parameters[0]["bot"], + return createNativeCommandsHarness({ cfg: params.cfg ?? ({} as OpenClawConfig), - runtime: {} as unknown as RuntimeEnv, - accountId: "default", telegramCfg: params.telegramCfg ?? ({} as TelegramAccountConfig), allowFrom: params.allowFrom ?? [], groupAllowFrom: params.groupAllowFrom ?? [], - replyToMode: "off", - textLimit: 4000, useAccessGroups: params.useAccessGroups ?? false, - nativeEnabled: true, - nativeSkillsEnabled: false, - nativeDisabledExplicit: false, resolveGroupPolicy: params.resolveGroupPolicy ?? 
(() => @@ -65,15 +31,8 @@ describe("native command auth in groups", () => { allowlistEnabled: false, allowed: true, }) as ChannelGroupPolicy), - resolveTelegramGroupConfig: () => ({ - groupConfig: params.groupConfig as undefined, - topicConfig: undefined, - }), - shouldSkipUpdate: () => false, - opts: { token: "token" }, + groupConfig: params.groupConfig, }); - - return { handlers, sendMessage }; } it("authorizes native commands in groups when sender is in groupAllowFrom", async () => { @@ -83,23 +42,11 @@ describe("native command auth in groups", () => { // no allowFrom — sender is NOT in DM allowlist }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); - // should NOT send "not authorized" rejection - const notAuthCalls = sendMessage.mock.calls.filter( - (call) => typeof call[1] === "string" && call[1].includes("not authorized"), - ); + const notAuthCalls = findNotAuthorizedCalls(sendMessage); expect(notAuthCalls).toHaveLength(0); }); @@ -117,22 +64,11 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); - const notAuthCalls = sendMessage.mock.calls.filter( - (call) => typeof call[1] === "string" && call[1].includes("not authorized"), - ); + const notAuthCalls = findNotAuthorizedCalls(sendMessage); expect(notAuthCalls).toHaveLength(0); }); @@ -149,16 +85,7 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: 
"supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); @@ -189,16 +116,7 @@ describe("native command auth in groups", () => { }) as ChannelGroupPolicy, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); @@ -226,16 +144,7 @@ describe("native command auth in groups", () => { }) as ChannelGroupPolicy, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); @@ -253,22 +162,13 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "intruder" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext({ + username: "intruder", + }); await handlers.status?.(ctx); - const notAuthCalls = sendMessage.mock.calls.filter( - (call) => typeof call[1] === "string" && call[1].includes("not authorized"), - ); + const notAuthCalls = findNotAuthorizedCalls(sendMessage); expect(notAuthCalls.length).toBeGreaterThan(0); }); @@ -279,16 +179,9 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "intruder" }, - message_thread_id: 42, - message_id: 
1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext({ + username: "intruder", + }); await handlers.status?.(ctx); diff --git a/src/telegram/bot-native-commands.plugin-auth.test.ts b/src/telegram/bot-native-commands.plugin-auth.test.ts index f6f6d16c2fc..6312fa08b7b 100644 --- a/src/telegram/bot-native-commands.plugin-auth.test.ts +++ b/src/telegram/bot-native-commands.plugin-auth.test.ts @@ -1,26 +1,13 @@ import { describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; -import type { ChannelGroupPolicy } from "../config/group-policy.js"; import type { TelegramAccountConfig } from "../config/types.js"; -import type { RuntimeEnv } from "../runtime.js"; -import { registerTelegramNativeCommands } from "./bot-native-commands.js"; - -const getPluginCommandSpecs = vi.hoisted(() => vi.fn()); -const matchPluginCommand = vi.hoisted(() => vi.fn()); -const executePluginCommand = vi.hoisted(() => vi.fn()); - -vi.mock("../plugins/commands.js", () => ({ +import { + createNativeCommandsHarness, + deliverReplies, + executePluginCommand, getPluginCommandSpecs, matchPluginCommand, - executePluginCommand, -})); - -const deliverReplies = vi.hoisted(() => vi.fn(async () => {})); -vi.mock("./bot/delivery.js", () => ({ deliverReplies })); - -vi.mock("../pairing/pairing-store.js", () => ({ - readChannelAllowFromStore: vi.fn(async () => []), -})); +} from "./bot-native-commands.test-helpers.js"; describe("registerTelegramNativeCommands (plugin auth)", () => { it("does not register plugin commands in menu when native=false but keeps handlers available", () => { @@ -30,44 +17,10 @@ describe("registerTelegramNativeCommands (plugin auth)", () => { })); getPluginCommandSpecs.mockReturnValue(specs); - const handlers: Record Promise> = {}; - const setMyCommands = vi.fn().mockResolvedValue(undefined); - const log = vi.fn(); - const bot = { - api: { - setMyCommands, - sendMessage: vi.fn(), - }, - command: (name: 
string, handler: (ctx: unknown) => Promise) => { - handlers[name] = handler; - }, - } as const; - - registerTelegramNativeCommands({ - bot: bot as unknown as Parameters[0]["bot"], + const { handlers, setMyCommands, log } = createNativeCommandsHarness({ cfg: {} as OpenClawConfig, - runtime: { log } as unknown as RuntimeEnv, - accountId: "default", telegramCfg: {} as TelegramAccountConfig, - allowFrom: [], - groupAllowFrom: [], - replyToMode: "off", - textLimit: 4000, - useAccessGroups: false, nativeEnabled: false, - nativeSkillsEnabled: false, - nativeDisabledExplicit: false, - resolveGroupPolicy: () => - ({ - allowlistEnabled: false, - allowed: true, - }) as ChannelGroupPolicy, - resolveTelegramGroupConfig: () => ({ - groupConfig: undefined, - topicConfig: undefined, - }), - shouldSkipUpdate: () => false, - opts: { token: "token" }, }); expect(setMyCommands).not.toHaveBeenCalled(); @@ -87,46 +40,11 @@ describe("registerTelegramNativeCommands (plugin auth)", () => { matchPluginCommand.mockReturnValue({ command, args: undefined }); executePluginCommand.mockResolvedValue({ text: "ok" }); - const handlers: Record Promise> = {}; - const bot = { - api: { - setMyCommands: vi.fn().mockResolvedValue(undefined), - sendMessage: vi.fn(), - }, - command: (name: string, handler: (ctx: unknown) => Promise) => { - handlers[name] = handler; - }, - } as const; - - const cfg = {} as OpenClawConfig; - const telegramCfg = {} as TelegramAccountConfig; - const resolveGroupPolicy = () => - ({ - allowlistEnabled: false, - allowed: true, - }) as ChannelGroupPolicy; - - registerTelegramNativeCommands({ - bot: bot as unknown as Parameters[0]["bot"], - cfg, - runtime: {} as unknown as RuntimeEnv, - accountId: "default", - telegramCfg, + const { handlers, bot } = createNativeCommandsHarness({ + cfg: {} as OpenClawConfig, + telegramCfg: {} as TelegramAccountConfig, allowFrom: ["999"], - groupAllowFrom: [], - replyToMode: "off", - textLimit: 4000, - useAccessGroups: false, nativeEnabled: false, - 
nativeSkillsEnabled: false, - nativeDisabledExplicit: false, - resolveGroupPolicy, - resolveTelegramGroupConfig: () => ({ - groupConfig: undefined, - topicConfig: undefined, - }), - shouldSkipUpdate: () => false, - opts: { token: "token" }, }); const ctx = { diff --git a/src/telegram/bot-native-commands.test-helpers.ts b/src/telegram/bot-native-commands.test-helpers.ts index b79d61d48a3..cb5745aed0d 100644 --- a/src/telegram/bot-native-commands.test-helpers.ts +++ b/src/telegram/bot-native-commands.test-helpers.ts @@ -1,49 +1,113 @@ +import { vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import type { ChannelGroupPolicy } from "../config/group-policy.js"; import type { TelegramAccountConfig } from "../config/types.js"; import type { RuntimeEnv } from "../runtime.js"; -import type { registerTelegramNativeCommands } from "./bot-native-commands.js"; +import { registerTelegramNativeCommands } from "./bot-native-commands.js"; -type RegisterTelegramNativeCommandParams = Parameters[0]; +const pluginCommandMocks = vi.hoisted(() => ({ + getPluginCommandSpecs: vi.fn(() => []), + matchPluginCommand: vi.fn(() => null), + executePluginCommand: vi.fn(async () => ({ text: "ok" })), +})); +export const getPluginCommandSpecs = pluginCommandMocks.getPluginCommandSpecs; +export const matchPluginCommand = pluginCommandMocks.matchPluginCommand; +export const executePluginCommand = pluginCommandMocks.executePluginCommand; -export function createNativeCommandTestParams(params: { - bot: RegisterTelegramNativeCommandParams["bot"]; +vi.mock("../plugins/commands.js", () => ({ + getPluginCommandSpecs: pluginCommandMocks.getPluginCommandSpecs, + matchPluginCommand: pluginCommandMocks.matchPluginCommand, + executePluginCommand: pluginCommandMocks.executePluginCommand, +})); + +const deliveryMocks = vi.hoisted(() => ({ + deliverReplies: vi.fn(async () => {}), +})); +export const deliverReplies = deliveryMocks.deliverReplies; +vi.mock("./bot/delivery.js", () => 
({ deliverReplies: deliveryMocks.deliverReplies })); +vi.mock("../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: vi.fn(async () => []), +})); + +export function createNativeCommandsHarness(params?: { cfg?: OpenClawConfig; runtime?: RuntimeEnv; - accountId?: string; telegramCfg?: TelegramAccountConfig; allowFrom?: string[]; groupAllowFrom?: string[]; - replyToMode?: RegisterTelegramNativeCommandParams["replyToMode"]; - textLimit?: number; useAccessGroups?: boolean; nativeEnabled?: boolean; - nativeSkillsEnabled?: boolean; - nativeDisabledExplicit?: boolean; - resolveTelegramGroupConfig?: RegisterTelegramNativeCommandParams["resolveTelegramGroupConfig"]; - opts?: RegisterTelegramNativeCommandParams["opts"]; -}): RegisterTelegramNativeCommandParams { - return { - bot: params.bot, - cfg: params.cfg ?? {}, - runtime: params.runtime ?? ({} as RuntimeEnv), - accountId: params.accountId ?? "default", - telegramCfg: params.telegramCfg ?? ({} as TelegramAccountConfig), - allowFrom: params.allowFrom ?? [], - groupAllowFrom: params.groupAllowFrom ?? [], - replyToMode: params.replyToMode ?? "off", - textLimit: params.textLimit ?? 4096, - useAccessGroups: params.useAccessGroups ?? false, - nativeEnabled: params.nativeEnabled ?? true, - nativeSkillsEnabled: params.nativeSkillsEnabled ?? true, - nativeDisabledExplicit: params.nativeDisabledExplicit ?? false, - resolveGroupPolicy: () => ({ allowlistEnabled: false, allowed: true }), - resolveTelegramGroupConfig: - params.resolveTelegramGroupConfig ?? 
- (() => ({ - groupConfig: undefined, - topicConfig: undefined, - })), + groupConfig?: Record; + resolveGroupPolicy?: () => ChannelGroupPolicy; +}) { + const handlers: Record Promise> = {}; + const sendMessage = vi.fn().mockResolvedValue(undefined); + const setMyCommands = vi.fn().mockResolvedValue(undefined); + const log = vi.fn(); + const bot = { + api: { + setMyCommands, + sendMessage, + }, + command: (name: string, handler: (ctx: unknown) => Promise) => { + handlers[name] = handler; + }, + } as const; + + registerTelegramNativeCommands({ + bot: bot as unknown as Parameters[0]["bot"], + cfg: params?.cfg ?? ({} as OpenClawConfig), + runtime: params?.runtime ?? ({ log } as unknown as RuntimeEnv), + accountId: "default", + telegramCfg: params?.telegramCfg ?? ({} as TelegramAccountConfig), + allowFrom: params?.allowFrom ?? [], + groupAllowFrom: params?.groupAllowFrom ?? [], + replyToMode: "off", + textLimit: 4000, + useAccessGroups: params?.useAccessGroups ?? false, + nativeEnabled: params?.nativeEnabled ?? true, + nativeSkillsEnabled: false, + nativeDisabledExplicit: false, + resolveGroupPolicy: + params?.resolveGroupPolicy ?? + (() => + ({ + allowlistEnabled: false, + allowed: true, + }) as ChannelGroupPolicy), + resolveTelegramGroupConfig: () => ({ + groupConfig: params?.groupConfig as undefined, + topicConfig: undefined, + }), shouldSkipUpdate: () => false, - opts: params.opts ?? { token: "token" }, + opts: { token: "token" }, + }); + + return { handlers, sendMessage, setMyCommands, log, bot }; +} + +export function createTelegramGroupCommandContext(params?: { + senderId?: number; + username?: string; + threadId?: number; +}) { + return { + message: { + chat: { id: -100999, type: "supergroup", is_forum: true }, + from: { + id: params?.senderId ?? 12345, + username: params?.username ?? "testuser", + }, + message_thread_id: params?.threadId ?? 
42, + message_id: 1, + date: 1700000000, + }, + match: "", }; } + +export function findNotAuthorizedCalls(sendMessage: ReturnType) { + return sendMessage.mock.calls.filter( + (call) => typeof call[1] === "string" && call[1].includes("not authorized"), + ); +} diff --git a/src/telegram/bot.fetch-abort.test.ts b/src/telegram/bot.fetch-abort.test.ts index 0d9bd53643b..258215d4c6d 100644 --- a/src/telegram/bot.fetch-abort.test.ts +++ b/src/telegram/bot.fetch-abort.test.ts @@ -3,9 +3,22 @@ import { botCtorSpy } from "./bot.create-telegram-bot.test-harness.js"; import { createTelegramBot } from "./bot.js"; import { getTelegramNetworkErrorOrigin } from "./network-errors.js"; +function createWrappedTelegramClientFetch(proxyFetch: typeof fetch) { + const shutdown = new AbortController(); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); + return { clientFetch, shutdown }; +} + describe("createTelegramBot fetch abort", () => { it("aborts wrapped client fetch when fetchAbortSignal aborts", async () => { - const shutdown = new AbortController(); const fetchSpy = vi.fn( (_input: RequestInfo | URL, init?: RequestInit) => new Promise((resolve) => { @@ -13,15 +26,9 @@ describe("createTelegramBot fetch abort", () => { signal.addEventListener("abort", () => resolve(signal), { once: true }); }), ); - botCtorSpy.mockClear(); - createTelegramBot({ - token: "tok", - fetchAbortSignal: shutdown.signal, - proxyFetch: fetchSpy as unknown as typeof fetch, - }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + const { clientFetch, 
shutdown } = createWrappedTelegramClientFetch( + fetchSpy as unknown as typeof fetch, + ); const observedSignalPromise = clientFetch("https://example.test"); shutdown.abort(new Error("shutdown")); @@ -32,7 +39,6 @@ describe("createTelegramBot fetch abort", () => { }); it("tags wrapped Telegram fetch failures with the Bot API method", async () => { - const shutdown = new AbortController(); const fetchError = Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT", @@ -41,15 +47,7 @@ describe("createTelegramBot fetch abort", () => { const fetchSpy = vi.fn(async () => { throw fetchError; }); - botCtorSpy.mockClear(); - createTelegramBot({ - token: "tok", - fetchAbortSignal: shutdown.signal, - proxyFetch: fetchSpy as unknown as typeof fetch, - }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + const { clientFetch } = createWrappedTelegramClientFetch(fetchSpy as unknown as typeof fetch); await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( fetchError, @@ -61,7 +59,6 @@ describe("createTelegramBot fetch abort", () => { }); it("preserves the original fetch error when tagging cannot attach metadata", async () => { - const shutdown = new AbortController(); const frozenError = Object.freeze( Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { @@ -72,15 +69,7 @@ describe("createTelegramBot fetch abort", () => { const fetchSpy = vi.fn(async () => { throw frozenError; }); - botCtorSpy.mockClear(); - createTelegramBot({ - token: "tok", - fetchAbortSignal: shutdown.signal, - proxyFetch: fetchSpy as unknown as typeof fetch, - }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as 
(input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + const { clientFetch } = createWrappedTelegramClientFetch(fetchSpy as unknown as typeof fetch); await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( frozenError, diff --git a/src/telegram/exec-approvals-handler.ts b/src/telegram/exec-approvals-handler.ts index 65488928469..01e3b51bedd 100644 --- a/src/telegram/exec-approvals-handler.ts +++ b/src/telegram/exec-approvals-handler.ts @@ -1,5 +1,4 @@ import type { OpenClawConfig } from "../config/config.js"; -import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; import { GatewayClient } from "../gateway/client.js"; import { createOperatorApprovalsGatewayClient } from "../gateway/operator-approvals-client.js"; import type { EventFrame } from "../gateway/protocol/index.js"; @@ -8,8 +7,8 @@ import { buildExecApprovalPendingReplyPayload, type ExecApprovalPendingReplyParams, } from "../infra/exec-approval-reply.js"; +import { resolveExecApprovalSessionTarget } from "../infra/exec-approval-session-target.js"; import type { ExecApprovalRequest, ExecApprovalResolved } from "../infra/exec-approvals.js"; -import { resolveSessionDeliveryTarget } from "../infra/outbound/targets.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { normalizeAccountId, parseAgentSessionKey } from "../routing/session-key.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -120,40 +119,14 @@ function resolveRequestSessionTarget(params: { cfg: OpenClawConfig; request: ExecApprovalRequest; }): { to: string; accountId?: string; threadId?: number; channel?: string } | null { - const sessionKey = params.request.request.sessionKey?.trim(); - if (!sessionKey) { - return null; - } - const parsed = parseAgentSessionKey(sessionKey); - const agentId = parsed?.agentId ?? params.request.request.agentId ?? 
"main"; - const storePath = resolveStorePath(params.cfg.session?.store, { agentId }); - const store = loadSessionStore(storePath); - const entry = store[sessionKey]; - if (!entry) { - return null; - } - const target = resolveSessionDeliveryTarget({ - entry, - requestedChannel: "last", + return resolveExecApprovalSessionTarget({ + cfg: params.cfg, + request: params.request, turnSourceChannel: params.request.request.turnSourceChannel ?? undefined, turnSourceTo: params.request.request.turnSourceTo ?? undefined, turnSourceAccountId: params.request.request.turnSourceAccountId ?? undefined, turnSourceThreadId: params.request.request.turnSourceThreadId ?? undefined, }); - if (!target.to) { - return null; - } - return { - channel: target.channel ?? undefined, - to: target.to, - accountId: target.accountId ?? undefined, - threadId: - typeof target.threadId === "number" - ? target.threadId - : typeof target.threadId === "string" - ? Number.parseInt(target.threadId, 10) - : undefined, - }; } function resolveTelegramSourceTarget(params: { diff --git a/src/telegram/lane-delivery.test.ts b/src/telegram/lane-delivery.test.ts index 3a165147d84..3ddad092d7a 100644 --- a/src/telegram/lane-delivery.test.ts +++ b/src/telegram/lane-delivery.test.ts @@ -84,6 +84,39 @@ function createHarness(params?: { }; } +async function deliverFinalAnswer(harness: ReturnType, text: string) { + return harness.deliverLaneText({ + laneName: "answer", + text, + payload: { text }, + infoKind: "final", + }); +} + +function seedArchivedAnswerPreview(harness: ReturnType) { + harness.archivedAnswerPreviews.push({ + messageId: 5555, + textSnapshot: "Partial streaming...", + deleteIfUnused: true, + }); +} + +async function expectFinalEditFallbackToSend(params: { + harness: ReturnType; + text: string; + expectedLogSnippet: string; +}) { + const result = await deliverFinalAnswer(params.harness, params.text); + expect(result).toBe("sent"); + expect(params.harness.editPreview).toHaveBeenCalledTimes(1); + 
expect(params.harness.sendPayload).toHaveBeenCalledWith( + expect.objectContaining({ text: params.text }), + ); + expect(params.harness.log).toHaveBeenCalledWith( + expect.stringContaining(params.expectedLogSnippet), + ); +} + describe("createLaneTextDeliverer", () => { it("finalizes text-only replies by editing an existing preview message", async () => { const harness = createHarness({ answerMessageId: 999 }); @@ -198,21 +231,11 @@ describe("createLaneTextDeliverer", () => { const harness = createHarness({ answerMessageId: 999 }); harness.editPreview.mockRejectedValue(new Error("400: Bad Request: message to edit not found")); - const result = await harness.deliverLaneText({ - laneName: "answer", + await expectFinalEditFallbackToSend({ + harness, text: "Hello final", - payload: { text: "Hello final" }, - infoKind: "final", + expectedLogSnippet: "edit target missing with no alternate preview; falling back", }); - - expect(result).toBe("sent"); - expect(harness.editPreview).toHaveBeenCalledTimes(1); - expect(harness.sendPayload).toHaveBeenCalledWith( - expect.objectContaining({ text: "Hello final" }), - ); - expect(harness.log).toHaveBeenCalledWith( - expect.stringContaining("edit target missing with no alternate preview; falling back"), - ); }); it("falls back to sendPayload when the final edit fails before reaching Telegram", async () => { @@ -451,19 +474,10 @@ describe("createLaneTextDeliverer", () => { it("falls back when an archived preview edit target is missing and no alternate preview exists", async () => { const harness = createHarness(); - harness.archivedAnswerPreviews.push({ - messageId: 5555, - textSnapshot: "Partial streaming...", - deleteIfUnused: true, - }); + seedArchivedAnswerPreview(harness); harness.editPreview.mockRejectedValue(new Error("400: Bad Request: message to edit not found")); - const result = await harness.deliverLaneText({ - laneName: "answer", - text: "Complete final answer", - payload: { text: "Complete final answer" }, - infoKind: 
"final", - }); + const result = await deliverFinalAnswer(harness, "Complete final answer"); expect(harness.editPreview).toHaveBeenCalledTimes(1); expect(harness.sendPayload).toHaveBeenCalledWith( @@ -475,19 +489,10 @@ describe("createLaneTextDeliverer", () => { it("keeps the active preview when an archived final edit target is missing", async () => { const harness = createHarness({ answerMessageId: 999 }); - harness.archivedAnswerPreviews.push({ - messageId: 5555, - textSnapshot: "Partial streaming...", - deleteIfUnused: true, - }); + seedArchivedAnswerPreview(harness); harness.editPreview.mockRejectedValue(new Error("400: Bad Request: message to edit not found")); - const result = await harness.deliverLaneText({ - laneName: "answer", - text: "Complete final answer", - payload: { text: "Complete final answer" }, - infoKind: "final", - }); + const result = await deliverFinalAnswer(harness, "Complete final answer"); expect(harness.editPreview).toHaveBeenCalledTimes(1); expect(harness.sendPayload).not.toHaveBeenCalled(); @@ -502,21 +507,11 @@ describe("createLaneTextDeliverer", () => { const err = Object.assign(new Error("403: Forbidden"), { error_code: 403 }); harness.editPreview.mockRejectedValue(err); - const result = await harness.deliverLaneText({ - laneName: "answer", + await expectFinalEditFallbackToSend({ + harness, text: "Hello final", - payload: { text: "Hello final" }, - infoKind: "final", + expectedLogSnippet: "rejected by Telegram (client error); falling back", }); - - expect(result).toBe("sent"); - expect(harness.editPreview).toHaveBeenCalledTimes(1); - expect(harness.sendPayload).toHaveBeenCalledWith( - expect.objectContaining({ text: "Hello final" }), - ); - expect(harness.log).toHaveBeenCalledWith( - expect.stringContaining("rejected by Telegram (client error); falling back"), - ); }); it("retains preview on 502 with error_code during final (ambiguous server error)", async () => { diff --git a/src/telegram/network-config.test.ts 
b/src/telegram/network-config.test.ts index fad150e0d7f..70de5f46826 100644 --- a/src/telegram/network-config.test.ts +++ b/src/telegram/network-config.test.ts @@ -1,4 +1,5 @@ import { afterEach, describe, expect, it, vi } from "vitest"; +import type { TelegramNetworkConfig } from "../config/types.telegram.js"; import { resetTelegramNetworkConfigStateForTests, resolveTelegramAutoSelectFamilyDecision, @@ -157,7 +158,9 @@ describe("resolveTelegramDnsResultOrderDecision", () => { }, { name: "normalizes trimmed config values", - network: { dnsResultOrder: " Verbatim " }, + network: { dnsResultOrder: " Verbatim " } as TelegramNetworkConfig & { + dnsResultOrder: string; + }, nodeMajor: 20, expected: { value: "verbatim", source: "config" }, }, @@ -171,11 +174,17 @@ describe("resolveTelegramDnsResultOrderDecision", () => { { name: "ignores invalid env and config values before applying Node 22 default", env: { OPENCLAW_TELEGRAM_DNS_RESULT_ORDER: "bogus" }, - network: { dnsResultOrder: "invalid" }, + network: { dnsResultOrder: "invalid" } as TelegramNetworkConfig & { dnsResultOrder: string }, nodeMajor: 22, expected: { value: "ipv4first", source: "default-node22" }, }, - ])("$name", ({ env, network, nodeMajor, expected }) => { + ] satisfies Array<{ + name: string; + env?: NodeJS.ProcessEnv; + network?: TelegramNetworkConfig | (TelegramNetworkConfig & { dnsResultOrder: string }); + nodeMajor: number; + expected: ReturnType; + }>)("$name", ({ env, network, nodeMajor, expected }) => { const decision = resolveTelegramDnsResultOrderDecision({ env, network, diff --git a/src/test-helpers/http.ts b/src/test-helpers/http.ts new file mode 100644 index 00000000000..2aa6f21ba6c --- /dev/null +++ b/src/test-helpers/http.ts @@ -0,0 +1,20 @@ +export function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); +} + +export function requestUrl(input: string | URL | Request): 
string { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return input.toString(); + } + return input.url; +} + +export function requestBodyText(body: BodyInit | null | undefined): string { + return typeof body === "string" ? body : "{}"; +} diff --git a/src/test-utils/exec-assertions.ts b/src/test-utils/exec-assertions.ts index 58b77f9f730..6e9149725ef 100644 --- a/src/test-utils/exec-assertions.ts +++ b/src/test-utils/exec-assertions.ts @@ -28,14 +28,7 @@ export function expectSingleNpmInstallIgnoreScriptsCall(params: { throw new Error("expected npm install call"); } const [argv, opts] = first; - expect(argv).toEqual([ - "npm", - "install", - "--omit=dev", - "--omit=peer", - "--silent", - "--ignore-scripts", - ]); + expect(argv).toEqual(["npm", "install", "--omit=dev", "--silent", "--ignore-scripts"]); expect(opts?.cwd).toBeTruthy(); const cwd = String(opts?.cwd); const expectedTargetDir = params.expectedTargetDir; diff --git a/src/tts/tts.test.ts b/src/tts/tts.test.ts index eedc325fd4f..b326b4835e5 100644 --- a/src/tts/tts.test.ts +++ b/src/tts/tts.test.ts @@ -91,6 +91,22 @@ const mockAssistantMessage = (content: AssistantMessage["content"]): AssistantMe timestamp: Date.now(), }); +function createOpenAiTelephonyCfg(model: "tts-1" | "gpt-4o-mini-tts"): OpenClawConfig { + return { + messages: { + tts: { + provider: "openai", + openai: { + apiKey: "test-key", + model, + voice: "alloy", + instructions: "Speak warmly", + }, + }, + }, + }; +} + describe("tts", () => { beforeEach(() => { vi.clearAllMocks(); @@ -592,25 +608,14 @@ describe("tts", () => { } }; - it("omits instructions for unsupported speech models", async () => { - const cfg: OpenClawConfig = { - messages: { - tts: { - provider: "openai", - openai: { - apiKey: "test-key", - model: "tts-1", - voice: "alloy", - instructions: "Speak warmly", - }, - }, - }, - }; - + async function expectTelephonyInstructions( + model: "tts-1" | "gpt-4o-mini-tts", + expectedInstructions: 
string | undefined, + ) { await withMockedTelephonyFetch(async (fetchMock) => { const result = await tts.textToSpeechTelephony({ text: "Hello there, friendly caller.", - cfg, + cfg: createOpenAiTelephonyCfg(model), }); expect(result.success).toBe(true); @@ -618,38 +623,16 @@ describe("tts", () => { const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; expect(typeof init.body).toBe("string"); const body = JSON.parse(init.body as string) as Record; - expect(body.instructions).toBeUndefined(); + expect(body.instructions).toBe(expectedInstructions); }); + } + + it("omits instructions for unsupported speech models", async () => { + await expectTelephonyInstructions("tts-1", undefined); }); it("includes instructions for gpt-4o-mini-tts", async () => { - const cfg: OpenClawConfig = { - messages: { - tts: { - provider: "openai", - openai: { - apiKey: "test-key", - model: "gpt-4o-mini-tts", - voice: "alloy", - instructions: "Speak warmly", - }, - }, - }, - }; - - await withMockedTelephonyFetch(async (fetchMock) => { - const result = await tts.textToSpeechTelephony({ - text: "Hello there, friendly caller.", - cfg, - }); - - expect(result.success).toBe(true); - expect(fetchMock).toHaveBeenCalledTimes(1); - const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; - expect(typeof init.body).toBe("string"); - const body = JSON.parse(init.body as string) as Record; - expect(body.instructions).toBe("Speak warmly"); - }); + await expectTelephonyInstructions("gpt-4o-mini-tts", "Speak warmly"); }); }); diff --git a/ui/src/ui/views/chat.ts b/ui/src/ui/views/chat.ts index 36412b965a6..1d0b877d042 100644 --- a/ui/src/ui/views/chat.ts +++ b/ui/src/ui/views/chat.ts @@ -1169,7 +1169,7 @@ export function renderChat(props: ChatProps) { props.showNewMessages ? html`