Merge branch 'main' into vincentkoc-code/telegram-fast-native-callback

This commit is contained in:
Vincent Koc 2026-03-13 16:35:04 -04:00 committed by GitHub
commit 38bfa88a34
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
276 changed files with 13583 additions and 5364 deletions

View File

@ -7,7 +7,7 @@ on:
concurrency:
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
cancel-in-progress: true
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
@ -38,9 +38,8 @@ jobs:
id: check
uses: ./.github/actions/detect-docs-changes
# Detect which heavy areas are touched so PRs can skip unrelated expensive jobs.
# Push to main keeps broad coverage, but this job still needs to run so
# downstream jobs that list it in `needs` are not skipped.
# Detect which heavy areas are touched so CI can skip unrelated expensive jobs.
# Fail-safe: if detection fails, downstream jobs run.
changed-scope:
needs: [docs-scope]
if: needs.docs-scope.outputs.docs_only != 'true'
@ -82,7 +81,7 @@ jobs:
# Build dist once for Node-relevant changes and share it with downstream jobs.
build-artifacts:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -141,7 +140,7 @@ jobs:
checks:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
strategy:
fail-fast: false
@ -149,6 +148,13 @@ jobs:
include:
- runtime: node
task: test
shard_index: 1
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: test
shard_index: 2
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: extensions
@ -179,11 +185,18 @@ jobs:
- name: Configure Node test resources
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
env:
SHARD_COUNT: ${{ matrix.shard_count || '' }}
SHARD_INDEX: ${{ matrix.shard_index || '' }}
run: |
# `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes.
# Default heap limits have been too low on Linux CI (V8 OOM near 4GB).
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV"
if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then
echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV"
fi
- name: Run ${{ matrix.task }} (${{ matrix.runtime }})
if: matrix.runtime != 'bun' || github.event_name != 'push'
@ -193,7 +206,7 @@ jobs:
check:
name: "check"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -239,7 +252,7 @@ jobs:
compat-node22:
name: "compat-node22"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -272,7 +285,7 @@ jobs:
skills-python:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -365,7 +378,7 @@ jobs:
checks-windows:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true'
runs-on: blacksmith-32vcpu-windows-2025
timeout-minutes: 45
env:
@ -727,7 +740,7 @@ jobs:
android:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true')
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
strategy:
fail-fast: false

16
.jscpd.json Normal file
View File

@ -0,0 +1,16 @@
{
"gitignore": true,
"noSymlinks": true,
"ignore": [
"**/node_modules/**",
"**/dist/**",
"dist/**",
"**/.git/**",
"**/coverage/**",
"**/build/**",
"**/.build/**",
"**/.artifacts/**",
"docs/zh-CN/**",
"**/CHANGELOG.md"
]
}

View File

@ -202,6 +202,13 @@
- Vocabulary: "makeup" = "mac app".
- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested.
- Parallels macOS smoke playbook:
- `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`.
- Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed.
- Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`.
- For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green.
- Don't run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially.
- Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading.
- Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`.
- When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`).
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`.

View File

@ -9,9 +9,11 @@ Docs: https://docs.openclaw.ai
- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman.
- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome's own setup guides.
### Fixes
- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
@ -22,6 +24,7 @@ Docs: https://docs.openclaw.ai
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart.
- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179.
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
@ -29,12 +32,14 @@ Docs: https://docs.openclaw.ai
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
- Telegram/native commands: preserve native command routing for `/fast` option-button callbacks even when text commands are disabled, so `/fast status|on|off` no longer falls through into normal model runs. Thanks @vincentkoc.
- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
## 2026.3.12

View File

@ -9,32 +9,32 @@ read_when:
# CI Pipeline
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed.
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed.
## Job Overview
| Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ------------------------------------------------- |
| `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs |
| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
| `release-check` | Validate npm pack contents | After build |
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
| Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ---------------------------------- |
| `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
| `check` | TypeScript types, lint, format | Non-docs, node changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
| `release-check` | Validate npm pack contents | After build |
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
## Fail-Fast Order
Jobs are ordered so cheap checks fail before expensive ones run:
1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min)
2. `build-artifacts` (blocked on above)
3. `checks`, `checks-windows`, `macos`, `android` (blocked on build)
1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
2. `build-artifacts` + `release-check`
3. `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.

View File

@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first.
Under the hood, requests are executed as a normal Gateway agent run (same codepath as
`openclaw agent`), so routing/permissions/config match your Gateway.
## Authentication
## Authentication, security, and routing
Uses the Gateway auth configuration. Send a bearer token:
Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api):
- `Authorization: Bearer <token>`
- use `Authorization: Bearer <token>` with the normal Gateway auth config
- treat the endpoint as full operator access for the gateway instance
- select agents with `model: "openclaw:<agentId>"`, `model: "agent:<agentId>"`, or `x-openclaw-agent-id`
- use `x-openclaw-session-key` for explicit session routing
Notes:
- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`).
- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`).
- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`.
## Security boundary (important)
Treat this endpoint as a **full operator-access** surface for the gateway instance.
- HTTP bearer auth here is not a narrow per-user scope model.
- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential.
- Requests run through the same control-plane agent path as trusted operator actions.
- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway.
- If the target agent policy allows sensitive tools, this endpoint can use them.
- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet.
See [Security](/gateway/security) and [Remote access](/gateway/remote).
## Choosing an agent
No custom headers required: encode the agent id in the OpenResponses `model` field:
- `model: "openclaw:<agentId>"` (example: `"openclaw:main"`, `"openclaw:beta"`)
- `model: "agent:<agentId>"` (alias)
Or target a specific OpenClaw agent by header:
- `x-openclaw-agent-id: <agentId>` (default: `main`)
Advanced:
- `x-openclaw-session-key: <sessionKey>` to fully control session routing.
## Enabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `true`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: true },
},
},
},
}
```
## Disabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `false`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: false },
},
},
},
}
```
Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`.
## Session behavior

View File

@ -0,0 +1,138 @@
---
summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts"
read_when:
- You are deploying OpenClaw on a cloud VM with Docker
- You need the shared binary bake, persistence, and update flow
title: "Docker VM Runtime"
---
# Docker VM Runtime
Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers.
## Bake required binaries into the image
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
## Build and launch
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory.
Use a larger machine class before retrying.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
Verify Gateway:
```bash
docker compose logs -f openclaw-gateway
```
Expected output:
```
[gateway] listening on ws://0.0.0.0:18789
```
## What persists where
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
## Updates
To update OpenClaw on the VM:
```bash
git pull
docker compose build
docker compose up -d
```

View File

@ -281,77 +281,20 @@ services:
---
## 10) Bake required binaries into the image (critical)
## 10) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
Use the shared runtime guide for the common Docker host flow:
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
- [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
---
## 11) Build and launch
## 11) GCP-specific launch notes
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing:
@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins
If you changed the gateway port, replace `18789` with your configured port.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 12) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
---
## 13) Access from your laptop
## 12) Access from your laptop
Create an SSH tunnel to forward the Gateway port:
@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list
docker compose run --rm openclaw-cli devices approve <requestId>
```
---
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
## Updates
To update OpenClaw on the VM:
```bash
cd ~/openclaw
git pull
docker compose build
docker compose up -d
```
Need the shared persistence and update reference again?
See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates).
---

View File

@ -202,107 +202,20 @@ services:
---
## 7) Bake required binaries into the image (critical)
## 7) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
Use the shared runtime guide for the common Docker host flow:
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
- [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
---
## 8) Build and launch
## 8) Hetzner-specific access
```bash
docker compose build
docker compose up -d openclaw-gateway
```
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 9) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
From your laptop:
After the shared build and launch steps, tunnel from your laptop:
```bash
ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP
@ -316,25 +229,7 @@ Paste your gateway token.
---
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where).
## Infrastructure as Code (Terraform)

View File

@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set:
}
```
`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin
normalizes the provider-supplied `From` value and compares it to `allowFrom`.
Webhook verification authenticates provider delivery and payload integrity, but
it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as
caller-ID filtering, not strong caller identity.
Auto-responses use the agent system. Tune with:
- `responseModel`

View File

@ -167,93 +167,8 @@ openclaw onboard --non-interactive \
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
</Note>
<AccordionGroup>
<Accordion title="Gemini example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice gemini-api-key \
--gemini-api-key "$GEMINI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Z.AI example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice zai-api-key \
--zai-api-key "$ZAI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Vercel AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ai-gateway-api-key \
--ai-gateway-api-key "$AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Cloudflare AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice cloudflare-ai-gateway-api-key \
--cloudflare-ai-gateway-account-id "your-account-id" \
--cloudflare-ai-gateway-gateway-id "your-gateway-id" \
--cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Moonshot example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice moonshot-api-key \
--moonshot-api-key "$MOONSHOT_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Synthetic example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice synthetic-api-key \
--synthetic-api-key "$SYNTHETIC_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="OpenCode example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice opencode-zen \
--opencode-zen-api-key "$OPENCODE_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog.
</Accordion>
<Accordion title="Ollama example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ollama \
--custom-model-id "qwen3.5:27b" \
--accept-risk \
--gateway-port 18789 \
--gateway-bind loopback
```
Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance.
</Accordion>
</AccordionGroup>
Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples).
Use this reference page for flag semantics and step ordering.
### Add agent (non-interactive)

View File

@ -48,6 +48,8 @@ Gateway.
- `openclaw`: managed, isolated browser (no extension required).
- `chrome`: extension relay to your **system browser** (requires the OpenClaw
extension to be attached to a tab).
- `existing-session`: official Chrome MCP attach flow for a running Chrome
profile.
Set `browser.defaultProfile: "openclaw"` if you want managed mode by default.
@ -77,6 +79,12 @@ Browser settings live in `~/.openclaw/openclaw.json`.
profiles: {
openclaw: { cdpPort: 18800, color: "#FF4500" },
work: { cdpPort: 18801, color: "#0066CC" },
chromeLive: {
cdpPort: 18802,
driver: "existing-session",
attachOnly: true,
color: "#00AA00",
},
remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" },
},
},
@ -100,6 +108,8 @@ Notes:
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay.
- Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary.
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP.
- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do
not set `cdpUrl` for that driver.
## Use Brave (or another Chromium-based browser)
@ -264,11 +274,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be:
- **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port
- **remote**: an explicit CDP URL (Chromium-based browser running elsewhere)
- **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension
- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect
Defaults:
- The `openclaw` profile is auto-created if missing.
- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
- Existing-session profiles are opt-in; create them with `--driver existing-session`.
- Local CDP ports allocate from **18800–18899** by default.
- Deleting a profile moves its local data directory to Trash.
@ -328,6 +340,66 @@ Notes:
- This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions).
- Detach by clicking the extension icon again.
## Chrome existing-session via MCP
OpenClaw can also attach to a running Chrome profile through the official
Chrome DevTools MCP server. This reuses the tabs and login state already open in
that Chrome profile.
Official background and setup references:
- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
Create a profile:
```bash
openclaw browser create-profile \
--name chrome-live \
--driver existing-session \
--color "#00AA00"
```
Then in Chrome:
1. Open `chrome://inspect/#remote-debugging`
2. Enable remote debugging
3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
Live attach smoke test:
```bash
openclaw browser --browser-profile chrome-live start
openclaw browser --browser-profile chrome-live status
openclaw browser --browser-profile chrome-live tabs
openclaw browser --browser-profile chrome-live snapshot --format ai
```
What success looks like:
- `status` shows `driver: existing-session`
- `status` shows `running: true`
- `tabs` lists your already-open Chrome tabs
- `snapshot` returns refs from the selected live tab
What to check if attach does not work:
- Chrome is version `144+`
- remote debugging is enabled at `chrome://inspect/#remote-debugging`
- Chrome displayed the attach consent prompt and you accepted it
- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect`
Notes:
- This path is higher-risk than the isolated `openclaw` profile because it can
act inside your signed-in browser session.
- OpenClaw does not launch Chrome for this driver; it attaches to an existing
session only.
- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not
the legacy default-profile remote debugging port workflow.
- Some features still require the extension relay or managed browser path, such
as PDF export and download interception.
- Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated.
WSL2 / cross-namespace example:

View File

@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs
Attach/detach happens via a **single Chrome toolbar button**.
If you want Chrome's official DevTools MCP attach flow instead of the OpenClaw
extension relay, use an `existing-session` browser profile. See
[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome's own
setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your
browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp).
## What it is (concept)
There are three parts:

View File

@ -54,6 +54,49 @@ describe("acpx ensure", () => {
}
});
// Queues the three spawn results of a successful install flow, in order:
// stale version probe -> npm install -> pinned-version re-probe.
function mockEnsureInstallFlow() {
  const flowResults = [
    { stdout: "acpx 0.0.9\n", stderr: "", code: 0, error: null },
    { stdout: "added 1 package\n", stderr: "", code: 0, error: null },
    { stdout: `acpx ${ACPX_PINNED_VERSION}\n`, stderr: "", code: 0, error: null },
  ];
  for (const result of flowResults) {
    spawnAndCollectMock.mockResolvedValueOnce(result);
  }
}
// Asserts the three spawn invocations made by the ensure-install flow:
// version probe, pinned npm install, then verification probe. The optional
// flag additionally checks stripProviderAuthEnvVars was threaded through.
function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) {
  const versionProbe = {
    command: "/plugin/node_modules/.bin/acpx",
    args: ["--version"],
    cwd: "/plugin",
    stripProviderAuthEnvVars,
  };
  const npmInstall = {
    command: "npm",
    args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
    cwd: "/plugin",
    stripProviderAuthEnvVars,
  };
  [versionProbe, npmInstall, versionProbe].forEach((shape, index) => {
    expect(spawnAndCollectMock.mock.calls[index]?.[0]).toMatchObject(shape);
  });
}
it("accepts the pinned acpx version", async () => {
spawnAndCollectMock.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
@ -177,25 +220,7 @@ describe("acpx ensure", () => {
});
it("installs and verifies pinned acpx when precheck fails", async () => {
spawnAndCollectMock
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
mockEnsureInstallFlow();
await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx",
@ -204,33 +229,11 @@ describe("acpx ensure", () => {
});
expect(spawnAndCollectMock).toHaveBeenCalledTimes(3);
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
});
expectEnsureInstallCalls();
});
it("threads stripProviderAuthEnvVars through version probes and install", async () => {
spawnAndCollectMock
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
mockEnsureInstallFlow();
await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx",
@ -239,24 +242,7 @@ describe("acpx ensure", () => {
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expectEnsureInstallCalls(true);
});
it("fails with actionable error when npm install fails", async () => {

View File

@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => {
return res as unknown as ServerResponse & { headers: Record<string, string>; body: string };
}
// Builds a signed interaction context for the given action/channel pair,
// returning both the context and its matching token.
function createActionContext(actionId = "approve", channelId = "chan-1") {
  const context = { action_id: actionId, __openclaw_channel_id: channelId };
  const token = generateInteractionToken(context, "acct");
  return { context, token };
}
// Assembles a Mattermost interaction callback payload with sensible
// defaults; user_name is included only when explicitly provided.
function createInteractionBody(params: {
  context: Record<string, unknown>;
  token: string;
  channelId?: string;
  postId?: string;
  userId?: string;
  userName?: string;
}) {
  const { context, token, channelId, postId, userId, userName } = params;
  return {
    user_id: userId ?? "user-1",
    ...(userName ? { user_name: userName } : {}),
    channel_id: channelId ?? "chan-1",
    post_id: postId ?? "post-1",
    context: { ...context, _token: token },
  };
}
// Drives the interaction handler with a synthetic request built from
// `params` and returns the captured response for assertions.
async function runHandler(
  handler: ReturnType<typeof createMattermostInteractionHandler>,
  params: {
    body: unknown;
    remoteAddress?: string;
    headers?: Record<string, string>;
  },
) {
  const { body, remoteAddress, headers } = params;
  const response = createRes();
  await handler(createReq({ remoteAddress, headers, body }), response);
  return response;
}
// Asserts the handler rejected the request with HTTP 403 and that the
// response body carries the expected rejection reason.
function expectForbiddenResponse(
  res: ServerResponse & { body: string },
  expectedMessage: string,
) {
  const { statusCode, body } = res;
  expect(statusCode).toBe(403);
  expect(body).toContain(expectedMessage);
}
// Asserts a successful approval response (HTTP 200, empty JSON body) and,
// when a request log is supplied, that the handler first fetched the post
// and then updated it via PUT.
function expectSuccessfulApprovalUpdate(
  res: ServerResponse & { body: string },
  requestLog?: Array<{ path: string; method?: string }>,
) {
  expect(res.statusCode).toBe(200);
  expect(res.body).toBe("{}");
  if (!requestLog) {
    return;
  }
  expect(requestLog).toEqual([
    { path: "/posts/post-1", method: undefined },
    { path: "/posts/post-1", method: "PUT" },
  ]);
}
// Builds a minimal Mattermost post carrying one interactive action
// attachment; root_id is present only when a thread root is supplied.
function createActionPost(params?: {
  actionId?: string;
  actionName?: string;
  channelId?: string;
  rootId?: string;
}): MattermostPost {
  const action = {
    id: params?.actionId ?? "approve",
    name: params?.actionName ?? "Approve",
  };
  return {
    id: "post-1",
    channel_id: params?.channelId ?? "chan-1",
    ...(params?.rootId ? { root_id: params.rootId } : {}),
    message: "Choose",
    props: { attachments: [{ actions: [action] }] },
  };
}
// Returns a handler wired to a stub client whose request result is never
// consumed — for tests expected to reject before touching the client.
function createUnusedInteractionHandler() {
  const stubClient = {
    request: async () => ({ message: "unused" }),
  } as unknown as MattermostClient;
  return createMattermostInteractionHandler({
    client: stubClient,
    botUserId: "bot",
    accountId: "acct",
  });
}
async function runApproveInteraction(params?: {
actionName?: string;
allowedSourceIps?: string[];
@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => {
remoteAddress?: string;
headers?: Record<string, string>;
}) {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const requestLog: Array<{ path: string; method?: string }> = [];
const handler = createMattermostInteractionHandler({
client: {
@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => {
if (init?.method === "PUT") {
return { id: "post-1" };
}
return {
channel_id: "chan-1",
message: "Choose",
props: {
attachments: [
{ actions: [{ id: "approve", name: params?.actionName ?? "Approve" }] },
],
},
};
return createActionPost({ actionName: params?.actionName });
},
} as unknown as MattermostClient,
botUserId: "bot",
@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => {
trustedProxies: params?.trustedProxies,
});
const req = createReq({
const res = await runHandler(handler, {
remoteAddress: params?.remoteAddress,
headers: params?.headers,
body: {
user_id: "user-1",
user_name: "alice",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
body: createInteractionBody({ context, token, userName: "alice" }),
});
const res = createRes();
await handler(req, res);
return { res, requestLog };
}
async function runInvalidActionRequest(actionId: string) {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({
channel_id: "chan-1",
message: "Choose",
props: {
attachments: [{ actions: [{ id: actionId, name: actionId }] }],
},
}),
request: async () => createActionPost({ actionId, actionName: actionId }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
});
const req = createReq({
body: {
user_id: "user-1",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
return await runHandler(handler, {
body: createInteractionBody({ context, token }),
});
const res = createRes();
await handler(req, res);
return res;
}
it("accepts callback requests from an allowlisted source IP", async () => {
@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => {
remoteAddress: "198.51.100.8",
});
expect(res.statusCode).toBe(200);
expect(res.body).toBe("{}");
expect(requestLog).toEqual([
{ path: "/posts/post-1", method: undefined },
{ path: "/posts/post-1", method: "PUT" },
]);
expectSuccessfulApprovalUpdate(res, requestLog);
});
it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => {
@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => {
});
it("rejects callback requests from non-allowlisted source IPs", async () => {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const handler = createMattermostInteractionHandler({
client: {
request: async () => {
@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => {
allowedSourceIps: ["127.0.0.1"],
});
const req = createReq({
const res = await runHandler(handler, {
remoteAddress: "198.51.100.8",
body: {
user_id: "user-1",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
body: createInteractionBody({ context, token }),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Forbidden origin");
expectForbiddenResponse(res, "Forbidden origin");
});
it("rejects requests with an invalid interaction token", async () => {
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({ message: "unused" }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
});
const handler = createUnusedInteractionHandler();
const req = createReq({
const res = await runHandler(handler, {
body: {
user_id: "user-1",
channel_id: "chan-1",
@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => {
context: { action_id: "approve", _token: "deadbeef" },
},
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Invalid token");
expectForbiddenResponse(res, "Invalid token");
});
it("rejects requests when the signed channel does not match the callback payload", async () => {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({ message: "unused" }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
const { context, token } = createActionContext();
const handler = createUnusedInteractionHandler();
const res = await runHandler(handler, {
body: createInteractionBody({ context, token, channelId: "chan-2" }),
});
const req = createReq({
body: {
user_id: "user-1",
channel_id: "chan-2",
post_id: "post-1",
context: { ...context, _token: token },
},
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Channel mismatch");
expectForbiddenResponse(res, "Channel mismatch");
});
it("rejects requests when the fetched post does not belong to the callback channel", async () => {
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const handler = createMattermostInteractionHandler({
client: {
request: async () => ({
channel_id: "chan-9",
message: "Choose",
props: {
attachments: [{ actions: [{ id: "approve", name: "Approve" }] }],
},
}),
request: async () => createActionPost({ channelId: "chan-9" }),
} as unknown as MattermostClient,
botUserId: "bot",
accountId: "acct",
});
const req = createReq({
body: {
user_id: "user-1",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
const res = await runHandler(handler, {
body: createInteractionBody({ context, token }),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(403);
expect(res.body).toContain("Post/channel mismatch");
expectForbiddenResponse(res, "Post/channel mismatch");
});
it("rejects requests when the action is not present on the fetched post", async () => {
@ -730,12 +735,7 @@ describe("createMattermostInteractionHandler", () => {
actionName: "approve",
});
expect(res.statusCode).toBe(200);
expect(res.body).toBe("{}");
expect(requestLog).toEqual([
{ path: "/posts/post-1", method: undefined },
{ path: "/posts/post-1", method: "PUT" },
]);
expectSuccessfulApprovalUpdate(res, requestLog);
});
it("forwards fetched post threading metadata to session and button callbacks", async () => {
@ -745,19 +745,10 @@ describe("createMattermostInteractionHandler", () => {
enqueueSystemEvent,
},
} as unknown as Parameters<typeof setMattermostRuntime>[0]);
const context = { action_id: "approve", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext();
const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9");
const dispatchButtonClick = vi.fn();
const fetchedPost: MattermostPost = {
id: "post-1",
channel_id: "chan-1",
root_id: "root-9",
message: "Choose",
props: {
attachments: [{ actions: [{ id: "approve", name: "Approve" }] }],
},
};
const fetchedPost = createActionPost({ rootId: "root-9" });
const handler = createMattermostInteractionHandler({
client: {
request: async (_path: string, init?: { method?: string }) =>
@ -769,19 +760,9 @@ describe("createMattermostInteractionHandler", () => {
dispatchButtonClick,
});
const req = createReq({
body: {
user_id: "user-1",
user_name: "alice",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
const res = await runHandler(handler, {
body: createInteractionBody({ context, token, userName: "alice" }),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(200);
expect(resolveSessionKey).toHaveBeenCalledWith({
channelId: "chan-1",
@ -803,8 +784,7 @@ describe("createMattermostInteractionHandler", () => {
});
it("lets a custom interaction handler short-circuit generic completion updates", async () => {
const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" };
const token = generateInteractionToken(context, "acct");
const { context, token } = createActionContext("mdlprov");
const requestLog: Array<{ path: string; method?: string }> = [];
const handleInteraction = vi.fn().mockResolvedValue({
ephemeral_text: "Only the original requester can use this picker.",
@ -814,14 +794,10 @@ describe("createMattermostInteractionHandler", () => {
client: {
request: async (path: string, init?: { method?: string }) => {
requestLog.push({ path, method: init?.method });
return {
id: "post-1",
channel_id: "chan-1",
message: "Choose",
props: {
attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }],
},
};
return createActionPost({
actionId: "mdlprov",
actionName: "Browse providers",
});
},
} as unknown as MattermostClient,
botUserId: "bot",
@ -830,18 +806,14 @@ describe("createMattermostInteractionHandler", () => {
dispatchButtonClick,
});
const req = createReq({
body: {
user_id: "user-2",
user_name: "alice",
channel_id: "chan-1",
post_id: "post-1",
context: { ...context, _token: token },
},
const res = await runHandler(handler, {
body: createInteractionBody({
context,
token,
userId: "user-2",
userName: "alice",
}),
});
const res = createRes();
await handler(req, res);
expect(res.statusCode).toBe(200);
expect(res.body).toBe(

View File

@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night
| `prompt` | `command` | What is commanded of the djinn |
| `model` | `spirit` | Which spirit answers |
### Unchanged
### Shared appendix
These keywords already work or are too functional to replace sensibly:
Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern.
- `**...**` discretion markers — already work
- `until`, `while` — already work
- `map`, `filter`, `reduce`, `pmap` — pipeline operators
- `max` — constraint modifier
- `as` — aliasing
- Model names: `sonnet`, `opus`, `haiku` — already poetic
Recommended Arabian Nights rewrite targets:
---
## Side-by-Side Comparison
### Simple Program
```prose
# Functional
use "@alice/research" as research
input topic: "What to investigate"
agent helper:
model: sonnet
let findings = session: helper
prompt: "Research {topic}"
output summary = session "Summarize"
context: findings
```
```prose
# Nights
conjure "@alice/research" as research
wish topic: "What to investigate"
djinn helper:
spirit: sonnet
name findings = tale: helper
command: "Research {topic}"
gift summary = tale "Summarize"
scroll: findings
```
### Parallel Execution
```prose
# Functional
parallel:
security = session "Check security"
perf = session "Check performance"
style = session "Check style"
session "Synthesize review"
context: { security, perf, style }
```
```prose
# Nights
bazaar:
security = tale "Check security"
perf = tale "Check performance"
style = tale "Check style"
tale "Synthesize review"
scroll: { security, perf, style }
```
### Loop with Condition
```prose
# Functional
loop until **the code is bug-free** (max: 5):
session "Find and fix bugs"
```
```prose
# Nights
telling until **the code is bug-free** (max: 5):
tale "Find and fix bugs"
```
### Error Handling
```prose
# Functional
try:
session "Risky operation"
catch as err:
session "Handle error"
context: err
finally:
session "Cleanup"
```
```prose
# Nights
venture:
tale "Risky operation"
should misfortune strike as err:
tale "Handle error"
scroll: err
and so it was:
tale "Cleanup"
```
### Choice Block
```prose
# Functional
choice **the severity level**:
option "Critical":
session "Escalate immediately"
option "Minor":
session "Log for later"
```
```prose
# Nights
crossroads **the severity level**:
path "Critical":
tale "Escalate immediately"
path "Minor":
tale "Log for later"
```
### Conditionals
```prose
# Functional
if **has security issues**:
session "Fix security"
elif **has performance issues**:
session "Optimize"
else:
session "Approve"
```
- `session` sample -> `tale`
- `parallel` sample -> `bazaar`
- `loop` sample -> `telling`
- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was`
- `choice` sample -> `crossroads` / `path`
```prose
# Nights

View File

@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il
| `prompt` | `charge` | The quest given |
| `model` | `muse` | Which muse inspires |
### Unchanged
### Shared appendix
These keywords already work or are too functional to replace sensibly:
Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern.
- `**...**` discretion markers — already work
- `until`, `while` — already work
- `map`, `filter`, `reduce`, `pmap` — pipeline operators
- `max` — constraint modifier
- `as` — aliasing
- Model names: `sonnet`, `opus`, `haiku` — already poetic
Recommended Homeric rewrite targets:
---
## Side-by-Side Comparison
### Simple Program
```prose
# Functional
use "@alice/research" as research
input topic: "What to investigate"
agent helper:
model: sonnet
let findings = session: helper
prompt: "Research {topic}"
output summary = session "Summarize"
context: findings
```
```prose
# Homeric
invoke "@alice/research" as research
omen topic: "What to investigate"
hero helper:
muse: sonnet
decree findings = trial: helper
charge: "Research {topic}"
glory summary = trial "Summarize"
tidings: findings
```
### Parallel Execution
```prose
# Functional
parallel:
security = session "Check security"
perf = session "Check performance"
style = session "Check style"
session "Synthesize review"
context: { security, perf, style }
```
```prose
# Homeric
host:
security = trial "Check security"
perf = trial "Check performance"
style = trial "Check style"
trial "Synthesize review"
tidings: { security, perf, style }
```
### Loop with Condition
```prose
# Functional
loop until **the code is bug-free** (max: 5):
session "Find and fix bugs"
```
```prose
# Homeric
ordeal until **the code is bug-free** (max: 5):
trial "Find and fix bugs"
```
### Error Handling
```prose
# Functional
try:
session "Risky operation"
catch as err:
session "Handle error"
context: err
finally:
session "Cleanup"
```
```prose
# Homeric
venture:
trial "Risky operation"
should ruin come as err:
trial "Handle error"
tidings: err
in the end:
trial "Cleanup"
```
### Choice Block
```prose
# Functional
choice **the severity level**:
option "Critical":
session "Escalate immediately"
option "Minor":
session "Log for later"
```
```prose
# Homeric
crossroads **the severity level**:
path "Critical":
trial "Escalate immediately"
path "Minor":
trial "Log for later"
```
### Conditionals
```prose
# Functional
if **has security issues**:
session "Fix security"
elif **has performance issues**:
session "Optimize"
else:
session "Approve"
```
- `session` sample -> `trial`
- `parallel` sample -> `host`
- `loop` sample -> `ordeal`
- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end`
- `choice` sample -> `crossroads` / `path`
```prose
# Homeric

View File

@ -0,0 +1,35 @@
---
role: reference
summary: Shared appendix for experimental OpenProse alternate registers.
status: draft
requires: prose.md
---
# OpenProse Alternate Register Appendix
Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`.
## Unchanged keywords
These keywords already work or are too functional to replace sensibly:
- `**...**` discretion markers
- `until`, `while`
- `map`, `filter`, `reduce`, `pmap`
- `max`
- `as`
- model names such as `sonnet`, `opus`, and `haiku`
## Comparison pattern
Use the translation map in each register file to rewrite the same functional sample programs:
- simple program
- parallel execution
- loop with condition
- error handling
- choice block
- conditionals
The goal is consistency, not one canonical wording.
Keep the functional version intact and rewrite only the register-specific aliases.

View File

@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro
## Responsibility Separation
This section defines **who does what**. This is the contract between the VM and subagents.
The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation).
### VM Responsibilities
SQLite-specific differences:
The VM (the orchestrating agent running the .prose program) is responsible for:
- the VM creates `state.db` instead of an `openprose` schema
- subagent confirmation messages point at a local database path, for example `.prose/runs/<runId>/state.db`
- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects
| Responsibility | Description |
| ------------------------- | -------------------------------------------------------------------------------------------------------- |
| **Database creation** | Create `state.db` and initialize core tables at run start |
| **Program registration** | Store the program source and metadata |
| **Execution tracking** | Update position, status, and timing as statements execute |
| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions |
| **Parallel coordination** | Track branch status, implement join strategies |
| **Loop management** | Track iteration counts, evaluate conditions |
| **Error aggregation** | Record failures, manage retry state |
| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed |
| **Completion detection** | Mark the run as complete when finished |
Example return values:
**Critical:** The VM must preserve enough context in its own conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory.
### Subagent Responsibilities
Subagents (sessions spawned by the VM) are responsible for:
| Responsibility | Description |
| ----------------------- | ----------------------------------------------------------------- |
| **Writing own outputs** | Insert/update their binding in the `bindings` table |
| **Memory management** | For persistent agents: read and update their memory record |
| **Segment recording** | For persistent agents: append segment history |
| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB |
| **Atomic writes** | Use transactions when updating multiple related records |
**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates.
**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes.
**What subagents return to the VM:** A confirmation message with the binding location—not the full content:
**Root scope:**
```
```text
Binding written: research
Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL)
Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations.
```
**Inside block invocation:**
```
```text
Binding written: result
Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43)
Execution ID: 43
Summary: Processed chunk into 3 sub-parts for recursive processing.
```
The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values.
### Shared Concerns
| Concern | Who Handles |
| ---------------- | ------------------------------------------------------------------ |
| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) |
| Custom tables | Either (prefix with `x_` for extensions) |
| Indexing | Either (add indexes for frequently-queried columns) |
| Cleanup | VM (at run end, optionally vacuum) |
The VM still tracks locations, not full values.
---

View File

@ -89,56 +89,18 @@ Notes:
- Twilio/Telnyx/Plivo require a **publicly reachable** webhook URL.
- `mock` is a local dev provider (no network calls).
- Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true.
- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only.
Streaming security defaults:
- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame.
- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets.
- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP.
- `streaming.maxConnections` caps total open media stream sockets (pending + active).
- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call`
## Stale call reaper
Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook
(for example, notify-mode calls that never complete). The default is `0`
(disabled).
Recommended ranges:
- **Production:** `120`–`300` seconds for notify-style flows.
- Keep this value **higher than `maxDurationSeconds`** so normal calls can
  finish. A good starting point is `maxDurationSeconds` plus 30–60 seconds.
Example:
```json5
{
staleCallReaperSeconds: 360,
}
```
See the plugin docs for recommended ranges and production examples:
`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper`
## TTS for calls
Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for
streaming speech on calls. You can override it under the plugin config with the
same shape — overrides deep-merge with `messages.tts`.
```json5
{
tts: {
provider: "openai",
openai: {
voice: "alloy",
},
},
}
```
Notes:
- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable).
- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices.
streaming speech on calls. Override examples and provider caveats live here:
`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls`
## CLI

View File

@ -187,6 +187,31 @@ function installRuntime(params: {
};
}
// Installs the runtime with an authorizer-driven command-auth resolver:
// a command is authorized only when access groups are in use and at least
// one configured authorizer entry allows the sender.
function installGroupCommandAuthRuntime() {
  return installRuntime({
    resolveCommandAuthorizedFromAuthorizers: (params) =>
      params.useAccessGroups &&
      params.authorizers.some((candidate) => candidate.configured && candidate.allowed),
  });
}
/**
 * Runs a mention-flagged group control command through the monitor's
 * processMessage pipeline for the given account.
 * Both content fields default to "/new" when not supplied.
 */
async function processGroupControlCommand(params: {
  account: ResolvedZalouserAccount;
  content?: string;
  commandContent?: string;
}) {
  const { account, content = "/new", commandContent = "/new" } = params;
  const message = createGroupMessage({
    content,
    commandContent,
    hasAnyMention: true,
    wasExplicitlyMentioned: true,
  });
  await __testing.processMessage({
    message,
    account,
    config: createConfig(),
    runtime: createRuntimeEnv(),
  });
}
function createGroupMessage(overrides: Partial<ZaloInboundMessage> = {}): ZaloInboundMessage {
return {
threadId: "g-1",
@ -229,57 +254,152 @@ describe("zalouser monitor group mention gating", () => {
sendSeenZalouserMock.mockClear();
});
it("skips unmentioned group messages when requireMention=true", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
async function processMessageWithDefaults(params: {
message: ZaloInboundMessage;
account?: ResolvedZalouserAccount;
historyState?: {
historyLimit: number;
groupHistories: Map<
string,
Array<{ sender: string; body: string; timestamp?: number; messageId?: string }>
>;
};
}) {
await __testing.processMessage({
message: createGroupMessage(),
account: createAccount(),
message: params.message,
account: params.account ?? createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
historyState: params.historyState,
});
}
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
expect(sendTypingZalouserMock).not.toHaveBeenCalled();
});
it("fails closed when requireMention=true but mention detection is unavailable", async () => {
async function expectSkippedGroupMessage(message?: Partial<ZaloInboundMessage>) {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
await __testing.processMessage({
message: createGroupMessage({
canResolveExplicitMention: false,
hasAnyMention: false,
wasExplicitlyMentioned: false,
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
await processMessageWithDefaults({
message: createGroupMessage(message),
});
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
expect(sendTypingZalouserMock).not.toHaveBeenCalled();
});
}
it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => {
/**
 * Drives a group control command under the supplied account config, then
 * asserts exactly one dispatch happened and that the command-auth resolver
 * was handed the expected authorizer entries.
 */
async function expectGroupCommandAuthorizers(params: {
  accountConfig: ResolvedZalouserAccount["config"];
  expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>;
}) {
  const runtime = installGroupCommandAuthRuntime();
  await processGroupControlCommand({
    account: {
      ...createAccount(),
      config: params.accountConfig,
    },
  });
  expect(runtime.dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
  const firstAuthCall = runtime.resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
  expect(firstAuthCall?.authorizers).toEqual(params.expectedAuthorizers);
}
/**
 * Processes a DM through the pipeline with dmPolicy forced to "open",
 * optionally overriding the session-age reader mock first.
 * Returns the installed runtime so callers can inspect its mocks.
 */
async function processOpenDmMessage(params?: {
  message?: Partial<ZaloInboundMessage>;
  readSessionUpdatedAt?: (input?: {
    storePath: string;
    sessionKey: string;
  }) => number | undefined;
}) {
  const installed = installRuntime({ commandAuthorized: false });
  const sessionReader = params?.readSessionUpdatedAt;
  if (sessionReader) {
    installed.readSessionUpdatedAt.mockImplementation(sessionReader);
  }
  const baseAccount = createAccount();
  await processMessageWithDefaults({
    message: createDmMessage(params?.message),
    account: {
      ...baseAccount,
      config: {
        ...baseAccount.config,
        dmPolicy: "open",
      },
    },
  });
  return installed;
}
async function expectDangerousNameMatching(params: {
dangerouslyAllowNameMatching?: boolean;
expectedDispatches: number;
}) {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
await __testing.processMessage({
await processMessageWithDefaults({
message: createGroupMessage({
threadId: "g-attacker-001",
groupName: "Trusted Team",
senderId: "666",
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
account: {
...createAccount(),
config: {
...createAccount().config,
...(params.dangerouslyAllowNameMatching ? { dangerouslyAllowNameMatching: true } : {}),
groupPolicy: "allowlist",
groupAllowFrom: ["*"],
groups: {
"group:g-trusted-001": { allow: true },
"Trusted Team": { allow: true },
},
},
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(
params.expectedDispatches,
);
return dispatchReplyWithBufferedBlockDispatcher;
}
async function dispatchGroupMessage(params: {
commandAuthorized: boolean;
message: Partial<ZaloInboundMessage>;
}) {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: params.commandAuthorized,
});
await processMessageWithDefaults({
message: createGroupMessage(params.message),
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
}
it("skips unmentioned group messages when requireMention=true", async () => {
await expectSkippedGroupMessage();
});
it("fails closed when requireMention=true but mention detection is unavailable", async () => {
await expectSkippedGroupMessage({
canResolveExplicitMention: false,
hasAnyMention: false,
wasExplicitlyMentioned: false,
});
});
it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => {
const callArg = await dispatchGroupMessage({
commandAuthorized: false,
message: {
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
},
});
expect(callArg?.ctx?.WasMentioned).toBe(true);
expect(callArg?.ctx?.To).toBe("zalouser:group:g-1");
expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1");
@ -290,22 +410,14 @@ describe("zalouser monitor group mention gating", () => {
});
it("allows authorized control commands to bypass mention gating", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
const callArg = await dispatchGroupMessage({
commandAuthorized: true,
});
await __testing.processMessage({
message: createGroupMessage({
message: {
content: "/status",
hasAnyMention: false,
wasExplicitlyMentioned: false,
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
expect(callArg?.ctx?.WasMentioned).toBe(true);
});
@ -346,57 +458,30 @@ describe("zalouser monitor group mention gating", () => {
});
it("uses commandContent for mention-prefixed control commands", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
const callArg = await dispatchGroupMessage({
commandAuthorized: true,
});
await __testing.processMessage({
message: createGroupMessage({
message: {
content: "@Bot /new",
commandContent: "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: createAccount(),
config: createConfig(),
runtime: createRuntimeEnv(),
},
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
expect(callArg?.ctx?.CommandBody).toBe("/new");
expect(callArg?.ctx?.BodyForCommands).toBe("/new");
});
it("allows group control commands when only allowFrom is configured", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
installRuntime({
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
});
await __testing.processMessage({
message: createGroupMessage({
content: "/new",
commandContent: "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: {
...createAccount(),
config: {
...createAccount().config,
allowFrom: ["123"],
},
await expectGroupCommandAuthorizers({
accountConfig: {
...createAccount().config,
allowFrom: ["123"],
},
config: createConfig(),
runtime: createRuntimeEnv(),
expectedAuthorizers: [
{ configured: true, allowed: true },
{ configured: true, allowed: true },
],
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
expect(authCall?.authorizers).toEqual([
{ configured: true, allowed: true },
{ configured: true, allowed: true },
]);
});
it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => {
@ -425,123 +510,35 @@ describe("zalouser monitor group mention gating", () => {
});
it("does not accept a different group id by matching only the mutable group name by default", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
});
await __testing.processMessage({
message: createGroupMessage({
threadId: "g-attacker-001",
groupName: "Trusted Team",
senderId: "666",
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
}),
account: {
...createAccount(),
config: {
...createAccount().config,
groupPolicy: "allowlist",
groupAllowFrom: ["*"],
groups: {
"group:g-trusted-001": { allow: true },
"Trusted Team": { allow: true },
},
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
await expectDangerousNameMatching({ expectedDispatches: 0 });
});
it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => {
const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({
commandAuthorized: false,
const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({
dangerouslyAllowNameMatching: true,
expectedDispatches: 1,
});
await __testing.processMessage({
message: createGroupMessage({
threadId: "g-attacker-001",
groupName: "Trusted Team",
senderId: "666",
hasAnyMention: true,
wasExplicitlyMentioned: true,
content: "ping @bot",
}),
account: {
...createAccount(),
config: {
...createAccount().config,
dangerouslyAllowNameMatching: true,
groupPolicy: "allowlist",
groupAllowFrom: ["*"],
groups: {
"group:g-trusted-001": { allow: true },
"Trusted Team": { allow: true },
},
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];
expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001");
});
it("allows group control commands when sender is in groupAllowFrom", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } =
installRuntime({
resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) =>
useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed),
});
await __testing.processMessage({
message: createGroupMessage({
content: "/new",
commandContent: "/new",
hasAnyMention: true,
wasExplicitlyMentioned: true,
}),
account: {
...createAccount(),
config: {
...createAccount().config,
allowFrom: ["999"],
groupAllowFrom: ["123"],
},
await expectGroupCommandAuthorizers({
accountConfig: {
...createAccount().config,
allowFrom: ["999"],
groupAllowFrom: ["123"],
},
config: createConfig(),
runtime: createRuntimeEnv(),
expectedAuthorizers: [
{ configured: true, allowed: false },
{ configured: true, allowed: true },
],
});
expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1);
const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0];
expect(authCall?.authorizers).toEqual([
{ configured: true, allowed: false },
{ configured: true, allowed: true },
]);
});
it("routes DM messages with direct peer kind", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } =
installRuntime({
commandAuthorized: false,
});
const account = createAccount();
await __testing.processMessage({
message: createDmMessage(),
account: {
...account,
config: {
...account.config,
dmPolicy: "open",
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
});
await processOpenDmMessage();
expect(resolveAgentRoute).toHaveBeenCalledWith(
expect.objectContaining({
@ -559,24 +556,9 @@ describe("zalouser monitor group mention gating", () => {
});
it("reuses the legacy DM session key when only the old group-shaped session exists", async () => {
const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({
commandAuthorized: false,
});
readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) =>
input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined,
);
const account = createAccount();
await __testing.processMessage({
message: createDmMessage(),
account: {
...account,
config: {
...account.config,
dmPolicy: "open",
},
},
config: createConfig(),
runtime: createRuntimeEnv(),
const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({
readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) =>
input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined,
});
const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0];

View File

@ -353,6 +353,7 @@
"@mariozechner/pi-ai": "0.57.1",
"@mariozechner/pi-coding-agent": "0.57.1",
"@mariozechner/pi-tui": "0.57.1",
"@modelcontextprotocol/sdk": "1.27.1",
"@mozilla/readability": "^0.6.0",
"@sinclair/typebox": "0.34.48",
"@slack/bolt": "^4.6.0",

View File

@ -60,16 +60,19 @@ importers:
version: 1.2.0-beta.3
'@mariozechner/pi-agent-core':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-coding-agent':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui':
specifier: 0.57.1
version: 0.57.1
'@modelcontextprotocol/sdk':
specifier: 1.27.1
version: 1.27.1(zod@4.3.6)
'@mozilla/readability':
specifier: ^0.6.0
version: 0.6.0
@ -346,7 +349,7 @@ importers:
version: 10.6.1
openclaw:
specifier: '>=2026.3.11'
version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
version: 2026.3.11(@discordjs/opus@0.10.0)(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
extensions/imessage: {}
@ -377,7 +380,7 @@ importers:
dependencies:
'@mariozechner/pi-agent-core':
specifier: 0.57.1
version: 0.57.1(ws@8.19.0)(zod@4.3.6)
version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@matrix-org/matrix-sdk-crypto-nodejs':
specifier: ^0.4.0
version: 0.4.0
@ -407,7 +410,7 @@ importers:
dependencies:
openclaw:
specifier: '>=2026.3.11'
version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
version: 2026.3.11(@discordjs/opus@0.10.0)(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3))
extensions/memory-lancedb:
dependencies:
@ -1828,6 +1831,16 @@ packages:
'@mistralai/mistralai@1.14.1':
resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==}
'@modelcontextprotocol/sdk@1.27.1':
resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==}
engines: {node: '>=18'}
peerDependencies:
'@cfworker/json-schema': ^4.1.1
zod: ^3.25 || ^4.0
peerDependenciesMeta:
'@cfworker/json-schema':
optional: true
'@mozilla/readability@0.6.0':
resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==}
engines: {node: '>=14.0.0'}
@ -4271,6 +4284,10 @@ packages:
core-util-is@1.0.3:
resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==}
cors@2.8.6:
resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==}
engines: {node: '>= 0.10'}
croner@10.0.1:
resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==}
engines: {node: '>=18.0'}
@ -4550,6 +4567,14 @@ packages:
events-universal@1.0.1:
resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==}
eventsource-parser@3.0.6:
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
engines: {node: '>=18.0.0'}
eventsource@3.0.7:
resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==}
engines: {node: '>=18.0.0'}
execa@4.1.0:
resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==}
engines: {node: '>=10'}
@ -4561,6 +4586,12 @@ packages:
exponential-backoff@3.1.3:
resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==}
express-rate-limit@8.3.1:
resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==}
engines: {node: '>= 16'}
peerDependencies:
express: '>= 4.11'
express@4.22.1:
resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==}
engines: {node: '>= 0.10.0'}
@ -5058,6 +5089,9 @@ packages:
jose@4.15.9:
resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==}
jose@6.2.1:
resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==}
js-stringify@1.0.2:
resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==}
@ -5102,6 +5136,9 @@ packages:
json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
json-schema-typed@8.0.2:
resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==}
json-schema@0.4.0:
resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
@ -5870,6 +5907,10 @@ packages:
resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==}
hasBin: true
pkce-challenge@5.0.1:
resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==}
engines: {node: '>=16.20.0'}
playwright-core@1.58.2:
resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==}
engines: {node: '>=18'}
@ -8645,12 +8686,14 @@ snapshots:
optionalDependencies:
'@noble/hashes': 2.0.1
'@google/genai@1.44.0':
'@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))':
dependencies:
google-auth-library: 10.6.1
p-retry: 4.6.2
protobufjs: 7.5.4
ws: 8.19.0
optionalDependencies:
'@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6)
transitivePeerDependencies:
- bufferutil
- supports-color
@ -8698,7 +8741,6 @@ snapshots:
'@hono/node-server@1.19.10(hono@4.12.7)':
dependencies:
hono: 4.12.7
optional: true
'@huggingface/jinja@0.5.5': {}
@ -9025,9 +9067,9 @@ snapshots:
std-env: 3.10.0
yoctocolors: 2.1.2
'@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-agent-core@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
transitivePeerDependencies:
- '@modelcontextprotocol/sdk'
- aws-crt
@ -9037,11 +9079,11 @@ snapshots:
- ws
- zod
'@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-ai@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@anthropic-ai/sdk': 0.73.0(zod@4.3.6)
'@aws-sdk/client-bedrock-runtime': 3.1004.0
'@google/genai': 1.44.0
'@google/genai': 1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))
'@mistralai/mistralai': 1.14.1
'@sinclair/typebox': 0.34.48
ajv: 8.18.0
@ -9061,11 +9103,11 @@ snapshots:
- ws
- zod
'@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)':
'@mariozechner/pi-coding-agent@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)':
dependencies:
'@mariozechner/jiti': 2.6.5
'@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui': 0.57.1
'@silvia-odwyer/photon-node': 0.3.4
chalk: 5.6.2
@ -9141,6 +9183,28 @@ snapshots:
- bufferutil
- utf-8-validate
'@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)':
dependencies:
'@hono/node-server': 1.19.10(hono@4.12.7)
ajv: 8.18.0
ajv-formats: 3.0.1(ajv@8.18.0)
content-type: 1.0.5
cors: 2.8.6
cross-spawn: 7.0.6
eventsource: 3.0.7
eventsource-parser: 3.0.6
express: 5.2.1
express-rate-limit: 8.3.1(express@5.2.1)
hono: 4.12.7
jose: 6.2.1
json-schema-typed: 8.0.2
pkce-challenge: 5.0.1
raw-body: 3.0.2
zod: 4.3.6
zod-to-json-schema: 3.25.1(zod@4.3.6)
transitivePeerDependencies:
- supports-color
'@mozilla/readability@0.6.0': {}
'@napi-rs/canvas-android-arm64@0.1.95':
@ -11916,6 +11980,11 @@ snapshots:
core-util-is@1.0.3: {}
cors@2.8.6:
dependencies:
object-assign: 4.1.1
vary: 1.1.2
croner@10.0.1: {}
cross-spawn@7.0.6:
@ -12167,6 +12236,12 @@ snapshots:
transitivePeerDependencies:
- bare-abort-controller
eventsource-parser@3.0.6: {}
eventsource@3.0.7:
dependencies:
eventsource-parser: 3.0.6
execa@4.1.0:
dependencies:
cross-spawn: 7.0.6
@ -12183,6 +12258,11 @@ snapshots:
exponential-backoff@3.1.3: {}
express-rate-limit@8.3.1(express@5.2.1):
dependencies:
express: 5.2.1
ip-address: 10.1.0
express@4.22.1:
dependencies:
accepts: 1.3.8
@ -12826,6 +12906,8 @@ snapshots:
jose@4.15.9: {}
jose@6.2.1: {}
js-stringify@1.0.2: {}
js-tokens@10.0.0: {}
@ -12893,6 +12975,8 @@ snapshots:
json-schema-traverse@1.0.0: {}
json-schema-typed@8.0.2: {}
json-schema@0.4.0: {}
json-stringify-safe@5.0.1: {}
@ -13497,7 +13581,7 @@ snapshots:
ws: 8.19.0
zod: 4.3.6
openclaw@2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)):
openclaw@2026.3.11(@discordjs/opus@0.10.0)(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)):
dependencies:
'@agentclientprotocol/sdk': 0.16.1(zod@4.3.6)
'@aws-sdk/client-bedrock': 3.1007.0
@ -13510,9 +13594,9 @@ snapshots:
'@larksuiteoapi/node-sdk': 1.59.0
'@line/bot-sdk': 10.6.0
'@lydell/node-pty': 1.2.0-beta.3
'@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-coding-agent': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)
'@mariozechner/pi-tui': 0.57.1
'@mozilla/readability': 0.6.0
'@napi-rs/canvas': 0.1.95
@ -13784,6 +13868,8 @@ snapshots:
sonic-boom: 4.2.1
thread-stream: 3.1.0
pkce-challenge@5.0.1: {}
playwright-core@1.58.2: {}
playwright@1.58.2:

View File

@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs";
const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/;
const SKILLS_PYTHON_SCOPE_RE = /^skills\//;
const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/;
const MACOS_PROTOCOL_GEN_RE =
/^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/;
const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/;
@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) {
runSkillsPython = true;
}
if (CI_WORKFLOW_SCOPE_RE.test(path)) {
runMacos = true;
runAndroid = true;
runSkillsPython = true;
}
if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) {
runMacos = true;
}

View File

@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js";
import { AcpGatewayAgent } from "./translator.js";
import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js";
const TEST_SESSION_ID = "session-1";
const TEST_SESSION_KEY = "agent:main:main";
const TEST_PROMPT = {
sessionId: TEST_SESSION_ID,
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest;
describe("acp prompt cwd prefix", () => {
const createStopAfterSendSpy = () =>
vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
async function runPromptAndCaptureRequest(
options: {
cwd?: string;
prefixCwd?: boolean;
provenanceMode?: "meta" | "meta+receipt";
} = {},
) {
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: TEST_SESSION_ID,
sessionKey: TEST_SESSION_KEY,
cwd: options.cwd ?? path.join(os.homedir(), "openclaw-test"),
});
const requestSpy = createStopAfterSendSpy();
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
prefixCwd: options.prefixCwd,
provenanceMode: options.provenanceMode,
},
);
await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send");
return requestSpy;
}
async function runPromptWithCwd(cwd: string) {
const pinnedHome = os.homedir();
const previousOpenClawHome = process.env.OPENCLAW_HOME;
@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => {
delete process.env.OPENCLAW_HOME;
process.env.HOME = pinnedHome;
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: "session-1",
sessionKey: "agent:main:main",
cwd,
});
const requestSpy = vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
prefixCwd: true,
},
);
try {
await expect(
agent.prompt({
sessionId: "session-1",
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest),
).rejects.toThrow("stop-after-send");
return requestSpy;
return await runPromptAndCaptureRequest({ cwd, prefixCwd: true });
} finally {
if (previousOpenClawHome === undefined) {
delete process.env.OPENCLAW_HOME;
@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => {
});
it("injects system provenance metadata when enabled", async () => {
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: "session-1",
sessionKey: "agent:main:main",
cwd: path.join(os.homedir(), "openclaw-test"),
});
const requestSpy = vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
provenanceMode: "meta",
},
);
await expect(
agent.prompt({
sessionId: "session-1",
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest),
).rejects.toThrow("stop-after-send");
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" });
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemInputProvenance: {
kind: "external_user",
originSessionId: "session-1",
originSessionId: TEST_SESSION_ID,
sourceChannel: "acp",
sourceTool: "openclaw_acp",
},
@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => {
});
it("injects a system provenance receipt when requested", async () => {
const sessionStore = createInMemorySessionStore();
sessionStore.createSession({
sessionId: "session-1",
sessionKey: "agent:main:main",
cwd: path.join(os.homedir(), "openclaw-test"),
});
const requestSpy = vi.fn(async (method: string) => {
if (method === "chat.send") {
throw new Error("stop-after-send");
}
return {};
});
const agent = new AcpGatewayAgent(
createAcpConnection(),
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
{
sessionStore,
provenanceMode: "meta+receipt",
},
);
await expect(
agent.prompt({
sessionId: "session-1",
prompt: [{ type: "text", text: "hello" }],
_meta: {},
} as unknown as PromptRequest),
).rejects.toThrow("stop-after-send");
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" });
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemInputProvenance: {
kind: "external_user",
originSessionId: "session-1",
originSessionId: TEST_SESSION_ID,
sourceChannel: "acp",
sourceTool: "openclaw_acp",
},
@ -182,14 +140,14 @@ describe("acp prompt cwd prefix", () => {
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"),
systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`),
}),
{ expectFinal: true },
);
expect(requestSpy).toHaveBeenCalledWith(
"chat.send",
expect.objectContaining({
systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"),
systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`),
}),
{ expectFinal: true },
);

View File

@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) {
return buildSystemRunPreparePayload(params);
}
function getTestConfigPath() {
return path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
}
async function writeOpenClawConfig(config: Record<string, unknown>, pretty = false) {
const configPath = getTestConfigPath();
await fs.mkdir(path.dirname(configPath), { recursive: true });
await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined));
}
async function writeExecApprovalsConfig(config: Record<string, unknown>) {
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2));
}
function acceptedApprovalResponse(params: unknown) {
return { status: "accepted", id: (params as { id?: string })?.id };
}
function getResultText(result: { content: Array<{ type?: string; text?: string }> }) {
return result.content.find((part) => part.type === "text")?.text ?? "";
}
function expectPendingApprovalText(
result: {
details: { status?: string };
content: Array<{ type?: string; text?: string }>;
},
options: {
command: string;
host: "gateway" | "node";
nodeId?: string;
interactive?: boolean;
},
) {
expect(result.details.status).toBe("approval-pending");
const details = result.details as { approvalId: string; approvalSlug: string };
const pendingText = getResultText(result);
expect(pendingText).toContain(
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
);
expect(pendingText).toContain(`full ${details.approvalId}`);
expect(pendingText).toContain(`Host: ${options.host}`);
if (options.nodeId) {
expect(pendingText).toContain(`Node: ${options.nodeId}`);
}
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
expect(pendingText).toContain("Command:\n```sh\n");
expect(pendingText).toContain(options.command);
if (options.interactive) {
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
expect(pendingText).toContain("Background mode requires pre-approved policy");
}
return details;
}
function expectPendingCommandText(
result: {
details: { status?: string };
content: Array<{ type?: string; text?: string }>;
},
command: string,
) {
expect(result.details.status).toBe("approval-pending");
const text = getResultText(result);
expect(text).toContain("Command:\n```sh\n");
expect(text).toContain(command);
}
function mockGatewayOkCalls(calls: string[]) {
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
calls.push(method);
return { ok: true };
});
}
function createElevatedAllowlistExecTool() {
return createExecTool({
ask: "on-miss",
security: "allowlist",
approvalRunningNoticeMs: 0,
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
});
}
async function expectGatewayExecWithoutApproval(options: {
config: Record<string, unknown>;
command: string;
ask?: "always" | "on-miss" | "off";
}) {
await writeExecApprovalsConfig(options.config);
const calls: string[] = [];
mockGatewayOkCalls(calls);
const tool = createExecTool({
host: "gateway",
ask: options.ask,
security: "full",
approvalRunningNoticeMs: 0,
});
const result = await tool.execute("call-no-approval", { command: options.command });
expect(result.details.status).toBe("completed");
expect(calls).not.toContain("exec.approval.request");
expect(calls).not.toContain("exec.approval.waitDecision");
}
function mockAcceptedApprovalFlow(options: {
onAgent?: (params: Record<string, unknown>) => void;
onNodeInvoke?: (params: unknown) => unknown;
}) {
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
if (method === "exec.approval.request") {
return acceptedApprovalResponse(params);
}
if (method === "exec.approval.waitDecision") {
return { decision: "allow-once" };
}
if (method === "agent" && options.onAgent) {
options.onAgent(params as Record<string, unknown>);
return { status: "ok" };
}
if (method === "node.invoke" && options.onNodeInvoke) {
return await options.onNodeInvoke(params);
}
return { ok: true };
});
}
function mockPendingApprovalRegistration() {
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: "approval-id" };
}
if (method === "exec.approval.waitDecision") {
return { decision: null };
}
return { ok: true };
});
}
function expectApprovalUnavailableText(result: {
details: { status?: string };
content: Array<{ type?: string; text?: string }>;
}) {
expect(result.details.status).toBe("approval-unavailable");
const text = result.content.find((part) => part.type === "text")?.text ?? "";
expect(text).not.toContain("/approve");
expect(text).not.toContain("npm view diver name version description");
expect(text).not.toContain("Pending command:");
expect(text).not.toContain("Host:");
expect(text).not.toContain("CWD:");
return text;
}
describe("exec approvals", () => {
let previousHome: string | undefined;
let previousUserProfile: string | undefined;
@ -81,18 +237,11 @@ describe("exec approvals", () => {
let invokeParams: unknown;
let agentParams: unknown;
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: (params as { id?: string })?.id };
}
if (method === "exec.approval.waitDecision") {
return { decision: "allow-once" };
}
if (method === "agent") {
mockAcceptedApprovalFlow({
onAgent: (params) => {
agentParams = params;
return { status: "ok" };
}
if (method === "node.invoke") {
},
onNodeInvoke: (params) => {
const invoke = params as { command?: string };
if (invoke.command === "system.run.prepare") {
return buildPreparedSystemRunPayload(params);
@ -101,8 +250,7 @@ describe("exec approvals", () => {
invokeParams = params;
return { payload: { success: true, stdout: "ok" } };
}
}
return { ok: true };
},
});
const tool = createExecTool({
@ -113,19 +261,12 @@ describe("exec approvals", () => {
});
const result = await tool.execute("call1", { command: "ls -la" });
expect(result.details.status).toBe("approval-pending");
const details = result.details as { approvalId: string; approvalSlug: string };
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
);
expect(pendingText).toContain(`full ${details.approvalId}`);
expect(pendingText).toContain("Host: node");
expect(pendingText).toContain("Node: node-1");
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
expect(pendingText).toContain("Command:\n```sh\nls -la\n```");
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
expect(pendingText).toContain("Background mode requires pre-approved policy");
const details = expectPendingApprovalText(result, {
command: "ls -la",
host: "node",
nodeId: "node-1",
interactive: true,
});
const approvalId = details.approvalId;
await expect
@ -214,74 +355,28 @@ describe("exec approvals", () => {
});
it("uses exec-approvals ask=off to suppress gateway prompts", async () => {
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
await fs.writeFile(
approvalsPath,
JSON.stringify(
{
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {
main: { security: "full", ask: "off", askFallback: "full" },
},
await expectGatewayExecWithoutApproval({
config: {
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {
main: { security: "full", ask: "off", askFallback: "full" },
},
null,
2,
),
);
const calls: string[] = [];
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
calls.push(method);
return { ok: true };
});
const tool = createExecTool({
host: "gateway",
},
command: "echo ok",
ask: "on-miss",
security: "full",
approvalRunningNoticeMs: 0,
});
const result = await tool.execute("call3b", { command: "echo ok" });
expect(result.details.status).toBe("completed");
expect(calls).not.toContain("exec.approval.request");
expect(calls).not.toContain("exec.approval.waitDecision");
});
it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => {
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
await fs.writeFile(
approvalsPath,
JSON.stringify(
{
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {},
},
null,
2,
),
);
const calls: string[] = [];
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
calls.push(method);
return { ok: true };
await expectGatewayExecWithoutApproval({
config: {
version: 1,
defaults: { security: "full", ask: "off", askFallback: "full" },
agents: {},
},
command: "echo ok",
});
const tool = createExecTool({
host: "gateway",
security: "full",
approvalRunningNoticeMs: 0,
});
const result = await tool.execute("call3c", { command: "echo ok" });
expect(result.details.status).toBe("completed");
expect(calls).not.toContain("exec.approval.request");
expect(calls).not.toContain("exec.approval.waitDecision");
});
it("requires approval for elevated ask when allowlist misses", async () => {
@ -296,7 +391,7 @@ describe("exec approvals", () => {
if (method === "exec.approval.request") {
resolveApproval?.();
// Return registration confirmation
return { status: "accepted", id: (params as { id?: string })?.id };
return acceptedApprovalResponse(params);
}
if (method === "exec.approval.waitDecision") {
return { decision: "deny" };
@ -304,24 +399,10 @@ describe("exec approvals", () => {
return { ok: true };
});
const tool = createExecTool({
ask: "on-miss",
security: "allowlist",
approvalRunningNoticeMs: 0,
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
});
const tool = createElevatedAllowlistExecTool();
const result = await tool.execute("call4", { command: "echo ok", elevated: true });
expect(result.details.status).toBe("approval-pending");
const details = result.details as { approvalId: string; approvalSlug: string };
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
);
expect(pendingText).toContain(`full ${details.approvalId}`);
expect(pendingText).toContain("Host: gateway");
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
expect(pendingText).toContain("Command:\n```sh\necho ok\n```");
expectPendingApprovalText(result, { command: "echo ok", host: "gateway" });
await approvalSeen;
expect(calls).toContain("exec.approval.request");
expect(calls).toContain("exec.approval.waitDecision");
@ -330,18 +411,10 @@ describe("exec approvals", () => {
it("starts a direct agent follow-up after approved gateway exec completes", async () => {
const agentCalls: Array<Record<string, unknown>> = [];
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: (params as { id?: string })?.id };
}
if (method === "exec.approval.waitDecision") {
return { decision: "allow-once" };
}
if (method === "agent") {
agentCalls.push(params as Record<string, unknown>);
return { status: "ok" };
}
return { ok: true };
mockAcceptedApprovalFlow({
onAgent: (params) => {
agentCalls.push(params);
},
});
const tool = createExecTool({
@ -388,7 +461,7 @@ describe("exec approvals", () => {
if (typeof request.id === "string") {
requestIds.push(request.id);
}
return { status: "accepted", id: request.id };
return acceptedApprovalResponse(request);
}
if (method === "exec.approval.waitDecision") {
const wait = params as { id?: string };
@ -400,12 +473,7 @@ describe("exec approvals", () => {
return { ok: true };
});
const tool = createExecTool({
ask: "on-miss",
security: "allowlist",
approvalRunningNoticeMs: 0,
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
});
const tool = createElevatedAllowlistExecTool();
const first = await tool.execute("call-seq-1", {
command: "npm view diver --json",
@ -429,7 +497,7 @@ describe("exec approvals", () => {
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
calls.push(method);
if (method === "exec.approval.request") {
return { status: "accepted", id: (params as { id?: string })?.id };
return acceptedApprovalResponse(params);
}
if (method === "exec.approval.waitDecision") {
return { decision: "deny" };
@ -448,11 +516,7 @@ describe("exec approvals", () => {
command: "npm view diver --json | jq .name && brew outdated",
});
expect(result.details.status).toBe("approval-pending");
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
);
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
expect(calls).toContain("exec.approval.request");
});
@ -480,11 +544,7 @@ describe("exec approvals", () => {
command: "npm view diver --json | jq .name && brew outdated",
});
expect(result.details.status).toBe("approval-pending");
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
expect(pendingText).toContain(
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
);
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
expect(calls).toContain("exec.approval.request");
});
@ -551,30 +611,17 @@ describe("exec approvals", () => {
});
it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => {
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
await fs.mkdir(path.dirname(configPath), { recursive: true });
await fs.writeFile(
configPath,
JSON.stringify({
channels: {
discord: {
enabled: true,
execApprovals: { enabled: false },
},
await writeOpenClawConfig({
channels: {
discord: {
enabled: true,
execApprovals: { enabled: false },
},
}),
);
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: "approval-id" };
}
if (method === "exec.approval.waitDecision") {
return { decision: null };
}
return { ok: true };
},
});
mockPendingApprovalRegistration();
const tool = createExecTool({
host: "gateway",
ask: "always",
@ -588,49 +635,29 @@ describe("exec approvals", () => {
command: "npm view diver name version description",
});
expect(result.details.status).toBe("approval-unavailable");
const text = result.content.find((part) => part.type === "text")?.text ?? "";
const text = expectApprovalUnavailableText(result);
expect(text).toContain("chat exec approvals are not enabled on Discord");
expect(text).toContain("Web UI or terminal UI");
expect(text).not.toContain("/approve");
expect(text).not.toContain("npm view diver name version description");
expect(text).not.toContain("Pending command:");
expect(text).not.toContain("Host:");
expect(text).not.toContain("CWD:");
});
it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => {
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
await fs.mkdir(path.dirname(configPath), { recursive: true });
await fs.writeFile(
configPath,
JSON.stringify(
{
channels: {
telegram: {
enabled: true,
execApprovals: { enabled: false },
},
discord: {
enabled: true,
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
},
await writeOpenClawConfig(
{
channels: {
telegram: {
enabled: true,
execApprovals: { enabled: false },
},
discord: {
enabled: true,
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
},
},
null,
2,
),
},
true,
);
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
if (method === "exec.approval.request") {
return { status: "accepted", id: "approval-id" };
}
if (method === "exec.approval.waitDecision") {
return { decision: null };
}
return { ok: true };
});
mockPendingApprovalRegistration();
const tool = createExecTool({
host: "gateway",
@ -645,14 +672,8 @@ describe("exec approvals", () => {
command: "npm view diver name version description",
});
expect(result.details.status).toBe("approval-unavailable");
const text = result.content.find((part) => part.type === "text")?.text ?? "";
const text = expectApprovalUnavailableText(result);
expect(text).toContain("Approval required. I sent the allowed approvers DMs.");
expect(text).not.toContain("/approve");
expect(text).not.toContain("npm view diver name version description");
expect(text).not.toContain("Pending command:");
expect(text).not.toContain("Host:");
expect(text).not.toContain("CWD:");
});
it("denies node obfuscated command when approval request times out", async () => {

View File

@ -46,6 +46,20 @@ function expectFallbackUsed(
expect(result.attempts[0]?.reason).toBe("rate_limit");
}
function expectPrimarySkippedForReason(
result: { result: unknown; attempts: Array<{ reason?: string }> },
run: {
(...args: unknown[]): unknown;
mock: { calls: unknown[][] };
},
reason: string,
) {
expect(result.result).toBe("ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
expect(result.attempts[0]?.reason).toBe(reason);
}
function expectPrimaryProbeSuccess(
result: { result: unknown },
run: {
@ -183,11 +197,7 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok");
const result = await runPrimaryCandidate(cfg, run);
expect(result.result).toBe("ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
expect(result.attempts[0]?.reason).toBe("billing");
expectPrimarySkippedForReason(result, run, "billing");
});
it("probes primary model when within 2-min margin of cooldown expiry", async () => {
@ -540,10 +550,6 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok");
const result = await runPrimaryCandidate(cfg, run);
expect(result.result).toBe("ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
expect(result.attempts[0]?.reason).toBe("billing");
expectPrimarySkippedForReason(result, run, "billing");
});
});

View File

@ -113,6 +113,92 @@ function createMoonshotConfig(overrides: {
};
}
function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig {
return {
models: {
...(mergeMode ? { mode: "merge" as const } : {}),
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
api: "openai-completions",
models: [
{
id: "gpt-4.1",
name: "GPT-4.1",
input: ["text"],
reasoning: false,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 16384,
},
],
},
},
},
};
}
async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) {
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
await withTempHome(async () => {
if (options?.seedMergedProvider) {
await writeAgentModelsJson({
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
api: "openai-completions",
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
},
},
});
}
await ensureOpenClawModelsJson(
createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider),
);
const result = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
});
});
}
async function expectMoonshotTokenLimits(params: {
contextWindow: number;
maxTokens: number;
expectedContextWindow: number;
expectedMaxTokens: number;
}) {
await withTempHome(async () => {
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
await ensureOpenClawModelsJson(
createMoonshotConfig({
contextWindow: params.contextWindow,
maxTokens: params.maxTokens,
}),
);
const parsed = await readGeneratedModelsJson<{
providers: Record<
string,
{
models?: Array<{
id: string;
contextWindow?: number;
maxTokens?: number;
}>;
}
>;
}>();
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
expect(kimi?.contextWindow).toBe(params.expectedContextWindow);
expect(kimi?.maxTokens).toBe(params.expectedMaxTokens);
});
});
}
describe("models-config", () => {
it("keeps anthropic api defaults when model entries omit api", async () => {
await withTempHome(async () => {
@ -444,131 +530,28 @@ describe("models-config", () => {
});
it("does not persist resolved env var value as plaintext in models.json", async () => {
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
await withTempHome(async () => {
const cfg: OpenClawConfig = {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig
api: "openai-completions",
models: [
{
id: "gpt-4.1",
name: "GPT-4.1",
input: ["text"],
reasoning: false,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 16384,
},
],
},
},
},
};
await ensureOpenClawModelsJson(cfg);
const result = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY");
});
});
await expectOpenAiEnvMarkerApiKey();
});
it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => {
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
await withTempHome(async () => {
await writeAgentModelsJson({
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
api: "openai-completions",
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
},
},
});
const cfg: OpenClawConfig = {
models: {
mode: "merge",
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
api: "openai-completions",
models: [
{
id: "gpt-4.1",
name: "GPT-4.1",
input: ["text"],
reasoning: false,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 16384,
},
],
},
},
},
};
await ensureOpenClawModelsJson(cfg);
const result = await readGeneratedModelsJson<{
providers: Record<string, { apiKey?: string }>;
}>();
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
});
});
await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true });
});
it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => {
await withTempHome(async () => {
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 });
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<
string,
{
models?: Array<{
id: string;
contextWindow?: number;
maxTokens?: number;
}>;
}
>;
}>();
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
expect(kimi?.contextWindow).toBe(350000);
expect(kimi?.maxTokens).toBe(16384);
});
await expectMoonshotTokenLimits({
contextWindow: 350000,
maxTokens: 16384,
expectedContextWindow: 350000,
expectedMaxTokens: 16384,
});
});
it("falls back to implicit token limits when explicit values are invalid", async () => {
await withTempHome(async () => {
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 });
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<
string,
{
models?: Array<{
id: string;
contextWindow?: number;
maxTokens?: number;
}>;
}
>;
}>();
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
expect(kimi?.contextWindow).toBe(256000);
expect(kimi?.maxTokens).toBe(8192);
});
await expectMoonshotTokenLimits({
contextWindow: 0,
maxTokens: -1,
expectedContextWindow: 256000,
expectedMaxTokens: 8192,
});
});
});

View File

@ -4,88 +4,80 @@ import { installModelsConfigTestHooks, withModelsTempHome } from "./models-confi
import { ensureOpenClawModelsJson } from "./models-config.js";
import { readGeneratedModelsJson } from "./models-config.test-utils.js";
function createGoogleModelsConfig(
models: NonNullable<OpenClawConfig["models"]>["providers"]["google"]["models"],
): OpenClawConfig {
return {
models: {
providers: {
google: {
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
apiKey: "GEMINI_KEY", // pragma: allowlist secret
api: "google-generative-ai",
models,
},
},
},
};
}
async function expectGeneratedGoogleModelIds(ids: string[]) {
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { models: Array<{ id: string }> }>;
}>();
expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids);
}
describe("models-config", () => {
installModelsConfigTestHooks();
it("normalizes gemini 3 ids to preview for google providers", async () => {
await withModelsTempHome(async () => {
const cfg: OpenClawConfig = {
models: {
providers: {
google: {
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
apiKey: "GEMINI_KEY", // pragma: allowlist secret
api: "google-generative-ai",
models: [
{
id: "gemini-3-pro",
name: "Gemini 3 Pro",
api: "google-generative-ai",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
{
id: "gemini-3-flash",
name: "Gemini 3 Flash",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
],
},
},
const cfg = createGoogleModelsConfig([
{
id: "gemini-3-pro",
name: "Gemini 3 Pro",
api: "google-generative-ai",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
};
{
id: "gemini-3-flash",
name: "Gemini 3 Flash",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
]);
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { models: Array<{ id: string }> }>;
}>();
const ids = parsed.providers.google?.models?.map((model) => model.id);
expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
});
});
it("normalizes the deprecated google flash preview id to the working preview id", async () => {
await withModelsTempHome(async () => {
const cfg: OpenClawConfig = {
models: {
providers: {
google: {
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
apiKey: "GEMINI_KEY", // pragma: allowlist secret
api: "google-generative-ai",
models: [
{
id: "gemini-3.1-flash-preview",
name: "Gemini 3.1 Flash Preview",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
],
},
},
const cfg = createGoogleModelsConfig([
{
id: "gemini-3.1-flash-preview",
name: "Gemini 3.1 Flash Preview",
api: "google-generative-ai",
reasoning: false,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1048576,
maxTokens: 65536,
},
};
]);
await ensureOpenClawModelsJson(cfg);
const parsed = await readGeneratedModelsJson<{
providers: Record<string, { models: Array<{ id: string }> }>;
}>();
const ids = parsed.providers.google?.models?.map((model) => model.id);
expect(ids).toEqual(["gemini-3-flash-preview"]);
await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]);
});
});
});

View File

@ -1,31 +1,11 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js";
import {
enrichOllamaModelsWithContext,
resolveOllamaApiBase,
type OllamaTagModel,
} from "./ollama-models.js";
function jsonResponse(body: unknown, status = 200): Response {
return new Response(JSON.stringify(body), {
status,
headers: { "Content-Type": "application/json" },
});
}
function requestUrl(input: string | URL | Request): string {
if (typeof input === "string") {
return input;
}
if (input instanceof URL) {
return input.toString();
}
return input.url;
}
function requestBody(body: BodyInit | null | undefined): string {
return typeof body === "string" ? body : "{}";
}
describe("ollama-models", () => {
afterEach(() => {
vi.unstubAllGlobals();
@ -43,7 +23,7 @@ describe("ollama-models", () => {
if (!url.endsWith("/api/show")) {
throw new Error(`Unexpected fetch: ${url}`);
}
const body = JSON.parse(requestBody(init?.body)) as { name?: string };
const body = JSON.parse(requestBodyText(init?.body)) as { name?: string };
if (body.name === "llama3:8b") {
return jsonResponse({ model_info: { "llama.context_length": 65536 } });
}

View File

@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader<Uint8Arr
} as unknown as ReadableStreamDefaultReader<Uint8Array>;
}
async function expectDoneEventContent(lines: string[], expectedContent: unknown) {
  // Streams the given mock NDJSON lines and asserts the final "done"
  // event carries exactly expectedContent.
  await withMockNdjsonFetch(lines, async () => {
    const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
    const collected = await collectStreamEvents(stream);
    const last = collected[collected.length - 1];
    if (!last || last.type !== "done") {
      throw new Error("Expected done event");
    }
    expect(last.message.content).toEqual(expectedContent);
  });
}
describe("parseNdjsonStream", () => {
it("parses text-only streaming chunks", async () => {
const reader = mockNdjsonReader([
@ -486,88 +500,48 @@ describe("createOllamaStreamFn", () => {
});
it("drops thinking chunks when no final content is emitted", async () => {
await withMockNdjsonFetch(
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([]);
},
[],
);
});
it("prefers streamed content over earlier thinking chunks", async () => {
await withMockNdjsonFetch(
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
},
[{ type: "text", text: "final answer" }],
);
});
it("drops reasoning chunks when no final content is emitted", async () => {
await withMockNdjsonFetch(
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([]);
},
[],
);
});
it("prefers streamed content over earlier reasoning chunks", async () => {
await withMockNdjsonFetch(
await expectDoneEventContent(
[
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
],
async () => {
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
const events = await collectStreamEvents(stream);
const doneEvent = events.at(-1);
if (!doneEvent || doneEvent.type !== "done") {
throw new Error("Expected done event");
}
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
},
[{ type: "text", text: "final answer" }],
);
});
});

View File

@ -115,6 +115,50 @@ function resetSessionStore(store: Record<string, unknown>) {
mockConfig = createMockConfig();
}
function installSandboxedSessionStatusConfig() {
  // Config used by sandboxed session_status tests: sessions are broadly
  // visible, agent-to-agent is open, but sandboxed agents only see sessions
  // they spawned (sessionToolsVisibility: "spawned").
  const sandboxDefaults = {
    model: { primary: "anthropic/claude-opus-4-5" },
    models: {},
    sandbox: { sessionToolsVisibility: "spawned" },
  };
  mockConfig = {
    session: { mainKey: "main", scope: "per-sender" },
    tools: {
      sessions: { visibility: "all" },
      agentToAgent: { enabled: true, allow: ["*"] },
    },
    agents: { defaults: sandboxDefaults },
  };
}
function mockSpawnedSessionList(
  resolveSessions: (spawnedBy: string | undefined) => Array<Record<string, unknown>>,
) {
  // Routes sessions.list gateway calls through resolveSessions; every other
  // method resolves to an empty object.
  callGatewayMock.mockImplementation(async (opts: unknown) => {
    const { method, params } = opts as { method?: string; params?: Record<string, unknown> };
    if (method !== "sessions.list") {
      return {};
    }
    return { sessions: resolveSessions(params?.spawnedBy as string | undefined) };
  });
}
function expectSpawnedSessionLookupCalls(spawnedBy: string) {
  // Both gateway invocations must be sessions.list lookups scoped to spawnedBy.
  const lookup = {
    method: "sessions.list",
    params: {
      includeGlobal: false,
      includeUnknown: false,
      limit: 500,
      spawnedBy,
    },
  };
  expect(callGatewayMock).toHaveBeenCalledTimes(2);
  for (const nth of [1, 2]) {
    expect(callGatewayMock).toHaveBeenNthCalledWith(nth, lookup);
  }
}
function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) {
const tool = createOpenClawTools({
agentSessionKey,
@ -242,27 +286,8 @@ describe("session_status tool", () => {
updatedAt: 10,
},
});
mockConfig = {
session: { mainKey: "main", scope: "per-sender" },
tools: {
sessions: { visibility: "all" },
agentToAgent: { enabled: true, allow: ["*"] },
},
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {},
sandbox: { sessionToolsVisibility: "spawned" },
},
},
};
callGatewayMock.mockImplementation(async (opts: unknown) => {
const request = opts as { method?: string; params?: Record<string, unknown> };
if (request.method === "sessions.list") {
return { sessions: [] };
}
return {};
});
installSandboxedSessionStatusConfig();
mockSpawnedSessionList(() => []);
const tool = getSessionStatusTool("agent:main:subagent:child", {
sandboxed: true,
@ -284,25 +309,7 @@ describe("session_status tool", () => {
expect(loadSessionStoreMock).not.toHaveBeenCalled();
expect(updateSessionStoreMock).not.toHaveBeenCalled();
expect(callGatewayMock).toHaveBeenCalledTimes(2);
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "agent:main:subagent:child",
},
});
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "agent:main:subagent:child",
},
});
expectSpawnedSessionLookupCalls("agent:main:subagent:child");
});
it("keeps legacy main requester keys for sandboxed session tree checks", async () => {
@ -316,30 +323,10 @@ describe("session_status tool", () => {
updatedAt: 20,
},
});
mockConfig = {
session: { mainKey: "main", scope: "per-sender" },
tools: {
sessions: { visibility: "all" },
agentToAgent: { enabled: true, allow: ["*"] },
},
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {},
sandbox: { sessionToolsVisibility: "spawned" },
},
},
};
callGatewayMock.mockImplementation(async (opts: unknown) => {
const request = opts as { method?: string; params?: Record<string, unknown> };
if (request.method === "sessions.list") {
return {
sessions:
request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
};
}
return {};
});
installSandboxedSessionStatusConfig();
mockSpawnedSessionList((spawnedBy) =>
spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
);
const tool = getSessionStatusTool("main", {
sandboxed: true,
@ -357,25 +344,7 @@ describe("session_status tool", () => {
expect(childDetails.ok).toBe(true);
expect(childDetails.sessionKey).toBe("agent:main:subagent:child");
expect(callGatewayMock).toHaveBeenCalledTimes(2);
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "main",
},
});
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
method: "sessions.list",
params: {
includeGlobal: false,
includeUnknown: false,
limit: 500,
spawnedBy: "main",
},
});
expectSpawnedSessionLookupCalls("main");
});
it("scopes bare session keys to the requester agent", async () => {

View File

@ -1,9 +1,14 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import "./test-helpers/fast-coding-tools.js";
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import {
cleanupEmbeddedPiRunnerTestWorkspace,
createEmbeddedPiRunnerOpenAiConfig,
createEmbeddedPiRunnerTestWorkspace,
type EmbeddedPiRunnerTestWorkspace,
immediateEnqueue,
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
function createMockUsage(input: number, output: number) {
return {
@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager;
let tempRoot: string | undefined;
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
let agentDir: string;
let workspaceDir: string;
let sessionCounter = 0;
@ -98,50 +103,21 @@ beforeAll(async () => {
vi.useRealTimers();
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
({ SessionManager } = await import("@mariozechner/pi-coding-agent"));
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-"));
agentDir = path.join(tempRoot, "agent");
workspaceDir = path.join(tempRoot, "workspace");
await fs.mkdir(agentDir, { recursive: true });
await fs.mkdir(workspaceDir, { recursive: true });
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-");
({ agentDir, workspaceDir } = e2eWorkspace);
}, 180_000);
afterAll(async () => {
if (!tempRoot) {
return;
}
await fs.rm(tempRoot, { recursive: true, force: true });
tempRoot = undefined;
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
e2eWorkspace = undefined;
});
const makeOpenAiConfig = (modelIds: string[]) =>
({
models: {
providers: {
openai: {
api: "openai-responses",
apiKey: "sk-test",
baseUrl: "https://example.com",
models: modelIds.map((id) => ({
id,
name: `Mock ${id}`,
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 16_000,
maxTokens: 2048,
})),
},
},
},
}) satisfies OpenClawConfig;
const nextSessionFile = () => {
sessionCounter += 1;
return path.join(workspaceDir, `session-${sessionCounter}.jsonl`);
};
const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`;
const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`;
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => {
const sessionFile = nextSessionFile();
@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string
timestamp: Date.now(),
});
const cfg = makeOpenAiConfig(["mock-1"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]);
return await runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => {
};
const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => {
const cfg = makeOpenAiConfig(["mock-error"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
await runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi
describe("runEmbeddedPiAgent", () => {
it("handles prompt error paths without dropping user state", async () => {
const sessionFile = nextSessionFile();
const cfg = makeOpenAiConfig(["mock-error"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
const sessionKey = nextSessionKey();
const result = await runEmbeddedPiAgent({
sessionId: "session:test",

View File

@ -8,12 +8,17 @@
* Follows the same pattern as pi-embedded-runner.e2e.test.ts.
*/
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import "./test-helpers/fast-coding-tools.js";
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js";
import {
cleanupEmbeddedPiRunnerTestWorkspace,
createEmbeddedPiRunnerOpenAiConfig,
createEmbeddedPiRunnerTestWorkspace,
type EmbeddedPiRunnerTestWorkspace,
immediateEnqueue,
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
function createMockUsage(input: number, output: number) {
return {
@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
});
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
let tempRoot: string | undefined;
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
let agentDir: string;
let workspaceDir: string;
@ -136,45 +141,15 @@ beforeAll(async () => {
responsePlan = [];
observedContexts = [];
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-"));
agentDir = path.join(tempRoot, "agent");
workspaceDir = path.join(tempRoot, "workspace");
await fs.mkdir(agentDir, { recursive: true });
await fs.mkdir(workspaceDir, { recursive: true });
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-");
({ agentDir, workspaceDir } = e2eWorkspace);
}, 180_000);
afterAll(async () => {
if (!tempRoot) {
return;
}
await fs.rm(tempRoot, { recursive: true, force: true });
tempRoot = undefined;
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
e2eWorkspace = undefined;
});
const makeConfig = (modelIds: string[]) =>
({
models: {
providers: {
openai: {
api: "openai-responses",
apiKey: "sk-test",
baseUrl: "https://example.com",
models: modelIds.map((id) => ({
id,
name: `Mock ${id}`,
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 16_000,
maxTokens: 2048,
})),
},
},
},
}) satisfies OpenClawConfig;
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
const readSessionMessages = async (sessionFile: string) => {
const raw = await fs.readFile(sessionFile, "utf-8");
return raw
@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => {
const sessionId = "yield-e2e-parent";
const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl");
const cfg = makeConfig(["mock-yield"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]);
const result = await runEmbeddedPiAgent({
sessionId,
@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => {
const sessionId = "yield-e2e-abort";
const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl");
const cfg = makeConfig(["mock-yield-abort"]);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]);
const result = await runEmbeddedPiAgent({
sessionId,

View File

@ -7,6 +7,7 @@ import {
usesOpenAiStringModeAnthropicToolChoice,
} from "../provider-capabilities.js";
import { log } from "./logger.js";
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07";
const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const;
@ -341,18 +342,10 @@ export function createAnthropicFastModeWrapper(
return underlying(model, context, options);
}
const originalOnPayload = options?.onPayload;
return underlying(model, context, {
...options,
onPayload: (payload) => {
if (payload && typeof payload === "object") {
const payloadObj = payload as Record<string, unknown>;
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
}
return originalOnPayload?.(payload, model);
},
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
});
};
}

View File

@ -278,6 +278,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({
}));
vi.mock("../../utils/message-channel.js", () => ({
INTERNAL_MESSAGE_CHANNEL: "webchat",
normalizeMessageChannel: vi.fn(() => undefined),
}));
@ -375,6 +376,16 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
unregisterApiProviders(getCustomApiRegistrySourceId("ollama"));
});
async function runDirectCompaction(customInstructions = "focus on decisions") {
return await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
sessionKey: "agent:main:session-1",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
customInstructions,
});
}
it("bootstraps runtime plugins with the resolved workspace", async () => {
await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
@ -472,13 +483,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
hookRunner.hasHooks.mockReturnValue(true);
sanitizeSessionHistoryMock.mockResolvedValue([]);
const result = await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
sessionKey: "agent:main:session-1",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
customInstructions: "focus on decisions",
});
const result = await runDirectCompaction();
expect(result.ok).toBe(true);
const beforeContext = sessionHook("compact:before")?.context;
@ -528,13 +533,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
details: { ok: true },
});
const result = await compactEmbeddedPiSessionDirect({
sessionId: "session-1",
sessionKey: "agent:main:session-1",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
customInstructions: "focus on decisions",
});
const result = await runDirectCompaction();
expect(result).toMatchObject({
ok: true,

View File

@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core";
import type { SimpleStreamOptions } from "@mariozechner/pi-ai";
import { streamSimple } from "@mariozechner/pi-ai";
import { log } from "./logger.js";
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
type OpenAIReasoningEffort = "low" | "medium" | "high";
@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper(
) {
return underlying(model, context, options);
}
const originalOnPayload = options?.onPayload;
return underlying(model, context, {
...options,
onPayload: (payload) => {
if (payload && typeof payload === "object") {
const payloadObj = payload as Record<string, unknown>;
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
}
return originalOnPayload?.(payload, model);
},
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
if (payloadObj.service_tier === undefined) {
payloadObj.service_tier = serviceTier;
}
});
};
}

View File

@ -0,0 +1,20 @@
import type { StreamFn } from "@mariozechner/pi-agent-core";
export function streamWithPayloadPatch(
  underlying: StreamFn,
  model: Parameters<StreamFn>[0],
  context: Parameters<StreamFn>[1],
  options: Parameters<StreamFn>[2],
  patchPayload: (payload: Record<string, unknown>) => void,
) {
  // Forwards the stream call unchanged except that every object payload is
  // mutated in place by patchPayload before the caller's onPayload (if any) runs.
  const callerOnPayload = options?.onPayload;
  return underlying(model, context, {
    ...options,
    onPayload: (payload) => {
      const patchable = Boolean(payload) && typeof payload === "object";
      if (patchable) {
        patchPayload(payload as Record<string, unknown>);
      }
      return callerOnPayload?.(payload, model);
    },
  });
}

View File

@ -0,0 +1,57 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import type { OpenClawConfig } from "../../config/config.js";
// Filesystem layout for an isolated embedded-pi-runner test run.
export type EmbeddedPiRunnerTestWorkspace = {
  // Root temp directory; removing it cleans up everything below.
  tempRoot: string;
  // Agent home directory, created under tempRoot.
  agentDir: string;
  // Workspace directory for session files, created under tempRoot.
  workspaceDir: string;
};
export async function createEmbeddedPiRunnerTestWorkspace(
  prefix: string,
): Promise<EmbeddedPiRunnerTestWorkspace> {
  // Creates <os-tmp>/<prefix>XXXXXX with agent/ and workspace/ subdirectories.
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
  const workspace = {
    tempRoot,
    agentDir: path.join(tempRoot, "agent"),
    workspaceDir: path.join(tempRoot, "workspace"),
  };
  await fs.mkdir(workspace.agentDir, { recursive: true });
  await fs.mkdir(workspace.workspaceDir, { recursive: true });
  return workspace;
}
export async function cleanupEmbeddedPiRunnerTestWorkspace(
  workspace: EmbeddedPiRunnerTestWorkspace | undefined,
): Promise<void> {
  // Removes the workspace's temp root recursively; tolerates an undefined
  // workspace and (via force) an already-deleted path.
  if (workspace) {
    await fs.rm(workspace.tempRoot, { recursive: true, force: true });
  }
}
export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig {
  // Minimal OpenClaw config with a single "openai" provider
  // (openai-responses API, placeholder credentials/baseUrl) exposing one
  // mock model per entry in modelIds. Costs are zeroed so tests accrue
  // no simulated spend.
  return {
    models: {
      providers: {
        openai: {
          api: "openai-responses",
          apiKey: "sk-test",
          baseUrl: "https://example.com",
          // Each id becomes a small, non-reasoning, text-only mock model.
          models: modelIds.map((id) => ({
            id,
            name: `Mock ${id}`,
            reasoning: false,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 16_000,
            maxTokens: 2048,
          })),
        },
      },
    },
  };
}
export async function immediateEnqueue<T>(task: () => Promise<T>): Promise<T> {
  // Test stand-in for the real enqueue: runs the task right away.
  const result = await task();
  return result;
}

View File

@ -0,0 +1,68 @@
import { describe, expect, it } from "vitest";
import {
buildAiSnapshotFromChromeMcpSnapshot,
flattenChromeMcpSnapshotToAriaNodes,
} from "./chrome-mcp.snapshot.js";
// Fixture: a tiny accessibility tree — a document root containing one
// button and one textbox (the textbox carries a value).
const snapshot = {
  id: "root",
  role: "document",
  name: "Example",
  children: [
    {
      id: "btn-1",
      role: "button",
      name: "Continue",
    },
    {
      id: "txt-1",
      role: "textbox",
      name: "Email",
      value: "peter@example.com",
    },
  ],
};
describe("chrome MCP snapshot conversion", () => {
  it("flattens structured snapshots into aria-style nodes", () => {
    // Pre-order flattening: depth mirrors tree nesting, and fields absent
    // from the fixture come through as undefined.
    const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10);
    expect(nodes).toEqual([
      {
        ref: "root",
        role: "document",
        name: "Example",
        value: undefined,
        description: undefined,
        depth: 0,
      },
      {
        ref: "btn-1",
        role: "button",
        name: "Continue",
        value: undefined,
        description: undefined,
        depth: 1,
      },
      {
        ref: "txt-1",
        role: "textbox",
        name: "Email",
        value: "peter@example.com",
        description: undefined,
        depth: 1,
      },
    ]);
  });
  it("builds AI snapshots that preserve Chrome MCP uids as refs", () => {
    const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot });
    // Refs reuse the Chrome MCP node ids rather than synthesizing new ones.
    expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]');
    expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"');
    expect(result.refs).toEqual({
      "btn-1": { role: "button", name: "Continue" },
      "txt-1": { role: "textbox", name: "Email" },
    });
    expect(result.stats.refs).toBe(2);
  });
});

View File

@ -0,0 +1,246 @@
import type { SnapshotAriaNode } from "./client.js";
import {
getRoleSnapshotStats,
type RoleRefMap,
type RoleSnapshotOptions,
} from "./pw-role-snapshot.js";
// Shape of one node in a Chrome MCP accessibility snapshot tree.
// Every field is optional; blank/absent values are normalized away downstream.
export type ChromeMcpSnapshotNode = {
  id?: string;
  role?: string;
  name?: string;
  value?: string | number | boolean;
  description?: string;
  children?: ChromeMcpSnapshotNode[];
};
// Roles a user can act on; these always receive [ref=...] markers.
const INTERACTIVE_ROLES = new Set([
  "button",
  "checkbox",
  "combobox",
  "link",
  "listbox",
  "menuitem",
  "menuitemcheckbox",
  "menuitemradio",
  "option",
  "radio",
  "searchbox",
  "slider",
  "spinbutton",
  "switch",
  "tab",
  "textbox",
  "treeitem",
]);
// Content-bearing roles; these receive refs only when they have a name.
const CONTENT_ROLES = new Set([
  "article",
  "cell",
  "columnheader",
  "gridcell",
  "heading",
  "listitem",
  "main",
  "navigation",
  "region",
  "rowheader",
]);
// Purely structural roles; dropped in compact mode when they carry no name.
const STRUCTURAL_ROLES = new Set([
  "application",
  "directory",
  "document",
  "generic",
  "group",
  "ignored",
  "list",
  "menu",
  "menubar",
  "none",
  "presentation",
  "row",
  "rowgroup",
  "tablist",
  "table",
  "toolbar",
  "tree",
  "treegrid",
]);
function normalizeRole(node: ChromeMcpSnapshotNode): string {
  // Trimmed, lower-cased role; missing or blank roles fall back to "generic".
  if (typeof node.role !== "string") {
    return "generic";
  }
  return node.role.trim().toLowerCase() || "generic";
}
function normalizeString(value: unknown): string | undefined {
  // Strings are trimmed (blank -> undefined); numbers and booleans are
  // stringified; every other type is dropped.
  switch (typeof value) {
    case "string": {
      const trimmed = value.trim();
      return trimmed === "" ? undefined : trimmed;
    }
    case "number":
    case "boolean":
      return String(value);
    default:
      return undefined;
  }
}
function escapeQuoted(value: string): string {
  // Backslash-escapes \ and " so the value can sit inside a double-quoted field.
  return value.replace(/[\\"]/g, (ch) => `\\${ch}`);
}
function shouldIncludeNode(params: {
  role: string;
  name?: string;
  options?: RoleSnapshotOptions;
}): boolean {
  // interactive mode keeps only interactive roles; compact mode additionally
  // drops unnamed structural nodes.
  const { role, name, options } = params;
  if (options?.interactive && !INTERACTIVE_ROLES.has(role)) {
    return false;
  }
  return !(options?.compact && STRUCTURAL_ROLES.has(role) && !name);
}
function shouldCreateRef(role: string, name?: string): boolean {
  // Interactive roles always get refs; content roles only when named.
  if (INTERACTIVE_ROLES.has(role)) {
    return true;
  }
  return CONTENT_ROLES.has(role) && Boolean(name);
}
// Bookkeeping for refs that share the same (role, name) identity, so
// duplicates can later be disambiguated with an `nth` index.
type DuplicateTracker = {
  counts: Map<string, number>; // occurrences seen per "role:name" key
  keysByRef: Map<string, string>; // ref id -> its "role:name" key
  duplicates: Set<string>; // keys observed more than once
};
function createDuplicateTracker(): DuplicateTracker {
  // Returns a fresh, empty tracker.
  return {
    counts: new Map(),
    keysByRef: new Map(),
    duplicates: new Set(),
  };
}
function registerRef(
  tracker: DuplicateTracker,
  ref: string,
  role: string,
  name?: string,
): number | undefined {
  // Records `ref` under its "role:name" identity key. Returns the duplicate
  // index for repeat occurrences (1 for the second sighting of a key, 2 for
  // the third, ...) and undefined for the first occurrence of a key.
  const key = `${role}:${name ?? ""}`;
  const count = tracker.counts.get(key) ?? 0;
  tracker.counts.set(key, count + 1);
  tracker.keysByRef.set(ref, key);
  if (count > 0) {
    // The key has been seen before: flag it so callers can tell which
    // (role, name) pairs genuinely collide.
    tracker.duplicates.add(key);
    return count;
  }
  return undefined;
}
export function flattenChromeMcpSnapshotToAriaNodes(
  root: ChromeMcpSnapshotNode,
  limit = 500,
): SnapshotAriaNode[] {
  // Pre-order DFS over the snapshot tree: every node that has an id becomes
  // one flat entry (nodes without ids are skipped but their children are
  // still visited). The limit is clamped to [1, 2000].
  const cap = Math.max(1, Math.min(2000, Math.floor(limit)));
  const nodes: SnapshotAriaNode[] = [];
  const walk = (node: ChromeMcpSnapshotNode, depth: number) => {
    if (nodes.length >= cap) {
      return;
    }
    const ref = normalizeString(node.id);
    if (ref) {
      nodes.push({
        ref,
        role: normalizeRole(node),
        name: normalizeString(node.name) ?? "",
        value: normalizeString(node.value),
        description: normalizeString(node.description),
        depth,
      });
    }
    for (const child of node.children ?? []) {
      walk(child, depth + 1);
      if (nodes.length >= cap) {
        return;
      }
    }
  };
  walk(root, 0);
  return nodes;
}
export function buildAiSnapshotFromChromeMcpSnapshot(params: {
  root: ChromeMcpSnapshotNode;
  options?: RoleSnapshotOptions;
  maxChars?: number;
}): {
  snapshot: string;
  truncated?: boolean;
  refs: RoleRefMap;
  stats: { lines: number; chars: number; refs: number; interactive: number };
} {
  // Renders the snapshot tree as an indented aria-style outline (one line per
  // included node), assigning [ref=...] markers to interactive/content nodes
  // and disambiguating duplicate (role, name) pairs with an `nth` index.
  const refs: RoleRefMap = {};
  const tracker = createDuplicateTracker();
  const lines: string[] = [];
  const visit = (node: ChromeMcpSnapshotNode, depth: number) => {
    const role = normalizeRole(node);
    const name = normalizeString(node.name);
    const value = normalizeString(node.value);
    const description = normalizeString(node.description);
    const maxDepth = params.options?.maxDepth;
    // Past maxDepth the whole subtree is pruned (children are not visited).
    if (maxDepth !== undefined && depth > maxDepth) {
      return;
    }
    const includeNode = shouldIncludeNode({ role, name, options: params.options });
    if (includeNode) {
      let line = `${"  ".repeat(depth)}- ${role}`;
      if (name) {
        line += ` "${escapeQuoted(name)}"`;
      }
      const ref = normalizeString(node.id);
      if (ref && shouldCreateRef(role, name)) {
        // nth is provisional here; entries that turn out to be unique have
        // it removed after the walk completes.
        const nth = registerRef(tracker, ref, role, name);
        refs[ref] = nth === undefined ? { role, name } : { role, name, nth };
        line += ` [ref=${ref}]`;
      }
      if (value) {
        line += ` value="${escapeQuoted(value)}"`;
      }
      if (description) {
        line += ` description="${escapeQuoted(description)}"`;
      }
      lines.push(line);
    }
    for (const child of node.children ?? []) {
      visit(child, depth + 1);
    }
  };
  visit(params.root, 0);
  // Second pass: drop nth from refs whose (role, name) key was never duplicated.
  for (const [ref, data] of Object.entries(refs)) {
    const key = tracker.keysByRef.get(ref);
    if (key && !tracker.duplicates.has(key)) {
      delete data.nth;
    }
  }
  let snapshot = lines.join("\n");
  let truncated = false;
  // Only finite, positive maxChars values are honored; anything else disables truncation.
  const maxChars =
    typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0
      ? Math.floor(params.maxChars)
      : undefined;
  if (maxChars && snapshot.length > maxChars) {
    snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`;
    truncated = true;
  }
  const stats = getRoleSnapshotStats(snapshot, refs);
  return truncated ? { snapshot, truncated, refs, stats } : { snapshot, refs, stats };
}

View File

@ -0,0 +1,100 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import {
listChromeMcpTabs,
openChromeMcpTab,
resetChromeMcpSessionsForTest,
setChromeMcpSessionFactoryForTest,
} from "./chrome-mcp.js";
// Argument shape the fake MCP client's callTool receives.
type ToolCall = {
  name: string;
  arguments?: Record<string, unknown>;
};
function createFakeSession() {
  // Minimal fake Chrome MCP session: callTool answers list_pages and
  // new_page with canned text responses and rejects anything else.
  const textResult = (pageLines: string[]) => ({
    content: [
      {
        type: "text",
        text: pageLines.join("\n"),
      },
    ],
  });
  const callTool = vi.fn(async ({ name }: ToolCall) => {
    if (name === "list_pages") {
      return textResult([
        "## Pages",
        "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]",
        "2: https://github.com/openclaw/openclaw/pull/45318",
      ]);
    }
    if (name === "new_page") {
      return textResult([
        "## Pages",
        "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session",
        "2: https://github.com/openclaw/openclaw/pull/45318",
        "3: https://example.com/ [selected]",
      ]);
    }
    throw new Error(`unexpected tool ${name}`);
  });
  return {
    client: {
      callTool,
      listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }),
      close: vi.fn().mockResolvedValue(undefined),
      connect: vi.fn().mockResolvedValue(undefined),
    },
    transport: {
      pid: 123,
    },
    ready: Promise.resolve(),
  };
}
// Verifies that tab listing/creation fall back to parsing the plain-text
// "N: <url> [selected]" page listing when structuredContent is absent.
describe("chrome MCP page parsing", () => {
  beforeEach(async () => {
    // Drop cached sessions and the injected factory between tests.
    await resetChromeMcpSessionsForTest();
  });
  it("parses list_pages text responses when structuredContent is missing", async () => {
    setChromeMcpSessionFactoryForTest(async () => createFakeSession());
    const tabs = await listChromeMcpTabs("chrome-live");
    expect(tabs).toEqual([
      {
        targetId: "1",
        title: "",
        url: "https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session",
        type: "page",
      },
      {
        targetId: "2",
        title: "",
        url: "https://github.com/openclaw/openclaw/pull/45318",
        type: "page",
      },
    ]);
  });
  it("parses new_page text responses and returns the created tab", async () => {
    setChromeMcpSessionFactoryForTest(async () => createFakeSession());
    const tab = await openChromeMcpTab("chrome-live", "https://example.com/");
    // The [selected] entry from the fake listing is the created tab.
    expect(tab).toEqual({
      targetId: "3",
      title: "",
      url: "https://example.com/",
      type: "page",
    });
  });
});

488
src/browser/chrome-mcp.ts Normal file
View File

@ -0,0 +1,488 @@
import { randomUUID } from "node:crypto";
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js";
import type { BrowserTab } from "./client.js";
import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js";
// One page entry as reported by the Chrome MCP list_pages / new_page tools.
type ChromeMcpStructuredPage = {
  id: number;
  url?: string;
  selected?: boolean;
};
// Loosely-typed MCP tool-call result: optional structured payload, raw content blocks, error flag.
type ChromeMcpToolResult = {
  structuredContent?: Record<string, unknown>;
  content?: Array<Record<string, unknown>>;
  isError?: boolean;
};
// A live MCP client/transport pair; `ready` settles once connect + tool discovery finish.
type ChromeMcpSession = {
  client: Client;
  transport: StdioClientTransport;
  ready: Promise<void>;
};
// Factory signature so tests can inject fake sessions (see setChromeMcpSessionFactoryForTest).
type ChromeMcpSessionFactory = (profileName: string) => Promise<ChromeMcpSession>;
// Command + args used to spawn the chrome-devtools-mcp server over stdio (via npx).
const DEFAULT_CHROME_MCP_COMMAND = "npx";
const DEFAULT_CHROME_MCP_ARGS = [
  "-y",
  "chrome-devtools-mcp@latest",
  "--autoConnect",
  "--experimental-page-id-routing",
];
// Per-profile cache of live MCP sessions.
const sessions = new Map<string, ChromeMcpSession>();
// Test-only override for session creation; null means use createRealSession.
let sessionFactory: ChromeMcpSessionFactory | null = null;
/**
 * Narrows an unknown value to a plain object record.
 * Returns null for primitives, null/undefined, and arrays.
 */
function asRecord(value: unknown): Record<string, unknown> | null {
  const isPlainObject = typeof value === "object" && value !== null && !Array.isArray(value);
  if (!isPlainObject) {
    return null;
  }
  return value as Record<string, unknown>;
}
/**
 * Validates an unknown value as a list of structured MCP page entries.
 * Entries without a numeric `id` are dropped; `url` is kept only when it is
 * a string, and `selected` is coerced to a strict boolean.
 */
function asPages(value: unknown): ChromeMcpStructuredPage[] {
  if (!Array.isArray(value)) {
    return [];
  }
  const pages: ChromeMcpStructuredPage[] = [];
  for (const entry of value) {
    const record = asRecord(entry);
    if (record === null || typeof record.id !== "number") {
      continue;
    }
    const url = typeof record.url === "string" ? record.url : undefined;
    pages.push({ id: record.id, url, selected: record.selected === true });
  }
  return pages;
}
/**
 * Converts a BrowserTab targetId string into the numeric page id Chrome MCP expects.
 *
 * Validates that the (trimmed) id is a plain non-negative integer. Previously a
 * value like "12abc" was silently truncated to 12 by Number.parseInt; now any
 * non-numeric id is rejected.
 *
 * @throws BrowserTabNotFoundError when the id is not a valid page number.
 */
function parsePageId(targetId: string): number {
  const normalized = targetId.trim();
  // Strict digit check: parseInt alone accepts trailing garbage ("12abc" -> 12).
  if (!/^\d+$/.test(normalized)) {
    throw new BrowserTabNotFoundError();
  }
  const parsed = Number.parseInt(normalized, 10);
  if (!Number.isFinite(parsed)) {
    throw new BrowserTabNotFoundError();
  }
  return parsed;
}
/** Converts structured MCP page entries into the BrowserTab shape used by callers. */
function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] {
  const tabs: BrowserTab[] = [];
  for (const page of pages) {
    tabs.push({
      targetId: String(page.id),
      title: "",
      url: page.url ?? "",
      type: "page",
    });
  }
  return tabs;
}
/** Returns the tool result's structuredContent as a record, or an empty object. */
function extractStructuredContent(result: ChromeMcpToolResult): Record<string, unknown> {
  const structured = asRecord(result.structuredContent);
  return structured === null ? {} : structured;
}
/** Collects the non-empty string `text` fields from a tool result's content blocks. */
function extractTextContent(result: ChromeMcpToolResult): string[] {
  if (!Array.isArray(result.content)) {
    return [];
  }
  const texts: string[] = [];
  for (const entry of result.content) {
    const record = asRecord(entry);
    const text = record && typeof record.text === "string" ? record.text : "";
    if (text) {
      texts.push(text);
    }
  }
  return texts;
}
/**
 * Parses the human-readable "N: <url> [selected]" page listing that Chrome MCP
 * emits as plain text when no structuredContent is present. Lines that do not
 * match the listing pattern (e.g. the "## Pages" header) are skipped.
 */
function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] {
  const listingLine = /^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i;
  const pages: ChromeMcpStructuredPage[] = [];
  for (const block of extractTextContent(result)) {
    for (const line of block.split(/\r?\n/)) {
      const match = listingLine.exec(line);
      if (match === null) {
        continue;
      }
      pages.push({
        id: Number.parseInt(match[1] ?? "", 10),
        url: match[2]?.trim() || undefined,
        selected: Boolean(match[3]),
      });
    }
  }
  return pages;
}
/** Prefers pages from structuredContent; falls back to parsing the text listing. */
function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] {
  const structured = asPages(extractStructuredContent(result).pages);
  if (structured.length > 0) {
    return structured;
  }
  return extractTextPages(result);
}
/**
 * Pulls the accessibility snapshot out of a `take_snapshot` tool result.
 * Throws when the response carries no structured snapshot record.
 */
function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode {
  const structured = extractStructuredContent(result);
  const snapshot = asRecord(structured.snapshot);
  if (!snapshot) {
    throw new Error("Chrome MCP snapshot response was missing structured snapshot data.");
  }
  // Cast via unknown: only the plain-object shape is checked here, not the node fields.
  return snapshot as unknown as ChromeMcpSnapshotNode;
}
/**
 * Extracts a JSON payload from an MCP text message.
 *
 * Prefers a fenced ```json ... ``` block; otherwise treats the whole text as a
 * JSON candidate. Returns null for empty input. If the candidate is not valid
 * JSON, the raw text is returned as-is instead of letting JSON.parse throw a
 * bare SyntaxError — callers still see the message content.
 */
function extractJsonBlock(text: string): unknown {
  const match = text.match(/```json\s*([\s\S]*?)\s*```/i);
  const raw = match?.[1]?.trim() || text.trim();
  if (!raw) {
    return null;
  }
  try {
    return JSON.parse(raw);
  } catch {
    // Plain status text (not JSON): surface it rather than crash.
    return raw;
  }
}
/**
 * Spawns the chrome-devtools-mcp server over stdio and wires up an MCP client.
 *
 * `ready` resolves after the client connects and the server advertises the
 * `list_pages` tool; on failure the client is closed (best-effort) and a
 * BrowserProfileUnavailableError naming the profile is thrown through `ready`.
 */
async function createRealSession(profileName: string): Promise<ChromeMcpSession> {
  const transport = new StdioClientTransport({
    command: DEFAULT_CHROME_MCP_COMMAND,
    args: DEFAULT_CHROME_MCP_ARGS,
    stderr: "pipe",
  });
  const client = new Client(
    {
      name: "openclaw-browser",
      version: "0.0.0",
    },
    {},
  );
  // Handshake runs eagerly; callers await `ready` via getSession().
  const ready = (async () => {
    try {
      await client.connect(transport);
      const tools = await client.listTools();
      if (!tools.tools.some((tool) => tool.name === "list_pages")) {
        throw new Error("Chrome MCP server did not expose the expected navigation tools.");
      }
    } catch (err) {
      // Best-effort cleanup, then surface a profile-scoped, actionable error.
      await client.close().catch(() => {});
      throw new BrowserProfileUnavailableError(
        `Chrome MCP existing-session attach failed for profile "${profileName}". ` +
        `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. ` +
        `Details: ${String(err)}`,
      );
    }
  })();
  return {
    client,
    transport,
    ready,
  };
}
/**
 * Returns the cached session for a profile, creating a new one when none is
 * cached or the previous transport's process has exited.
 *
 * Awaits the session's `ready` handshake; on failure the cache entry is
 * evicted (only if it still points at the same transport) and the error
 * rethrown so the next call retries with a fresh session.
 */
async function getSession(profileName: string): Promise<ChromeMcpSession> {
  let session = sessions.get(profileName);
  // A null pid means the spawned MCP process is gone; drop the stale entry.
  if (session && session.transport.pid === null) {
    sessions.delete(profileName);
    session = undefined;
  }
  if (!session) {
    session = await (sessionFactory ?? createRealSession)(profileName);
    sessions.set(profileName, session);
  }
  try {
    await session.ready;
    return session;
  } catch (err) {
    const current = sessions.get(profileName);
    // Evict only our own entry; a concurrent caller may have replaced it already.
    if (current?.transport === session.transport) {
      sessions.delete(profileName);
    }
    throw err;
  }
}
/**
 * Invokes an MCP tool on the profile's session.
 *
 * On any call failure the session is evicted from the cache and its client
 * closed (best-effort) before rethrowing, so the next call starts fresh.
 */
async function callTool(
  profileName: string,
  name: string,
  args: Record<string, unknown> = {},
): Promise<ChromeMcpToolResult> {
  const session = await getSession(profileName);
  try {
    return (await session.client.callTool({
      name,
      arguments: args,
    })) as ChromeMcpToolResult;
  } catch (err) {
    sessions.delete(profileName);
    await session.client.close().catch(() => {});
    throw err;
  }
}
/**
 * Runs `fn` with a unique file path inside a freshly created temp directory,
 * then removes the directory (best-effort) no matter how `fn` exits.
 */
async function withTempFile<T>(fn: (filePath: string) => Promise<T>): Promise<T> {
  const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-"));
  const candidate = path.join(tempDir, randomUUID());
  try {
    return await fn(candidate);
  } finally {
    await fs.rm(tempDir, { recursive: true, force: true }).catch(() => {});
  }
}
/** Looks up one page by id in the current listing; throws BrowserTabNotFoundError if absent. */
async function findPageById(profileName: string, pageId: number): Promise<ChromeMcpStructuredPage> {
  const pages = await listChromeMcpPages(profileName);
  const page = pages.find((entry) => entry.id === pageId);
  if (!page) {
    throw new BrowserTabNotFoundError();
  }
  return page;
}
/** Eagerly creates/validates the profile's MCP session; throws when it cannot attach. */
export async function ensureChromeMcpAvailable(profileName: string): Promise<void> {
  await getSession(profileName);
}
/** Returns the MCP server process pid for a profile, or null when no session exists. */
export function getChromeMcpPid(profileName: string): number | null {
  return sessions.get(profileName)?.transport.pid ?? null;
}
/**
 * Closes and evicts a profile's session. Returns false when no session was
 * cached; close errors are swallowed (best-effort shutdown).
 */
export async function closeChromeMcpSession(profileName: string): Promise<boolean> {
  const session = sessions.get(profileName);
  if (!session) {
    return false;
  }
  sessions.delete(profileName);
  await session.client.close().catch(() => {});
  return true;
}
/** Closes every cached session sequentially; individual failures are ignored. */
export async function stopAllChromeMcpSessions(): Promise<void> {
  // Snapshot the keys first: closeChromeMcpSession mutates the map while we iterate.
  const names = [...sessions.keys()];
  for (const name of names) {
    await closeChromeMcpSession(name).catch(() => {});
  }
}
/** Lists open pages via the MCP `list_pages` tool (structured or text fallback). */
export async function listChromeMcpPages(profileName: string): Promise<ChromeMcpStructuredPage[]> {
  const result = await callTool(profileName, "list_pages");
  return extractStructuredPages(result);
}
/** Lists open pages in the BrowserTab shape expected by browser-route callers. */
export async function listChromeMcpTabs(profileName: string): Promise<BrowserTab[]> {
  return toBrowserTabs(await listChromeMcpPages(profileName));
}
/**
 * Opens a new page via the MCP `new_page` tool and returns it as a BrowserTab.
 * Prefers the page marked selected in the response; otherwise falls back to
 * the last listed page. Throws when the response lists no pages at all.
 */
export async function openChromeMcpTab(profileName: string, url: string): Promise<BrowserTab> {
  const result = await callTool(profileName, "new_page", { url });
  const pages = extractStructuredPages(result);
  const chosen = pages.find((page) => page.selected) ?? pages.at(-1);
  if (!chosen) {
    throw new Error("Chrome MCP did not return the created page.");
  }
  return {
    targetId: String(chosen.id),
    title: "",
    // Fall back to the requested url when the listing omitted one.
    url: chosen.url ?? url,
    type: "page",
  };
}
/** Selects the page and brings its window to the front via `select_page`. */
export async function focusChromeMcpTab(profileName: string, targetId: string): Promise<void> {
  await callTool(profileName, "select_page", {
    pageId: parsePageId(targetId),
    bringToFront: true,
  });
}
/** Closes the page identified by targetId via `close_page`. */
export async function closeChromeMcpTab(profileName: string, targetId: string): Promise<void> {
  await callTool(profileName, "close_page", { pageId: parsePageId(targetId) });
}
/**
 * Navigates an existing page to `url` (optionally bounded by `timeoutMs`),
 * then re-lists pages to report the URL the page actually ended up on.
 * Throws BrowserTabNotFoundError when the page disappears after navigation.
 */
export async function navigateChromeMcpPage(params: {
  profileName: string;
  targetId: string;
  url: string;
  timeoutMs?: number;
}): Promise<{ url: string }> {
  await callTool(params.profileName, "navigate_page", {
    pageId: parsePageId(params.targetId),
    type: "url",
    url: params.url,
    ...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}),
  });
  const page = await findPageById(params.profileName, parsePageId(params.targetId));
  return { url: page.url ?? params.url };
}
/** Takes an accessibility snapshot of the page via `take_snapshot` and returns its root node. */
export async function takeChromeMcpSnapshot(params: {
  profileName: string;
  targetId: string;
}): Promise<ChromeMcpSnapshotNode> {
  const result = await callTool(params.profileName, "take_snapshot", {
    pageId: parsePageId(params.targetId),
  });
  return extractSnapshot(result);
}
/**
 * Captures a screenshot by asking the MCP server to write into a temp file,
 * then reading the bytes back. `uid` scopes the shot to a single element,
 * `fullPage` captures the whole page, and format defaults to PNG.
 */
export async function takeChromeMcpScreenshot(params: {
  profileName: string;
  targetId: string;
  uid?: string;
  fullPage?: boolean;
  format?: "png" | "jpeg";
}): Promise<Buffer> {
  return await withTempFile(async (filePath) => {
    await callTool(params.profileName, "take_screenshot", {
      pageId: parsePageId(params.targetId),
      filePath,
      format: params.format ?? "png",
      ...(params.uid ? { uid: params.uid } : {}),
      ...(params.fullPage ? { fullPage: true } : {}),
    });
    // Read before withTempFile's cleanup removes the directory.
    return await fs.readFile(filePath);
  });
}
/** Clicks (or double-clicks) the snapshot element identified by `uid`. */
export async function clickChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  uid: string;
  doubleClick?: boolean;
}): Promise<void> {
  await callTool(params.profileName, "click", {
    pageId: parsePageId(params.targetId),
    uid: params.uid,
    ...(params.doubleClick ? { dblClick: true } : {}),
  });
}
/** Fills a single input element (by snapshot `uid`) with `value` via the `fill` tool. */
export async function fillChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  uid: string;
  value: string;
}): Promise<void> {
  await callTool(params.profileName, "fill", {
    pageId: parsePageId(params.targetId),
    uid: params.uid,
    value: params.value,
  });
}
/** Fills multiple form inputs in one `fill_form` call; each element pairs a uid with a value. */
export async function fillChromeMcpForm(params: {
  profileName: string;
  targetId: string;
  elements: Array<{ uid: string; value: string }>;
}): Promise<void> {
  await callTool(params.profileName, "fill_form", {
    pageId: parsePageId(params.targetId),
    elements: params.elements,
  });
}
/** Hovers the pointer over the snapshot element identified by `uid`. */
export async function hoverChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  uid: string;
}): Promise<void> {
  await callTool(params.profileName, "hover", {
    pageId: parsePageId(params.targetId),
    uid: params.uid,
  });
}
/** Drags from one snapshot element to another via the `drag` tool (snake_case uid keys). */
export async function dragChromeMcpElement(params: {
  profileName: string;
  targetId: string;
  fromUid: string;
  toUid: string;
}): Promise<void> {
  await callTool(params.profileName, "drag", {
    pageId: parsePageId(params.targetId),
    from_uid: params.fromUid,
    to_uid: params.toUid,
  });
}
/** Uploads one local file to the file-input element identified by `uid`. */
export async function uploadChromeMcpFile(params: {
  profileName: string;
  targetId: string;
  uid: string;
  filePath: string;
}): Promise<void> {
  await callTool(params.profileName, "upload_file", {
    pageId: parsePageId(params.targetId),
    uid: params.uid,
    filePath: params.filePath,
  });
}
/** Sends a keyboard key press to the page via the `press_key` tool. */
export async function pressChromeMcpKey(params: {
  profileName: string;
  targetId: string;
  key: string;
}): Promise<void> {
  await callTool(params.profileName, "press_key", {
    pageId: parsePageId(params.targetId),
    key: params.key,
  });
}
/** Resizes the page viewport to the given pixel dimensions via `resize_page`. */
export async function resizeChromeMcpPage(params: {
  profileName: string;
  targetId: string;
  width: number;
  height: number;
}): Promise<void> {
  await callTool(params.profileName, "resize_page", {
    pageId: parsePageId(params.targetId),
    width: params.width,
    height: params.height,
  });
}
/**
 * Accepts or dismisses the page's pending JavaScript dialog.
 * Note: promptText is only forwarded when truthy (empty string is omitted).
 */
export async function handleChromeMcpDialog(params: {
  profileName: string;
  targetId: string;
  action: "accept" | "dismiss";
  promptText?: string;
}): Promise<void> {
  await callTool(params.profileName, "handle_dialog", {
    pageId: parsePageId(params.targetId),
    action: params.action,
    ...(params.promptText ? { promptText: params.promptText } : {}),
  });
}
/**
 * Runs `fn` (a stringified function) in the page via `evaluate_script` and
 * parses the JSON payload out of the result's textual `message` field.
 * Returns null when the message is empty or whitespace-only.
 */
export async function evaluateChromeMcpScript(params: {
  profileName: string;
  targetId: string;
  fn: string;
  args?: string[];
}): Promise<unknown> {
  const result = await callTool(params.profileName, "evaluate_script", {
    pageId: parsePageId(params.targetId),
    function: params.fn,
    ...(params.args?.length ? { args: params.args } : {}),
  });
  const message = extractStructuredContent(result).message;
  const text = typeof message === "string" ? message : "";
  if (!text.trim()) {
    return null;
  }
  return extractJsonBlock(text);
}
/** Waits (via `wait_for`) until the given text fragments appear on the page. */
export async function waitForChromeMcpText(params: {
  profileName: string;
  targetId: string;
  text: string[];
  timeoutMs?: number;
}): Promise<void> {
  await callTool(params.profileName, "wait_for", {
    pageId: parsePageId(params.targetId),
    text: params.text,
    ...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}),
  });
}
/** Test hook: overrides session creation; pass null to restore the real factory. */
export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void {
  sessionFactory = factory;
}
/** Test hook: clears the injected factory and closes every cached session. */
export async function resetChromeMcpSessionsForTest(): Promise<void> {
  sessionFactory = null;
  await stopAllChromeMcpSessions();
}

View File

@ -3,6 +3,7 @@ import { fetchBrowserJson } from "./client-fetch.js";
export type BrowserStatus = {
enabled: boolean;
profile?: string;
driver?: "openclaw" | "extension" | "existing-session";
running: boolean;
cdpReady?: boolean;
cdpHttp?: boolean;
@ -26,6 +27,7 @@ export type ProfileStatus = {
cdpPort: number;
cdpUrl: string;
color: string;
driver: "openclaw" | "extension" | "existing-session";
running: boolean;
tabCount: number;
isDefault: boolean;
@ -165,7 +167,7 @@ export async function browserCreateProfile(
name: string;
color?: string;
cdpUrl?: string;
driver?: "openclaw" | "extension";
driver?: "openclaw" | "extension" | "existing-session";
},
): Promise<BrowserCreateProfileResult> {
return await fetchBrowserJson<BrowserCreateProfileResult>(

View File

@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = {
cdpHost: string;
cdpIsLoopback: boolean;
color: string;
driver: "openclaw" | "extension";
driver: "openclaw" | "extension" | "existing-session";
attachOnly: boolean;
};
@ -335,7 +335,12 @@ export function resolveProfile(
let cdpHost = resolved.cdpHost;
let cdpPort = profile.cdpPort ?? 0;
let cdpUrl = "";
const driver = profile.driver === "extension" ? "extension" : "openclaw";
const driver =
profile.driver === "extension"
? "extension"
: profile.driver === "existing-session"
? "existing-session"
: "openclaw";
if (rawProfileUrl) {
const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`);
@ -356,7 +361,7 @@ export function resolveProfile(
cdpIsLoopback: isLoopbackHost(cdpHost),
color: profile.color,
driver,
attachOnly: profile.attachOnly ?? resolved.attachOnly,
attachOnly: driver === "existing-session" ? true : (profile.attachOnly ?? resolved.attachOnly),
};
}

View File

@ -1,6 +1,10 @@
import type { ResolvedBrowserProfile } from "./config.js";
export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp";
export type BrowserProfileMode =
| "local-managed"
| "local-extension-relay"
| "local-existing-session"
| "remote-cdp";
export type BrowserProfileCapabilities = {
mode: BrowserProfileMode;
@ -31,6 +35,20 @@ export function getBrowserProfileCapabilities(
};
}
if (profile.driver === "existing-session") {
return {
mode: "local-existing-session",
isRemote: false,
requiresRelay: false,
requiresAttachedTab: false,
usesPersistentPlaywright: false,
supportsPerTabWs: false,
supportsJsonTabEndpoints: false,
supportsReset: false,
supportsManagedTabLimit: false,
};
}
if (!profile.cdpIsLoopback) {
return {
mode: "remote-cdp",
@ -75,6 +93,9 @@ export function resolveDefaultSnapshotFormat(params: {
if (capabilities.mode === "local-extension-relay") {
return "aria";
}
if (capabilities.mode === "local-existing-session") {
return "ai";
}
return params.hasPlaywright ? "ai" : "aria";
}

View File

@ -1,6 +1,6 @@
import fs from "node:fs";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { resolveBrowserConfig } from "./config.js";
import { createBrowserProfilesService } from "./profiles-service.js";
import type { BrowserRouteContext, BrowserServerState } from "./server-context.js";
@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: {
}
describe("BrowserProfilesService", () => {
beforeEach(() => {
vi.clearAllMocks();
});
it("allocates next local port for new profiles", async () => {
const { result, state } = await createWorkProfileWithConfig({
resolved: resolveBrowserConfig({}),
@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => {
).rejects.toThrow(/requires an explicit loopback cdpUrl/i);
});
it("creates existing-session profiles as attach-only local entries", async () => {
const resolved = resolveBrowserConfig({});
const { ctx, state } = createCtx(resolved);
vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } });
const service = createBrowserProfilesService(ctx);
const result = await service.createProfile({
name: "chrome-live",
driver: "existing-session",
});
expect(result.cdpPort).toBe(18801);
expect(result.isRemote).toBe(false);
expect(state.resolved.profiles["chrome-live"]).toEqual({
cdpPort: 18801,
driver: "existing-session",
attachOnly: true,
color: expect.any(String),
});
expect(writeConfigFile).toHaveBeenCalledWith(
expect.objectContaining({
browser: expect.objectContaining({
profiles: expect.objectContaining({
"chrome-live": expect.objectContaining({
cdpPort: 18801,
driver: "existing-session",
attachOnly: true,
}),
}),
}),
}),
);
});
it("rejects driver=existing-session when cdpUrl is provided", async () => {
const resolved = resolveBrowserConfig({});
const { ctx } = createCtx(resolved);
vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } });
const service = createBrowserProfilesService(ctx);
await expect(
service.createProfile({
name: "chrome-live",
driver: "existing-session",
cdpUrl: "http://127.0.0.1:9222",
}),
).rejects.toThrow(/does not accept cdpUrl/i);
});
it("deletes remote profiles without stopping or removing local data", async () => {
const resolved = resolveBrowserConfig({
profiles: {
@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => {
expect(result.deleted).toBe(true);
expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir));
});
it("deletes existing-session profiles without touching local browser data", async () => {
const resolved = resolveBrowserConfig({
profiles: {
"chrome-live": {
cdpPort: 18801,
color: "#0066CC",
driver: "existing-session",
attachOnly: true,
},
},
});
const { ctx } = createCtx(resolved);
vi.mocked(loadConfig).mockReturnValue({
browser: {
defaultProfile: "openclaw",
profiles: {
openclaw: { cdpPort: 18800, color: "#FF4500" },
"chrome-live": {
cdpPort: 18801,
color: "#0066CC",
driver: "existing-session",
attachOnly: true,
},
},
},
});
const service = createBrowserProfilesService(ctx);
const result = await service.deleteProfile("chrome-live");
expect(result.deleted).toBe(false);
expect(ctx.forProfile).not.toHaveBeenCalled();
expect(movePathToTrash).not.toHaveBeenCalled();
});
});

View File

@ -27,7 +27,7 @@ export type CreateProfileParams = {
name: string;
color?: string;
cdpUrl?: string;
driver?: "openclaw" | "extension";
driver?: "openclaw" | "extension" | "existing-session";
};
export type CreateProfileResult = {
@ -79,7 +79,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
const createProfile = async (params: CreateProfileParams): Promise<CreateProfileResult> => {
const name = params.name.trim();
const rawCdpUrl = params.cdpUrl?.trim() || undefined;
const driver = params.driver === "extension" ? "extension" : undefined;
const driver =
params.driver === "extension"
? "extension"
: params.driver === "existing-session"
? "existing-session"
: undefined;
if (!isValidProfileName(name)) {
throw new BrowserValidationError(
@ -118,6 +123,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
);
}
}
if (driver === "existing-session") {
throw new BrowserValidationError(
"driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow",
);
}
profileConfig = {
cdpUrl: parsed.normalized,
...(driver ? { driver } : {}),
@ -136,6 +146,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
profileConfig = {
cdpPort,
...(driver ? { driver } : {}),
...(driver === "existing-session" ? { attachOnly: true } : {}),
color: profileColor,
};
}
@ -195,7 +206,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
const state = ctx.state();
const resolved = resolveProfile(state.resolved, name);
if (resolved?.cdpIsLoopback) {
if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") {
try {
await ctx.forProfile(name).stopRunningBrowser();
} catch {

View File

@ -12,40 +12,49 @@ afterEach(async () => {
await closePlaywrightBrowserConnection().catch(() => {});
});
/**
 * Builds a fake Playwright browser whose context rejects newCDPSession,
 * simulating extension-relay connections where CDP attach is blocked.
 * `urls` controls how many pages exist (and their url() values);
 * `newCDPSessionError` customizes the rejection message.
 */
function createExtensionFallbackBrowserHarness(options?: {
  urls?: string[];
  newCDPSessionError?: string;
}) {
  const pageOn = vi.fn();
  const contextOn = vi.fn();
  const browserOn = vi.fn();
  const browserClose = vi.fn(async () => {});
  const newCDPSession = vi.fn(async () => {
    throw new Error(options?.newCDPSessionError ?? "Not allowed");
  });
  const context = {
    pages: () => [],
    on: contextOn,
    newCDPSession,
  } as unknown as import("playwright-core").BrowserContext;
  // A url entry of undefined yields a page without a url() method.
  const pages = (options?.urls ?? [undefined]).map(
    (url) =>
      ({
        on: pageOn,
        context: () => context,
        ...(url ? { url: () => url } : {}),
      }) as unknown as import("playwright-core").Page,
  );
  // Rebind pages() after the pages array exists (it references context above).
  (context as unknown as { pages: () => unknown[] }).pages = () => pages;
  const browser = {
    contexts: () => [context],
    on: browserOn,
    close: browserClose,
  } as unknown as import("playwright-core").Browser;
  connectOverCdpSpy.mockResolvedValue(browser);
  getChromeWebSocketUrlSpy.mockResolvedValue(null);
  return { browserClose, newCDPSession, pages };
}
describe("pw-session getPageForTargetId", () => {
it("falls back to the only page when CDP session attachment is blocked (extension relays)", async () => {
connectOverCdpSpy.mockClear();
getChromeWebSocketUrlSpy.mockClear();
const pageOn = vi.fn();
const contextOn = vi.fn();
const browserOn = vi.fn();
const browserClose = vi.fn(async () => {});
const context = {
pages: () => [],
on: contextOn,
newCDPSession: vi.fn(async () => {
throw new Error("Not allowed");
}),
} as unknown as import("playwright-core").BrowserContext;
const page = {
on: pageOn,
context: () => context,
} as unknown as import("playwright-core").Page;
// Fill pages() after page exists.
(context as unknown as { pages: () => unknown[] }).pages = () => [page];
const browser = {
contexts: () => [context],
on: browserOn,
close: browserClose,
} as unknown as import("playwright-core").Browser;
connectOverCdpSpy.mockResolvedValue(browser);
getChromeWebSocketUrlSpy.mockResolvedValue(null);
const { browserClose, pages } = createExtensionFallbackBrowserHarness();
const [page] = pages;
const resolved = await getPageForTargetId({
cdpUrl: "http://127.0.0.1:18792",
@ -58,40 +67,9 @@ describe("pw-session getPageForTargetId", () => {
});
it("uses the shared HTTP-base normalization when falling back to /json/list for direct WebSocket CDP URLs", async () => {
const pageOn = vi.fn();
const contextOn = vi.fn();
const browserOn = vi.fn();
const browserClose = vi.fn(async () => {});
const context = {
pages: () => [],
on: contextOn,
newCDPSession: vi.fn(async () => {
throw new Error("Not allowed");
}),
} as unknown as import("playwright-core").BrowserContext;
const pageA = {
on: pageOn,
context: () => context,
url: () => "https://alpha.example",
} as unknown as import("playwright-core").Page;
const pageB = {
on: pageOn,
context: () => context,
url: () => "https://beta.example",
} as unknown as import("playwright-core").Page;
(context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB];
const browser = {
contexts: () => [context],
on: browserOn,
close: browserClose,
} as unknown as import("playwright-core").Browser;
connectOverCdpSpy.mockResolvedValue(browser);
getChromeWebSocketUrlSpy.mockResolvedValue(null);
const [, pageB] = createExtensionFallbackBrowserHarness({
urls: ["https://alpha.example", "https://beta.example"],
}).pages;
const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue({
ok: true,
@ -117,41 +95,11 @@ describe("pw-session getPageForTargetId", () => {
});
it("resolves extension-relay pages from /json/list without probing page CDP sessions first", async () => {
const pageOn = vi.fn();
const contextOn = vi.fn();
const browserOn = vi.fn();
const browserClose = vi.fn(async () => {});
const newCDPSession = vi.fn(async () => {
throw new Error("Target.attachToBrowserTarget: Not allowed");
const { newCDPSession, pages } = createExtensionFallbackBrowserHarness({
urls: ["https://alpha.example", "https://beta.example"],
newCDPSessionError: "Target.attachToBrowserTarget: Not allowed",
});
const context = {
pages: () => [],
on: contextOn,
newCDPSession,
} as unknown as import("playwright-core").BrowserContext;
const pageA = {
on: pageOn,
context: () => context,
url: () => "https://alpha.example",
} as unknown as import("playwright-core").Page;
const pageB = {
on: pageOn,
context: () => context,
url: () => "https://beta.example",
} as unknown as import("playwright-core").Page;
(context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB];
const browser = {
contexts: () => [context],
on: browserOn,
close: browserClose,
} as unknown as import("playwright-core").Browser;
connectOverCdpSpy.mockResolvedValue(browser);
getChromeWebSocketUrlSpy.mockResolvedValue(null);
const [, pageB] = pages;
const fetchSpy = vi.spyOn(globalThis, "fetch");
fetchSpy

View File

@ -1,5 +1,10 @@
import type { BrowserRouteContext } from "../server-context.js";
import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js";
import {
readBody,
requirePwAi,
resolveTargetIdFromBody,
withRouteTabContext,
} from "./agent.shared.js";
import { ensureOutputRootDir, resolveWritableOutputPathOrRespond } from "./output-paths.js";
import { DEFAULT_DOWNLOAD_DIR } from "./path-output.js";
import type { BrowserRouteRegistrar } from "./types.js";
@ -23,13 +28,23 @@ export function registerBrowserAgentActDownloadRoutes(
const out = toStringOrEmpty(body.path) || "";
const timeoutMs = toNumber(body.timeoutMs);
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "wait for download",
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
if (profileCtx.profile.driver === "existing-session") {
return jsonError(
res,
501,
"download waiting is not supported for existing-session profiles yet.",
);
}
const pw = await requirePwAi(res, "wait for download");
if (!pw) {
return;
}
await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR);
let downloadPath: string | undefined;
if (out.trim()) {
@ -67,13 +82,23 @@ export function registerBrowserAgentActDownloadRoutes(
return jsonError(res, 400, "path is required");
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "download",
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
if (profileCtx.profile.driver === "existing-session") {
return jsonError(
res,
501,
"downloads are not supported for existing-session profiles yet.",
);
}
const pw = await requirePwAi(res, "download");
if (!pw) {
return;
}
await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR);
const downloadPath = await resolveWritableOutputPathOrRespond({
res,

View File

@ -1,5 +1,11 @@
import { evaluateChromeMcpScript, uploadChromeMcpFile } from "../chrome-mcp.js";
import type { BrowserRouteContext } from "../server-context.js";
import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js";
import {
readBody,
requirePwAi,
resolveTargetIdFromBody,
withRouteTabContext,
} from "./agent.shared.js";
import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "./path-output.js";
import type { BrowserRouteRegistrar } from "./types.js";
import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js";
@ -20,13 +26,12 @@ export function registerBrowserAgentActHookRoutes(
return jsonError(res, 400, "paths are required");
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "file chooser hook",
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
const uploadPathsResult = await resolveExistingPathsWithinRoot({
rootDir: DEFAULT_UPLOAD_DIR,
requestedPaths: paths,
@ -38,6 +43,39 @@ export function registerBrowserAgentActHookRoutes(
}
const resolvedPaths = uploadPathsResult.paths;
if (profileCtx.profile.driver === "existing-session") {
if (element) {
return jsonError(
res,
501,
"existing-session file uploads do not support element selectors; use ref/inputRef.",
);
}
if (resolvedPaths.length !== 1) {
return jsonError(
res,
501,
"existing-session file uploads currently support one file at a time.",
);
}
const uid = inputRef || ref;
if (!uid) {
return jsonError(res, 501, "existing-session file uploads require ref or inputRef.");
}
await uploadChromeMcpFile({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
uid,
filePath: resolvedPaths[0] ?? "",
});
return res.json({ ok: true });
}
const pw = await requirePwAi(res, "file chooser hook");
if (!pw) {
return;
}
if (inputRef || element) {
if (ref) {
return jsonError(res, 400, "ref cannot be combined with inputRef/element");
@ -79,13 +117,69 @@ export function registerBrowserAgentActHookRoutes(
return jsonError(res, 400, "accept is required");
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "dialog hook",
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
if (profileCtx.profile.driver === "existing-session") {
if (timeoutMs) {
return jsonError(
res,
501,
"existing-session dialog handling does not support timeoutMs.",
);
}
await evaluateChromeMcpScript({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
fn: `() => {
const state = (window.__openclawDialogHook ??= {});
if (!state.originals) {
state.originals = {
alert: window.alert.bind(window),
confirm: window.confirm.bind(window),
prompt: window.prompt.bind(window),
};
}
const originals = state.originals;
const restore = () => {
window.alert = originals.alert;
window.confirm = originals.confirm;
window.prompt = originals.prompt;
delete window.__openclawDialogHook;
};
window.alert = (...args) => {
try {
return undefined;
} finally {
restore();
}
};
window.confirm = (...args) => {
try {
return ${accept ? "true" : "false"};
} finally {
restore();
}
};
window.prompt = (...args) => {
try {
return ${accept ? JSON.stringify(promptText ?? "") : "null"};
} finally {
restore();
}
};
return true;
}`,
});
return res.json({ ok: true });
}
const pw = await requirePwAi(res, "dialog hook");
if (!pw) {
return;
}
await pw.armDialogViaPlaywright({
cdpUrl,
targetId: tab.targetId,

View File

@ -1,3 +1,14 @@
import {
clickChromeMcpElement,
closeChromeMcpTab,
dragChromeMcpElement,
evaluateChromeMcpScript,
fillChromeMcpElement,
fillChromeMcpForm,
hoverChromeMcpElement,
pressChromeMcpKey,
resizeChromeMcpPage,
} from "../chrome-mcp.js";
import type { BrowserFormField } from "../client-actions-core.js";
import { normalizeBrowserFormField } from "../form-fields.js";
import type { BrowserRouteContext } from "../server-context.js";
@ -11,13 +22,88 @@ import {
} from "./agent.act.shared.js";
import {
readBody,
requirePwAi,
resolveTargetIdFromBody,
withPlaywrightRouteContext,
withRouteTabContext,
SELECTOR_UNSUPPORTED_MESSAGE,
} from "./agent.shared.js";
import type { BrowserRouteRegistrar } from "./types.js";
import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js";
/** Resolve after `ms` milliseconds (promise-wrapped setTimeout). */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
/**
 * Build the source of a page-side predicate expression for existing-session
 * "wait" operations.
 *
 * Each provided field contributes one boolean check (text present, text gone,
 * selector present, exact URL match, readyState, or a caller-supplied async
 * function). Returns `null` when no field is set; a single check is returned
 * as-is, multiple checks are parenthesized and joined with `&&`. The result
 * is later wrapped as `async () => <predicate>`, which is what makes the
 * `await` in the `fn` check legal.
 */
function buildExistingSessionWaitPredicate(params: {
  text?: string;
  textGone?: string;
  selector?: string;
  url?: string;
  loadState?: "load" | "domcontentloaded" | "networkidle";
  fn?: string;
}): string | null {
  const { text, textGone, selector, url, loadState, fn } = params;
  const conditions: string[] = [];
  if (text) {
    conditions.push(`Boolean(document.body?.innerText?.includes(${JSON.stringify(text)}))`);
  }
  if (textGone) {
    conditions.push(`!document.body?.innerText?.includes(${JSON.stringify(textGone)})`);
  }
  if (selector) {
    conditions.push(`Boolean(document.querySelector(${JSON.stringify(selector)}))`);
  }
  if (url) {
    conditions.push(`window.location.href === ${JSON.stringify(url)}`);
  }
  if (loadState === "domcontentloaded") {
    conditions.push(`document.readyState === "interactive" || document.readyState === "complete"`);
  } else if (loadState === "load" || loadState === "networkidle") {
    // NOTE(review): "networkidle" is approximated by readyState === "complete";
    // plain page evaluation has no network-idle signal to observe.
    conditions.push(`document.readyState === "complete"`);
  }
  if (fn) {
    conditions.push(`Boolean(await (${fn})())`);
  }
  if (conditions.length === 0) {
    return null;
  }
  if (conditions.length === 1) {
    return conditions[0];
  }
  return conditions.map((condition) => `(${condition})`).join(" && ");
}
/**
 * Poll an existing-session (Chrome MCP) tab until the requested wait
 * condition holds.
 *
 * Order of operations:
 *   1. If `timeMs` is set, sleep that long unconditionally.
 *   2. Build a page-side predicate from text/textGone/selector/url/
 *      loadState/fn; if none were provided, return immediately.
 *   3. Evaluate the predicate in the page every 250ms until it is truthy or
 *      the timeout budget (default 10s, floor 250ms) is exhausted.
 *
 * @throws Error when the predicate never became truthy within the budget.
 */
async function waitForExistingSessionCondition(params: {
  profileName: string;
  targetId: string;
  timeMs?: number;
  text?: string;
  textGone?: string;
  selector?: string;
  url?: string;
  loadState?: "load" | "domcontentloaded" | "networkidle";
  fn?: string;
  timeoutMs?: number;
}): Promise<void> {
  if (params.timeMs && params.timeMs > 0) {
    await sleep(params.timeMs);
  }
  const predicate = buildExistingSessionWaitPredicate(params);
  if (!predicate) {
    return;
  }
  const timeoutMs = Math.max(250, params.timeoutMs ?? 10_000);
  const deadline = Date.now() + timeoutMs;
  const pollIntervalMs = 250;
  for (;;) {
    const ready = await evaluateChromeMcpScript({
      profileName: params.profileName,
      targetId: params.targetId,
      fn: `async () => ${predicate}`,
    });
    if (ready) {
      return;
    }
    // Fix: perform one final evaluation after the deadline instead of
    // throwing straight after a sleep — previously a condition that became
    // true during the last poll interval was reported as a timeout.
    if (Date.now() >= deadline) {
      break;
    }
    await sleep(pollIntervalMs);
  }
  throw new Error("Timed out waiting for condition");
}
export function registerBrowserAgentActRoutes(
app: BrowserRouteRegistrar,
ctx: BrowserRouteContext,
@ -34,14 +120,15 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, SELECTOR_UNSUPPORTED_MESSAGE);
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: `act:${kind}`,
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
const evaluateEnabled = ctx.state().resolved.evaluateEnabled;
const isExistingSession = profileCtx.profile.driver === "existing-session";
const profileName = profileCtx.profile.name;
switch (kind) {
case "click": {
@ -63,6 +150,26 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, parsedModifiers.error);
}
const modifiers = parsedModifiers.modifiers;
if (isExistingSession) {
if ((button && button !== "left") || (modifiers && modifiers.length > 0)) {
return jsonError(
res,
501,
"existing-session click currently supports left-click only (no button overrides/modifiers).",
);
}
await clickChromeMcpElement({
profileName,
targetId: tab.targetId,
uid: ref,
doubleClick,
});
return res.json({ ok: true, targetId: tab.targetId, url: tab.url });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
const clickRequest: Parameters<typeof pw.clickViaPlaywright>[0] = {
cdpUrl,
targetId: tab.targetId,
@ -93,6 +200,33 @@ export function registerBrowserAgentActRoutes(
const submit = toBoolean(body.submit) ?? false;
const slowly = toBoolean(body.slowly) ?? false;
const timeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (slowly) {
return jsonError(
res,
501,
"existing-session type does not support slowly=true; use fill/press instead.",
);
}
await fillChromeMcpElement({
profileName,
targetId: tab.targetId,
uid: ref,
value: text,
});
if (submit) {
await pressChromeMcpKey({
profileName,
targetId: tab.targetId,
key: "Enter",
});
}
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
const typeRequest: Parameters<typeof pw.typeViaPlaywright>[0] = {
cdpUrl,
targetId: tab.targetId,
@ -113,6 +247,17 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "key is required");
}
const delayMs = toNumber(body.delayMs);
if (isExistingSession) {
if (delayMs) {
return jsonError(res, 501, "existing-session press does not support delayMs.");
}
await pressChromeMcpKey({ profileName, targetId: tab.targetId, key });
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.pressKeyViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -127,6 +272,21 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "ref is required");
}
const timeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (timeoutMs) {
return jsonError(
res,
501,
"existing-session hover does not support timeoutMs overrides.",
);
}
await hoverChromeMcpElement({ profileName, targetId: tab.targetId, uid: ref });
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.hoverViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -141,6 +301,26 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "ref is required");
}
const timeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (timeoutMs) {
return jsonError(
res,
501,
"existing-session scrollIntoView does not support timeoutMs overrides.",
);
}
await evaluateChromeMcpScript({
profileName,
targetId: tab.targetId,
fn: `(el) => { el.scrollIntoView({ block: "center", inline: "center" }); return true; }`,
args: [ref],
});
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
const scrollRequest: Parameters<typeof pw.scrollIntoViewViaPlaywright>[0] = {
cdpUrl,
targetId: tab.targetId,
@ -159,6 +339,26 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "startRef and endRef are required");
}
const timeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (timeoutMs) {
return jsonError(
res,
501,
"existing-session drag does not support timeoutMs overrides.",
);
}
await dragChromeMcpElement({
profileName,
targetId: tab.targetId,
fromUid: startRef,
toUid: endRef,
});
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.dragViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -175,6 +375,33 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "ref and values are required");
}
const timeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (values.length !== 1) {
return jsonError(
res,
501,
"existing-session select currently supports a single value only.",
);
}
if (timeoutMs) {
return jsonError(
res,
501,
"existing-session select does not support timeoutMs overrides.",
);
}
await fillChromeMcpElement({
profileName,
targetId: tab.targetId,
uid: ref,
value: values[0] ?? "",
});
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.selectOptionViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -198,6 +425,28 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "fields are required");
}
const timeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (timeoutMs) {
return jsonError(
res,
501,
"existing-session fill does not support timeoutMs overrides.",
);
}
await fillChromeMcpForm({
profileName,
targetId: tab.targetId,
elements: fields.map((field) => ({
uid: field.ref,
value: String(field.value ?? ""),
})),
});
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.fillFormViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -212,6 +461,19 @@ export function registerBrowserAgentActRoutes(
if (!width || !height) {
return jsonError(res, 400, "width and height are required");
}
if (isExistingSession) {
await resizeChromeMcpPage({
profileName,
targetId: tab.targetId,
width,
height,
});
return res.json({ ok: true, targetId: tab.targetId, url: tab.url });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.resizeViewportViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -260,6 +522,25 @@ export function registerBrowserAgentActRoutes(
"wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn",
);
}
if (isExistingSession) {
await waitForExistingSessionCondition({
profileName,
targetId: tab.targetId,
timeMs,
text,
textGone,
selector,
url,
loadState,
fn,
timeoutMs,
});
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.waitForViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -291,6 +572,31 @@ export function registerBrowserAgentActRoutes(
}
const ref = toStringOrEmpty(body.ref) || undefined;
const evalTimeoutMs = toNumber(body.timeoutMs);
if (isExistingSession) {
if (evalTimeoutMs !== undefined) {
return jsonError(
res,
501,
"existing-session evaluate does not support timeoutMs overrides.",
);
}
const result = await evaluateChromeMcpScript({
profileName,
targetId: tab.targetId,
fn,
args: ref ? [ref] : undefined,
});
return res.json({
ok: true,
targetId: tab.targetId,
url: tab.url,
result,
});
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
const evalRequest: Parameters<typeof pw.evaluateViaPlaywright>[0] = {
cdpUrl,
targetId: tab.targetId,
@ -310,6 +616,14 @@ export function registerBrowserAgentActRoutes(
});
}
case "close": {
if (isExistingSession) {
await closeChromeMcpTab(profileName, tab.targetId);
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, `act:${kind}`);
if (!pw) {
return;
}
await pw.closePageViaPlaywright({ cdpUrl, targetId: tab.targetId });
return res.json({ ok: true, targetId: tab.targetId });
}
@ -334,13 +648,23 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "url is required");
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "response body",
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
if (profileCtx.profile.driver === "existing-session") {
return jsonError(
res,
501,
"response body is not supported for existing-session profiles yet.",
);
}
const pw = await requirePwAi(res, "response body");
if (!pw) {
return;
}
const result = await pw.responseBodyViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -361,13 +685,39 @@ export function registerBrowserAgentActRoutes(
return jsonError(res, 400, "ref is required");
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "highlight",
run: async ({ cdpUrl, tab, pw }) => {
run: async ({ profileCtx, cdpUrl, tab }) => {
if (profileCtx.profile.driver === "existing-session") {
await evaluateChromeMcpScript({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
args: [ref],
fn: `(el) => {
if (!(el instanceof Element)) {
return false;
}
el.scrollIntoView({ block: "center", inline: "center" });
const previousOutline = el.style.outline;
const previousOffset = el.style.outlineOffset;
el.style.outline = "3px solid #FF4500";
el.style.outlineOffset = "2px";
setTimeout(() => {
el.style.outline = previousOutline;
el.style.outlineOffset = previousOffset;
}, 2000);
return true;
}`,
});
return res.json({ ok: true, targetId: tab.targetId });
}
const pw = await requirePwAi(res, "highlight");
if (!pw) {
return;
}
await pw.highlightViaPlaywright({
cdpUrl,
targetId: tab.targetId,

View File

@ -1,6 +1,20 @@
import path from "node:path";
import { ensureMediaDir, saveMediaBuffer } from "../../media/store.js";
import { captureScreenshot, snapshotAria } from "../cdp.js";
import {
evaluateChromeMcpScript,
navigateChromeMcpPage,
takeChromeMcpScreenshot,
takeChromeMcpSnapshot,
} from "../chrome-mcp.js";
import {
buildAiSnapshotFromChromeMcpSnapshot,
flattenChromeMcpSnapshotToAriaNodes,
} from "../chrome-mcp.snapshot.js";
import {
assertBrowserNavigationAllowed,
assertBrowserNavigationResultAllowed,
} from "../navigation-guard.js";
import { withBrowserNavigationPolicy } from "../navigation-guard.js";
import {
DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
@ -25,6 +39,89 @@ import {
import type { BrowserResponse, BrowserRouteRegistrar } from "./types.js";
import { jsonError, toBoolean, toStringOrEmpty } from "./utils.js";
const CHROME_MCP_OVERLAY_ATTR = "data-openclaw-mcp-overlay";
/**
 * Best-effort removal of any label overlay previously injected into the page
 * (nodes tagged with CHROME_MCP_OVERLAY_ATTR). Evaluation failures are
 * swallowed: a stale overlay is preferable to failing the surrounding
 * screenshot/snapshot flow.
 */
async function clearChromeMcpOverlay(params: {
  profileName: string;
  targetId: string;
}): Promise<void> {
  try {
    await evaluateChromeMcpScript({
      profileName: params.profileName,
      targetId: params.targetId,
      fn: `() => {
      document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove());
      return true;
    }`,
    });
  } catch {
    // Cleanup is best-effort; ignore evaluation errors.
  }
}
/**
 * Inject a fixed-position, pointer-transparent overlay into the page that
 * badges each ref'd element with its ref id (for labeled screenshots).
 *
 * The refs are passed twice: inlined as JSON for badge text, and as `args`,
 * which the evaluation bridge appears to resolve to the live elements —
 * NOTE(review): confirm against evaluateChromeMcpScript's contract.
 * Elements that are not resolvable or have a zero-size bounding rect are
 * counted as skipped.
 *
 * @returns counts of badges rendered and elements skipped.
 */
async function renderChromeMcpLabels(params: {
  profileName: string;
  targetId: string;
  refs: string[];
}): Promise<{ labels: number; skipped: number }> {
  const refsJson = JSON.stringify(params.refs);
  const result = await evaluateChromeMcpScript({
    profileName: params.profileName,
    targetId: params.targetId,
    args: params.refs,
    fn: `(...elements) => {
      const refs = ${refsJson};
      document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove());
      const root = document.createElement("div");
      root.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "labels");
      root.style.position = "fixed";
      root.style.inset = "0";
      root.style.pointerEvents = "none";
      root.style.zIndex = "2147483647";
      let labels = 0;
      let skipped = 0;
      elements.forEach((el, index) => {
        if (!(el instanceof Element)) {
          skipped += 1;
          return;
        }
        const rect = el.getBoundingClientRect();
        if (rect.width <= 0 && rect.height <= 0) {
          skipped += 1;
          return;
        }
        labels += 1;
        const badge = document.createElement("div");
        badge.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "label");
        badge.textContent = refs[index] || String(labels);
        badge.style.position = "fixed";
        badge.style.left = \`\${Math.max(0, rect.left)}px\`;
        badge.style.top = \`\${Math.max(0, rect.top)}px\`;
        badge.style.transform = "translateY(-100%)";
        badge.style.padding = "2px 6px";
        badge.style.borderRadius = "999px";
        badge.style.background = "#FF4500";
        badge.style.color = "#fff";
        badge.style.font = "600 12px ui-monospace, SFMono-Regular, Menlo, monospace";
        badge.style.boxShadow = "0 2px 6px rgba(0,0,0,0.35)";
        badge.style.whiteSpace = "nowrap";
        root.appendChild(badge);
      });
      document.documentElement.appendChild(root);
      return { labels, skipped };
    }`,
  });
  // Defensively narrow the untyped evaluation result to numeric counts.
  const readCount = (key: "labels" | "skipped"): number => {
    if (!result || typeof result !== "object") {
      return 0;
    }
    const value = (result as Record<string, unknown>)[key];
    return typeof value === "number" ? value : 0;
  };
  return { labels: readCount("labels"), skipped: readCount("skipped") };
}
async function saveBrowserMediaResponse(params: {
res: BrowserResponse;
buffer: Buffer;
@ -96,13 +193,27 @@ export function registerBrowserAgentSnapshotRoutes(
if (!url) {
return jsonError(res, 400, "url is required");
}
await withPlaywrightRouteContext({
await withRouteTabContext({
req,
res,
ctx,
targetId,
feature: "navigate",
run: async ({ cdpUrl, tab, pw, profileCtx }) => {
run: async ({ profileCtx, tab, cdpUrl }) => {
if (profileCtx.profile.driver === "existing-session") {
const ssrfPolicyOpts = withBrowserNavigationPolicy(ctx.state().resolved.ssrfPolicy);
await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts });
const result = await navigateChromeMcpPage({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
url,
});
await assertBrowserNavigationResultAllowed({ url: result.url, ...ssrfPolicyOpts });
return res.json({ ok: true, targetId: tab.targetId, ...result });
}
const pw = await requirePwAi(res, "navigate");
if (!pw) {
return;
}
const result = await pw.navigateViaPlaywright({
cdpUrl,
targetId: tab.targetId,
@ -122,6 +233,17 @@ export function registerBrowserAgentSnapshotRoutes(
app.post("/pdf", async (req, res) => {
const body = readBody(req);
const targetId = toStringOrEmpty(body.targetId) || undefined;
const profileCtx = resolveProfileContext(req, res, ctx);
if (!profileCtx) {
return;
}
if (profileCtx.profile.driver === "existing-session") {
return jsonError(
res,
501,
"pdf is not supported for existing-session profiles yet; use screenshot/snapshot instead.",
);
}
await withPlaywrightRouteContext({
req,
res,
@ -163,6 +285,36 @@ export function registerBrowserAgentSnapshotRoutes(
ctx,
targetId,
run: async ({ profileCtx, tab, cdpUrl }) => {
if (profileCtx.profile.driver === "existing-session") {
if (element) {
return jsonError(
res,
400,
"element screenshots are not supported for existing-session profiles; use ref from snapshot.",
);
}
const buffer = await takeChromeMcpScreenshot({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
uid: ref,
fullPage,
format: type,
});
const normalized = await normalizeBrowserScreenshot(buffer, {
maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE,
maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
});
await saveBrowserMediaResponse({
res,
buffer: normalized.buffer,
contentType: normalized.contentType ?? `image/${type}`,
maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
targetId: tab.targetId,
url: tab.url,
});
return;
}
let buffer: Buffer;
const shouldUsePlaywright = shouldUsePlaywrightForScreenshot({
profile: profileCtx.profile,
@ -227,6 +379,90 @@ export function registerBrowserAgentSnapshotRoutes(
if ((plan.labels || plan.mode === "efficient") && plan.format === "aria") {
return jsonError(res, 400, "labels/mode=efficient require format=ai");
}
if (profileCtx.profile.driver === "existing-session") {
if (plan.labels) {
return jsonError(res, 501, "labels are not supported for existing-session profiles yet.");
}
if (plan.selectorValue || plan.frameSelectorValue) {
return jsonError(
res,
400,
"selector/frame snapshots are not supported for existing-session profiles; snapshot the whole page and use refs.",
);
}
const snapshot = await takeChromeMcpSnapshot({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
});
if (plan.format === "aria") {
return res.json({
ok: true,
format: "aria",
targetId: tab.targetId,
url: tab.url,
nodes: flattenChromeMcpSnapshotToAriaNodes(snapshot, plan.limit),
});
}
const built = buildAiSnapshotFromChromeMcpSnapshot({
root: snapshot,
options: {
interactive: plan.interactive ?? undefined,
compact: plan.compact ?? undefined,
maxDepth: plan.depth ?? undefined,
},
maxChars: plan.resolvedMaxChars,
});
if (plan.labels) {
const refs = Object.keys(built.refs);
const labelResult = await renderChromeMcpLabels({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
refs,
});
try {
const labeled = await takeChromeMcpScreenshot({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
format: "png",
});
const normalized = await normalizeBrowserScreenshot(labeled, {
maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE,
maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
});
await ensureMediaDir();
const saved = await saveMediaBuffer(
normalized.buffer,
normalized.contentType ?? "image/png",
"browser",
DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
);
return res.json({
ok: true,
format: "ai",
targetId: tab.targetId,
url: tab.url,
labels: true,
labelsCount: labelResult.labels,
labelsSkipped: labelResult.skipped,
imagePath: path.resolve(saved.path),
imageType: normalized.contentType?.includes("jpeg") ? "jpeg" : "png",
...built,
});
} finally {
await clearChromeMcpOverlay({
profileName: profileCtx.profile.name,
targetId: tab.targetId,
});
}
}
return res.json({
ok: true,
format: "ai",
targetId: tab.targetId,
url: tab.url,
...built,
});
}
if (plan.format === "ai") {
const pw = await requirePwAi(res, "ai snapshot");
if (!pw) {

View File

@ -1,3 +1,4 @@
import { getChromeMcpPid } from "../chrome-mcp.js";
import { resolveBrowserExecutableForPlatform } from "../chrome.executables.js";
import { toBrowserErrorResponse } from "../errors.js";
import { createBrowserProfilesService } from "../profiles-service.js";
@ -76,10 +77,14 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow
res.json({
enabled: current.resolved.enabled,
profile: profileCtx.profile.name,
driver: profileCtx.profile.driver,
running: cdpReady,
cdpReady,
cdpHttp,
pid: profileState?.running?.pid ?? null,
pid:
profileCtx.profile.driver === "existing-session"
? getChromeMcpPid(profileCtx.profile.name)
: (profileState?.running?.pid ?? null),
cdpPort: profileCtx.profile.cdpPort,
cdpUrl: profileCtx.profile.cdpUrl,
chosenBrowser: profileState?.running?.exe.kind ?? null,
@ -146,6 +151,7 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow
const driver = toStringOrEmpty((req.body as { driver?: unknown })?.driver) as
| "openclaw"
| "extension"
| "existing-session"
| "";
if (!name) {
@ -158,7 +164,12 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow
name,
color: color || undefined,
cdpUrl: cdpUrl || undefined,
driver: driver === "extension" ? "extension" : undefined,
driver:
driver === "extension"
? "extension"
: driver === "existing-session"
? "existing-session"
: undefined,
});
res.json(result);
} catch (err) {

View File

@ -3,6 +3,11 @@ import {
PROFILE_POST_RESTART_WS_TIMEOUT_MS,
resolveCdpReachabilityTimeouts,
} from "./cdp-timeouts.js";
import {
closeChromeMcpSession,
ensureChromeMcpAvailable,
listChromeMcpTabs,
} from "./chrome-mcp.js";
import {
isChromeCdpReady,
isChromeReachable,
@ -60,11 +65,19 @@ export function createProfileAvailability({
});
const isReachable = async (timeoutMs?: number) => {
if (profile.driver === "existing-session") {
await ensureChromeMcpAvailable(profile.name);
await listChromeMcpTabs(profile.name);
return true;
}
const { httpTimeoutMs, wsTimeoutMs } = resolveTimeouts(timeoutMs);
return await isChromeCdpReady(profile.cdpUrl, httpTimeoutMs, wsTimeoutMs);
};
const isHttpReachable = async (timeoutMs?: number) => {
if (profile.driver === "existing-session") {
return await isReachable(timeoutMs);
}
const { httpTimeoutMs } = resolveTimeouts(timeoutMs);
return await isChromeReachable(profile.cdpUrl, httpTimeoutMs);
};
@ -109,6 +122,9 @@ export function createProfileAvailability({
if (previousProfile.driver === "extension") {
await stopChromeExtensionRelayServer({ cdpUrl: previousProfile.cdpUrl }).catch(() => false);
}
if (previousProfile.driver === "existing-session") {
await closeChromeMcpSession(previousProfile.name).catch(() => false);
}
await closePlaywrightBrowserConnectionForProfile(previousProfile.cdpUrl);
if (previousProfile.cdpUrl !== profile.cdpUrl) {
await closePlaywrightBrowserConnectionForProfile(profile.cdpUrl);
@ -138,6 +154,10 @@ export function createProfileAvailability({
const ensureBrowserAvailable = async (): Promise<void> => {
await reconcileProfileRuntime();
if (profile.driver === "existing-session") {
await ensureChromeMcpAvailable(profile.name);
return;
}
const current = state();
const remoteCdp = capabilities.isRemote;
const attachOnly = profile.attachOnly;
@ -238,6 +258,10 @@ export function createProfileAvailability({
const stopRunningBrowser = async (): Promise<{ stopped: boolean }> => {
await reconcileProfileRuntime();
if (profile.driver === "existing-session") {
const stopped = await closeChromeMcpSession(profile.name);
return { stopped };
}
if (capabilities.requiresRelay) {
const stopped = await stopChromeExtensionRelayServer({
cdpUrl: profile.cdpUrl,

View File

@ -0,0 +1,102 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { createBrowserRouteContext } from "./server-context.js";
import type { BrowserServerState } from "./server-context.js";
// Replace the Chrome MCP bridge with deterministic fakes so the route context
// can be exercised without a live Chrome session. Default shapes: one open
// tab "7", newly opened tabs get targetId "8", and a fixed pid of 4321.
vi.mock("./chrome-mcp.js", () => ({
  closeChromeMcpSession: vi.fn(async () => true),
  ensureChromeMcpAvailable: vi.fn(async () => {}),
  focusChromeMcpTab: vi.fn(async () => {}),
  listChromeMcpTabs: vi.fn(async () => [
    { targetId: "7", title: "", url: "https://example.com", type: "page" },
  ]),
  openChromeMcpTab: vi.fn(async () => ({
    targetId: "8",
    title: "",
    url: "https://openclaw.ai",
    type: "page",
  })),
  closeChromeMcpTab: vi.fn(async () => {}),
  getChromeMcpPid: vi.fn(() => 4321),
}));
import * as chromeMcp from "./chrome-mcp.js";
/**
 * Build a minimal BrowserServerState fixture: a single "chrome-live" profile
 * configured with the existing-session (Chrome MCP) driver. Values mirror
 * the resolved-config shape only as far as these tests read it.
 */
function makeState(): BrowserServerState {
  const resolved: BrowserServerState["resolved"] = {
    enabled: true,
    evaluateEnabled: true,
    controlPort: 18791,
    cdpPortRangeStart: 18800,
    cdpPortRangeEnd: 18899,
    cdpProtocol: "http",
    cdpHost: "127.0.0.1",
    cdpIsLoopback: true,
    remoteCdpTimeoutMs: 1500,
    remoteCdpHandshakeTimeoutMs: 3000,
    color: "#FF4500",
    headless: false,
    noSandbox: false,
    attachOnly: false,
    defaultProfile: "chrome-live",
    profiles: {
      "chrome-live": {
        cdpPort: 18801,
        color: "#0066CC",
        driver: "existing-session",
        attachOnly: true,
      },
    },
    extraArgs: [],
    ssrfPolicy: { dangerouslyAllowPrivateNetwork: true },
  };
  return { server: null, port: 0, resolved, profiles: new Map() };
}
// Reset recorded mock calls between tests so call-count assertions stay isolated.
afterEach(() => {
  vi.clearAllMocks();
});
describe("browser server-context existing-session profile", () => {
  it("routes tab operations through the Chrome MCP backend", async () => {
    const state = makeState();
    const ctx = createBrowserRouteContext({ getState: () => state });
    const live = ctx.forProfile("chrome-live");
    // Queue successive tab listings: each mockResolvedValueOnce entry is
    // consumed, in order, by one listChromeMcpTabs call made during the
    // operations below (queued entries take precedence over the factory
    // default defined in the vi.mock block).
    vi.mocked(chromeMcp.listChromeMcpTabs)
      .mockResolvedValueOnce([
        { targetId: "7", title: "", url: "https://example.com", type: "page" },
      ])
      .mockResolvedValueOnce([
        { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" },
      ])
      .mockResolvedValueOnce([
        { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" },
      ])
      .mockResolvedValueOnce([
        { targetId: "7", title: "", url: "https://example.com", type: "page" },
      ]);
    await live.ensureBrowserAvailable();
    const tabs = await live.listTabs();
    expect(tabs.map((tab) => tab.targetId)).toEqual(["7"]);
    const opened = await live.openTab("https://openclaw.ai");
    expect(opened.targetId).toBe("8");
    const selected = await live.ensureTabAvailable();
    expect(selected.targetId).toBe("8");
    await live.focusTab("7");
    await live.stopRunningBrowser();
    // Every MCP operation must be addressed to the "chrome-live" profile.
    expect(chromeMcp.ensureChromeMcpAvailable).toHaveBeenCalledWith("chrome-live");
    expect(chromeMcp.listChromeMcpTabs).toHaveBeenCalledWith("chrome-live");
    expect(chromeMcp.openChromeMcpTab).toHaveBeenCalledWith("chrome-live", "https://openclaw.ai");
    expect(chromeMcp.focusChromeMcpTab).toHaveBeenCalledWith("chrome-live", "7");
    expect(chromeMcp.closeChromeMcpSession).toHaveBeenCalledWith("chrome-live");
  });
});

View File

@ -1,5 +1,6 @@
import { fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js";
import { appendCdpPath } from "./cdp.js";
import { closeChromeMcpTab, focusChromeMcpTab } from "./chrome-mcp.js";
import type { ResolvedBrowserProfile } from "./config.js";
import { BrowserTabNotFoundError, BrowserTargetAmbiguousError } from "./errors.js";
import { getBrowserProfileCapabilities } from "./profile-capabilities.js";
@ -111,6 +112,13 @@ export function createProfileSelectionOps({
const focusTab = async (targetId: string): Promise<void> => {
const resolvedTargetId = await resolveTargetIdOrThrow(targetId);
if (profile.driver === "existing-session") {
await focusChromeMcpTab(profile.name, resolvedTargetId);
const profileState = getProfileState();
profileState.lastTargetId = resolvedTargetId;
return;
}
if (capabilities.usesPersistentPlaywright) {
const mod = await getPwAiModule({ mode: "strict" });
const focusPageByTargetIdViaPlaywright = (mod as Partial<PwAiModule> | null)
@ -134,6 +142,11 @@ export function createProfileSelectionOps({
const closeTab = async (targetId: string): Promise<void> => {
const resolvedTargetId = await resolveTargetIdOrThrow(targetId);
if (profile.driver === "existing-session") {
await closeChromeMcpTab(profile.name, resolvedTargetId);
return;
}
// For remote profiles, use Playwright's persistent connection to close tabs
if (capabilities.usesPersistentPlaywright) {
const mod = await getPwAiModule({ mode: "strict" });

View File

@ -1,6 +1,7 @@
import { CDP_JSON_NEW_TIMEOUT_MS } from "./cdp-timeouts.js";
import { fetchJson, fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js";
import { appendCdpPath, createTargetViaCdp, normalizeCdpWsUrl } from "./cdp.js";
import { listChromeMcpTabs, openChromeMcpTab } from "./chrome-mcp.js";
import type { ResolvedBrowserProfile } from "./config.js";
import {
assertBrowserNavigationAllowed,
@ -65,6 +66,10 @@ export function createProfileTabOps({
const capabilities = getBrowserProfileCapabilities(profile);
const listTabs = async (): Promise<BrowserTab[]> => {
if (profile.driver === "existing-session") {
return await listChromeMcpTabs(profile.name);
}
if (capabilities.usesPersistentPlaywright) {
const mod = await getPwAiModule({ mode: "strict" });
const listPagesViaPlaywright = (mod as Partial<PwAiModule> | null)?.listPagesViaPlaywright;
@ -134,6 +139,15 @@ export function createProfileTabOps({
const openTab = async (url: string): Promise<BrowserTab> => {
const ssrfPolicyOpts = withBrowserNavigationPolicy(state().resolved.ssrfPolicy);
if (profile.driver === "existing-session") {
await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts });
const page = await openChromeMcpTab(profile.name, url);
const profileState = getProfileState();
profileState.lastTargetId = page.targetId;
await assertBrowserNavigationResultAllowed({ url: page.url, ...ssrfPolicyOpts });
return page;
}
if (capabilities.usesPersistentPlaywright) {
const mod = await getPwAiModule({ mode: "strict" });
const createPageViaPlaywright = (mod as Partial<PwAiModule> | null)?.createPageViaPlaywright;

View File

@ -162,12 +162,22 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon
let tabCount = 0;
let running = false;
const profileCtx = createProfileContext(opts, profile);
if (profileState?.running) {
if (profile.driver === "existing-session") {
try {
running = await profileCtx.isReachable(300);
if (running) {
const tabs = await profileCtx.listTabs();
tabCount = tabs.filter((t) => t.type === "page").length;
}
} catch {
// Chrome MCP not available
}
} else if (profileState?.running) {
running = true;
try {
const ctx = createProfileContext(opts, profile);
const tabs = await ctx.listTabs();
const tabs = await profileCtx.listTabs();
tabCount = tabs.filter((t) => t.type === "page").length;
} catch {
// Browser might not be responsive
@ -178,8 +188,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon
const reachable = await isChromeReachable(profile.cdpUrl, 200);
if (reachable) {
running = true;
const ctx = createProfileContext(opts, profile);
const tabs = await ctx.listTabs().catch(() => []);
const tabs = await profileCtx.listTabs().catch(() => []);
tabCount = tabs.filter((t) => t.type === "page").length;
}
} catch {
@ -192,6 +201,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon
cdpPort: profile.cdpPort,
cdpUrl: profile.cdpUrl,
color: profile.color,
driver: profile.driver,
running,
tabCount,
isDefault: name === current.resolved.defaultProfile,

View File

@ -56,6 +56,7 @@ export type ProfileStatus = {
cdpPort: number;
cdpUrl: string;
color: string;
driver: ResolvedBrowserProfile["driver"];
running: boolean;
tabCount: number;
isDefault: boolean;

View File

@ -407,7 +407,8 @@ export function registerBrowserManageCommands(
const def = p.isDefault ? " [default]" : "";
const loc = p.isRemote ? `cdpUrl: ${p.cdpUrl}` : `port: ${p.cdpPort}`;
const remote = p.isRemote ? " [remote]" : "";
return `${p.name}: ${status}${tabs}${def}${remote}\n ${loc}, color: ${p.color}`;
const driver = p.driver !== "openclaw" ? ` [${p.driver}]` : "";
return `${p.name}: ${status}${tabs}${def}${remote}${driver}\n ${loc}, color: ${p.color}`;
})
.join("\n"),
);
@ -420,7 +421,10 @@ export function registerBrowserManageCommands(
.requiredOption("--name <name>", "Profile name (lowercase, numbers, hyphens)")
.option("--color <hex>", "Profile color (hex format, e.g. #0066CC)")
.option("--cdp-url <url>", "CDP URL for remote Chrome (http/https)")
.option("--driver <driver>", "Profile driver (openclaw|extension). Default: openclaw")
.option(
"--driver <driver>",
"Profile driver (openclaw|extension|existing-session). Default: openclaw",
)
.action(
async (opts: { name: string; color?: string; cdpUrl?: string; driver?: string }, cmd) => {
const parent = parentOpts(cmd);
@ -434,7 +438,12 @@ export function registerBrowserManageCommands(
name: opts.name,
color: opts.color,
cdpUrl: opts.cdpUrl,
driver: opts.driver === "extension" ? "extension" : undefined,
driver:
opts.driver === "extension"
? "extension"
: opts.driver === "existing-session"
? "existing-session"
: undefined,
},
},
{ timeoutMs: 10_000 },
@ -446,7 +455,11 @@ export function registerBrowserManageCommands(
defaultRuntime.log(
info(
`🦞 Created profile "${result.profile}"\n${loc}\n color: ${result.color}${
opts.driver === "extension" ? "\n driver: extension" : ""
opts.driver === "extension"
? "\n driver: extension"
: opts.driver === "existing-session"
? "\n driver: existing-session"
: ""
}`,
),
);

View File

@ -64,6 +64,17 @@ describe("resolveCommandSecretRefsViaGateway", () => {
});
}
/**
 * Asserts that both diagnostics emitted on the gateway-unavailable
 * local-fallback path are present in the resolution result.
 */
function expectGatewayUnavailableLocalFallbackDiagnostics(
  result: Awaited<ReturnType<typeof resolveCommandSecretRefsViaGateway>>,
) {
  const expectedFragments = [
    "gateway secrets.resolve unavailable",
    "resolved command secrets locally",
  ];
  for (const fragment of expectedFragments) {
    expect(result.diagnostics.some((entry) => entry.includes(fragment))).toBe(true);
  }
}
it("returns config unchanged when no target SecretRefs are configured", async () => {
const config = {
talk: {
@ -208,11 +219,8 @@ describe("resolveCommandSecretRefsViaGateway", () => {
it("falls back to local resolution for web search SecretRefs when gateway is unavailable", async () => {
const envKey = "WEB_SEARCH_GEMINI_API_KEY_LOCAL_FALLBACK";
const priorValue = process.env[envKey];
process.env[envKey] = "gemini-local-fallback-key";
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
try {
await withEnvValue(envKey, "gemini-local-fallback-key", async () => {
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
const result = await resolveCommandSecretRefsViaGateway({
config: {
tools: {
@ -234,28 +242,14 @@ describe("resolveCommandSecretRefsViaGateway", () => {
"gemini-local-fallback-key",
);
expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("resolved_local");
expect(
result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")),
).toBe(true);
expect(
result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")),
).toBe(true);
} finally {
if (priorValue === undefined) {
delete process.env[envKey];
} else {
process.env[envKey] = priorValue;
}
}
expectGatewayUnavailableLocalFallbackDiagnostics(result);
});
});
it("falls back to local resolution for Firecrawl SecretRefs when gateway is unavailable", async () => {
const envKey = "WEB_FETCH_FIRECRAWL_API_KEY_LOCAL_FALLBACK";
const priorValue = process.env[envKey];
process.env[envKey] = "firecrawl-local-fallback-key";
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
try {
await withEnvValue(envKey, "firecrawl-local-fallback-key", async () => {
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
const result = await resolveCommandSecretRefsViaGateway({
config: {
tools: {
@ -276,19 +270,8 @@ describe("resolveCommandSecretRefsViaGateway", () => {
"firecrawl-local-fallback-key",
);
expect(result.targetStatesByPath["tools.web.fetch.firecrawl.apiKey"]).toBe("resolved_local");
expect(
result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")),
).toBe(true);
expect(
result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")),
).toBe(true);
} finally {
if (priorValue === undefined) {
delete process.env[envKey];
} else {
process.env[envKey] = priorValue;
}
}
expectGatewayUnavailableLocalFallbackDiagnostics(result);
});
});
it("marks web SecretRefs inactive when the web surface is disabled during local fallback", async () => {

View File

@ -1,30 +1,15 @@
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import {
defaultRuntime,
resetLifecycleRuntimeLogs,
resetLifecycleServiceMocks,
service,
stubEmptyGatewayEnv,
} from "./test-helpers/lifecycle-core-harness.js";
const readConfigFileSnapshotMock = vi.fn();
const loadConfig = vi.fn(() => ({}));
const runtimeLogs: string[] = [];
const defaultRuntime = {
log: (message: string) => runtimeLogs.push(message),
error: vi.fn(),
exit: (code: number) => {
throw new Error(`__exit__:${code}`);
},
};
const service = {
label: "TestService",
loadedText: "loaded",
notLoadedText: "not loaded",
install: vi.fn(),
uninstall: vi.fn(),
stop: vi.fn(),
isLoaded: vi.fn(),
readCommand: vi.fn(),
readRuntime: vi.fn(),
restart: vi.fn(),
};
vi.mock("../../config/config.js", () => ({
loadConfig: () => loadConfig(),
readConfigFileSnapshot: () => readConfigFileSnapshotMock(),
@ -50,7 +35,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => {
});
beforeEach(() => {
runtimeLogs.length = 0;
resetLifecycleRuntimeLogs();
readConfigFileSnapshotMock.mockReset();
readConfigFileSnapshotMock.mockResolvedValue({
exists: true,
@ -60,15 +45,8 @@ describe("runServiceRestart config pre-flight (#35862)", () => {
});
loadConfig.mockReset();
loadConfig.mockReturnValue({});
service.isLoaded.mockClear();
service.readCommand.mockClear();
service.restart.mockClear();
service.isLoaded.mockResolvedValue(true);
service.readCommand.mockResolvedValue({ environment: {} });
service.restart.mockResolvedValue({ outcome: "completed" });
vi.unstubAllEnvs();
vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "");
vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", "");
resetLifecycleServiceMocks();
stubEmptyGatewayEnv();
});
it("aborts restart when config is invalid", async () => {
@ -152,7 +130,7 @@ describe("runServiceStart config pre-flight (#35862)", () => {
});
beforeEach(() => {
runtimeLogs.length = 0;
resetLifecycleRuntimeLogs();
readConfigFileSnapshotMock.mockReset();
readConfigFileSnapshotMock.mockResolvedValue({
exists: true,
@ -160,10 +138,7 @@ describe("runServiceStart config pre-flight (#35862)", () => {
config: {},
issues: [],
});
service.isLoaded.mockClear();
service.restart.mockClear();
service.isLoaded.mockResolvedValue(true);
service.restart.mockResolvedValue({ outcome: "completed" });
resetLifecycleServiceMocks();
});
it("aborts start when config is invalid", async () => {

View File

@ -1,4 +1,12 @@
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import {
defaultRuntime,
resetLifecycleRuntimeLogs,
resetLifecycleServiceMocks,
runtimeLogs,
service,
stubEmptyGatewayEnv,
} from "./test-helpers/lifecycle-core-harness.js";
const loadConfig = vi.fn(() => ({
gateway: {
@ -8,28 +16,6 @@ const loadConfig = vi.fn(() => ({
},
}));
const runtimeLogs: string[] = [];
const defaultRuntime = {
log: (message: string) => runtimeLogs.push(message),
error: vi.fn(),
exit: (code: number) => {
throw new Error(`__exit__:${code}`);
},
};
const service = {
label: "TestService",
loadedText: "loaded",
notLoadedText: "not loaded",
install: vi.fn(),
uninstall: vi.fn(),
stop: vi.fn(),
isLoaded: vi.fn(),
readCommand: vi.fn(),
readRuntime: vi.fn(),
restart: vi.fn(),
};
vi.mock("../../config/config.js", () => ({
loadConfig: () => loadConfig(),
readBestEffortConfig: async () => loadConfig(),
@ -49,7 +35,7 @@ describe("runServiceRestart token drift", () => {
});
beforeEach(() => {
runtimeLogs.length = 0;
resetLifecycleRuntimeLogs();
loadConfig.mockReset();
loadConfig.mockReturnValue({
gateway: {
@ -58,19 +44,11 @@ describe("runServiceRestart token drift", () => {
},
},
});
service.isLoaded.mockClear();
service.readCommand.mockClear();
service.restart.mockClear();
service.isLoaded.mockResolvedValue(true);
resetLifecycleServiceMocks();
service.readCommand.mockResolvedValue({
environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" },
});
service.restart.mockResolvedValue({ outcome: "completed" });
vi.unstubAllEnvs();
vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "");
vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", "");
vi.stubEnv("OPENCLAW_GATEWAY_URL", "");
vi.stubEnv("CLAWDBOT_GATEWAY_URL", "");
stubEmptyGatewayEnv();
});
it("emits drift warning when enabled", async () => {

View File

@ -0,0 +1,45 @@
import { vi } from "vitest";

/** Lines captured from `defaultRuntime.log`; cleared via resetLifecycleRuntimeLogs(). */
export const runtimeLogs: string[] = [];

/**
 * Shared runtime double for lifecycle-command tests: `log` records into
 * `runtimeLogs`, `error` is a plain spy, and `exit` throws a sentinel
 * error (`__exit__:<code>`) so tests can assert the exit path was taken.
 */
export const defaultRuntime = {
  log: (line: string) => runtimeLogs.push(line),
  error: vi.fn(),
  exit: (code: number) => {
    throw new Error(`__exit__:${code}`);
  },
};

/** Shared service double with spy implementations for every lifecycle hook. */
export const service = {
  label: "TestService",
  loadedText: "loaded",
  notLoadedText: "not loaded",
  install: vi.fn(),
  uninstall: vi.fn(),
  stop: vi.fn(),
  isLoaded: vi.fn(),
  readCommand: vi.fn(),
  readRuntime: vi.fn(),
  restart: vi.fn(),
};

/** Drops all runtime log lines captured so far. */
export function resetLifecycleRuntimeLogs() {
  runtimeLogs.length = 0;
}

/** Clears the commonly-used service spies and restores their happy-path results. */
export function resetLifecycleServiceMocks() {
  for (const mock of [service.isLoaded, service.readCommand, service.restart]) {
    mock.mockClear();
  }
  service.isLoaded.mockResolvedValue(true);
  service.readCommand.mockResolvedValue({ environment: {} });
  service.restart.mockResolvedValue({ outcome: "completed" });
}

/** Stubs every gateway-related env var to the empty string (after clearing prior stubs). */
export function stubEmptyGatewayEnv() {
  vi.unstubAllEnvs();
  for (const name of [
    "OPENCLAW_GATEWAY_TOKEN",
    "CLAWDBOT_GATEWAY_TOKEN",
    "OPENCLAW_GATEWAY_URL",
    "CLAWDBOT_GATEWAY_URL",
  ]) {
    vi.stubEnv(name, "");
  }
}

View File

@ -8,6 +8,92 @@ import { buildBackupArchiveRoot } from "./backup-shared.js";
import { backupVerifyCommand } from "./backup-verify.js";
import { backupCreateCommand } from "./backup.js";
const TEST_ARCHIVE_ROOT = "2026-03-09T00-00-00.000Z-openclaw-backup";
/** Builds a fresh runtime double (log/error/exit spies) for each backup-verify test. */
const createBackupVerifyRuntime = () => {
  return {
    log: vi.fn(),
    error: vi.fn(),
    exit: vi.fn(),
  };
};
/**
 * Builds a minimal schema-v1 backup manifest whose single "state" asset
 * points at the given in-archive path. Used to assemble the broken-archive
 * fixtures exercised by backupVerifyCommand tests.
 */
function createBackupManifest(assetArchivePath: string) {
  const stateAsset = {
    kind: "state",
    sourcePath: "/tmp/.openclaw",
    archivePath: assetArchivePath,
  };
  return {
    schemaVersion: 1,
    createdAt: "2026-03-09T00:00:00.000Z",
    archiveRoot: TEST_ARCHIVE_ROOT,
    runtimeVersion: "test",
    platform: process.platform,
    nodeVersion: process.version,
    assets: [stateAsset],
  };
}
/**
 * Builds a deliberately malformed backup archive in a temp dir and hands its
 * path to `run`. The manifest's single asset points at
 * `options.manifestAssetArchivePath`; each payload file is written to disk
 * and remapped inside the tar via `onWriteEntry`. `buildTarEntries` lets a
 * test duplicate or reorder tar entries (e.g. to produce duplicate
 * manifests). The temp dir is removed even when `run` throws.
 */
async function withBrokenArchiveFixture(
options: {
tempPrefix: string;
manifestAssetArchivePath: string;
payloads: Array<{ fileName: string; contents: string; archivePath?: string }>;
buildTarEntries?: (paths: { manifestPath: string; payloadPaths: string[] }) => string[];
},
run: (archivePath: string) => Promise<void>,
) {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), options.tempPrefix));
const archivePath = path.join(tempDir, "broken.tar.gz");
const manifestPath = path.join(tempDir, "manifest.json");
// Write each payload file; its in-archive path defaults to the manifest asset path.
const payloadSpecs = await Promise.all(
options.payloads.map(async (payload) => {
const payloadPath = path.join(tempDir, payload.fileName);
await fs.writeFile(payloadPath, payload.contents, "utf8");
return {
path: payloadPath,
archivePath: payload.archivePath ?? options.manifestAssetArchivePath,
};
}),
);
// Source path -> in-archive entry path, consulted by onWriteEntry below.
const payloadEntryPathBySource = new Map(
payloadSpecs.map((payload) => [payload.path, payload.archivePath]),
);
try {
await fs.writeFile(
manifestPath,
`${JSON.stringify(createBackupManifest(options.manifestAssetArchivePath), null, 2)}\n`,
"utf8",
);
await tar.c(
{
file: archivePath,
gzip: true,
portable: true,
// preservePaths keeps traversal segments in the remapped entry paths intact.
preservePaths: true,
onWriteEntry: (entry) => {
if (entry.path === manifestPath) {
entry.path = `${TEST_ARCHIVE_ROOT}/manifest.json`;
return;
}
const payloadEntryPath = payloadEntryPathBySource.get(entry.path);
if (payloadEntryPath) {
entry.path = payloadEntryPath;
}
},
},
// Default entry list: manifest first, then every payload exactly once.
options.buildTarEntries?.({
manifestPath,
payloadPaths: payloadSpecs.map((payload) => payload.path),
}) ?? [manifestPath, ...payloadSpecs.map((payload) => payload.path)],
);
await run(archivePath);
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
}
describe("backupVerifyCommand", () => {
let tempHome: TempHomeEnv;
@ -26,12 +112,7 @@ describe("backupVerifyCommand", () => {
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
await fs.writeFile(path.join(stateDir, "state.txt"), "hello\n", "utf8");
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const runtime = createBackupVerifyRuntime();
const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0);
const created = await backupCreateCommand(runtime, { output: archiveDir, nowMs });
const verified = await backupVerifyCommand(runtime, { archive: created.archivePath });
@ -53,12 +134,7 @@ describe("backupVerifyCommand", () => {
await fs.writeFile(path.join(root, "payload", "data.txt"), "x\n", "utf8");
await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, ["root"]);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const runtime = createBackupVerifyRuntime();
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/expected exactly one backup manifest entry/i,
);
@ -95,12 +171,7 @@ describe("backupVerifyCommand", () => {
);
await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, [rootName]);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const runtime = createBackupVerifyRuntime();
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/missing payload for manifest asset/i,
);
@ -110,119 +181,37 @@ describe("backupVerifyCommand", () => {
});
it("fails when archive paths contain traversal segments", async () => {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-traversal-"));
const archivePath = path.join(tempDir, "broken.tar.gz");
const manifestPath = path.join(tempDir, "manifest.json");
const payloadPath = path.join(tempDir, "payload.txt");
try {
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
const traversalPath = `${rootName}/payload/../escaped.txt`;
const manifest = {
schemaVersion: 1,
createdAt: "2026-03-09T00:00:00.000Z",
archiveRoot: rootName,
runtimeVersion: "test",
platform: process.platform,
nodeVersion: process.version,
assets: [
{
kind: "state",
sourcePath: "/tmp/.openclaw",
archivePath: traversalPath,
},
],
};
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
await fs.writeFile(payloadPath, "payload\n", "utf8");
await tar.c(
{
file: archivePath,
gzip: true,
portable: true,
preservePaths: true,
onWriteEntry: (entry) => {
if (entry.path === manifestPath) {
entry.path = `${rootName}/manifest.json`;
return;
}
if (entry.path === payloadPath) {
entry.path = traversalPath;
}
},
},
[manifestPath, payloadPath],
);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/path traversal segments/i,
);
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`;
await withBrokenArchiveFixture(
{
tempPrefix: "openclaw-backup-traversal-",
manifestAssetArchivePath: traversalPath,
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }],
},
async (archivePath) => {
const runtime = createBackupVerifyRuntime();
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/path traversal segments/i,
);
},
);
});
it("fails when archive paths contain backslashes", async () => {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-backslash-"));
const archivePath = path.join(tempDir, "broken.tar.gz");
const manifestPath = path.join(tempDir, "manifest.json");
const payloadPath = path.join(tempDir, "payload.txt");
try {
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
const invalidPath = `${rootName}/payload\\..\\escaped.txt`;
const manifest = {
schemaVersion: 1,
createdAt: "2026-03-09T00:00:00.000Z",
archiveRoot: rootName,
runtimeVersion: "test",
platform: process.platform,
nodeVersion: process.version,
assets: [
{
kind: "state",
sourcePath: "/tmp/.openclaw",
archivePath: invalidPath,
},
],
};
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
await fs.writeFile(payloadPath, "payload\n", "utf8");
await tar.c(
{
file: archivePath,
gzip: true,
portable: true,
preservePaths: true,
onWriteEntry: (entry) => {
if (entry.path === manifestPath) {
entry.path = `${rootName}/manifest.json`;
return;
}
if (entry.path === payloadPath) {
entry.path = invalidPath;
}
},
},
[manifestPath, payloadPath],
);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/forward slashes/i,
);
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`;
await withBrokenArchiveFixture(
{
tempPrefix: "openclaw-backup-backslash-",
manifestAssetArchivePath: invalidPath,
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }],
},
async (archivePath) => {
const runtime = createBackupVerifyRuntime();
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/forward slashes/i,
);
},
);
});
it("ignores payload manifest.json files when locating the backup manifest", async () => {
@ -251,12 +240,7 @@ describe("backupVerifyCommand", () => {
"utf8",
);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const runtime = createBackupVerifyRuntime();
const created = await backupCreateCommand(runtime, {
output: archiveDir,
includeWorkspace: true,
@ -274,119 +258,44 @@ describe("backupVerifyCommand", () => {
});
it("fails when the archive contains duplicate root manifest entries", async () => {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-manifest-"));
const archivePath = path.join(tempDir, "broken.tar.gz");
const manifestPath = path.join(tempDir, "manifest.json");
const payloadPath = path.join(tempDir, "payload.txt");
try {
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
const manifest = {
schemaVersion: 1,
createdAt: "2026-03-09T00:00:00.000Z",
archiveRoot: rootName,
runtimeVersion: "test",
platform: process.platform,
nodeVersion: process.version,
assets: [
{
kind: "state",
sourcePath: "/tmp/.openclaw",
archivePath: `${rootName}/payload/posix/tmp/.openclaw/payload.txt`,
},
const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`;
await withBrokenArchiveFixture(
{
tempPrefix: "openclaw-backup-duplicate-manifest-",
manifestAssetArchivePath: payloadArchivePath,
payloads: [{ fileName: "payload.txt", contents: "payload\n" }],
buildTarEntries: ({ manifestPath, payloadPaths }) => [
manifestPath,
manifestPath,
...payloadPaths,
],
};
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
await fs.writeFile(payloadPath, "payload\n", "utf8");
await tar.c(
{
file: archivePath,
gzip: true,
portable: true,
preservePaths: true,
onWriteEntry: (entry) => {
if (entry.path === manifestPath) {
entry.path = `${rootName}/manifest.json`;
return;
}
if (entry.path === payloadPath) {
entry.path = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`;
}
},
},
[manifestPath, manifestPath, payloadPath],
);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/expected exactly one backup manifest entry, found 2/i,
);
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
},
async (archivePath) => {
const runtime = createBackupVerifyRuntime();
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/expected exactly one backup manifest entry, found 2/i,
);
},
);
});
it("fails when the archive contains duplicate payload entries", async () => {
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-payload-"));
const archivePath = path.join(tempDir, "broken.tar.gz");
const manifestPath = path.join(tempDir, "manifest.json");
const payloadPathA = path.join(tempDir, "payload-a.txt");
const payloadPathB = path.join(tempDir, "payload-b.txt");
try {
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
const payloadArchivePath = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`;
const manifest = {
schemaVersion: 1,
createdAt: "2026-03-09T00:00:00.000Z",
archiveRoot: rootName,
runtimeVersion: "test",
platform: process.platform,
nodeVersion: process.version,
assets: [
{
kind: "state",
sourcePath: "/tmp/.openclaw",
archivePath: payloadArchivePath,
},
const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`;
await withBrokenArchiveFixture(
{
tempPrefix: "openclaw-backup-duplicate-payload-",
manifestAssetArchivePath: payloadArchivePath,
payloads: [
{ fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath },
{ fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath },
],
};
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
await fs.writeFile(payloadPathA, "payload-a\n", "utf8");
await fs.writeFile(payloadPathB, "payload-b\n", "utf8");
await tar.c(
{
file: archivePath,
gzip: true,
portable: true,
preservePaths: true,
onWriteEntry: (entry) => {
if (entry.path === manifestPath) {
entry.path = `${rootName}/manifest.json`;
return;
}
if (entry.path === payloadPathA || entry.path === payloadPathB) {
entry.path = payloadArchivePath;
}
},
},
[manifestPath, payloadPathA, payloadPathB],
);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/duplicate entry path/i,
);
} finally {
await fs.rm(tempDir, { recursive: true, force: true });
}
},
async (archivePath) => {
const runtime = createBackupVerifyRuntime();
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
/duplicate entry path/i,
);
},
);
});
});

View File

@ -41,6 +41,41 @@ describe("backup commands", () => {
await tempHome.restore();
});
/**
 * Points OPENCLAW_CONFIG_PATH at a syntactically invalid config file, writes
 * a minimal state file, and runs `fn` with fresh runtime spies. The env-var
 * override is removed afterwards, even if `fn` throws.
 * NOTE(review): relies on the enclosing suite's `tempHome` fixture being set
 * up by beforeEach — confirm against the surrounding describe block.
 */
async function withInvalidWorkspaceBackupConfig<T>(
fn: (runtime: {
log: ReturnType<typeof vi.fn>;
error: ReturnType<typeof vi.fn>;
exit: ReturnType<typeof vi.fn>;
}) => Promise<T>,
) {
const stateDir = path.join(tempHome.home, ".openclaw");
const configPath = path.join(tempHome.home, "custom-config.json");
process.env.OPENCLAW_CONFIG_PATH = configPath;
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
// Intentionally truncated/broken JSON so config loading fails.
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
try {
return await fn(runtime);
} finally {
delete process.env.OPENCLAW_CONFIG_PATH;
}
}
/**
 * Asserts the backup plan kept exactly one asset — the state root — and
 * skipped the workspace because it was already covered by that root.
 */
function expectWorkspaceCoveredByState(
  plan: Awaited<ReturnType<typeof resolveBackupPlanFromDisk>>,
) {
  const included = plan.included;
  expect(included).toHaveLength(1);
  expect(included[0]?.kind).toBe("state");
  const coveredWorkspace = expect.objectContaining({ kind: "workspace", reason: "covered" });
  expect(plan.skipped).toEqual(expect.arrayContaining([coveredWorkspace]));
}
it("collapses default config, credentials, and workspace into the state backup root", async () => {
const stateDir = path.join(tempHome.home, ".openclaw");
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
@ -50,12 +85,7 @@ describe("backup commands", () => {
await fs.writeFile(path.join(stateDir, "workspace", "SOUL.md"), "# soul\n", "utf8");
const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 });
expect(plan.included).toHaveLength(1);
expect(plan.included[0]?.kind).toBe("state");
expect(plan.skipped).toEqual(
expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]),
);
expectWorkspaceCoveredByState(plan);
});
it("orders coverage checks by canonical path so symlinked workspaces do not duplicate state", async () => {
@ -84,12 +114,7 @@ describe("backup commands", () => {
);
const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 });
expect(plan.included).toHaveLength(1);
expect(plan.included[0]?.kind).toBe("state");
expect(plan.skipped).toEqual(
expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]),
);
expectWorkspaceCoveredByState(plan);
} finally {
await fs.rm(symlinkDir, { recursive: true, force: true });
}
@ -336,41 +361,15 @@ describe("backup commands", () => {
});
it("fails fast when config is invalid and workspace backup is enabled", async () => {
const stateDir = path.join(tempHome.home, ".openclaw");
const configPath = path.join(tempHome.home, "custom-config.json");
process.env.OPENCLAW_CONFIG_PATH = configPath;
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
try {
await withInvalidWorkspaceBackupConfig(async (runtime) => {
await expect(backupCreateCommand(runtime, { dryRun: true })).rejects.toThrow(
/--no-include-workspace/i,
);
} finally {
delete process.env.OPENCLAW_CONFIG_PATH;
}
});
});
it("allows explicit partial backups when config is invalid", async () => {
const stateDir = path.join(tempHome.home, ".openclaw");
const configPath = path.join(tempHome.home, "custom-config.json");
process.env.OPENCLAW_CONFIG_PATH = configPath;
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
try {
await withInvalidWorkspaceBackupConfig(async (runtime) => {
const result = await backupCreateCommand(runtime, {
dryRun: true,
includeWorkspace: false,
@ -378,9 +377,7 @@ describe("backup commands", () => {
expect(result.includeWorkspace).toBe(false);
expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false);
} finally {
delete process.env.OPENCLAW_CONFIG_PATH;
}
});
});
it("backs up only the active config file when --only-config is requested", async () => {

View File

@ -1,6 +1,7 @@
import { afterEach, describe, expect, it, vi } from "vitest";
const mocks = vi.hoisted(() => ({
loadAuthProfileStoreForSecretsRuntime: vi.fn(),
resolvePreferredNodePath: vi.fn(),
resolveGatewayProgramArguments: vi.fn(),
resolveSystemNodeInfo: vi.fn(),
@ -8,6 +9,10 @@ const mocks = vi.hoisted(() => ({
buildServiceEnvironment: vi.fn(),
}));
vi.mock("../agents/auth-profiles.js", () => ({
loadAuthProfileStoreForSecretsRuntime: mocks.loadAuthProfileStoreForSecretsRuntime,
}));
vi.mock("../daemon/runtime-paths.js", () => ({
resolvePreferredNodePath: mocks.resolvePreferredNodePath,
resolveSystemNodeInfo: mocks.resolveSystemNodeInfo,
@ -63,6 +68,10 @@ function mockNodeGatewayPlanFixture(
programArguments: ["node", "gateway"],
workingDirectory,
});
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
version: 1,
profiles: {},
});
mocks.resolveSystemNodeInfo.mockResolvedValue({
path: "/opt/node",
version,
@ -232,6 +241,67 @@ describe("buildGatewayInstallPlan", () => {
expect(plan.environment.HOME).toBe("/Users/service");
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
});
it("merges env-backed auth-profile refs into the service environment", async () => {
mockNodeGatewayPlanFixture({
serviceEnvironment: {
OPENCLAW_PORT: "3000",
},
});
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
version: 1,
profiles: {
"openai:default": {
type: "api_key",
provider: "openai",
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
},
"anthropic:default": {
type: "token",
provider: "anthropic",
tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" },
},
},
});
const plan = await buildGatewayInstallPlan({
env: {
OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret
ANTHROPIC_TOKEN: "ant-test-token",
},
port: 3000,
runtime: "node",
});
expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test");
expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token");
});
it("skips unresolved auth-profile env refs", async () => {
mockNodeGatewayPlanFixture({
serviceEnvironment: {
OPENCLAW_PORT: "3000",
},
});
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
version: 1,
profiles: {
"openai:default": {
type: "api_key",
provider: "openai",
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
},
},
});
const plan = await buildGatewayInstallPlan({
env: {},
port: 3000,
runtime: "node",
});
expect(plan.environment.OPENAI_API_KEY).toBeUndefined();
});
});
describe("gatewayInstallErrorHint", () => {

View File

@ -1,3 +1,7 @@
import {
loadAuthProfileStoreForSecretsRuntime,
type AuthProfileStore,
} from "../agents/auth-profiles.js";
import { formatCliCommand } from "../cli/command-format.js";
import { collectConfigServiceEnvVars } from "../config/env-vars.js";
import type { OpenClawConfig } from "../config/types.js";
@ -19,6 +23,33 @@ export type GatewayInstallPlan = {
environment: Record<string, string | undefined>;
};
/**
 * Collects env-var values referenced by env-backed auth-profile credentials
 * so they can be baked into the service environment.
 *
 * For each stored credential, looks at its env-sourced ref (`keyRef` for
 * api_key profiles, `tokenRef` for token profiles), reads the named variable
 * from `params.env`, trims it, and keeps it when non-empty. Refs from other
 * sources and missing/blank values are skipped. When no `authStore` is
 * injected (tests do), the store is loaded from the secrets runtime.
 */
function collectAuthProfileServiceEnvVars(params: {
  env: Record<string, string | undefined>;
  authStore?: AuthProfileStore;
}): Record<string, string> {
  const store = params.authStore ?? loadAuthProfileStoreForSecretsRuntime();
  const collected: Record<string, string> = {};
  for (const credential of Object.values(store.profiles)) {
    let ref;
    if (credential.type === "api_key") {
      ref = credential.keyRef;
    } else if (credential.type === "token") {
      ref = credential.tokenRef;
    }
    if (!ref || ref.source !== "env") {
      continue;
    }
    const trimmed = params.env[ref.id]?.trim();
    if (trimmed) {
      collected[ref.id] = trimmed;
    }
  }
  return collected;
}
export async function buildGatewayInstallPlan(params: {
env: Record<string, string | undefined>;
port: number;
@ -28,6 +59,7 @@ export async function buildGatewayInstallPlan(params: {
warn?: DaemonInstallWarnFn;
/** Full config to extract env vars from (env vars + inline env keys). */
config?: OpenClawConfig;
authStore?: AuthProfileStore;
}): Promise<GatewayInstallPlan> {
const { devMode, nodePath } = await resolveDaemonInstallRuntimeInputs({
env: params.env,
@ -61,6 +93,10 @@ export async function buildGatewayInstallPlan(params: {
// Config env vars are added first so service-specific vars take precedence.
const environment: Record<string, string | undefined> = {
...collectConfigServiceEnvVars(params.config),
...collectAuthProfileServiceEnvVars({
env: params.env,
authStore: params.authStore,
}),
};
Object.assign(environment, serviceEnvironment);

View File

@ -26,6 +26,32 @@ async function makeRootWithEmptyCfg() {
return { root, cfg };
}
function writeLegacyTelegramAllowFromStore(oauthDir: string) {
fs.writeFileSync(
path.join(oauthDir, "telegram-allowFrom.json"),
JSON.stringify(
{
version: 1,
allowFrom: ["123456"],
},
null,
2,
) + "\n",
"utf-8",
);
}
async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenClawConfig }) {
const oauthDir = ensureCredentialsDir(params.root);
writeLegacyTelegramAllowFromStore(oauthDir);
const detected = await detectLegacyStateMigrations({
cfg: params.cfg,
env: { OPENCLAW_STATE_DIR: params.root } as NodeJS.ProcessEnv,
});
const result = await runLegacyStateMigrations({ detected, now: () => 123 });
return { oauthDir, detected, result };
}
afterEach(async () => {
resetAutoMigrateLegacyStateForTest();
resetAutoMigrateLegacyStateDirForTest();
@ -277,30 +303,11 @@ describe("doctor legacy state migrations", () => {
it("migrates legacy Telegram pairing allowFrom store to account-scoped default file", async () => {
const { root, cfg } = await makeRootWithEmptyCfg();
const oauthDir = ensureCredentialsDir(root);
fs.writeFileSync(
path.join(oauthDir, "telegram-allowFrom.json"),
JSON.stringify(
{
version: 1,
allowFrom: ["123456"],
},
null,
2,
) + "\n",
"utf-8",
);
const detected = await detectLegacyStateMigrations({
cfg,
env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv,
});
const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg });
expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true);
expect(
detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)),
).toEqual(["telegram-default-allowFrom.json"]);
const result = await runLegacyStateMigrations({ detected, now: () => 123 });
expect(result.warnings).toEqual([]);
const target = path.join(oauthDir, "telegram-default-allowFrom.json");
@ -323,30 +330,11 @@ describe("doctor legacy state migrations", () => {
},
},
};
const oauthDir = ensureCredentialsDir(root);
fs.writeFileSync(
path.join(oauthDir, "telegram-allowFrom.json"),
JSON.stringify(
{
version: 1,
allowFrom: ["123456"],
},
null,
2,
) + "\n",
"utf-8",
);
const detected = await detectLegacyStateMigrations({
cfg,
env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv,
});
const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg });
expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true);
expect(
detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)).toSorted(),
).toEqual(["telegram-bot1-allowFrom.json", "telegram-bot2-allowFrom.json"]);
const result = await runLegacyStateMigrations({ detected, now: () => 123 });
expect(result.warnings).toEqual([]);
const bot1Target = path.join(oauthDir, "telegram-bot1-allowFrom.json");

View File

@ -1,7 +1,28 @@
import { describe, expect, it } from "vitest";
import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js";
import { withEnv } from "../../test-utils/env.js";
import { resolveProviderAuthOverview } from "./list.auth-overview.js";
function resolveOpenAiOverview(apiKey: string) {
return resolveProviderAuthOverview({
provider: "openai",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey,
models: [],
},
},
},
} as never,
store: { version: 1, profiles: {} } as never,
modelsPath: "/tmp/models.json",
});
}
describe("resolveProviderAuthOverview", () => {
it("does not throw when token profile only has tokenRef", () => {
const overview = resolveProviderAuthOverview({
@ -24,23 +45,9 @@ describe("resolveProviderAuthOverview", () => {
});
it("renders marker-backed models.json auth as marker detail", () => {
const overview = resolveProviderAuthOverview({
provider: "openai",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: NON_ENV_SECRETREF_MARKER,
models: [],
},
},
},
} as never,
store: { version: 1, profiles: {} } as never,
modelsPath: "/tmp/models.json",
});
const overview = withEnv({ OPENAI_API_KEY: undefined }, () =>
resolveOpenAiOverview(NON_ENV_SECRETREF_MARKER),
);
expect(overview.effective.kind).toBe("missing");
expect(overview.effective.detail).toBe("missing");
@ -48,23 +55,9 @@ describe("resolveProviderAuthOverview", () => {
});
it("keeps env-var-shaped models.json values masked to avoid accidental plaintext exposure", () => {
const overview = resolveProviderAuthOverview({
provider: "openai",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: "OPENAI_API_KEY", // pragma: allowlist secret
models: [],
},
},
},
} as never,
store: { version: 1, profiles: {} } as never,
modelsPath: "/tmp/models.json",
});
const overview = withEnv({ OPENAI_API_KEY: undefined }, () =>
resolveOpenAiOverview("OPENAI_API_KEY"),
);
expect(overview.effective.kind).toBe("missing");
expect(overview.effective.detail).toBe("missing");
@ -76,23 +69,7 @@ describe("resolveProviderAuthOverview", () => {
const prior = process.env.OPENAI_API_KEY;
process.env.OPENAI_API_KEY = "sk-openai-from-env"; // pragma: allowlist secret
try {
const overview = resolveProviderAuthOverview({
provider: "openai",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: "OPENAI_API_KEY", // pragma: allowlist secret
models: [],
},
},
},
} as never,
store: { version: 1, profiles: {} } as never,
modelsPath: "/tmp/models.json",
});
const overview = resolveOpenAiOverview("OPENAI_API_KEY");
expect(overview.effective.kind).toBe("env");
expect(overview.effective.detail).not.toContain("OPENAI_API_KEY");
} finally {

View File

@ -1,5 +1,6 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import type { RuntimeEnv } from "../runtime.js";
import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js";
import type { WizardPrompter } from "../wizard/prompts.js";
import {
configureOllamaNonInteractive,
@ -23,27 +24,6 @@ vi.mock("./oauth-env.js", () => ({
isRemoteEnvironment: isRemoteEnvironmentMock,
}));
function jsonResponse(body: unknown, status = 200): Response {
return new Response(JSON.stringify(body), {
status,
headers: { "Content-Type": "application/json" },
});
}
function requestUrl(input: string | URL | Request): string {
if (typeof input === "string") {
return input;
}
if (input instanceof URL) {
return input.toString();
}
return input.url;
}
function requestBody(body: BodyInit | null | undefined): string {
return typeof body === "string" ? body : "{}";
}
function createOllamaFetchMock(params: {
tags?: string[];
show?: Record<string, number | undefined>;
@ -61,7 +41,7 @@ function createOllamaFetchMock(params: {
return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) });
}
if (url.endsWith("/api/show")) {
const body = JSON.parse(requestBody(init?.body)) as { name?: string };
const body = JSON.parse(requestBodyText(init?.body)) as { name?: string };
const contextWindow = body.name ? params.show?.[body.name] : undefined;
return contextWindow
? jsonResponse({ model_info: { "llama.context_length": contextWindow } })
@ -77,6 +57,45 @@ function createOllamaFetchMock(params: {
});
}
function createModePrompter(
mode: "local" | "remote",
params?: { confirm?: boolean },
): WizardPrompter {
return {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce(mode),
...(params?.confirm !== undefined
? { confirm: vi.fn().mockResolvedValueOnce(params.confirm) }
: {}),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
}
function createSignedOutRemoteFetchMock() {
return createOllamaFetchMock({
tags: ["llama3:8b"],
meResponses: [
jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401),
jsonResponse({ username: "testuser" }),
],
});
}
function createDefaultOllamaConfig(primary: string) {
return {
agents: { defaults: { model: { primary } } },
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
};
}
function createRuntime() {
return {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
} as unknown as RuntimeEnv;
}
describe("ollama setup", () => {
afterEach(() => {
vi.unstubAllGlobals();
@ -86,11 +105,7 @@ describe("ollama setup", () => {
});
it("returns suggested default model for local mode", async () => {
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce("local"),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
const prompter = createModePrompter("local");
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
vi.stubGlobal("fetch", fetchMock);
@ -101,11 +116,7 @@ describe("ollama setup", () => {
});
it("returns suggested default model for remote mode", async () => {
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce("remote"),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
const prompter = createModePrompter("remote");
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
vi.stubGlobal("fetch", fetchMock);
@ -116,11 +127,7 @@ describe("ollama setup", () => {
});
it("mode selection affects model ordering (local)", async () => {
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce("local"),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
const prompter = createModePrompter("local");
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] });
vi.stubGlobal("fetch", fetchMock);
@ -134,20 +141,8 @@ describe("ollama setup", () => {
});
it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => {
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce("remote"),
confirm: vi.fn().mockResolvedValueOnce(true),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
const fetchMock = createOllamaFetchMock({
tags: ["llama3:8b"],
meResponses: [
jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401),
jsonResponse({ username: "testuser" }),
],
});
const prompter = createModePrompter("remote", { confirm: true });
const fetchMock = createSignedOutRemoteFetchMock();
vi.stubGlobal("fetch", fetchMock);
await promptAndConfigureOllama({ cfg: {}, prompter });
@ -158,20 +153,8 @@ describe("ollama setup", () => {
it("cloud+local mode does not open browser in remote environment", async () => {
isRemoteEnvironmentMock.mockReturnValue(true);
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce("remote"),
confirm: vi.fn().mockResolvedValueOnce(true),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
const fetchMock = createOllamaFetchMock({
tags: ["llama3:8b"],
meResponses: [
jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401),
jsonResponse({ username: "testuser" }),
],
});
const prompter = createModePrompter("remote", { confirm: true });
const fetchMock = createSignedOutRemoteFetchMock();
vi.stubGlobal("fetch", fetchMock);
await promptAndConfigureOllama({ cfg: {}, prompter });
@ -180,11 +163,7 @@ describe("ollama setup", () => {
});
it("local mode does not trigger cloud auth", async () => {
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
select: vi.fn().mockResolvedValueOnce("local"),
note: vi.fn(async () => undefined),
} as unknown as WizardPrompter;
const prompter = createModePrompter("local");
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
vi.stubGlobal("fetch", fetchMock);
@ -258,10 +237,7 @@ describe("ollama setup", () => {
vi.stubGlobal("fetch", fetchMock);
await ensureOllamaModelPulled({
config: {
agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } },
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
},
config: createDefaultOllamaConfig("ollama/glm-4.7-flash"),
prompter,
});
@ -276,10 +252,7 @@ describe("ollama setup", () => {
vi.stubGlobal("fetch", fetchMock);
await ensureOllamaModelPulled({
config: {
agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } },
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
},
config: createDefaultOllamaConfig("ollama/glm-4.7-flash"),
prompter,
});
@ -292,10 +265,7 @@ describe("ollama setup", () => {
vi.stubGlobal("fetch", fetchMock);
await ensureOllamaModelPulled({
config: {
agents: { defaults: { model: { primary: "ollama/kimi-k2.5:cloud" } } },
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
},
config: createDefaultOllamaConfig("ollama/kimi-k2.5:cloud"),
prompter,
});
@ -324,12 +294,7 @@ describe("ollama setup", () => {
pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }),
});
vi.stubGlobal("fetch", fetchMock);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
} as unknown as RuntimeEnv;
const runtime = createRuntime();
const result = await configureOllamaNonInteractive({
nextConfig: {
@ -362,12 +327,7 @@ describe("ollama setup", () => {
pullResponse: new Response('{"status":"success"}\n', { status: 200 }),
});
vi.stubGlobal("fetch", fetchMock);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
} as unknown as RuntimeEnv;
const runtime = createRuntime();
const result = await configureOllamaNonInteractive({
nextConfig: {},
@ -379,7 +339,7 @@ describe("ollama setup", () => {
});
const pullRequest = fetchMock.mock.calls[1]?.[1];
expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" });
expect(JSON.parse(requestBodyText(pullRequest?.body))).toEqual({ name: "llama3.2:latest" });
expect(result.agents?.defaults?.model).toEqual(
expect.objectContaining({ primary: "ollama/llama3.2:latest" }),
);
@ -388,12 +348,7 @@ describe("ollama setup", () => {
it("accepts cloud models in non-interactive mode without pulling", async () => {
const fetchMock = createOllamaFetchMock({ tags: [] });
vi.stubGlobal("fetch", fetchMock);
const runtime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
} as unknown as RuntimeEnv;
const runtime = createRuntime();
const result = await configureOllamaNonInteractive({
nextConfig: {},

View File

@ -85,6 +85,29 @@ import {
MODELSTUDIO_DEFAULT_MODEL_REF,
} from "./onboard-auth.models.js";
function mergeProviderModels<T extends { id: string }>(
existingProvider: Record<string, unknown> | undefined,
defaultModels: T[],
): T[] {
const existingModels = Array.isArray(existingProvider?.models)
? (existingProvider.models as T[])
: [];
const mergedModels = [...existingModels];
const seen = new Set(existingModels.map((model) => model.id));
for (const model of defaultModels) {
if (!seen.has(model.id)) {
mergedModels.push(model);
seen.add(model.id);
}
}
return mergedModels;
}
function getNormalizedProviderApiKey(existingProvider: Record<string, unknown> | undefined) {
const { apiKey } = (existingProvider ?? {}) as { apiKey?: string };
return typeof apiKey === "string" ? apiKey.trim() || undefined : undefined;
}
export function applyZaiProviderConfig(
cfg: OpenClawConfig,
params?: { endpoint?: string; modelId?: string },
@ -100,7 +123,6 @@ export function applyZaiProviderConfig(
const providers = { ...cfg.models?.providers };
const existingProvider = providers.zai;
const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
const defaultModels = [
buildZaiModelDefinition({ id: "glm-5" }),
@ -109,21 +131,13 @@ export function applyZaiProviderConfig(
buildZaiModelDefinition({ id: "glm-4.7-flashx" }),
];
const mergedModels = [...existingModels];
const seen = new Set(existingModels.map((m) => m.id));
for (const model of defaultModels) {
if (!seen.has(model.id)) {
mergedModels.push(model);
seen.add(model.id);
}
}
const mergedModels = mergeProviderModels(existingProvider, defaultModels);
const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
string,
unknown
> as { apiKey?: string };
const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined;
const normalizedApiKey = resolvedApiKey?.trim();
const normalizedApiKey = getNormalizedProviderApiKey(existingProvider);
const baseUrl = params?.endpoint
? resolveZaiBaseUrl(params.endpoint)
@ -256,12 +270,11 @@ export function applySyntheticProviderConfig(cfg: OpenClawConfig): OpenClawConfi
(model) => !existingModels.some((existing) => existing.id === model.id),
),
];
const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
string,
unknown
> as { apiKey?: string };
const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined;
const normalizedApiKey = resolvedApiKey?.trim();
const normalizedApiKey = getNormalizedProviderApiKey(existingProvider);
providers.synthetic = {
...existingProviderRest,
baseUrl: SYNTHETIC_BASE_URL,
@ -609,7 +622,6 @@ function applyModelStudioProviderConfigWithBaseUrl(
const providers = { ...cfg.models?.providers };
const existingProvider = providers.modelstudio;
const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
const defaultModels = [
buildModelStudioModelDefinition({ id: "qwen3.5-plus" }),
@ -622,21 +634,13 @@ function applyModelStudioProviderConfigWithBaseUrl(
buildModelStudioModelDefinition({ id: "kimi-k2.5" }),
];
const mergedModels = [...existingModels];
const seen = new Set(existingModels.map((m) => m.id));
for (const model of defaultModels) {
if (!seen.has(model.id)) {
mergedModels.push(model);
seen.add(model.id);
}
}
const mergedModels = mergeProviderModels(existingProvider, defaultModels);
const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
string,
unknown
> as { apiKey?: string };
const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined;
const normalizedApiKey = resolvedApiKey?.trim();
const normalizedApiKey = getNormalizedProviderApiKey(existingProvider);
providers.modelstudio = {
...existingProviderRest,

View File

@ -8,38 +8,42 @@ describe("talk config validation fail-closed behavior", () => {
vi.restoreAllMocks();
});
async function expectInvalidTalkConfig(config: unknown, messagePattern: RegExp) {
await withTempHomeConfig(config, async () => {
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
let thrown: unknown;
try {
loadConfig();
} catch (error) {
thrown = error;
}
expect(thrown).toBeInstanceOf(Error);
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
expect((thrown as Error).message).toMatch(messagePattern);
expect(consoleSpy).toHaveBeenCalled();
});
}
it.each([
["boolean", true],
["string", "1500"],
["float", 1500.5],
])("rejects %s talk.silenceTimeoutMs during config load", async (_label, value) => {
await withTempHomeConfig(
await expectInvalidTalkConfig(
{
agents: { list: [{ id: "main" }] },
talk: {
silenceTimeoutMs: value,
},
},
async () => {
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
let thrown: unknown;
try {
loadConfig();
} catch (error) {
thrown = error;
}
expect(thrown).toBeInstanceOf(Error);
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
expect((thrown as Error).message).toMatch(/silenceTimeoutMs|talk/i);
expect(consoleSpy).toHaveBeenCalled();
},
/silenceTimeoutMs|talk/i,
);
});
it("rejects talk.provider when it does not match talk.providers during config load", async () => {
await withTempHomeConfig(
await expectInvalidTalkConfig(
{
agents: { list: [{ id: "main" }] },
talk: {
@ -51,26 +55,12 @@ describe("talk config validation fail-closed behavior", () => {
},
},
},
async () => {
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
let thrown: unknown;
try {
loadConfig();
} catch (error) {
thrown = error;
}
expect(thrown).toBeInstanceOf(Error);
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
expect((thrown as Error).message).toMatch(/talk\.provider|talk\.providers|acme/i);
expect(consoleSpy).toHaveBeenCalled();
},
/talk\.provider|talk\.providers|acme/i,
);
});
it("rejects multi-provider talk config without talk.provider during config load", async () => {
await withTempHomeConfig(
await expectInvalidTalkConfig(
{
agents: { list: [{ id: "main" }] },
talk: {
@ -84,21 +74,7 @@ describe("talk config validation fail-closed behavior", () => {
},
},
},
async () => {
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
let thrown: unknown;
try {
loadConfig();
} catch (error) {
thrown = error;
}
expect(thrown).toBeInstanceOf(Error);
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
expect((thrown as Error).message).toMatch(/talk\.provider|required/i);
expect(consoleSpy).toHaveBeenCalled();
},
/talk\.provider|required/i,
);
});
});

View File

@ -4,7 +4,7 @@ export type BrowserProfileConfig = {
/** CDP URL for this profile (use for remote Chrome). */
cdpUrl?: string;
/** Profile driver (default: openclaw). */
driver?: "openclaw" | "clawd" | "extension";
driver?: "openclaw" | "clawd" | "extension" | "existing-session";
/** If true, never launch a browser for this profile; only attach. Falls back to browser.attachOnly. */
attachOnly?: boolean;
/** Profile color (hex). Auto-assigned at creation. */

View File

@ -360,7 +360,12 @@ export const OpenClawSchema = z
cdpPort: z.number().int().min(1).max(65535).optional(),
cdpUrl: z.string().optional(),
driver: z
.union([z.literal("openclaw"), z.literal("clawd"), z.literal("extension")])
.union([
z.literal("openclaw"),
z.literal("clawd"),
z.literal("extension"),
z.literal("existing-session"),
])
.optional(),
attachOnly: z.boolean().optional(),
color: HexColorSchema,

View File

@ -1,6 +1,7 @@
import "./isolated-agent.mocks.js";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js";
import { runCronIsolatedAgentTurn } from "./isolated-agent.js";
import {
makeCfg,
@ -9,27 +10,6 @@ import {
writeSessionStoreEntries,
} from "./isolated-agent.test-harness.js";
function makeDeps() {
return {
sendMessageSlack: vi.fn(),
sendMessageWhatsApp: vi.fn(),
sendMessageTelegram: vi.fn(),
sendMessageDiscord: vi.fn(),
sendMessageSignal: vi.fn(),
sendMessageIMessage: vi.fn(),
};
}
function mockEmbeddedOk() {
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
payloads: [{ text: "ok" }],
meta: {
durationMs: 5,
agentMeta: { sessionId: "s", provider: "p", model: "m" },
},
});
}
function lastEmbeddedLane(): string | undefined {
const calls = vi.mocked(runEmbeddedPiAgent).mock.calls;
expect(calls.length).toBeGreaterThan(0);
@ -45,11 +25,11 @@ async function runLaneCase(home: string, lane?: string) {
lastTo: "",
},
});
mockEmbeddedOk();
mockAgentPayloads([{ text: "ok" }]);
await runCronIsolatedAgentTurn({
cfg: makeCfg(home, storePath),
deps: makeDeps(),
deps: createCliDeps(),
job: makeJob({ kind: "agentTurn", message: "do it", deliver: false }),
message: "do it",
sessionKey: "cron:job-1",

View File

@ -2,6 +2,7 @@ import "./isolated-agent.mocks.js";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js";
import { runCronIsolatedAgentTurn } from "./isolated-agent.js";
import {
makeCfg,
@ -13,27 +14,6 @@ import type { CronJob } from "./types.js";
const withTempHome = withTempCronHome;
function makeDeps() {
return {
sendMessageSlack: vi.fn(),
sendMessageWhatsApp: vi.fn(),
sendMessageTelegram: vi.fn(),
sendMessageDiscord: vi.fn(),
sendMessageSignal: vi.fn(),
sendMessageIMessage: vi.fn(),
};
}
function mockEmbeddedOk() {
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
payloads: [{ text: "ok" }],
meta: {
durationMs: 5,
agentMeta: { sessionId: "s", provider: "p", model: "m" },
},
});
}
/**
* Extract the provider and model from the last runEmbeddedPiAgent call.
*/
@ -62,7 +42,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) {
},
...options.storeEntries,
});
mockEmbeddedOk();
mockAgentPayloads([{ text: "ok" }]);
const jobPayload = options.jobPayload ?? {
kind: "agentTurn" as const,
@ -72,7 +52,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) {
const res = await runCronIsolatedAgentTurn({
cfg: makeCfg(home, storePath, options.cfgOverrides),
deps: makeDeps(),
deps: createCliDeps(),
job: makeJob(jobPayload),
message: DEFAULT_MESSAGE,
sessionKey: options.sessionKey ?? "cron:job-1",
@ -310,7 +290,7 @@ describe("cron model formatting and precedence edge cases", () => {
// Step 2: No job model, session store says openai
vi.mocked(runEmbeddedPiAgent).mockClear();
mockEmbeddedOk();
mockAgentPayloads([{ text: "ok" }]);
const step2 = await runTurn(home, {
jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false },
storeEntries: {
@ -327,7 +307,7 @@ describe("cron model formatting and precedence edge cases", () => {
// Step 3: Job payload says anthropic, session store still says openai
vi.mocked(runEmbeddedPiAgent).mockClear();
mockEmbeddedOk();
mockAgentPayloads([{ text: "ok" }]);
const step3 = await runTurn(home, {
jobPayload: {
kind: "agentTurn",
@ -365,7 +345,7 @@ describe("cron model formatting and precedence edge cases", () => {
// Run 2: no override — must revert to default anthropic
vi.mocked(runEmbeddedPiAgent).mockClear();
mockEmbeddedOk();
mockAgentPayloads([{ text: "ok" }]);
const r2 = await runTurn(home, {
jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false },
});

View File

@ -133,6 +133,16 @@ async function runTelegramDeliveryResult(bestEffort: boolean) {
return outcome;
}
function expectSuccessfulTelegramTextDelivery(params: {
res: Awaited<ReturnType<typeof runCronIsolatedAgentTurn>>;
deps: CliDeps;
}): void {
expect(params.res.status).toBe("ok");
expect(params.res.delivered).toBe(true);
expect(params.res.deliveryAttempted).toBe(true);
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
}
async function runSignalDeliveryResult(bestEffort: boolean) {
let outcome:
| {
@ -379,31 +389,11 @@ describe("runCronIsolatedAgentTurn", () => {
});
it("delivers text directly when best-effort is disabled", async () => {
await withTempHome(async (home) => {
const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" });
const deps = createCliDeps();
mockAgentPayloads([{ text: "hello from cron" }]);
const res = await runTelegramAnnounceTurn({
home,
storePath,
deps,
delivery: {
mode: "announce",
channel: "telegram",
to: "123",
bestEffort: false,
},
});
expect(res.status).toBe("ok");
expect(res.delivered).toBe(true);
expect(res.deliveryAttempted).toBe(true);
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
expectDirectTelegramDelivery(deps, {
chatId: "123",
text: "hello from cron",
});
const { res, deps } = await runTelegramDeliveryResult(false);
expectSuccessfulTelegramTextDelivery({ res, deps });
expectDirectTelegramDelivery(deps, {
chatId: "123",
text: "hello from cron",
});
});
@ -459,10 +449,7 @@ describe("runCronIsolatedAgentTurn", () => {
},
});
expect(res.status).toBe("ok");
expect(res.delivered).toBe(true);
expect(res.deliveryAttempted).toBe(true);
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
expectSuccessfulTelegramTextDelivery({ res, deps });
expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(2);
expect(deps.sendMessageTelegram).toHaveBeenLastCalledWith(
"123",
@ -490,10 +477,7 @@ describe("runCronIsolatedAgentTurn", () => {
it("delivers text directly when best-effort is enabled", async () => {
const { res, deps } = await runTelegramDeliveryResult(true);
expect(res.status).toBe("ok");
expect(res.delivered).toBe(true);
expect(res.deliveryAttempted).toBe(true);
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
expectSuccessfulTelegramTextDelivery({ res, deps });
expectDirectTelegramDelivery(deps, {
chatId: "123",
text: "hello from cron",

View File

@ -31,6 +31,25 @@ const launchdRestartHandoffState = vi.hoisted(() => ({
}));
const defaultProgramArguments = ["node", "-e", "process.exit(0)"];
function expectLaunchctlEnableBootstrapOrder(env: Record<string, string | undefined>) {
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
const label = "ai.openclaw.gateway";
const plistPath = resolveLaunchAgentPlistPath(env);
const serviceId = `${domain}/${label}`;
const enableIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "enable" && c[1] === serviceId,
);
const bootstrapIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
);
expect(enableIndex).toBeGreaterThanOrEqual(0);
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
expect(enableIndex).toBeLessThan(bootstrapIndex);
return { domain, label, serviceId, bootstrapIndex };
}
function normalizeLaunchctlArgs(file: string, args: string[]): string[] {
if (file === "launchctl") {
return args;
@ -219,25 +238,12 @@ describe("launchd bootstrap repair", () => {
const repair = await repairLaunchAgentBootstrap({ env });
expect(repair.ok).toBe(true);
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
const label = "ai.openclaw.gateway";
const plistPath = resolveLaunchAgentPlistPath(env);
const serviceId = `${domain}/${label}`;
const enableIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "enable" && c[1] === serviceId,
);
const bootstrapIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
);
const { serviceId, bootstrapIndex } = expectLaunchctlEnableBootstrapOrder(env);
const kickstartIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId,
);
expect(enableIndex).toBeGreaterThanOrEqual(0);
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
expect(kickstartIndex).toBeGreaterThanOrEqual(0);
expect(enableIndex).toBeLessThan(bootstrapIndex);
expect(bootstrapIndex).toBeLessThan(kickstartIndex);
});
});
@ -258,23 +264,10 @@ describe("launchd install", () => {
programArguments: defaultProgramArguments,
});
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
const label = "ai.openclaw.gateway";
const plistPath = resolveLaunchAgentPlistPath(env);
const serviceId = `${domain}/${label}`;
const enableIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "enable" && c[1] === serviceId,
);
const bootstrapIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
);
const { serviceId } = expectLaunchctlEnableBootstrapOrder(env);
const installKickstartIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "kickstart" && c[2] === serviceId,
);
expect(enableIndex).toBeGreaterThanOrEqual(0);
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
expect(enableIndex).toBeLessThan(bootstrapIndex);
expect(installKickstartIndex).toBe(-1);
});
@ -360,24 +353,13 @@ describe("launchd install", () => {
stdout: new PassThrough(),
});
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
const label = "ai.openclaw.gateway";
const plistPath = resolveLaunchAgentPlistPath(env);
const serviceId = `${domain}/${label}`;
const { serviceId } = expectLaunchctlEnableBootstrapOrder(env);
const kickstartCalls = state.launchctlCalls.filter(
(c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId,
);
const enableIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "enable" && c[1] === serviceId,
);
const bootstrapIndex = state.launchctlCalls.findIndex(
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
);
expect(result).toEqual({ outcome: "completed" });
expect(kickstartCalls).toHaveLength(2);
expect(enableIndex).toBeGreaterThanOrEqual(0);
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false);
});

View File

@ -1,34 +1,19 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { PassThrough } from "node:stream";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { quoteCmdScriptArg } from "./cmd-argv.js";
const schtasksResponses = vi.hoisted(
() => [] as Array<{ code: number; stdout: string; stderr: string }>,
);
const schtasksCalls = vi.hoisted(() => [] as string[][]);
const inspectPortUsage = vi.hoisted(() => vi.fn());
const killProcessTree = vi.hoisted(() => vi.fn());
import "./test-helpers/schtasks-base-mocks.js";
import {
inspectPortUsage,
killProcessTree,
resetSchtasksBaseMocks,
schtasksResponses,
withWindowsEnv,
} from "./test-helpers/schtasks-fixtures.js";
const childUnref = vi.hoisted(() => vi.fn());
const spawn = vi.hoisted(() => vi.fn(() => ({ unref: childUnref })));
vi.mock("./schtasks-exec.js", () => ({
execSchtasks: async (argv: string[]) => {
schtasksCalls.push(argv);
return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" };
},
}));
vi.mock("../infra/ports.js", () => ({
inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args),
}));
vi.mock("../process/kill-tree.js", () => ({
killProcessTree: (...args: unknown[]) => killProcessTree(...args),
}));
vi.mock("node:child_process", async (importOriginal) => {
const actual = await importOriginal<typeof import("node:child_process")>();
return {
@ -43,6 +28,7 @@ const {
readScheduledTaskRuntime,
restartScheduledTask,
resolveTaskScriptPath,
stopScheduledTask,
} = await import("./schtasks.js");
function resolveStartupEntryPath(env: Record<string, string>) {
@ -57,6 +43,7 @@ function resolveStartupEntryPath(env: Record<string, string>) {
);
}
<<<<<<< HEAD
async function withWindowsEnv(
run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>,
) {
@ -74,11 +61,43 @@ async function withWindowsEnv(
}
}
async function writeGatewayScript(env: Record<string, string>, port = 18789) {
const scriptPath = resolveTaskScriptPath(env);
await fs.mkdir(path.dirname(scriptPath), { recursive: true });
await fs.writeFile(
scriptPath,
[
"@echo off",
`set "OPENCLAW_GATEWAY_PORT=${port}"`,
`"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`,
"",
].join("\r\n"),
"utf8",
);
}
||||||| parent of 8fb2c3f894 (refactor: share windows daemon test fixtures)
async function withWindowsEnv(
run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>,
) {
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-startup-"));
const env = {
USERPROFILE: tmpDir,
APPDATA: path.join(tmpDir, "AppData", "Roaming"),
OPENCLAW_PROFILE: "default",
OPENCLAW_GATEWAY_PORT: "18789",
};
try {
await run({ tmpDir, env });
} finally {
await fs.rm(tmpDir, { recursive: true, force: true });
}
}
=======
>>>>>>> 8fb2c3f894 (refactor: share windows daemon test fixtures)
beforeEach(() => {
schtasksResponses.length = 0;
schtasksCalls.length = 0;
inspectPortUsage.mockReset();
killProcessTree.mockReset();
resetSchtasksBaseMocks();
spawn.mockClear();
childUnref.mockClear();
});
@ -89,7 +108,7 @@ afterEach(() => {
describe("Windows startup fallback", () => {
it("falls back to a Startup-folder launcher when schtasks create is denied", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
{ code: 5, stdout: "", stderr: "ERROR: Access is denied." },
@ -124,7 +143,7 @@ describe("Windows startup fallback", () => {
});
it("falls back to a Startup-folder launcher when schtasks create hangs", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
{ code: 124, stdout: "", stderr: "schtasks timed out after 15000ms" },
@ -148,7 +167,7 @@ describe("Windows startup fallback", () => {
});
it("treats an installed Startup-folder launcher as loaded", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
{ code: 1, stdout: "", stderr: "not found" },
@ -161,7 +180,7 @@ describe("Windows startup fallback", () => {
});
it("reports runtime from the gateway listener when using the Startup fallback", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
{ code: 1, stdout: "", stderr: "not found" },
@ -183,7 +202,7 @@ describe("Windows startup fallback", () => {
});
it("restarts the Startup fallback by killing the current pid and relaunching the entry", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
{ code: 1, stdout: "", stderr: "not found" },
@ -211,4 +230,39 @@ describe("Windows startup fallback", () => {
);
});
});
it("kills the Startup fallback runtime even when the CLI env omits the gateway port", async () => {
await withWindowsEnv(async ({ env }) => {
schtasksResponses.push({ code: 0, stdout: "", stderr: "" });
await writeGatewayScript(env);
await fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true });
await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8");
inspectPortUsage
.mockResolvedValueOnce({
port: 18789,
status: "busy",
listeners: [{ pid: 5151, command: "node.exe" }],
hints: [],
})
.mockResolvedValueOnce({
port: 18789,
status: "busy",
listeners: [{ pid: 5151, command: "node.exe" }],
hints: [],
})
.mockResolvedValueOnce({
port: 18789,
status: "free",
listeners: [],
hints: [],
});
const stdout = new PassThrough();
const envWithoutPort = { ...env };
delete envWithoutPort.OPENCLAW_GATEWAY_PORT;
await stopScheduledTask({ env: envWithoutPort, stdout });
expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 });
});
});
});

View File

@ -1,34 +1,20 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { PassThrough } from "node:stream";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
const schtasksResponses = vi.hoisted(
() => [] as Array<{ code: number; stdout: string; stderr: string }>,
);
const schtasksCalls = vi.hoisted(() => [] as string[][]);
const inspectPortUsage = vi.hoisted(() => vi.fn());
const killProcessTree = vi.hoisted(() => vi.fn());
import "./test-helpers/schtasks-base-mocks.js";
import {
inspectPortUsage,
killProcessTree,
resetSchtasksBaseMocks,
schtasksCalls,
schtasksResponses,
withWindowsEnv,
} from "./test-helpers/schtasks-fixtures.js";
const findVerifiedGatewayListenerPidsOnPortSync = vi.hoisted(() =>
vi.fn<(port: number) => number[]>(() => []),
);
vi.mock("./schtasks-exec.js", () => ({
execSchtasks: async (argv: string[]) => {
schtasksCalls.push(argv);
return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" };
},
}));
vi.mock("../infra/ports.js", () => ({
inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args),
}));
vi.mock("../process/kill-tree.js", () => ({
killProcessTree: (...args: unknown[]) => killProcessTree(...args),
}));
vi.mock("../infra/gateway-processes.js", () => ({
findVerifiedGatewayListenerPidsOnPortSync: (port: number) =>
findVerifiedGatewayListenerPidsOnPortSync(port),
@ -37,23 +23,6 @@ vi.mock("../infra/gateway-processes.js", () => ({
const { restartScheduledTask, resolveTaskScriptPath, stopScheduledTask } =
await import("./schtasks.js");
async function withWindowsEnv(
run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>,
) {
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-stop-"));
const env = {
USERPROFILE: tmpDir,
APPDATA: path.join(tmpDir, "AppData", "Roaming"),
OPENCLAW_PROFILE: "default",
OPENCLAW_GATEWAY_PORT: "18789",
};
try {
await run({ tmpDir, env });
} finally {
await fs.rm(tmpDir, { recursive: true, force: true });
}
}
async function writeGatewayScript(env: Record<string, string>, port = 18789) {
const scriptPath = resolveTaskScriptPath(env);
await fs.mkdir(path.dirname(scriptPath), { recursive: true });
@ -70,10 +39,7 @@ async function writeGatewayScript(env: Record<string, string>, port = 18789) {
}
beforeEach(() => {
schtasksResponses.length = 0;
schtasksCalls.length = 0;
inspectPortUsage.mockReset();
killProcessTree.mockReset();
resetSchtasksBaseMocks();
findVerifiedGatewayListenerPidsOnPortSync.mockReset();
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]);
inspectPortUsage.mockResolvedValue({
@ -90,7 +56,7 @@ afterEach(() => {
describe("Scheduled Task stop/restart cleanup", () => {
it("kills lingering verified gateway listeners after schtasks stop", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
await writeGatewayScript(env);
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
@ -168,7 +134,7 @@ describe("Scheduled Task stop/restart cleanup", () => {
});
it("falls back to inspected gateway listeners when sync verification misses on Windows", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
await writeGatewayScript(env);
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },
@ -206,7 +172,7 @@ describe("Scheduled Task stop/restart cleanup", () => {
});
it("kills lingering verified gateway listeners and waits for port release before restart", async () => {
await withWindowsEnv(async ({ env }) => {
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
await writeGatewayScript(env);
schtasksResponses.push(
{ code: 0, stdout: "", stderr: "" },

View File

@ -161,6 +161,12 @@ export type ScheduledTaskInfo = {
lastRunResult?: string;
};
/**
 * Type guard: true when the listener carries a concrete numeric pid,
 * narrowing away both `undefined` and `null`.
 */
function hasListenerPid<T extends { pid?: number | null }>(
  listener: T,
): listener is T & { pid: number } {
  const { pid } = listener;
  return typeof pid === "number";
}
export function parseSchtasksQuery(output: string): ScheduledTaskInfo {
const entries = parseKeyValueOutput(output, ":");
const info: ScheduledTaskInfo = {};
@ -388,7 +394,7 @@ async function resolveScheduledTaskGatewayListenerPids(port: number): Promise<nu
new Set(
diagnostics.listeners
.map((listener) => listener.pid)
.filter((pid): pid is number => Number.isFinite(pid) && pid > 0),
.filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0),
),
);
}
@ -472,7 +478,7 @@ async function terminateBusyPortListeners(port: number): Promise<number[]> {
new Set(
diagnostics.listeners
.map((listener) => listener.pid)
.filter((pid): pid is number => Number.isFinite(pid) && pid > 0),
.filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0),
),
);
for (const pid of pids) {
@ -482,7 +488,7 @@ async function terminateBusyPortListeners(port: number): Promise<number[]> {
}
async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise<GatewayServiceRuntime> {
const port = resolveConfiguredGatewayPort(env);
const port = (await resolveScheduledTaskPort(env)) ?? resolveConfiguredGatewayPort(env);
if (!port) {
return {
status: "unknown",
@ -496,7 +502,7 @@ async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise<GatewaySe
detail: `Startup-folder login item installed; could not inspect port ${port}.`,
};
}
const listener = diagnostics.listeners.find((item) => typeof item.pid === "number");
const listener = diagnostics.listeners.find(hasListenerPid);
return {
status: diagnostics.status === "busy" ? "running" : "stopped",
...(listener?.pid ? { pid: listener.pid } : {}),

View File

@ -0,0 +1,22 @@
import { vi } from "vitest";
import {
inspectPortUsage,
killProcessTree,
schtasksCalls,
schtasksResponses,
} from "./schtasks-fixtures.js";
vi.mock("../schtasks-exec.js", () => ({
execSchtasks: async (argv: string[]) => {
schtasksCalls.push(argv);
return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" };
},
}));
vi.mock("../../infra/ports.js", () => ({
inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args),
}));
vi.mock("../../process/kill-tree.js", () => ({
killProcessTree: (...args: unknown[]) => killProcessTree(...args),
}));

View File

@ -0,0 +1,34 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { vi } from "vitest";
// FIFO queue of fake `schtasks` results consumed by the execSchtasks mock.
export const schtasksResponses: Array<{ code: number; stdout: string; stderr: string }> = [];
// Argv of every recorded `schtasks` invocation, in call order.
export const schtasksCalls: string[][] = [];
// Shared mock seams for port inspection and process-tree termination.
export const inspectPortUsage = vi.fn();
export const killProcessTree = vi.fn();
/**
 * Runs a test body inside a throwaway Windows-style home directory.
 *
 * Creates a temp dir under the OS tmp root using `prefix`, exposes it via a
 * minimal Windows env (USERPROFILE/APPDATA plus the default openclaw profile
 * and gateway port), and always removes the dir afterwards.
 */
export async function withWindowsEnv(
  prefix: string,
  run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>,
) {
  const sandboxDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
  const windowsEnv = {
    USERPROFILE: sandboxDir,
    APPDATA: path.join(sandboxDir, "AppData", "Roaming"),
    OPENCLAW_PROFILE: "default",
    OPENCLAW_GATEWAY_PORT: "18789",
  };
  try {
    await run({ tmpDir: sandboxDir, env: windowsEnv });
  } finally {
    // Clean up even when the test body throws.
    await fs.rm(sandboxDir, { recursive: true, force: true });
  }
}
/**
 * Restores the shared schtasks fixtures to a pristine state between tests:
 * drains the queued responses and recorded calls, and resets both mocks.
 */
export function resetSchtasksBaseMocks() {
  schtasksResponses.splice(0);
  schtasksCalls.splice(0);
  inspectPortUsage.mockReset();
  killProcessTree.mockReset();
}

View File

@ -123,6 +123,30 @@ describe("createDiscordGatewayPlugin", () => {
};
}
/**
 * Registers a canonical test client ("token-123") against the plugin,
 * narrowing it to the minimal registerClient surface the tests rely on.
 */
async function registerGatewayClient(plugin: unknown) {
  const gatewayPlugin = plugin as {
    registerClient: (client: { options: { token: string } }) => Promise<void>;
  };
  await gatewayPlugin.registerClient({ options: { token: "token-123" } });
}
/**
 * Asserts that registering a client against a plugin whose gateway-info fetch
 * resolves to `response` surfaces the canonical "fetch failed" error and
 * never reaches the base registerClient implementation.
 */
async function expectGatewayRegisterFetchFailure(response: Response) {
  const runtime = createRuntime();
  globalFetchMock.mockResolvedValue(response);
  const plugin = createDiscordGatewayPlugin({ discordConfig: {}, runtime });
  const registration = registerGatewayClient(plugin);
  await expect(registration).rejects.toThrow(
    "Failed to get gateway information from Discord: fetch failed",
  );
  expect(baseRegisterClientSpy).not.toHaveBeenCalled();
}
beforeEach(() => {
vi.stubGlobal("fetch", globalFetchMock);
baseRegisterClientSpy.mockClear();
@ -165,28 +189,12 @@ describe("createDiscordGatewayPlugin", () => {
});
it("maps plain-text Discord 503 responses to fetch failed", async () => {
const runtime = createRuntime();
globalFetchMock.mockResolvedValue({
await expectGatewayRegisterFetchFailure({
ok: false,
status: 503,
text: async () =>
"upstream connect error or disconnect/reset before headers. reset reason: overflow",
} as Response);
const plugin = createDiscordGatewayPlugin({
discordConfig: {},
runtime,
});
await expect(
(
plugin as unknown as {
registerClient: (client: { options: { token: string } }) => Promise<void>;
}
).registerClient({
options: { token: "token-123" },
}),
).rejects.toThrow("Failed to get gateway information from Discord: fetch failed");
expect(baseRegisterClientSpy).not.toHaveBeenCalled();
});
it("uses proxy agent for gateway WebSocket when configured", async () => {
@ -257,28 +265,12 @@ describe("createDiscordGatewayPlugin", () => {
});
it("maps body read failures to fetch failed", async () => {
const runtime = createRuntime();
globalFetchMock.mockResolvedValue({
await expectGatewayRegisterFetchFailure({
ok: true,
status: 200,
text: async () => {
throw new Error("body stream closed");
},
} as unknown as Response);
const plugin = createDiscordGatewayPlugin({
discordConfig: {},
runtime,
});
await expect(
(
plugin as unknown as {
registerClient: (client: { options: { token: string } }) => Promise<void>;
}
).registerClient({
options: { token: "token-123" },
}),
).rejects.toThrow("Failed to get gateway information from Discord: fetch failed");
expect(baseRegisterClientSpy).not.toHaveBeenCalled();
});
});

View File

@ -344,6 +344,20 @@ describe("GatewayClient connect auth payload", () => {
return parsed.params?.auth ?? {};
}
/**
 * Finds the first outbound "connect" RPC frame on the socket, asserts one
 * exists, and returns it parsed with the auth fields the tests inspect.
 */
function connectRequestFrom(ws: MockWebSocket) {
  const connectFrame = ws.sent.find((frame) => frame.includes('"method":"connect"'));
  expect(connectFrame).toBeTruthy();
  const parsed = JSON.parse(connectFrame ?? "{}") as {
    id?: string;
    params?: {
      auth?: {
        token?: string;
        deviceToken?: string;
      };
    };
  };
  return parsed;
}
function emitConnectChallenge(ws: MockWebSocket, nonce = "nonce-1") {
ws.emitMessage(
JSON.stringify({
@ -354,6 +368,63 @@ describe("GatewayClient connect auth payload", () => {
);
}
/**
 * Starts the client, opens the newest mock socket, replays the connect
 * challenge (optionally with a custom nonce), and returns the socket plus
 * the parsed connect frame it produced.
 */
function startClientAndConnect(params: { client: GatewayClient; nonce?: string }) {
  const { client, nonce } = params;
  client.start();
  const ws = getLatestWs();
  ws.emitOpen();
  emitConnectChallenge(ws, nonce);
  const connect = connectRequestFrom(ws);
  return { ws, connect };
}
/**
 * Replays a failed connect response on the socket: an INVALID_REQUEST /
 * "unauthorized" error frame addressed to `connectId`, carrying the given
 * error details payload.
 */
function emitConnectFailure(
  ws: MockWebSocket,
  connectId: string | undefined,
  details: Record<string, unknown>,
) {
  const failureFrame = {
    type: "res",
    id: connectId,
    ok: false,
    error: {
      code: "INVALID_REQUEST",
      message: "unauthorized",
      details,
    },
  };
  ws.emitMessage(JSON.stringify(failureFrame));
}
// Drives the shared retry flow: fail the first connect with the given error
// details, wait for the client to open a second socket, complete the new
// socket's challenge handshake, and return its connect frame for inspection.
async function expectRetriedConnectAuth(params: {
  firstWs: MockWebSocket;
  connectId: string | undefined;
  failureDetails: Record<string, unknown>;
}) {
  emitConnectFailure(params.firstWs, params.connectId, params.failureDetails);
  // The reconnect is asynchronous; poll until a second socket instance exists.
  await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 });
  const ws = getLatestWs();
  ws.emitOpen();
  emitConnectChallenge(ws, "nonce-2");
  return connectFrameFrom(ws);
}
// Asserts the client does NOT reconnect after a terminal connect failure:
// under fake timers, advance 30s and verify no second WebSocket was created.
// Always stops the client and restores real timers, even when the assertion
// throws, so later tests are unaffected.
async function expectNoReconnectAfterConnectFailure(params: {
  client: GatewayClient;
  firstWs: MockWebSocket;
  connectId: string | undefined;
  failureDetails: Record<string, unknown>;
}) {
  vi.useFakeTimers();
  try {
    emitConnectFailure(params.firstWs, params.connectId, params.failureDetails);
    // 30s window: any scheduled reconnect would have fired by now.
    await vi.advanceTimersByTimeAsync(30_000);
    expect(wsInstances).toHaveLength(1);
  } finally {
    params.client.stop();
    vi.useRealTimers();
  }
}
it("uses explicit shared token and does not inject stored device token", () => {
loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" });
const client = new GatewayClient({
@ -457,37 +528,16 @@ describe("GatewayClient connect auth payload", () => {
token: "shared-token",
});
client.start();
const ws1 = getLatestWs();
ws1.emitOpen();
emitConnectChallenge(ws1);
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
expect(firstConnectRaw).toBeTruthy();
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as {
id?: string;
params?: { auth?: { token?: string; deviceToken?: string } };
};
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
expect(firstConnect.params?.auth?.token).toBe("shared-token");
expect(firstConnect.params?.auth?.deviceToken).toBeUndefined();
ws1.emitMessage(
JSON.stringify({
type: "res",
id: firstConnect.id,
ok: false,
error: {
code: "INVALID_REQUEST",
message: "unauthorized",
details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
},
}),
);
await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 });
const ws2 = getLatestWs();
ws2.emitOpen();
emitConnectChallenge(ws2, "nonce-2");
expect(connectFrameFrom(ws2)).toMatchObject({
const retriedAuth = await expectRetriedConnectAuth({
firstWs: ws1,
connectId: firstConnect.id,
failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
});
expect(retriedAuth).toMatchObject({
token: "shared-token",
deviceToken: "stored-device-token",
});
@ -501,32 +551,13 @@ describe("GatewayClient connect auth payload", () => {
token: "shared-token",
});
client.start();
const ws1 = getLatestWs();
ws1.emitOpen();
emitConnectChallenge(ws1);
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
expect(firstConnectRaw).toBeTruthy();
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string };
ws1.emitMessage(
JSON.stringify({
type: "res",
id: firstConnect.id,
ok: false,
error: {
code: "INVALID_REQUEST",
message: "unauthorized",
details: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" },
},
}),
);
await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 });
const ws2 = getLatestWs();
ws2.emitOpen();
emitConnectChallenge(ws2, "nonce-2");
expect(connectFrameFrom(ws2)).toMatchObject({
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
const retriedAuth = await expectRetriedConnectAuth({
firstWs: ws1,
connectId: firstConnect.id,
failureDetails: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" },
});
expect(retriedAuth).toMatchObject({
token: "shared-token",
deviceToken: "stored-device-token",
});
@ -534,71 +565,33 @@ describe("GatewayClient connect auth payload", () => {
});
it("does not auto-reconnect on AUTH_TOKEN_MISSING connect failures", async () => {
vi.useFakeTimers();
const client = new GatewayClient({
url: "ws://127.0.0.1:18789",
token: "shared-token",
});
client.start();
const ws1 = getLatestWs();
ws1.emitOpen();
emitConnectChallenge(ws1);
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
expect(firstConnectRaw).toBeTruthy();
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string };
ws1.emitMessage(
JSON.stringify({
type: "res",
id: firstConnect.id,
ok: false,
error: {
code: "INVALID_REQUEST",
message: "unauthorized",
details: { code: "AUTH_TOKEN_MISSING" },
},
}),
);
await vi.advanceTimersByTimeAsync(30_000);
expect(wsInstances).toHaveLength(1);
client.stop();
vi.useRealTimers();
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
await expectNoReconnectAfterConnectFailure({
client,
firstWs: ws1,
connectId: firstConnect.id,
failureDetails: { code: "AUTH_TOKEN_MISSING" },
});
});
it("does not auto-reconnect on token mismatch when retry is not trusted", async () => {
vi.useFakeTimers();
loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" });
const client = new GatewayClient({
url: "wss://gateway.example.com:18789",
token: "shared-token",
});
client.start();
const ws1 = getLatestWs();
ws1.emitOpen();
emitConnectChallenge(ws1);
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
expect(firstConnectRaw).toBeTruthy();
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string };
ws1.emitMessage(
JSON.stringify({
type: "res",
id: firstConnect.id,
ok: false,
error: {
code: "INVALID_REQUEST",
message: "unauthorized",
details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
},
}),
);
await vi.advanceTimersByTimeAsync(30_000);
expect(wsInstances).toHaveLength(1);
client.stop();
vi.useRealTimers();
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
await expectNoReconnectAfterConnectFailure({
client,
firstWs: ws1,
connectId: firstConnect.id,
failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
});
});
});

View File

@ -1,4 +1,5 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import type { DedupeEntry } from "../server-shared.js";
import {
__testing,
readTerminalSnapshotFromGatewayDedupe,
@ -8,7 +9,7 @@ import {
describe("agent wait dedupe helper", () => {
function setRunEntry(params: {
dedupe: Map<unknown, unknown>;
dedupe: Map<string, DedupeEntry>;
kind: "agent" | "chat";
runId: string;
ts?: number;

View File

@ -6,6 +6,30 @@ import {
} from "./chat.abort.test-helpers.js";
import { chatHandlers } from "./chat.js";
/**
 * Invokes the chat.abort handler for a single run ("run-1" by default) on
 * session "main", on behalf of the connection/device/scopes supplied, and
 * returns the respond mock for assertion.
 */
async function invokeSingleRunAbort({
  context,
  runId = "run-1",
  connId,
  deviceId,
  scopes,
}: {
  context: ReturnType<typeof createChatAbortContext>;
  runId?: string;
  connId: string;
  deviceId: string;
  scopes: string[];
}) {
  // Describe the aborting client as the handler expects to see it.
  const client = {
    connId,
    connect: { device: { id: deviceId }, scopes },
  };
  return await invokeChatAbortHandler({
    handler: chatHandlers["chat.abort"],
    context,
    request: { sessionKey: "main", runId },
    client,
  });
}
describe("chat.abort authorization", () => {
it("rejects explicit run aborts from other clients", async () => {
const context = createChatAbortContext({
@ -17,14 +41,11 @@ describe("chat.abort authorization", () => {
]),
});
const respond = await invokeChatAbortHandler({
handler: chatHandlers["chat.abort"],
const respond = await invokeSingleRunAbort({
context,
request: { sessionKey: "main", runId: "run-1" },
client: {
connId: "conn-other",
connect: { device: { id: "dev-other" }, scopes: ["operator.write"] },
},
connId: "conn-other",
deviceId: "dev-other",
scopes: ["operator.write"],
});
const [ok, payload, error] = respond.mock.calls.at(-1) ?? [];
@ -92,14 +113,11 @@ describe("chat.abort authorization", () => {
]),
});
const respond = await invokeChatAbortHandler({
handler: chatHandlers["chat.abort"],
const respond = await invokeSingleRunAbort({
context,
request: { sessionKey: "main", runId: "run-1" },
client: {
connId: "conn-admin",
connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] },
},
connId: "conn-admin",
deviceId: "dev-admin",
scopes: ["operator.admin"],
});
const [ok, payload] = respond.mock.calls.at(-1) ?? [];

View File

@ -251,7 +251,7 @@ describe("resolveGatewayRuntimeConfig", () => {
});
describe("HTTP security headers", () => {
it.each([
const cases = [
{
name: "resolves strict transport security headers from config",
strictTransportSecurity: " max-age=31536000; includeSubDomains ",
@ -267,7 +267,13 @@ describe("resolveGatewayRuntimeConfig", () => {
strictTransportSecurity: " ",
expected: undefined,
},
])("$name", async ({ strictTransportSecurity, expected }) => {
] satisfies ReadonlyArray<{
name: string;
strictTransportSecurity: string | false;
expected: string | undefined;
}>;
it.each(cases)("$name", async ({ strictTransportSecurity, expected }) => {
const result = await resolveGatewayRuntimeConfig({
cfg: {
gateway: {

View File

@ -34,6 +34,27 @@ function expectAuthErrorDetails(params: {
}
}
async function expectSharedOperatorScopesCleared(
port: number,
auth: { token?: string; password?: string },
) {
const ws = await openWs(port);
try {
const res = await connectReq(ws, {
...auth,
scopes: ["operator.admin"],
device: null,
});
expect(res.ok).toBe(true);
const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false });
expect(adminRes.ok).toBe(false);
expect(adminRes.error?.message).toBe("missing scope: operator.admin");
} finally {
ws.close();
}
}
describe("gateway auth compatibility baseline", () => {
describe("token mode", () => {
let server: Awaited<ReturnType<typeof startGatewayServer>>;
@ -64,21 +85,7 @@ describe("gateway auth compatibility baseline", () => {
});
test("clears client-declared scopes for shared-token operator connects", async () => {
const ws = await openWs(port);
try {
const res = await connectReq(ws, {
token: "secret",
scopes: ["operator.admin"],
device: null,
});
expect(res.ok).toBe(true);
const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false });
expect(adminRes.ok).toBe(false);
expect(adminRes.error?.message).toBe("missing scope: operator.admin");
} finally {
ws.close();
}
await expectSharedOperatorScopesCleared(port, { token: "secret" });
});
test("returns stable token-missing details for control ui without token", async () => {
@ -184,21 +191,7 @@ describe("gateway auth compatibility baseline", () => {
});
test("clears client-declared scopes for shared-password operator connects", async () => {
const ws = await openWs(port);
try {
const res = await connectReq(ws, {
password: "secret",
scopes: ["operator.admin"],
device: null,
});
expect(res.ok).toBe(true);
const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false });
expect(adminRes.ok).toBe(false);
expect(adminRes.error?.message).toBe("missing scope: operator.admin");
} finally {
ws.close();
}
await expectSharedOperatorScopesCleared(port, { password: "secret" });
});
});

View File

@ -40,6 +40,7 @@ type TalkConfigPayload = {
ui?: { seamColor?: string };
};
};
type TalkConfig = NonNullable<NonNullable<TalkConfigPayload["config"]>["talk"]>;
const TALK_CONFIG_DEVICE_PATH = path.join(
os.tmpdir(),
`openclaw-talk-config-device-${process.pid}.json`,
@ -95,7 +96,7 @@ async function fetchTalkConfig(
}
function expectElevenLabsTalkConfig(
talk: TalkConfigPayload["config"] extends { talk?: infer T } ? T : never,
talk: TalkConfig | undefined,
expected: {
voiceId?: string;
apiKey?: string | SecretRef;

View File

@ -91,13 +91,13 @@ describe("agent-events sequencing", () => {
isControlUiVisible: true,
});
registerAgentRunContext("run-ctx", {
verboseLevel: "high",
verboseLevel: "full",
isHeartbeat: true,
});
expect(getAgentRunContext("run-ctx")).toEqual({
sessionKey: "session-main",
verboseLevel: "high",
verboseLevel: "full",
isHeartbeat: true,
isControlUiVisible: true,
});

36
src/infra/backoff.test.ts Normal file
View File

@ -0,0 +1,36 @@
import { describe, expect, it, vi } from "vitest";
import { computeBackoff, sleepWithAbort, type BackoffPolicy } from "./backoff.js";
describe("backoff helpers", () => {
const policy: BackoffPolicy = {
initialMs: 100,
maxMs: 250,
factor: 2,
jitter: 0.5,
};
it("treats attempts below one as the first backoff step", () => {
const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0);
try {
expect(computeBackoff(policy, 0)).toBe(100);
expect(computeBackoff(policy, 1)).toBe(100);
} finally {
randomSpy.mockRestore();
}
});
it("adds jitter and clamps to maxMs", () => {
const randomSpy = vi.spyOn(Math, "random").mockReturnValue(1);
try {
expect(computeBackoff(policy, 2)).toBe(250);
expect(computeBackoff({ ...policy, maxMs: 450 }, 2)).toBe(300);
} finally {
randomSpy.mockRestore();
}
});
it("returns immediately for non-positive sleep durations", async () => {
await expect(sleepWithAbort(0, AbortSignal.abort())).resolves.toBeUndefined();
await expect(sleepWithAbort(-5)).resolves.toBeUndefined();
});
});

View File

@ -0,0 +1,84 @@
import { describe, expect, it } from "vitest";
import { formatBackupCreateSummary, type BackupCreateResult } from "./backup-create.js";
/**
 * Builds a baseline BackupCreateResult fixture (non-dry-run, workspace
 * included, unverified, no assets or skips) and applies the given overrides
 * on top of it.
 */
function makeResult(overrides: Partial<BackupCreateResult> = {}): BackupCreateResult {
  const baseline: BackupCreateResult = {
    createdAt: "2026-01-01T00:00:00.000Z",
    archiveRoot: "openclaw-backup-2026-01-01",
    archivePath: "/tmp/openclaw-backup.tar.gz",
    dryRun: false,
    includeWorkspace: true,
    onlyConfig: false,
    verified: false,
    assets: [],
    skipped: [],
  };
  return { ...baseline, ...overrides };
}
// Covers the human-readable summary lines produced after a backup run.
describe("formatBackupCreateSummary", () => {
  it("formats created archives with included and skipped paths", () => {
    const lines = formatBackupCreateSummary(
      makeResult({
        verified: true,
        // One asset that was written into the archive.
        assets: [
          {
            kind: "state",
            sourcePath: "/state",
            archivePath: "archive/state",
            displayPath: "~/.openclaw",
          },
        ],
        // One path skipped because another included path already covers it.
        skipped: [
          {
            kind: "workspace",
            sourcePath: "/workspace",
            displayPath: "~/Projects/openclaw",
            reason: "covered",
            coveredBy: "~/.openclaw",
          },
        ],
      }),
    );
    // Singular "path" wording, skip reason rendered inline, and a trailing
    // verification status line because `verified` is true.
    expect(lines).toEqual([
      "Backup archive: /tmp/openclaw-backup.tar.gz",
      "Included 1 path:",
      "- state: ~/.openclaw",
      "Skipped 1 path:",
      "- workspace: ~/Projects/openclaw (covered by ~/.openclaw)",
      "Created /tmp/openclaw-backup.tar.gz",
      "Archive verification: passed",
    ]);
  });
  it("formats dry runs and pluralized counts", () => {
    const lines = formatBackupCreateSummary(
      makeResult({
        dryRun: true,
        // Two assets exercise the plural "paths" wording.
        assets: [
          {
            kind: "config",
            sourcePath: "/config",
            archivePath: "archive/config",
            displayPath: "~/.openclaw/config.json",
          },
          {
            kind: "oauth",
            sourcePath: "/oauth",
            archivePath: "archive/oauth",
            displayPath: "~/.openclaw/oauth",
          },
        ],
      }),
    );
    // Dry runs replace the "Created ..." line with a notice and omit the
    // verification line entirely.
    expect(lines).toEqual([
      "Backup archive: /tmp/openclaw-backup.tar.gz",
      "Included 2 paths:",
      "- config: ~/.openclaw/config.json",
      "- oauth: ~/.openclaw/oauth",
      "Dry run only; archive was not written.",
    ]);
  });
});

View File

@ -0,0 +1,38 @@
import { describe, expect, it, vi } from "vitest";
import type { runExec } from "../process/exec.js";
import type { RuntimeEnv } from "../runtime.js";
import { ensureBinary } from "./binaries.js";
// Verifies ensureBinary probes for executables via `which` and that a
// missing binary reports an error and exits with status 1.
describe("ensureBinary", () => {
  it("passes through when the binary exists", async () => {
    // A resolving exec simulates `which` finding the binary.
    const execMock: typeof runExec = vi.fn().mockResolvedValue({ stdout: "", stderr: "" });
    const env: RuntimeEnv = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
    await ensureBinary("node", execMock, env);
    expect(execMock).toHaveBeenCalledWith("which", ["node"]);
    expect(env.error).not.toHaveBeenCalled();
    expect(env.exit).not.toHaveBeenCalled();
  });
  it("logs and exits when the binary is missing", async () => {
    // A rejecting exec simulates `which` failing to locate the binary.
    const failingExec: typeof runExec = vi.fn().mockRejectedValue(new Error("missing"));
    const errorFn = vi.fn();
    // exit throws so the code path after it never runs, mirroring process.exit.
    const exitFn = vi.fn(() => {
      throw new Error("exit");
    });
    await expect(
      ensureBinary("ghost", failingExec, { log: vi.fn(), error: errorFn, exit: exitFn }),
    ).rejects.toThrow("exit");
    expect(errorFn).toHaveBeenCalledWith("Missing required binary: ghost. Please install it.");
    expect(exitFn).toHaveBeenCalledWith(1);
  });
});

View File

@ -0,0 +1,27 @@
import { describe, expect, it, vi } from "vitest";
// vi.hoisted runs before the hoisted vi.mock factory, so the spy exists when
// the logger module is first mocked.
const logDebugMock = vi.hoisted(() => vi.fn());
vi.mock("../logger.js", () => ({
  logDebug: (...args: unknown[]) => logDebugMock(...args),
}));
// Dynamic import AFTER the mock is registered so the module under test binds
// to the mocked logger.
const { ignoreCiaoCancellationRejection } = await import("./bonjour-ciao.js");
// Verifies the unhandled-rejection filter for ciao (mDNS) shutdown noise:
// cancellation rejections are swallowed and logged at debug level, anything
// else stays visible to the default handler.
describe("bonjour-ciao", () => {
  it("ignores and logs ciao cancellation rejections", () => {
    // Clear calls recorded during module import so the assertion below only
    // reflects this test's invocation (keeps the tests order-independent).
    logDebugMock.mockClear();
    expect(
      ignoreCiaoCancellationRejection(new Error("Ciao announcement cancelled by shutdown")),
    ).toBe(true);
    expect(logDebugMock).toHaveBeenCalledWith(
      expect.stringContaining("ignoring unhandled ciao rejection"),
    );
  });
  it("keeps unrelated rejections visible", () => {
    // Same per-test clear as above; mockClear (not mockReset) because no
    // implementation is installed on the spy.
    logDebugMock.mockClear();
    expect(ignoreCiaoCancellationRejection(new Error("boom"))).toBe(false);
    expect(logDebugMock).not.toHaveBeenCalled();
  });
});

View File

@ -0,0 +1,16 @@
import { describe, expect, it } from "vitest";
import { formatBonjourError } from "./bonjour-errors.js";
// Covers error-to-string formatting for Bonjour failures.
describe("formatBonjourError", () => {
  it("formats named errors with their type prefix", () => {
    // A custom error name becomes the "Type: message" prefix.
    const abortError = new Error("timed out");
    abortError.name = "AbortError";
    expect(formatBonjourError(abortError)).toBe("AbortError: timed out");
  });
  it("falls back to plain error strings and non-error values", () => {
    // Empty messages collapse to the bare error name.
    const emptyMessage = new Error("");
    expect(formatBonjourError(emptyMessage)).toBe("Error");
    // Non-Error inputs are stringified as-is.
    expect(formatBonjourError("boom")).toBe("boom");
    expect(formatBonjourError(42)).toBe("42");
  });
});

View File

@ -0,0 +1,204 @@
import path from "node:path";
import { beforeEach, describe, expect, it, vi } from "vitest";
// Hoisted spies so the vi.mock factories (which are hoisted above imports)
// can close over them safely.
const resolveBoundaryPathSyncMock = vi.hoisted(() => vi.fn());
const resolveBoundaryPathMock = vi.hoisted(() => vi.fn());
const openVerifiedFileSyncMock = vi.hoisted(() => vi.fn());
// Stub boundary resolution so tests control the canonical paths returned.
vi.mock("./boundary-path.js", () => ({
  resolveBoundaryPathSync: (...args: unknown[]) => resolveBoundaryPathSyncMock(...args),
  resolveBoundaryPath: (...args: unknown[]) => resolveBoundaryPathMock(...args),
}));
// Stub the verified-open layer so no real file descriptors are opened.
vi.mock("./safe-open-sync.js", () => ({
  openVerifiedFileSync: (...args: unknown[]) => openVerifiedFileSyncMock(...args),
}));
// Import after the mocks are registered so the module binds to the stubs.
const { canUseBoundaryFileOpen, openBoundaryFile, openBoundaryFileSync } =
  await import("./boundary-file-read.js");
// Exercises the boundary-checked file-open wrappers: capability detection,
// sync and async resolution paths, and how resolution failures are mapped
// into validation errors.
describe("boundary-file-read", () => {
  beforeEach(() => {
    // Fresh mock state per test so call-argument assertions stay isolated.
    resolveBoundaryPathSyncMock.mockReset();
    resolveBoundaryPathMock.mockReset();
    openVerifiedFileSyncMock.mockReset();
  });
  it("recognizes the required sync fs surface", () => {
    // Minimal object exposing every fs member the sync path requires.
    const validFs = {
      openSync() {},
      closeSync() {},
      fstatSync() {},
      lstatSync() {},
      realpathSync() {},
      readFileSync() {},
      constants: {},
    } as never;
    expect(canUseBoundaryFileOpen(validFs)).toBe(true);
    // Dropping any required function disqualifies the fs object.
    expect(
      canUseBoundaryFileOpen({
        ...validFs,
        openSync: undefined,
      } as never),
    ).toBe(false);
    // `constants` must be an object, not merely present.
    expect(
      canUseBoundaryFileOpen({
        ...validFs,
        constants: null,
      } as never),
    ).toBe(false);
  });
  it("maps sync boundary resolution into verified file opens", () => {
    const stat = { size: 3 } as never;
    const ioFs = { marker: "io" } as never;
    // The wrapper resolves the relative input against cwd before delegating.
    const absolutePath = path.resolve("plugin.json");
    resolveBoundaryPathSyncMock.mockReturnValue({
      canonicalPath: "/real/plugin.json",
      rootCanonicalPath: "/real/root",
    });
    openVerifiedFileSyncMock.mockReturnValue({
      ok: true,
      path: "/real/plugin.json",
      fd: 7,
      stat,
    });
    const opened = openBoundaryFileSync({
      absolutePath: "plugin.json",
      rootPath: "/workspace",
      boundaryLabel: "plugin root",
      ioFs,
    });
    // Resolution receives the absolutized path plus pass-through options.
    expect(resolveBoundaryPathSyncMock).toHaveBeenCalledWith({
      absolutePath,
      rootPath: "/workspace",
      rootCanonicalPath: undefined,
      boundaryLabel: "plugin root",
      skipLexicalRootCheck: undefined,
    });
    // The verified open is keyed to the canonical path from resolution and
    // always rejects hardlinks.
    expect(openVerifiedFileSyncMock).toHaveBeenCalledWith({
      filePath: absolutePath,
      resolvedPath: "/real/plugin.json",
      rejectHardlinks: true,
      maxBytes: undefined,
      allowedType: undefined,
      ioFs,
    });
    // The result is the verified-open result plus the root's real path.
    expect(opened).toEqual({
      ok: true,
      path: "/real/plugin.json",
      fd: 7,
      stat,
      rootRealPath: "/real/root",
    });
  });
  it("returns validation errors when sync boundary resolution throws", () => {
    const error = new Error("outside root");
    resolveBoundaryPathSyncMock.mockImplementation(() => {
      throw error;
    });
    const opened = openBoundaryFileSync({
      absolutePath: "plugin.json",
      rootPath: "/workspace",
      boundaryLabel: "plugin root",
    });
    // The thrown error is surfaced as a validation failure, not rethrown.
    expect(opened).toEqual({
      ok: false,
      reason: "validation",
      error,
    });
    // No open is attempted when resolution already failed.
    expect(openVerifiedFileSyncMock).not.toHaveBeenCalled();
  });
  it("guards against unexpected async sync-resolution results", () => {
    // A Promise from the *sync* resolver indicates a wiring bug; the sync
    // wrapper must refuse it rather than treat it as a resolved path.
    resolveBoundaryPathSyncMock.mockReturnValue(
      Promise.resolve({
        canonicalPath: "/real/plugin.json",
        rootCanonicalPath: "/real/root",
      }),
    );
    const opened = openBoundaryFileSync({
      absolutePath: "plugin.json",
      rootPath: "/workspace",
      boundaryLabel: "plugin root",
    });
    expect(opened.ok).toBe(false);
    if (opened.ok) {
      return;
    }
    expect(opened.reason).toBe("validation");
    expect(String(opened.error)).toContain("Unexpected async boundary resolution");
  });
  it("awaits async boundary resolution before verifying the file", async () => {
    const ioFs = { marker: "io" } as never;
    const absolutePath = path.resolve("notes.txt");
    resolveBoundaryPathMock.mockResolvedValue({
      canonicalPath: "/real/notes.txt",
      rootCanonicalPath: "/real/root",
    });
    // The verified open itself fails so we can see its error forwarded.
    openVerifiedFileSyncMock.mockReturnValue({
      ok: false,
      reason: "validation",
      error: new Error("blocked"),
    });
    const opened = await openBoundaryFile({
      absolutePath: "notes.txt",
      rootPath: "/workspace",
      boundaryLabel: "workspace",
      aliasPolicy: { allowFinalSymlinkForUnlink: true },
      ioFs,
    });
    // aliasPolicy is passed through to the async resolver as `policy`.
    expect(resolveBoundaryPathMock).toHaveBeenCalledWith({
      absolutePath,
      rootPath: "/workspace",
      rootCanonicalPath: undefined,
      boundaryLabel: "workspace",
      policy: { allowFinalSymlinkForUnlink: true },
      skipLexicalRootCheck: undefined,
    });
    expect(openVerifiedFileSyncMock).toHaveBeenCalledWith({
      filePath: absolutePath,
      resolvedPath: "/real/notes.txt",
      rejectHardlinks: true,
      maxBytes: undefined,
      allowedType: undefined,
      ioFs,
    });
    expect(opened).toEqual({
      ok: false,
      reason: "validation",
      error: expect.any(Error),
    });
  });
  it("maps async boundary resolution failures to validation errors", async () => {
    const error = new Error("escaped");
    resolveBoundaryPathMock.mockRejectedValue(error);
    const opened = await openBoundaryFile({
      absolutePath: "notes.txt",
      rootPath: "/workspace",
      boundaryLabel: "workspace",
    });
    // Rejection becomes a validation result, mirroring the sync throw path.
    expect(opened).toEqual({
      ok: false,
      reason: "validation",
      error,
    });
    expect(openVerifiedFileSyncMock).not.toHaveBeenCalled();
  });
});

View File

@ -0,0 +1,64 @@
import { describe, expect, it } from "vitest";
import { resolveCanvasHostUrl } from "./canvas-host-url.js";
// Exercises canvas host-URL resolution across override, request-host,
// and local-address fallbacks, including port and scheme handling.
describe("resolveCanvasHostUrl", () => {
  it("returns undefined when no canvas port or usable host is available", () => {
    expect(resolveCanvasHostUrl({})).toBeUndefined();
    // A loopback-only override is rejected, leaving no usable host.
    expect(resolveCanvasHostUrl({ canvasPort: 3000, hostOverride: "127.0.0.1" })).toBeUndefined();
  });
  it("prefers non-loopback host overrides and preserves explicit ports", () => {
    // The override wins over requestHost/localAddress; whitespace is trimmed
    // and the explicit canvas port is kept.
    expect(
      resolveCanvasHostUrl({
        canvasPort: 3000,
        hostOverride: " canvas.openclaw.ai ",
        requestHost: "gateway.local:9000",
        localAddress: "192.168.1.10",
      }),
    ).toBe("http://canvas.openclaw.ai:3000");
  });
  it("falls back from rejected loopback overrides to request hosts", () => {
    // The request host's own port is discarded in favor of the canvas port.
    expect(
      resolveCanvasHostUrl({
        canvasPort: 3000,
        hostOverride: "127.0.0.1",
        requestHost: "example.com:8443",
      }),
    ).toBe("http://example.com:3000");
  });
  it("maps proxied default gateway ports to request-host ports or scheme defaults", () => {
    // 18789 appears to be the default gateway port; in that case the
    // request host's explicit port is preserved — TODO confirm constant.
    expect(
      resolveCanvasHostUrl({
        canvasPort: 18789,
        requestHost: "gateway.example.com:9443",
        forwardedProto: "https",
      }),
    ).toBe("https://gateway.example.com:9443");
    // A list of forwarded protos uses the first entry; with no explicit
    // request port the scheme default (443 for https) is applied.
    expect(
      resolveCanvasHostUrl({
        canvasPort: 18789,
        requestHost: "gateway.example.com",
        forwardedProto: ["https", "http"],
      }),
    ).toBe("https://gateway.example.com:443");
    // No forwarded proto falls back to http and its default port 80.
    expect(
      resolveCanvasHostUrl({
        canvasPort: 18789,
        requestHost: "gateway.example.com",
      }),
    ).toBe("http://gateway.example.com:80");
  });
  it("brackets ipv6 hosts and can fall back to local addresses", () => {
    // An unparseable request host falls back to localAddress, and IPv6
    // literals are bracketed for URL syntax.
    expect(
      resolveCanvasHostUrl({
        canvasPort: 3000,
        requestHost: "not a host",
        localAddress: "2001:db8::1",
        scheme: "https",
      }),
    ).toBe("https://[2001:db8::1]:3000");
  });
});

Some files were not shown because too many files have changed in this diff.