* 'main' of https://github.com/openclaw/openclaw: (640 commits)
  ci: add npm token fallback for npm releases
  build: prepare 2026.3.13-beta.1
  docs: reorder unreleased changelog by impact
  fix: keep windows onboarding logs ascii-safe
  test: harden parallels all-os smoke harness
  chore: bump pi to 0.58.0
  fix(browser): prefer user profile over chrome relay
  build: upload Android native debug symbols
  Gateway: treat scope-limited probe RPC as degraded reachability (#45622)
  build: shrink Android app release bundle
  fix: keep exec summaries inline
  docs: fix changelog formatting
  test(discord): align rate limit error mock with carbon
  build(android): strip unused dnsjava resolver service before R8
  build(android): add auto-bump signed aab release script
  fix(browser): add browser session selection
  fix(models): apply Gemini model-id normalization to google-vertex provider (#42435)
  fix(feishu): add early event-level dedup to prevent duplicate replies (#43762)
  fix: unblock discord startup on deploy rate limits
  fix: default Android TLS setup codes to port 443
  ...

# Conflicts:
#	src/browser/pw-tools-core.interactions.batch.test.ts
#	src/browser/pw-tools-core.interactions.ts
This commit is contained in:
Vincent Koc 2026-03-13 22:13:33 -07:00
commit 81ecae9d7a
981 changed files with 50166 additions and 27183 deletions

View File

@@ -1,5 +1,11 @@
.git .git
.worktrees .worktrees
# Sensitive files docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
# into the project root; keep it out of the build context.
.env
.env.*
.bun-cache .bun-cache
.bun .bun
.tmp .tmp

View File

@@ -7,7 +7,7 @@ on:
concurrency: concurrency:
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }} cancel-in-progress: true
env: env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
@@ -38,9 +38,8 @@ jobs:
id: check id: check
uses: ./.github/actions/detect-docs-changes uses: ./.github/actions/detect-docs-changes
# Detect which heavy areas are touched so PRs can skip unrelated expensive jobs. # Detect which heavy areas are touched so CI can skip unrelated expensive jobs.
# Push to main keeps broad coverage, but this job still needs to run so # Fail-safe: if detection fails, downstream jobs run.
# downstream jobs that list it in `needs` are not skipped.
changed-scope: changed-scope:
needs: [docs-scope] needs: [docs-scope]
if: needs.docs-scope.outputs.docs_only != 'true' if: needs.docs-scope.outputs.docs_only != 'true'
@@ -82,7 +81,7 @@ jobs:
# Build dist once for Node-relevant changes and share it with downstream jobs. # Build dist once for Node-relevant changes and share it with downstream jobs.
build-artifacts: build-artifacts:
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404 runs-on: blacksmith-16vcpu-ubuntu-2404
steps: steps:
- name: Checkout - name: Checkout
@@ -141,7 +140,7 @@ jobs:
checks: checks:
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404 runs-on: blacksmith-16vcpu-ubuntu-2404
strategy: strategy:
fail-fast: false fail-fast: false
@@ -149,6 +148,13 @@ jobs:
include: include:
- runtime: node - runtime: node
task: test task: test
shard_index: 1
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node
task: test
shard_index: 2
shard_count: 2
command: pnpm canvas:a2ui:bundle && pnpm test command: pnpm canvas:a2ui:bundle && pnpm test
- runtime: node - runtime: node
task: extensions task: extensions
@@ -160,40 +166,47 @@ jobs:
task: test task: test
command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts
steps: steps:
- name: Skip bun lane on push - name: Skip bun lane on pull requests
if: github.event_name == 'push' && matrix.runtime == 'bun' if: github.event_name == 'pull_request' && matrix.runtime == 'bun'
run: echo "Skipping bun test lane on push events." run: echo "Skipping Bun compatibility lane on pull requests."
- name: Checkout - name: Checkout
if: github.event_name != 'push' || matrix.runtime != 'bun' if: github.event_name != 'pull_request' || matrix.runtime != 'bun'
uses: actions/checkout@v6 uses: actions/checkout@v6
with: with:
submodules: false submodules: false
- name: Setup Node environment - name: Setup Node environment
if: matrix.runtime != 'bun' || github.event_name != 'push' if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
uses: ./.github/actions/setup-node-env uses: ./.github/actions/setup-node-env
with: with:
install-bun: "${{ matrix.runtime == 'bun' }}" install-bun: "${{ matrix.runtime == 'bun' }}"
use-sticky-disk: "false" use-sticky-disk: "false"
- name: Configure Node test resources - name: Configure Node test resources
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
env:
SHARD_COUNT: ${{ matrix.shard_count || '' }}
SHARD_INDEX: ${{ matrix.shard_index || '' }}
run: | run: |
# `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes. # `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes.
# Default heap limits have been too low on Linux CI (V8 OOM near 4GB). # Default heap limits have been too low on Linux CI (V8 OOM near 4GB).
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV" echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV" echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV"
if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then
echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV"
echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV"
fi
- name: Run ${{ matrix.task }} (${{ matrix.runtime }}) - name: Run ${{ matrix.task }} (${{ matrix.runtime }})
if: matrix.runtime != 'bun' || github.event_name != 'push' if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
run: ${{ matrix.command }} run: ${{ matrix.command }}
# Types, lint, and format check. # Types, lint, and format check.
check: check:
name: "check" name: "check"
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404 runs-on: blacksmith-16vcpu-ubuntu-2404
steps: steps:
- name: Checkout - name: Checkout
@@ -239,7 +252,7 @@ jobs:
compat-node22: compat-node22:
name: "compat-node22" name: "compat-node22"
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404 runs-on: blacksmith-16vcpu-ubuntu-2404
steps: steps:
- name: Checkout - name: Checkout
@@ -272,7 +285,7 @@ jobs:
skills-python: skills-python:
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true') if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404 runs-on: blacksmith-16vcpu-ubuntu-2404
steps: steps:
- name: Checkout - name: Checkout
@@ -365,7 +378,7 @@ jobs:
checks-windows: checks-windows:
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true') if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true'
runs-on: blacksmith-32vcpu-windows-2025 runs-on: blacksmith-32vcpu-windows-2025
timeout-minutes: 45 timeout-minutes: 45
env: env:
@@ -727,7 +740,7 @@ jobs:
android: android:
needs: [docs-scope, changed-scope] needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true') if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404 runs-on: blacksmith-16vcpu-ubuntu-2404
strategy: strategy:
fail-fast: false fail-fast: false

View File

@@ -69,8 +69,13 @@ jobs:
run: pnpm release:check run: pnpm release:check
- name: Publish - name: Publish
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: | run: |
set -euo pipefail set -euo pipefail
if [[ -n "${NODE_AUTH_TOKEN:-}" ]]; then
printf '//registry.npmjs.org/:_authToken=%s\n' "$NODE_AUTH_TOKEN" > "$HOME/.npmrc"
fi
PACKAGE_VERSION=$(node -p "require('./package.json').version") PACKAGE_VERSION=$(node -p "require('./package.json').version")
if [[ "$PACKAGE_VERSION" == *-beta.* ]]; then if [[ "$PACKAGE_VERSION" == *-beta.* ]]; then

16
.jscpd.json Normal file
View File

@@ -0,0 +1,16 @@
{
"gitignore": true,
"noSymlinks": true,
"ignore": [
"**/node_modules/**",
"**/dist/**",
"dist/**",
"**/.git/**",
"**/coverage/**",
"**/build/**",
"**/.build/**",
"**/.artifacts/**",
"docs/zh-CN/**",
"**/CHANGELOG.md"
]
}

View File

@@ -132,6 +132,7 @@
- Framework: Vitest with V8 coverage thresholds (70% lines/branches/functions/statements). - Framework: Vitest with V8 coverage thresholds (70% lines/branches/functions/statements).
- Naming: match source names with `*.test.ts`; e2e in `*.e2e.test.ts`. - Naming: match source names with `*.test.ts`; e2e in `*.e2e.test.ts`.
- Run `pnpm test` (or `pnpm test:coverage`) before pushing when you touch logic. - Run `pnpm test` (or `pnpm test:coverage`) before pushing when you touch logic.
- For targeted/local debugging, keep using the wrapper: `pnpm test -- <path-or-filter> [vitest args...]` (for example `pnpm test -- src/commands/onboard-search.test.ts -t "shows registered plugin providers"`); do not default to raw `pnpm vitest run ...` because it bypasses wrapper config/profile/pool routing.
- Do not set test workers above 16; tried already. - Do not set test workers above 16; tried already.
- If local Vitest runs cause memory pressure (common on non-Mac-Studio hosts), use `OPENCLAW_TEST_PROFILE=low OPENCLAW_TEST_SERIAL_GATEWAY=1 pnpm test` for land/gate runs. - If local Vitest runs cause memory pressure (common on non-Mac-Studio hosts), use `OPENCLAW_TEST_PROFILE=low OPENCLAW_TEST_SERIAL_GATEWAY=1 pnpm test` for land/gate runs.
- Live tests (real keys): `CLAWDBOT_LIVE_TEST=1 pnpm test:live` (OpenClaw-only) or `LIVE=1 pnpm test:live` (includes provider live tests). Docker: `pnpm test:docker:live-models`, `pnpm test:docker:live-gateway`. Onboarding Docker E2E: `pnpm test:docker:onboard`. - Live tests (real keys): `CLAWDBOT_LIVE_TEST=1 pnpm test:live` (OpenClaw-only) or `LIVE=1 pnpm test:live` (includes provider live tests). Docker: `pnpm test:docker:live-models`, `pnpm test:docker:live-gateway`. Onboarding Docker E2E: `pnpm test:docker:onboard`.
@@ -201,6 +202,42 @@
## Agent-Specific Notes ## Agent-Specific Notes
- Vocabulary: "makeup" = "mac app". - Vocabulary: "makeup" = "mac app".
- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested.
- Parallels macOS smoke playbook:
- `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`.
- Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed.
- Preferred automation entrypoint: `pnpm test:parallels:macos`. It restores the snapshot most closely matching `macOS 26.3.1 fresh`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
- Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero.
- Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded.
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-smoke.*`.
- All-OS parallel runs should share the host `dist` build via `/tmp/openclaw-parallels-build.lock` instead of rebuilding three times.
- Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails.
- Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`.
- For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green.
- Don't run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially.
- Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading.
- Parallels Windows smoke playbook:
- Preferred automation entrypoint: `pnpm test:parallels:windows`. It restores the snapshot most closely matching `pre-openclaw-native-e2e-2026-03-12`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
- Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero.
- Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded.
- Always use `prlctl exec --current-user` for Windows guest runs; plain `prlctl exec` lands in `NT AUTHORITY\SYSTEM` and does not match the real desktop-user install path.
- Prefer explicit `npm.cmd` / `openclaw.cmd`. Bare `npm` / `openclaw` in PowerShell can hit the `.ps1` shim and fail under restrictive execution policy.
- Use PowerShell only as the transport (`powershell.exe -NoProfile -ExecutionPolicy Bypass`) and call the `.cmd` shims explicitly from inside it.
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-windows.*`.
- Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails.
- Keep Windows onboarding/status text ASCII-clean in logs. Fancy punctuation in banners shows up as mojibake through the current guest PowerShell capture path.
- Parallels Linux smoke playbook:
- Preferred automation entrypoint: `pnpm test:parallels:linux`. It restores the snapshot most closely matching `fresh` on `Ubuntu 24.04.3 ARM64`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
- Use plain `prlctl exec` on this snapshot. `--current-user` is not the right transport there.
- Fresh snapshot reality: `curl` is missing and `apt-get update` can fail on clock skew. Bootstrap with `apt-get -o Acquire::Check-Date=false update` and install `curl ca-certificates` before testing installer paths.
- Fresh `main` tgz smoke on Linux still needs the latest-release installer first, because this snapshot has no Node/npm before bootstrap. The harness does stable bootstrap first, then overlays current `main`.
- This snapshot does not have a usable `systemd --user` session. Treat managed daemon install as unsupported here; use `--skip-health`, then verify with direct `openclaw gateway run --bind loopback --port 18789 --force`.
- Env-backed auth refs are still fine, but any direct shell launch (`openclaw gateway run`, `openclaw agent --local`, Linux `gateway status --deep` against that direct run) must inherit the referenced env vars in the same shell.
- `prlctl exec` reaps detached Linux child processes on this snapshot, so a background `openclaw gateway run` launched from automation is not a trustworthy smoke path. The harness verifies installer + `agent --local`; do direct gateway checks only from an interactive guest shell when needed.
- When you do run Linux gateway checks manually from an interactive guest shell, use `openclaw gateway status --deep --require-rpc` so an RPC miss is a hard failure.
- Prefer direct argv guest commands for fetch/install steps (`curl`, `npm install -g`, `openclaw ...`) over nested `bash -lc` quoting; Linux guest quoting through Parallels was the flaky part.
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-linux.*`.
- Current expected outcome on Linux smoke: fresh + upgrade should pass installer and `agent --local`; gateway remains `skipped-no-detached-linux-gateway` on this snapshot and should not be treated as a regression by itself.
- Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`. - Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`.
- When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`). - When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`).
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`. - Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`.

View File

@@ -7,32 +7,64 @@ Docs: https://docs.openclaw.ai
### Changes ### Changes
- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus. - Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman. - iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman.
- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chromes own setup guides.
- Browser/agents: add built-in `profile="user"` for the logged-in host browser and `profile="chrome-relay"` for the extension relay, so agent browser calls can prefer the real signed-in browser without the extra `browserSession` selector.
- Browser/act automation: add batched actions, selector targeting, and delayed clicks for browser act requests with normalized batch dispatch. Thanks @vincentkoc.
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
- Dependencies/pi: bump `@mariozechner/pi-agent-core`, `@mariozechner/pi-ai`, `@mariozechner/pi-coding-agent`, and `@mariozechner/pi-tui` to `0.58.0`.
### Fixes ### Fixes
- Dashboard/chat UI: stop reloading full chat history on every live tool result in dashboard v2 so tool-heavy runs no longer trigger UI freeze/re-render storms while the final event still refreshes persisted history. (#45541) Thanks @BunsDev.
- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang. - Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups. - Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding. - Browser/existing-session: harden driver validation and session lifecycle so transport errors trigger reconnects while tool-level errors preserve the session, and extract shared ARIA role sets to deduplicate Playwright and Chrome MCP snapshot paths. (#45682) Thanks @odysseus0.
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus. - Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv. - Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman.
- Gateway/session reset: preserve `lastAccountId` and `lastThreadId` across gateway session resets so replies keep routing back to the same account and thread after `/reset`. (#44773) Thanks @Lanfei. - Gateway/session reset: preserve `lastAccountId` and `lastThreadId` across gateway session resets so replies keep routing back to the same account and thread after `/reset`. (#44773) Thanks @Lanfei.
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei. - macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
- Gateway/status: add `openclaw gateway status --require-rpc` and clearer Linux non-interactive daemon-install failure reporting so automation can fail hard on probe misses instead of treating a printed RPC error as green.
- macOS/exec approvals: respect per-agent exec approval settings in the gateway prompter, including allowlist fallback when the native prompt cannot be shown, so gateway-triggered `system.run` requests follow configured policy instead of always prompting or denying unexpectedly. (#13707) Thanks @sliekens.
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
- Telegram/inbound media IPv4 fallback: retry SSRF-guarded Telegram file downloads once with the same IPv4 fallback policy as Bot API calls so fresh installs on IPv6-broken hosts no longer fail to download inbound images.
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart.
- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`.
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman.
- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
- Dashboard/chat UI: render oversized plain-text replies as normal paragraphs instead of capped gray code blocks, so long desktop chat responses stay readable without tab-switching refreshes.
- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
- Gateway/Control UI: restore the operator-only device-auth bypass and classify browser connect failures so origin and device-identity problems no longer show up as auth errors in the Control UI and web chat. (#45512) Thanks @sallyom.
- macOS/voice wake: stop crashing wake-word command extraction when speech segment ranges come from a different transcript instance.
- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks.
- macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo.
- Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu.
- Models/google-vertex Gemini flash-lite normalization: apply existing bare-ID preview normalization to `google-vertex` model refs and provider configs so `google-vertex/gemini-3.1-flash-lite` resolves as `gemini-3.1-flash-lite-preview`. (#42435) Thanks @scoootscooob.
- iMessage/remote attachments: reject unsafe remote attachment paths before spawning SCP, so sender-controlled filenames can no longer inject shell metacharacters into remote media staging. Thanks @lintsinghua.
- Telegram/webhook auth: validate the Telegram webhook secret before reading or parsing request bodies, so unauthenticated requests are rejected immediately instead of consuming up to 1 MB first. Thanks @space08.
- Security/device pairing: make bootstrap setup codes single-use so pending device pairing requests cannot be silently replayed and widened to admin before approval. Thanks @tdjackey.
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants.
- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token.
- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks.
- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins.
- Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn.
- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc.
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference. - Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97. - Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv.
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179. - Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179.
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec. - Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
- Signal/config validation: add `channels.signal.groups` schema support so per-group `requireMention`, `tools`, and `toolsBySender` overrides no longer get rejected during config validation. (#27199) Thanks @unisone. - Signal/config validation: add `channels.signal.groups` schema support so per-group `requireMention`, `tools`, and `toolsBySender` overrides no longer get rejected during config validation. (#27199) Thanks @unisone.
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh. - Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates. - Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08.
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
## 2026.3.12 ## 2026.3.12
@ -45,6 +77,7 @@ Docs: https://docs.openclaw.ai
- Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi - Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi
- Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff - Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff
- Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc. - Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc.
- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc.
### Fixes ### Fixes
@ -101,13 +134,16 @@ Docs: https://docs.openclaw.ai
- Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) thanks @jalehman. - Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) thanks @jalehman.
- Context engine/session routing: forward optional `sessionKey` through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) thanks @jalehman. - Context engine/session routing: forward optional `sessionKey` through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) thanks @jalehman.
- Agents/failover: classify z.ai `network_error` stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev. - Agents/failover: classify z.ai `network_error` stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev.
- Config/Anthropic startup: inline Anthropic alias normalization during config load so gateway startup no longer crashes on dated Anthropic model refs like `anthropic/claude-sonnet-4-20250514`. (#45520) Thanks @BunsDev.
- Memory/session sync: add mode-aware post-compaction session reindexing with `agents.defaults.compaction.postIndexSync` plus `agents.defaults.memorySearch.sync.sessions.postCompactionForce`, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) thanks @rodrigouroz. - Memory/session sync: add mode-aware post-compaction session reindexing with `agents.defaults.compaction.postIndexSync` plus `agents.defaults.memorySearch.sync.sessions.postCompactionForce`, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) thanks @rodrigouroz.
- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb. - Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb.
- Telegram/native command sync: suppress expected `BOT_COMMANDS_TOO_MUCH` retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures. - Telegram/native command sync: suppress expected `BOT_COMMANDS_TOO_MUCH` retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures.
- Mattermost/reply media delivery: pass agent-scoped `mediaLocalRoots` through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666. - Mattermost/reply media delivery: pass agent-scoped `mediaLocalRoots` through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666.
- Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process `HOME`/`OPENCLAW_HOME` changes no longer reuse stale plugin state or misreport `~/...` plugins as untracked. (#44046) thanks @gumadeiras. - Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process `HOME`/`OPENCLAW_HOME` changes no longer reuse stale plugin state or misreport `~/...` plugins as untracked. (#44046) thanks @gumadeiras.
- Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated `session.store` roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) thanks @gumadeiras. - Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated `session.store` roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) thanks @gumadeiras.
- Browser/existing-session: stop reporting fake CDP ports/URLs for live attached Chrome sessions, render `transport: chrome-mcp` in CLI/status output instead of `port: 0`, and keep timeout diagnostics transport-aware when no direct CDP URL exists.
- Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and `models list --plain`, and migrate legacy duplicated `openrouter/openrouter/...` config entries forward on write. - Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and `models list --plain`, and migrate legacy duplicated `openrouter/openrouter/...` config entries forward on write.
- Feishu/event dedupe: keep early duplicate suppression aligned with the shared Feishu message-id contract and release the pre-queue dedupe marker after failed dispatch so retried events can recover instead of being dropped until the short TTL expires. (#43762) Thanks @yunweibang.
- Gateway/hooks: bucket hook auth failures by forwarded client IP behind trusted proxies and warn when `hooks.allowedAgentIds` leaves hook routing unrestricted. - Gateway/hooks: bucket hook auth failures by forwarded client IP behind trusted proxies and warn when `hooks.allowedAgentIds` leaves hook routing unrestricted.
- Agents/compaction: skip the post-compaction `cache-ttl` marker write when a compaction completed in the same attempt, preventing the next turn from immediately triggering a second tiny compaction. (#28548) thanks @MoerAI. - Agents/compaction: skip the post-compaction `cache-ttl` marker write when a compaction completed in the same attempt, preventing the next turn from immediately triggering a second tiny compaction. (#28548) thanks @MoerAI.
- Native chat/macOS: add `/new`, `/reset`, and `/clear` reset triggers, keep shared main-session aliases aligned, and ignore stale model-selection completions so native chat state stays in sync across reset and fast model changes. (#10898) Thanks @Nachx639. - Native chat/macOS: add `/new`, `/reset`, and `/clear` reset triggers, keep shared main-session aliases aligned, and ignore stale model-selection completions so native chat state stays in sync across reset and fast model changes. (#10898) Thanks @Nachx639.
@ -118,6 +154,8 @@ Docs: https://docs.openclaw.ai
- Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn. - Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn.
- CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621. - CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621.
- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte. - Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte.
- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh.
- Feishu/file uploads: preserve literal UTF-8 filenames in `im.file.create` so Chinese and other non-ASCII filenames no longer appear percent-encoded in chat. (#34262) Thanks @fabiaodemianyang and @KangShuaiFu.
## 2026.3.11 ## 2026.3.11
@ -258,6 +296,7 @@ Docs: https://docs.openclaw.ai
- Agents/failover: classify ZenMux quota-refresh `402` responses as `rate_limit` so model fallback retries continue instead of stopping on a temporary subscription window. (#43917) thanks @bwjoke. - Agents/failover: classify ZenMux quota-refresh `402` responses as `rate_limit` so model fallback retries continue instead of stopping on a temporary subscription window. (#43917) thanks @bwjoke.
- Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode. - Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode.
- Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows. - Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows.
- macOS/remote gateway: stop PortGuardian from killing Docker Desktop and other external listeners on the gateway port in remote mode, so containerized and tunneled gateway setups no longer lose their port-forward owner on app startup. (#6755) Thanks @teslamint.
## 2026.3.8 ## 2026.3.8
@ -3261,7 +3300,7 @@ Docs: https://docs.openclaw.ai
- Agents: add CLI log hint to "agent failed before reply" messages. (#1550) Thanks @sweepies. - Agents: add CLI log hint to "agent failed before reply" messages. (#1550) Thanks @sweepies.
- Agents: warn and ignore tool allowlists that only reference unknown or unloaded plugin tools. (#1566) - Agents: warn and ignore tool allowlists that only reference unknown or unloaded plugin tools. (#1566)
- Agents: treat plugin-only tool allowlists as opt-ins; keep core tools enabled. (#1467) - Agents: treat plugin-only tool allowlists as opt-ins; keep core tools enabled. (#1467)
- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (commit 084002998) - Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (#45459) Thanks @LyttonFeng and @vincentkoc.
- Slack: honor open groupPolicy for unlisted channels in message + slash gating. (#1563) Thanks @itsjaydesu. - Slack: honor open groupPolicy for unlisted channels in message + slash gating. (#1563) Thanks @itsjaydesu.
- Discord: limit autoThread mention bypass to bot-owned threads; keep ack reactions mention-gated. (#1511) Thanks @pvoo. - Discord: limit autoThread mention bypass to bot-owned threads; keep ack reactions mention-gated. (#1511) Thanks @pvoo.
- Discord: retry rate-limited allowlist resolution + command deploy to avoid gateway crashes. (commit f70ac0c7c) - Discord: retry rate-limited allowlist resolution + command deploy to avoid gateway crashes. (commit f70ac0c7c)

View File

@ -132,6 +132,7 @@ WORKDIR /app
RUN --mount=type=cache,id=openclaw-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ RUN --mount=type=cache,id=openclaw-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ --mount=type=cache,id=openclaw-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update && \ apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
procps hostname curl git openssl procps hostname curl git openssl

View File

@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update \ apt-get update \
&& apt-get upgrade -y --no-install-recommends \
&& apt-get install -y --no-install-recommends \ && apt-get install -y --no-install-recommends \
bash \ bash \
ca-certificates \ ca-certificates \

View File

@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update \ apt-get update \
&& apt-get upgrade -y --no-install-recommends \
&& apt-get install -y --no-install-recommends \ && apt-get install -y --no-install-recommends \
bash \ bash \
ca-certificates \ ca-certificates \

View File

@ -24,6 +24,7 @@ ENV PATH=${BUN_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/sbin
RUN --mount=type=cache,id=openclaw-sandbox-common-apt-cache,target=/var/cache/apt,sharing=locked \ RUN --mount=type=cache,id=openclaw-sandbox-common-apt-cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,id=openclaw-sandbox-common-apt-lists,target=/var/lib/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-common-apt-lists,target=/var/lib/apt,sharing=locked \
apt-get update \ apt-get update \
&& apt-get upgrade -y --no-install-recommends \
&& apt-get install -y --no-install-recommends ${PACKAGES} && apt-get install -y --no-install-recommends ${PACKAGES}
RUN if [ "${INSTALL_PNPM}" = "1" ]; then npm install -g pnpm; fi RUN if [ "${INSTALL_PNPM}" = "1" ]; then npm install -g pnpm; fi

View File

@ -101,25 +101,19 @@ public enum WakeWordGate {
} }
public static func commandText( public static func commandText(
transcript: String, transcript _: String,
segments: [WakeWordSegment], segments: [WakeWordSegment],
triggerEndTime: TimeInterval) triggerEndTime: TimeInterval)
-> String { -> String {
let threshold = triggerEndTime + 0.001 let threshold = triggerEndTime + 0.001
var commandWords: [String] = []
commandWords.reserveCapacity(segments.count)
for segment in segments where segment.start >= threshold { for segment in segments where segment.start >= threshold {
if normalizeToken(segment.text).isEmpty { continue } let normalized = normalizeToken(segment.text)
if let range = segment.range { if normalized.isEmpty { continue }
let slice = transcript[range.lowerBound...] commandWords.append(segment.text)
return String(slice).trimmingCharacters(in: Self.whitespaceAndPunctuation)
}
break
} }
return commandWords.joined(separator: " ").trimmingCharacters(in: Self.whitespaceAndPunctuation)
let text = segments
.filter { $0.start >= threshold && !normalizeToken($0.text).isEmpty }
.map(\.text)
.joined(separator: " ")
return text.trimmingCharacters(in: Self.whitespaceAndPunctuation)
} }
public static func matchesTextOnly(text: String, triggers: [String]) -> Bool { public static func matchesTextOnly(text: String, triggers: [String]) -> Bool {

View File

@ -46,6 +46,25 @@ import Testing
let match = WakeWordGate.match(transcript: transcript, segments: segments, config: config) let match = WakeWordGate.match(transcript: transcript, segments: segments, config: config)
#expect(match?.command == "do it") #expect(match?.command == "do it")
} }
@Test func commandTextHandlesForeignRangeIndices() {
let transcript = "hey clawd do thing"
let other = "do thing"
let foreignRange = other.range(of: "do")
let segments = [
WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")),
WakeWordSegment(text: "clawd", start: 0.2, duration: 0.1, range: transcript.range(of: "clawd")),
WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange),
WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil),
]
let command = WakeWordGate.commandText(
transcript: transcript,
segments: segments,
triggerEndTime: 0.3)
#expect(command == "do thing")
}
} }
private func makeSegments( private func makeSegments(

View File

@ -30,8 +30,12 @@ cd apps/android
./gradlew :app:assembleDebug ./gradlew :app:assembleDebug
./gradlew :app:installDebug ./gradlew :app:installDebug
./gradlew :app:testDebugUnitTest ./gradlew :app:testDebugUnitTest
cd ../..
bun run android:bundle:release
``` ```
`bun run android:bundle:release` auto-bumps Android `versionName`/`versionCode` in `apps/android/app/build.gradle.kts`, then builds a signed release `.aab`.
## Kotlin Lint + Format ## Kotlin Lint + Format
```bash ```bash

View File

@ -1,5 +1,7 @@
import com.android.build.api.variant.impl.VariantOutputImpl import com.android.build.api.variant.impl.VariantOutputImpl
val dnsjavaInetAddressResolverService = "META-INF/services/java.net.spi.InetAddressResolverProvider"
val androidStoreFile = providers.gradleProperty("OPENCLAW_ANDROID_STORE_FILE").orNull?.takeIf { it.isNotBlank() } val androidStoreFile = providers.gradleProperty("OPENCLAW_ANDROID_STORE_FILE").orNull?.takeIf { it.isNotBlank() }
val androidStorePassword = providers.gradleProperty("OPENCLAW_ANDROID_STORE_PASSWORD").orNull?.takeIf { it.isNotBlank() } val androidStorePassword = providers.gradleProperty("OPENCLAW_ANDROID_STORE_PASSWORD").orNull?.takeIf { it.isNotBlank() }
val androidKeyAlias = providers.gradleProperty("OPENCLAW_ANDROID_KEY_ALIAS").orNull?.takeIf { it.isNotBlank() } val androidKeyAlias = providers.gradleProperty("OPENCLAW_ANDROID_KEY_ALIAS").orNull?.takeIf { it.isNotBlank() }
@ -63,8 +65,8 @@ android {
applicationId = "ai.openclaw.app" applicationId = "ai.openclaw.app"
minSdk = 31 minSdk = 31
targetSdk = 36 targetSdk = 36
versionCode = 202603130 versionCode = 2026031400
versionName = "2026.3.13" versionName = "2026.3.14"
ndk { ndk {
// Support all major ABIs — native libs are tiny (~47 KB per ABI) // Support all major ABIs — native libs are tiny (~47 KB per ABI)
abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64") abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64")
@ -78,6 +80,9 @@ android {
} }
isMinifyEnabled = true isMinifyEnabled = true
isShrinkResources = true isShrinkResources = true
ndk {
debugSymbolLevel = "SYMBOL_TABLE"
}
proguardFiles(getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro") proguardFiles(getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro")
} }
debug { debug {
@ -104,6 +109,10 @@ android {
"/META-INF/LICENSE*.txt", "/META-INF/LICENSE*.txt",
"DebugProbesKt.bin", "DebugProbesKt.bin",
"kotlin-tooling-metadata.json", "kotlin-tooling-metadata.json",
"org/bouncycastle/pqc/crypto/picnic/lowmcL1.bin.properties",
"org/bouncycastle/pqc/crypto/picnic/lowmcL3.bin.properties",
"org/bouncycastle/pqc/crypto/picnic/lowmcL5.bin.properties",
"org/bouncycastle/x509/CertPathReviewerMessages*.properties",
) )
} }
} }
@ -168,7 +177,6 @@ dependencies {
// material-icons-extended pulled in full icon set (~20 MB DEX). Only ~18 icons used. // material-icons-extended pulled in full icon set (~20 MB DEX). Only ~18 icons used.
// R8 will tree-shake unused icons when minify is enabled on release builds. // R8 will tree-shake unused icons when minify is enabled on release builds.
implementation("androidx.compose.material:material-icons-extended") implementation("androidx.compose.material:material-icons-extended")
implementation("androidx.navigation:navigation-compose:2.9.7")
debugImplementation("androidx.compose.ui:ui-tooling") debugImplementation("androidx.compose.ui:ui-tooling")
@ -193,7 +201,6 @@ dependencies {
implementation("androidx.camera:camera-camera2:1.5.2") implementation("androidx.camera:camera-camera2:1.5.2")
implementation("androidx.camera:camera-lifecycle:1.5.2") implementation("androidx.camera:camera-lifecycle:1.5.2")
implementation("androidx.camera:camera-video:1.5.2") implementation("androidx.camera:camera-video:1.5.2")
implementation("androidx.camera:camera-view:1.5.2")
implementation("com.google.android.gms:play-services-code-scanner:16.1.0") implementation("com.google.android.gms:play-services-code-scanner:16.1.0")
// Unicast DNS-SD (Wide-Area Bonjour) for tailnet discovery domains. // Unicast DNS-SD (Wide-Area Bonjour) for tailnet discovery domains.
@ -211,3 +218,45 @@ dependencies {
tasks.withType<Test>().configureEach { tasks.withType<Test>().configureEach {
useJUnitPlatform() useJUnitPlatform()
} }
val stripReleaseDnsjavaServiceDescriptor =
tasks.register("stripReleaseDnsjavaServiceDescriptor") {
val mergedJar =
layout.buildDirectory.file(
"intermediates/merged_java_res/release/mergeReleaseJavaResource/base.jar",
)
inputs.file(mergedJar)
outputs.file(mergedJar)
doLast {
val jarFile = mergedJar.get().asFile
if (!jarFile.exists()) {
return@doLast
}
val unpackDir = temporaryDir.resolve("merged-java-res")
delete(unpackDir)
copy {
from(zipTree(jarFile))
into(unpackDir)
exclude(dnsjavaInetAddressResolverService)
}
delete(jarFile)
ant.invokeMethod(
"zip",
mapOf(
"destfile" to jarFile.absolutePath,
"basedir" to unpackDir.absolutePath,
),
)
}
}
tasks.matching { it.name == "stripReleaseDnsjavaServiceDescriptor" }.configureEach {
dependsOn("mergeReleaseJavaResource")
}
tasks.matching { it.name == "minifyReleaseWithR8" }.configureEach {
dependsOn(stripReleaseDnsjavaServiceDescriptor)
}

View File

@ -1,26 +1,6 @@
# ── App classes ───────────────────────────────────────────────────
-keep class ai.openclaw.app.** { *; }
# ── Bouncy Castle ─────────────────────────────────────────────────
-keep class org.bouncycastle.** { *; }
-dontwarn org.bouncycastle.** -dontwarn org.bouncycastle.**
# ── CameraX ───────────────────────────────────────────────────────
-keep class androidx.camera.** { *; }
# ── kotlinx.serialization ────────────────────────────────────────
-keep class kotlinx.serialization.** { *; }
-keepclassmembers class * {
@kotlinx.serialization.Serializable *;
}
-keepattributes *Annotation*, InnerClasses
# ── OkHttp ────────────────────────────────────────────────────────
-dontwarn okhttp3.** -dontwarn okhttp3.**
-dontwarn okio.** -dontwarn okio.**
-keep class okhttp3.internal.platform.** { *; }
# ── Misc suppressions ────────────────────────────────────────────
-dontwarn com.sun.jna.** -dontwarn com.sun.jna.**
-dontwarn javax.naming.** -dontwarn javax.naming.**
-dontwarn lombok.Generated -dontwarn lombok.Generated

View File

@ -97,7 +97,7 @@ internal fun parseGatewayEndpoint(rawInput: String): GatewayEndpointConfig? {
"wss", "https" -> true "wss", "https" -> true
else -> true else -> true
} }
val port = uri.port.takeIf { it in 1..65535 } ?: 18789 val port = uri.port.takeIf { it in 1..65535 } ?: if (tls) 443 else 18789
val displayUrl = "${if (tls) "https" else "http"}://$host:$port" val displayUrl = "${if (tls) "https" else "http"}://$host:$port"
return GatewayEndpointConfig(host = host, port = port, tls = tls, displayUrl = displayUrl) return GatewayEndpointConfig(host = host, port = port, tls = tls, displayUrl = displayUrl)

View File

@ -92,6 +92,30 @@ class GatewayConfigResolverTest {
assertNull(resolved?.password?.takeIf { it.isNotEmpty() }) assertNull(resolved?.password?.takeIf { it.isNotEmpty() })
} }
@Test
fun resolveGatewayConnectConfigDefaultsPortlessWssSetupCodeTo443() {
val setupCode =
encodeSetupCode("""{"url":"wss://gateway.example","bootstrapToken":"bootstrap-1"}""")
val resolved =
resolveGatewayConnectConfig(
useSetupCode = true,
setupCode = setupCode,
manualHost = "",
manualPort = "",
manualTls = true,
fallbackToken = "shared-token",
fallbackPassword = "shared-password",
)
assertEquals("gateway.example", resolved?.host)
assertEquals(443, resolved?.port)
assertEquals(true, resolved?.tls)
assertEquals("bootstrap-1", resolved?.bootstrapToken)
assertNull(resolved?.token?.takeIf { it.isNotEmpty() })
assertNull(resolved?.password?.takeIf { it.isNotEmpty() })
}
private fun encodeSetupCode(payloadJson: String): String { private fun encodeSetupCode(payloadJson: String): String {
return Base64.getUrlEncoder().withoutPadding().encodeToString(payloadJson.toByteArray(Charsets.UTF_8)) return Base64.getUrlEncoder().withoutPadding().encodeToString(payloadJson.toByteArray(Charsets.UTF_8))
} }

View File

@ -0,0 +1,125 @@
#!/usr/bin/env bun
import { $ } from "bun";
import { dirname, join } from "node:path";
import { fileURLToPath } from "node:url";
// Resolve repo-relative paths from this script's own location so the bump
// works regardless of the directory `bun` is invoked from.
const scriptDir = dirname(fileURLToPath(import.meta.url));
// Android project root — the parent directory of this script's folder.
const androidDir = join(scriptDir, "..");
// Gradle file carrying the versionName/versionCode assignments being bumped.
const buildGradlePath = join(androidDir, "app", "build.gradle.kts");
// Signed release bundle the Gradle build is expected to produce.
const bundlePath = join(androidDir, "app", "build", "outputs", "bundle", "release", "app-release.aab");
type VersionState = {
versionName: string;
versionCode: number;
};
type ParsedVersionMatches = {
versionNameMatch: RegExpMatchArray;
versionCodeMatch: RegExpMatchArray;
};
/**
 * Formats a date as the calendar-style Android versionName, e.g. `2026.3.13`.
 * Month and day are intentionally unpadded.
 */
function formatVersionName(date: Date): string {
  const parts = [date.getFullYear(), date.getMonth() + 1, date.getDate()];
  return parts.join(".");
}
/**
 * Formats a date as the 8-digit `YYYYMMDD` prefix used for versionCode,
 * with month and day zero-padded to two digits.
 */
function formatVersionCodePrefix(date: Date): string {
  const pad2 = (value: number): string => String(value).padStart(2, "0");
  return `${date.getFullYear()}${pad2(date.getMonth() + 1)}${pad2(date.getDate())}`;
}
/**
 * Extracts the current `versionCode = N` and `versionName = "X"` regex
 * matches from the build.gradle.kts text.
 *
 * @throws if either line is missing from the file.
 */
function parseVersionMatches(buildGradleText: string): ParsedVersionMatches {
  const versionNameMatch = buildGradleText.match(/versionName = "([^"]+)"/);
  const versionCodeMatch = buildGradleText.match(/versionCode = (\d+)/);
  if (!versionNameMatch || !versionCodeMatch) {
    throw new Error(`Couldn't parse versionName/versionCode from ${buildGradlePath}`);
  }
  return { versionCodeMatch, versionNameMatch };
}
/**
 * Computes the next versionCode for today's `YYYYMMDD` prefix plus a
 * two-digit build suffix. The suffix restarts at 00 on a new day and
 * increments for repeat builds on the same day; going past 99 is an error.
 */
function resolveNextVersionCode(currentVersionCode: number, todayPrefix: string): number {
  const currentRaw = String(currentVersionCode);
  let nextSuffix = 0;
  if (currentRaw.startsWith(todayPrefix)) {
    // Same-day rebuild: bump the numeric suffix after the date prefix.
    const previousSuffix = currentRaw.slice(todayPrefix.length);
    nextSuffix = (previousSuffix === "" ? 0 : Number.parseInt(previousSuffix, 10)) + 1;
  }
  const suffixValid = Number.isInteger(nextSuffix) && nextSuffix >= 0 && nextSuffix <= 99;
  if (!suffixValid) {
    throw new Error(
      `Can't auto-bump Android versionCode for ${todayPrefix}: next suffix ${nextSuffix} is invalid`,
    );
  }
  return Number.parseInt(todayPrefix + String(nextSuffix).padStart(2, "0"), 10);
}
/**
 * Reads the current versionCode out of the build.gradle.kts text and
 * derives the next date-based versionName/versionCode pair for `date`.
 *
 * @throws if the current versionCode cannot be parsed as an integer.
 */
function resolveNextVersion(buildGradleText: string, date: Date): VersionState {
  const { versionCodeMatch } = parseVersionMatches(buildGradleText);
  const parsedCode = Number.parseInt(versionCodeMatch[1] ?? "", 10);
  if (!Number.isInteger(parsedCode)) {
    throw new Error(`Invalid Android versionCode in ${buildGradlePath}`);
  }
  return {
    versionName: formatVersionName(date),
    versionCode: resolveNextVersionCode(parsedCode, formatVersionCodePrefix(date)),
  };
}
/**
 * Rewrites the first `versionCode = N` and `versionName = "X"` occurrences
 * in the build.gradle.kts text with the values from `nextVersion`.
 */
function updateBuildGradleVersions(buildGradleText: string, nextVersion: VersionState): string {
  const withCode = buildGradleText.replace(
    /versionCode = \d+/,
    `versionCode = ${nextVersion.versionCode}`,
  );
  return withCode.replace(/versionName = "[^"]+"/, `versionName = "${nextVersion.versionName}"`);
}
/**
 * Computes the SHA-256 digest of the file at `path` and returns it as
 * lowercase hex.
 */
async function sha256Hex(path: string): Promise<string> {
  const contents = await Bun.file(path).arrayBuffer();
  const digestBytes = new Uint8Array(await crypto.subtle.digest("SHA-256", contents));
  let hex = "";
  for (const byte of digestBytes) {
    hex += byte.toString(16).padStart(2, "0");
  }
  return hex;
}
/**
 * Verifies the jar signature of the bundle at `path`.
 *
 * NOTE: `jarsigner -verify` exits 0 even for an UNSIGNED jar — it just
 * prints "jar is unsigned" — so a non-zero-exit check alone never fails on
 * an unsigned bundle. We additionally require the "jar verified" success
 * marker in the tool's output.
 *
 * @throws if jarsigner exits non-zero or does not report "jar verified".
 */
async function verifyBundleSignature(path: string): Promise<void> {
  const result = await $`jarsigner -verify ${path}`.quiet();
  const output = result.text();
  if (!output.includes("jar verified")) {
    throw new Error(`Bundle signature verification failed for ${path}: ${output.trim()}`);
  }
}
/**
 * Bumps the Android versionName/versionCode in build.gradle.kts, builds the
 * signed release bundle, verifies its signature, and prints its SHA-256.
 * On a failed Gradle build the version bump is rolled back.
 */
async function main() {
  const buildGradleFile = Bun.file(buildGradlePath);
  const originalText = await buildGradleFile.text();
  const nextVersion = resolveNextVersion(originalText, new Date());
  const updatedText = updateBuildGradleVersions(originalText, nextVersion);
  if (updatedText === originalText) {
    // Regexes matched nothing (or produced identical text) — bail rather
    // than build with an unchanged version.
    throw new Error("Android version bump produced no change");
  }
  console.log(`Android versionName -> ${nextVersion.versionName}`);
  console.log(`Android versionCode -> ${nextVersion.versionCode}`);
  await Bun.write(buildGradlePath, updatedText);
  try {
    await $`./gradlew :app:bundleRelease`.cwd(androidDir);
  } catch (error) {
    // Restore the original build file so a failed build doesn't leave a
    // half-applied version bump behind, then surface the build failure.
    await Bun.write(buildGradlePath, originalText);
    throw error;
  }
  const bundleFile = Bun.file(bundlePath);
  if (!(await bundleFile.exists())) {
    throw new Error(`Signed bundle missing at ${bundlePath}`);
  }
  await verifyBundleSignature(bundlePath);
  const hash = await sha256Hex(bundlePath);
  console.log(`Signed AAB: ${bundlePath}`);
  console.log(`SHA-256: ${hash}`);
}

await main();

View File

@ -45,8 +45,8 @@ enum ExecApprovalEvaluator {
let skillAllow: Bool let skillAllow: Bool
if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty { if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty {
let bins = await SkillBinsCache.shared.currentBins() let bins = await SkillBinsCache.shared.currentTrust()
skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) } skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins)
} else { } else {
skillAllow = false skillAllow = false
} }
@ -65,4 +65,26 @@ enum ExecApprovalEvaluator {
allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil, allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil,
skillAllow: skillAllow) skillAllow: skillAllow)
} }
static func isSkillAutoAllowed(
_ resolutions: [ExecCommandResolution],
trustedBinsByName: [String: Set<String>]) -> Bool
{
guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false }
return resolutions.allSatisfy { resolution in
guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName),
let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath)
else {
return false
}
return trustedBinsByName[executableName]?.contains(resolvedPath) == true
}
}
static func _testIsSkillAutoAllowed(
_ resolutions: [ExecCommandResolution],
trustedBinsByName: [String: Set<String>]) -> Bool
{
self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName)
}
} }

View File

@ -370,6 +370,17 @@ enum ExecApprovalsStore {
static func resolve(agentId: String?) -> ExecApprovalsResolved { static func resolve(agentId: String?) -> ExecApprovalsResolved {
let file = self.ensureFile() let file = self.ensureFile()
return self.resolveFromFile(file, agentId: agentId)
}
/// Read-only resolve: loads file without writing (no ensureFile side effects).
/// Safe to call from background threads / off MainActor.
static func resolveReadOnly(agentId: String?) -> ExecApprovalsResolved {
let file = self.loadFile()
return self.resolveFromFile(file, agentId: agentId)
}
private static func resolveFromFile(_ file: ExecApprovalsFile, agentId: String?) -> ExecApprovalsResolved {
let defaults = file.defaults ?? ExecApprovalsDefaults() let defaults = file.defaults ?? ExecApprovalsDefaults()
let resolvedDefaults = ExecApprovalsResolvedDefaults( let resolvedDefaults = ExecApprovalsResolvedDefaults(
security: defaults.security ?? self.defaultSecurity, security: defaults.security ?? self.defaultSecurity,
@ -777,6 +788,7 @@ actor SkillBinsCache {
static let shared = SkillBinsCache() static let shared = SkillBinsCache()
private var bins: Set<String> = [] private var bins: Set<String> = []
private var trustByName: [String: Set<String>] = [:]
private var lastRefresh: Date? private var lastRefresh: Date?
private let refreshInterval: TimeInterval = 90 private let refreshInterval: TimeInterval = 90
@ -787,27 +799,90 @@ actor SkillBinsCache {
return self.bins return self.bins
} }
func currentTrust(force: Bool = false) async -> [String: Set<String>] {
if force || self.isStale() {
await self.refresh()
}
return self.trustByName
}
func refresh() async { func refresh() async {
do { do {
let report = try await GatewayConnection.shared.skillsStatus() let report = try await GatewayConnection.shared.skillsStatus()
var next = Set<String>() let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths())
for skill in report.skills { self.bins = trust.names
for bin in skill.requirements.bins { self.trustByName = trust.pathsByName
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
if !trimmed.isEmpty { next.insert(trimmed) }
}
}
self.bins = next
self.lastRefresh = Date() self.lastRefresh = Date()
} catch { } catch {
if self.lastRefresh == nil { if self.lastRefresh == nil {
self.bins = [] self.bins = []
self.trustByName = [:]
} }
} }
} }
static func normalizeSkillBinName(_ value: String) -> String? {
let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
return trimmed.isEmpty ? nil : trimmed
}
static func normalizeResolvedPath(_ value: String?) -> String? {
let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
guard !trimmed.isEmpty else { return nil }
return URL(fileURLWithPath: trimmed).standardizedFileURL.path
}
static func buildTrustIndex(
report: SkillsStatusReport,
searchPaths: [String]) -> SkillBinTrustIndex
{
var names = Set<String>()
var pathsByName: [String: Set<String>] = [:]
for skill in report.skills {
for bin in skill.requirements.bins {
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { continue }
names.insert(trimmed)
guard let name = self.normalizeSkillBinName(trimmed),
let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths),
let normalizedPath = self.normalizeResolvedPath(resolvedPath)
else {
continue
}
var paths = pathsByName[name] ?? Set<String>()
paths.insert(normalizedPath)
pathsByName[name] = paths
}
}
return SkillBinTrustIndex(names: names, pathsByName: pathsByName)
}
private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? {
let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin
if expanded.contains("/") || expanded.contains("\\") {
return FileManager().isExecutableFile(atPath: expanded) ? expanded : nil
}
return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths)
}
private func isStale() -> Bool { private func isStale() -> Bool {
guard let lastRefresh else { return true } guard let lastRefresh else { return true }
return Date().timeIntervalSince(lastRefresh) > self.refreshInterval return Date().timeIntervalSince(lastRefresh) > self.refreshInterval
} }
static func _testBuildTrustIndex(
report: SkillsStatusReport,
searchPaths: [String]) -> SkillBinTrustIndex
{
self.buildTrustIndex(report: report, searchPaths: searchPaths)
}
}
struct SkillBinTrustIndex {
let names: Set<String>
let pathsByName: [String: Set<String>]
} }

View File

@ -43,7 +43,33 @@ final class ExecApprovalsGatewayPrompter {
do { do {
let data = try JSONEncoder().encode(payload) let data = try JSONEncoder().encode(payload)
let request = try JSONDecoder().decode(GatewayApprovalRequest.self, from: data) let request = try JSONDecoder().decode(GatewayApprovalRequest.self, from: data)
guard self.shouldPresent(request: request) else { return } let presentation = self.shouldPresent(request: request)
guard presentation.shouldAsk else {
// Ask policy says no prompt needed resolve based on security policy
let decision: ExecApprovalDecision = presentation.security == .full ? .allowOnce : .deny
try await GatewayConnection.shared.requestVoid(
method: .execApprovalResolve,
params: [
"id": AnyCodable(request.id),
"decision": AnyCodable(decision.rawValue),
],
timeoutMs: 10000)
return
}
guard presentation.canPresent else {
let decision = Self.fallbackDecision(
request: request.request,
askFallback: presentation.askFallback,
allowlist: presentation.allowlist)
try await GatewayConnection.shared.requestVoid(
method: .execApprovalResolve,
params: [
"id": AnyCodable(request.id),
"decision": AnyCodable(decision.rawValue),
],
timeoutMs: 10000)
return
}
let decision = ExecApprovalsPromptPresenter.prompt(request.request) let decision = ExecApprovalsPromptPresenter.prompt(request.request)
try await GatewayConnection.shared.requestVoid( try await GatewayConnection.shared.requestVoid(
method: .execApprovalResolve, method: .execApprovalResolve,
@ -57,16 +83,89 @@ final class ExecApprovalsGatewayPrompter {
} }
} }
private func shouldPresent(request: GatewayApprovalRequest) -> Bool { /// Whether the ask policy requires prompting the user.
/// Note: this only determines if a prompt is shown, not whether the action is allowed.
/// The security policy (full/deny/allowlist) decides the actual outcome.
private static func shouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool {
switch ask {
case .always:
return true
case .onMiss:
return security == .allowlist
case .off:
return false
}
}
struct PresentationDecision {
/// Whether the ask policy requires prompting the user (not whether the action is allowed).
var shouldAsk: Bool
/// Whether the prompt can actually be shown (session match, recent activity, etc.).
var canPresent: Bool
/// The resolved security policy, used to determine allow/deny when no prompt is shown.
var security: ExecSecurity
/// Fallback security policy when a prompt is needed but can't be presented.
var askFallback: ExecSecurity
var allowlist: [ExecAllowlistEntry]
}
private func shouldPresent(request: GatewayApprovalRequest) -> PresentationDecision {
let mode = AppStateStore.shared.connectionMode let mode = AppStateStore.shared.connectionMode
let activeSession = WebChatManager.shared.activeSessionKey?.trimmingCharacters(in: .whitespacesAndNewlines) let activeSession = WebChatManager.shared.activeSessionKey?.trimmingCharacters(in: .whitespacesAndNewlines)
let requestSession = request.request.sessionKey?.trimmingCharacters(in: .whitespacesAndNewlines) let requestSession = request.request.sessionKey?.trimmingCharacters(in: .whitespacesAndNewlines)
return Self.shouldPresent(
// Read-only resolve to avoid disk writes on the MainActor
let approvals = ExecApprovalsStore.resolveReadOnly(agentId: request.request.agentId)
let security = approvals.agent.security
let ask = approvals.agent.ask
let shouldAsk = Self.shouldAsk(security: security, ask: ask)
let canPresent = shouldAsk && Self.shouldPresent(
mode: mode, mode: mode,
activeSession: activeSession, activeSession: activeSession,
requestSession: requestSession, requestSession: requestSession,
lastInputSeconds: Self.lastInputSeconds(), lastInputSeconds: Self.lastInputSeconds(),
thresholdSeconds: 120) thresholdSeconds: 120)
return PresentationDecision(
shouldAsk: shouldAsk,
canPresent: canPresent,
security: security,
askFallback: approvals.agent.askFallback,
allowlist: approvals.allowlist)
}
private static func fallbackDecision(
request: ExecApprovalPromptRequest,
askFallback: ExecSecurity,
allowlist: [ExecAllowlistEntry]) -> ExecApprovalDecision
{
guard askFallback == .allowlist else {
return askFallback == .full ? .allowOnce : .deny
}
let resolution = self.fallbackResolution(for: request)
let match = ExecAllowlistMatcher.match(entries: allowlist, resolution: resolution)
return match == nil ? .deny : .allowOnce
}
private static func fallbackResolution(for request: ExecApprovalPromptRequest) -> ExecCommandResolution? {
let resolvedPath = request.resolvedPath?.trimmingCharacters(in: .whitespacesAndNewlines)
let trimmedResolvedPath = (resolvedPath?.isEmpty == false) ? resolvedPath : nil
let rawExecutable = self.firstToken(from: request.command) ?? trimmedResolvedPath ?? ""
guard !rawExecutable.isEmpty || trimmedResolvedPath != nil else { return nil }
let executableName = trimmedResolvedPath.map { URL(fileURLWithPath: $0).lastPathComponent } ?? rawExecutable
return ExecCommandResolution(
rawExecutable: rawExecutable,
resolvedPath: trimmedResolvedPath,
executableName: executableName,
cwd: request.cwd)
}
private static func firstToken(from command: String) -> String? {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil }
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
} }
private static func shouldPresent( private static func shouldPresent(
@ -117,5 +216,29 @@ extension ExecApprovalsGatewayPrompter {
lastInputSeconds: lastInputSeconds, lastInputSeconds: lastInputSeconds,
thresholdSeconds: thresholdSeconds) thresholdSeconds: thresholdSeconds)
} }
static func _testShouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool {
self.shouldAsk(security: security, ask: ask)
}
static func _testFallbackDecision(
command: String,
resolvedPath: String?,
askFallback: ExecSecurity,
allowlistPatterns: [String]) -> ExecApprovalDecision
{
self.fallbackDecision(
request: ExecApprovalPromptRequest(
command: command,
cwd: nil,
host: nil,
security: nil,
ask: nil,
agentId: nil,
resolvedPath: resolvedPath,
sessionKey: nil),
askFallback: askFallback,
allowlist: allowlistPatterns.map { ExecAllowlistEntry(pattern: $0) })
}
} }
#endif #endif

View File

@ -37,8 +37,7 @@ struct ExecCommandResolution {
var resolutions: [ExecCommandResolution] = [] var resolutions: [ExecCommandResolution] = []
resolutions.reserveCapacity(segments.count) resolutions.reserveCapacity(segments.count)
for segment in segments { for segment in segments {
guard let token = self.parseFirstToken(segment), guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env)
let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env)
else { else {
return [] return []
} }
@ -88,6 +87,20 @@ struct ExecCommandResolution {
cwd: cwd) cwd: cwd)
} }
private static func resolveShellSegmentExecutable(
_ segment: String,
cwd: String?,
env: [String: String]?) -> ExecCommandResolution?
{
let tokens = self.tokenizeShellWords(segment)
guard !tokens.isEmpty else { return nil }
let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens)
guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else {
return nil
}
return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env)
}
private static func parseFirstToken(_ command: String) -> String? { private static func parseFirstToken(_ command: String) -> String? {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil } guard !trimmed.isEmpty else { return nil }
@ -102,6 +115,59 @@ struct ExecCommandResolution {
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init) return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
} }
private static func tokenizeShellWords(_ command: String) -> [String] {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return [] }
var tokens: [String] = []
var current = ""
var inSingle = false
var inDouble = false
var escaped = false
func appendCurrent() {
guard !current.isEmpty else { return }
tokens.append(current)
current.removeAll(keepingCapacity: true)
}
for ch in trimmed {
if escaped {
current.append(ch)
escaped = false
continue
}
if ch == "\\", !inSingle {
escaped = true
continue
}
if ch == "'", !inDouble {
inSingle.toggle()
continue
}
if ch == "\"", !inSingle {
inDouble.toggle()
continue
}
if ch.isWhitespace, !inSingle, !inDouble {
appendCurrent()
continue
}
current.append(ch)
}
if escaped {
current.append("\\")
}
appendCurrent()
return tokens
}
private enum ShellTokenContext { private enum ShellTokenContext {
case unquoted case unquoted
case doubleQuoted case doubleQuoted
@ -148,8 +214,14 @@ struct ExecCommandResolution {
while idx < chars.count { while idx < chars.count {
let ch = chars[idx] let ch = chars[idx]
let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil
let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle)
if escaped { if escaped {
if ch == "\n" {
escaped = false
idx += 1
continue
}
current.append(ch) current.append(ch)
escaped = false escaped = false
idx += 1 idx += 1
@ -157,6 +229,10 @@ struct ExecCommandResolution {
} }
if ch == "\\", !inSingle { if ch == "\\", !inSingle {
if next == "\n" {
idx += 2
continue
}
current.append(ch) current.append(ch)
escaped = true escaped = true
idx += 1 idx += 1
@ -177,7 +253,7 @@ struct ExecCommandResolution {
continue continue
} }
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) { if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) {
// Fail closed on command/process substitution in allowlist mode, // Fail closed on command/process substitution in allowlist mode,
// including command substitution inside double-quoted shell strings. // including command substitution inside double-quoted shell strings.
return nil return nil
@ -201,6 +277,25 @@ struct ExecCommandResolution {
return segments return segments
} }
private static func nextShellSignificantCharacter(
chars: [Character],
after idx: Int,
inSingle: Bool) -> Character?
{
guard !inSingle else {
return idx + 1 < chars.count ? chars[idx + 1] : nil
}
var cursor = idx + 1
while cursor < chars.count {
if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" {
cursor += 2
continue
}
return chars[cursor]
}
return nil
}
private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool { private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool {
let context: ShellTokenContext = inDouble ? .doubleQuoted : .unquoted let context: ShellTokenContext = inDouble ? .doubleQuoted : .unquoted
guard let rules = self.shellFailClosedRules[context] else { guard let rules = self.shellFailClosedRules[context] else {

View File

@ -47,7 +47,7 @@ actor PortGuardian {
let listeners = await self.listeners(on: port) let listeners = await self.listeners(on: port)
guard !listeners.isEmpty else { continue } guard !listeners.isEmpty else { continue }
for listener in listeners { for listener in listeners {
if self.isExpected(listener, port: port, mode: mode) { if Self.isExpected(listener, port: port, mode: mode) {
let message = """ let message = """
port \(port) already served by expected \(listener.command) port \(port) already served by expected \(listener.command)
(pid \(listener.pid)) keeping (pid \(listener.pid)) keeping
@ -55,6 +55,14 @@ actor PortGuardian {
self.logger.info("\(message, privacy: .public)") self.logger.info("\(message, privacy: .public)")
continue continue
} }
if mode == .remote {
let message = """
port \(port) held by \(listener.command)
(pid \(listener.pid)) in remote mode not killing
"""
self.logger.warning(message)
continue
}
let killed = await self.kill(listener.pid) let killed = await self.kill(listener.pid)
if killed { if killed {
let message = """ let message = """
@ -271,8 +279,8 @@ actor PortGuardian {
switch mode { switch mode {
case .remote: case .remote:
expectedDesc = "SSH tunnel to remote gateway" expectedDesc = "Remote gateway (SSH tunnel, Docker, or direct)"
okPredicate = { $0.command.lowercased().contains("ssh") } okPredicate = { _ in true }
case .local: case .local:
expectedDesc = "Gateway websocket (node/tsx)" expectedDesc = "Gateway websocket (node/tsx)"
okPredicate = { listener in okPredicate = { listener in
@ -352,13 +360,12 @@ actor PortGuardian {
return sigkill.ok return sigkill.ok
} }
private func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool { private static func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool {
let cmd = listener.command.lowercased() let cmd = listener.command.lowercased()
let full = listener.fullCommand.lowercased() let full = listener.fullCommand.lowercased()
switch mode { switch mode {
case .remote: case .remote:
// Remote mode expects an SSH tunnel for the gateway WebSocket port. if port == GatewayEnvironment.gatewayPort() { return true }
if port == GatewayEnvironment.gatewayPort() { return cmd.contains("ssh") }
return false return false
case .local: case .local:
// The gateway daemon may listen as `openclaw` or as its runtime (`node`, `bun`, etc). // The gateway daemon may listen as `openclaw` or as its runtime (`node`, `bun`, etc).
@ -406,6 +413,16 @@ extension PortGuardian {
self.parseListeners(from: text).map { ($0.pid, $0.command, $0.fullCommand, $0.user) } self.parseListeners(from: text).map { ($0.pid, $0.command, $0.fullCommand, $0.user) }
} }
static func _testIsExpected(
command: String,
fullCommand: String,
port: Int,
mode: AppState.ConnectionMode) -> Bool
{
let listener = Listener(pid: 0, command: command, fullCommand: fullCommand, user: nil)
return Self.isExpected(listener, port: port, mode: mode)
}
static func _testBuildReport( static func _testBuildReport(
port: Int, port: Int,
mode: AppState.ConnectionMode, mode: AppState.ConnectionMode,

View File

@ -54,7 +54,7 @@ enum RuntimeResolutionError: Error {
enum RuntimeLocator { enum RuntimeLocator {
private static let logger = Logger(subsystem: "ai.openclaw", category: "runtime") private static let logger = Logger(subsystem: "ai.openclaw", category: "runtime")
private static let minNode = RuntimeVersion(major: 22, minor: 0, patch: 0) private static let minNode = RuntimeVersion(major: 22, minor: 16, patch: 0)
static func resolve( static func resolve(
searchPaths: [String] = CommandResolver.preferredPaths()) -> Result<RuntimeResolution, RuntimeResolutionError> searchPaths: [String] = CommandResolver.preferredPaths()) -> Result<RuntimeResolution, RuntimeResolutionError>
@ -91,7 +91,7 @@ enum RuntimeLocator {
switch error { switch error {
case let .notFound(searchPaths): case let .notFound(searchPaths):
[ [
"openclaw needs Node >=22.0.0 but found no runtime.", "openclaw needs Node >=22.16.0 but found no runtime.",
"PATH searched: \(searchPaths.joined(separator: ":"))", "PATH searched: \(searchPaths.joined(separator: ":"))",
"Install Node: https://nodejs.org/en/download", "Install Node: https://nodejs.org/en/download",
].joined(separator: "\n") ].joined(separator: "\n")
@ -105,7 +105,7 @@ enum RuntimeLocator {
[ [
"Could not parse \(kind.rawValue) version output \"\(raw)\" from \(path).", "Could not parse \(kind.rawValue) version output \"\(raw)\" from \(path).",
"PATH searched: \(searchPaths.joined(separator: ":"))", "PATH searched: \(searchPaths.joined(separator: ":"))",
"Try reinstalling or pinning a supported version (Node >=22.0.0).", "Try reinstalling or pinning a supported version (Node >=22.16.0).",
].joined(separator: "\n") ].joined(separator: "\n")
} }
} }

View File

@ -141,6 +141,26 @@ struct ExecAllowlistTests {
#expect(resolutions.isEmpty) #expect(resolutions.isEmpty)
} }
@Test func `resolve for allowlist fails closed on line-continued command substitution`() {
let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on chained line-continued command substitution`() {
let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.isEmpty)
}
@Test func `resolve for allowlist fails closed on quoted backticks`() { @Test func `resolve for allowlist fails closed on quoted backticks`() {
let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""] let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""]
let resolutions = ExecCommandResolution.resolveForAllowlist( let resolutions = ExecCommandResolution.resolveForAllowlist(
@ -208,6 +228,30 @@ struct ExecAllowlistTests {
#expect(resolutions[1].executableName == "touch") #expect(resolutions[1].executableName == "touch")
} }
@Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() {
let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "env /usr/bin/touch /tmp/openclaw-allowlist-test",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.count == 1)
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
#expect(resolutions[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env assignments inside shell segments`() {
let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"]
let resolutions = ExecCommandResolution.resolveForAllowlist(
command: command,
rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test",
cwd: nil,
env: ["PATH": "/usr/bin:/bin"])
#expect(resolutions.count == 1)
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
#expect(resolutions[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env to effective direct executable`() { @Test func `resolve for allowlist unwraps env to effective direct executable`() {
let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"] let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"]
let resolutions = ExecCommandResolution.resolveForAllowlist( let resolutions = ExecCommandResolution.resolveForAllowlist(

View File

@ -52,4 +52,51 @@ struct ExecApprovalsGatewayPrompterTests {
lastInputSeconds: 400) lastInputSeconds: 400)
#expect(!remote) #expect(!remote)
} }
    // MARK: - shouldAsk

    // ask == .always prompts no matter what the security policy is.
    @Test func askAlwaysPromptsRegardlessOfSecurity() {
        #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .always))
        #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .always))
        #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .always))
    }

    // ask == .onMiss only prompts when the security policy is allowlist;
    // full/deny already decide the outcome without user input.
    @Test func askOnMissPromptsOnlyForAllowlist() {
        #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .onMiss))
        #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .onMiss))
        #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .onMiss))
    }

    // ask == .off suppresses the prompt for every security policy.
    @Test func askOffNeverPrompts() {
        #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .off))
        #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .off))
        #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .off))
    }

    // MARK: - fallbackDecision (prompt needed but cannot be presented)

    // allowlist fallback allows when the resolved path matches an entry.
    @Test func fallbackAllowlistAllowsMatchingResolvedPath() {
        let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
            command: "git status",
            resolvedPath: "/usr/bin/git",
            askFallback: .allowlist,
            allowlistPatterns: ["/usr/bin/git"])
        #expect(decision == .allowOnce)
    }

    // allowlist fallback denies on an allowlist miss.
    @Test func fallbackAllowlistDeniesAllowlistMiss() {
        let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
            command: "git status",
            resolvedPath: "/usr/bin/git",
            askFallback: .allowlist,
            allowlistPatterns: ["/usr/bin/rg"])
        #expect(decision == .deny)
    }

    // askFallback == .full allows once when the prompt can't be shown.
    @Test func fallbackFullAllowsWhenPromptCannotBeShown() {
        let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
            command: "git status",
            resolvedPath: "/usr/bin/git",
            askFallback: .full,
            allowlistPatterns: [])
        #expect(decision == .allowOnce)
    }
} }

View File

@ -0,0 +1,90 @@
import Foundation
import Testing
@testable import OpenClaw
/// Tests for the skill-bin trust index: auto-allow must match on the
/// *resolved executable path*, not just the basename, so a lookalike
/// binary at a different location is rejected.
struct ExecSkillBinTrustTests {
    // Building the index from a skills report records both the bin name
    // and its resolved on-disk path for the given search paths.
    @Test func `build trust index resolves skill bin paths`() throws {
        let fixture = try Self.makeExecutable(named: "jq")
        defer { try? FileManager.default.removeItem(at: fixture.root) }
        let trust = SkillBinsCache._testBuildTrustIndex(
            report: Self.makeReport(bins: ["jq"]),
            searchPaths: [fixture.root.path])
        #expect(trust.names == ["jq"])
        #expect(trust.pathsByName["jq"] == [fixture.path])
    }

    // A resolution whose resolved path matches the trusted path is allowed.
    @Test func `skill auto allow accepts trusted resolved skill bin path`() throws {
        let fixture = try Self.makeExecutable(named: "jq")
        defer { try? FileManager.default.removeItem(at: fixture.root) }
        let trust = SkillBinsCache._testBuildTrustIndex(
            report: Self.makeReport(bins: ["jq"]),
            searchPaths: [fixture.root.path])
        let resolution = ExecCommandResolution(
            rawExecutable: "jq",
            resolvedPath: fixture.path,
            executableName: "jq",
            cwd: nil)
        #expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
    }

    // Same basename at an untrusted path must NOT be auto-allowed — this is
    // the lookalike-binary case the path-keyed index exists to prevent.
    @Test func `skill auto allow rejects same basename at different path`() throws {
        let trusted = try Self.makeExecutable(named: "jq")
        let untrusted = try Self.makeExecutable(named: "jq")
        defer {
            try? FileManager.default.removeItem(at: trusted.root)
            try? FileManager.default.removeItem(at: untrusted.root)
        }
        let trust = SkillBinsCache._testBuildTrustIndex(
            report: Self.makeReport(bins: ["jq"]),
            searchPaths: [trusted.root.path])
        let resolution = ExecCommandResolution(
            rawExecutable: "jq",
            resolvedPath: untrusted.path,
            executableName: "jq",
            cwd: nil)
        #expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
    }

    // Writes a tiny executable shell script into a unique temp directory and
    // returns both the directory (for cleanup) and the script's path.
    private static func makeExecutable(named name: String) throws -> (root: URL, path: String) {
        let root = FileManager.default.temporaryDirectory
            .appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true)
        try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true)
        let file = root.appendingPathComponent(name)
        try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8)
        try FileManager.default.setAttributes(
            [.posixPermissions: NSNumber(value: Int16(0o755))],
            ofItemAtPath: file.path)
        return (root, file.path)
    }

    // Minimal skills-status fixture with a single skill requiring `bins`.
    private static func makeReport(bins: [String]) -> SkillsStatusReport {
        SkillsStatusReport(
            workspaceDir: "/tmp/workspace",
            managedSkillsDir: "/tmp/skills",
            skills: [
                SkillStatus(
                    name: "test-skill",
                    description: "test",
                    source: "local",
                    filePath: "/tmp/skills/test-skill/SKILL.md",
                    baseDir: "/tmp/skills/test-skill",
                    skillKey: "test-skill",
                    primaryEnv: nil,
                    emoji: nil,
                    homepage: nil,
                    always: false,
                    disabled: false,
                    eligible: true,
                    requirements: SkillRequirements(bins: bins, env: [], config: []),
                    missing: SkillMissing(bins: [], env: [], config: []),
                    configChecks: [],
                    install: [])
            ])
    }
}

View File

@ -139,6 +139,54 @@ struct LowCoverageHelperTests {
#expect(emptyReport.summary.contains("Nothing is listening")) #expect(emptyReport.summary.contains("Nothing is listening"))
} }
@Test func `port guardian remote mode does not kill docker`() {
    // In remote mode, container backends and SSH tunnels legitimately hold
    // the gateway port, so every one of these listeners counts as expected.
    let listeners: [(command: String, fullCommand: String)] = [
        ("com.docker.backend", "com.docker.backend"),
        ("ssh", "ssh -L 18789:localhost:18789 user@host"),
        ("podman", "podman"),
    ]
    for listener in listeners {
        #expect(PortGuardian._testIsExpected(
            command: listener.command,
            fullCommand: listener.fullCommand,
            port: 18789, mode: .remote) == true)
    }
}
@Test func `port guardian local mode still rejects unexpected`() {
    // Local mode only trusts the gateway's own node process; container
    // backends and arbitrary servers on the port remain unexpected.
    #expect(!PortGuardian._testIsExpected(
        command: "com.docker.backend",
        fullCommand: "com.docker.backend",
        port: 18789, mode: .local))
    #expect(!PortGuardian._testIsExpected(
        command: "python",
        fullCommand: "python server.py",
        port: 18789, mode: .local))
    #expect(PortGuardian._testIsExpected(
        command: "node",
        fullCommand: "node /path/to/gateway-daemon",
        port: 18789, mode: .local))
}
@Test func `port guardian remote mode report accepts any listener`() {
    // The identical docker listener is tolerated when the mode is remote
    // but flagged as an offender when the mode is local.
    let remote = PortGuardian._testBuildReport(
        port: 18789, mode: .remote,
        listeners: [(pid: 99, command: "com.docker.backend",
                     fullCommand: "com.docker.backend", user: "me")])
    #expect(remote.offenders.isEmpty)

    let local = PortGuardian._testBuildReport(
        port: 18789, mode: .local,
        listeners: [(pid: 99, command: "com.docker.backend",
                     fullCommand: "com.docker.backend", user: "me")])
    #expect(local.offenders.isEmpty == false)
}
@Test @MainActor func `canvas scheme handler resolves files and errors`() throws { @Test @MainActor func `canvas scheme handler resolves files and errors`() throws {
let root = FileManager().temporaryDirectory let root = FileManager().temporaryDirectory
.appendingPathComponent("canvas-\(UUID().uuidString)", isDirectory: true) .appendingPathComponent("canvas-\(UUID().uuidString)", isDirectory: true)

View File

@ -16,7 +16,7 @@ struct RuntimeLocatorTests {
@Test func `resolve succeeds with valid node`() throws { @Test func `resolve succeeds with valid node`() throws {
let script = """ let script = """
#!/bin/sh #!/bin/sh
echo v22.5.0 echo v22.16.0
""" """
let node = try self.makeTempExecutable(contents: script) let node = try self.makeTempExecutable(contents: script)
let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path]) let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path])
@ -25,7 +25,23 @@ struct RuntimeLocatorTests {
return return
} }
#expect(res.path == node.path) #expect(res.path == node.path)
#expect(res.version == RuntimeVersion(major: 22, minor: 5, patch: 0)) #expect(res.version == RuntimeVersion(major: 22, minor: 16, patch: 0))
}
@Test func `resolve fails on boundary below minimum`() throws {
    // v22.15.9 sits one patch below the required floor, so resolution must
    // fail as unsupported and report found/required versions plus the path.
    let script = """
    #!/bin/sh
    echo v22.15.9
    """
    let node = try self.makeTempExecutable(contents: script)
    let outcome = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path])
    guard case let .failure(.unsupported(_, found, required, path, _)) = outcome else {
        Issue.record("Expected unsupported error, got \(outcome)")
        return
    }
    #expect(found == RuntimeVersion(major: 22, minor: 15, patch: 9))
    #expect(required == RuntimeVersion(major: 22, minor: 16, patch: 0))
    #expect(path == node.path)
}
} }
@Test func `resolve fails when too old`() throws { @Test func `resolve fails when too old`() throws {
@ -60,7 +76,17 @@ struct RuntimeLocatorTests {
@Test func `describe failure includes paths`() { @Test func `describe failure includes paths`() {
let msg = RuntimeLocator.describeFailure(.notFound(searchPaths: ["/tmp/a", "/tmp/b"])) let msg = RuntimeLocator.describeFailure(.notFound(searchPaths: ["/tmp/a", "/tmp/b"]))
#expect(msg.contains("Node >=22.16.0"))
#expect(msg.contains("PATH searched: /tmp/a:/tmp/b")) #expect(msg.contains("PATH searched: /tmp/a:/tmp/b"))
let parseMsg = RuntimeLocator.describeFailure(
.versionParse(
kind: .node,
raw: "garbage",
path: "/usr/local/bin/node",
searchPaths: ["/usr/local/bin"],
))
#expect(parseMsg.contains("Node >=22.16.0"))
} }
@Test func `runtime version parses with leading V and metadata`() { @Test func `runtime version parses with leading V and metadata`() {

View File

@ -74,4 +74,22 @@ struct VoiceWakeRuntimeTests {
let config = WakeWordGateConfig(triggers: ["openclaw"], minPostTriggerGap: 0.3) let config = WakeWordGateConfig(triggers: ["openclaw"], minPostTriggerGap: 0.3)
#expect(WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command == "do thing") #expect(WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command == "do thing")
} }
@Test func `gate command text handles foreign string ranges`() {
    // A segment range borrowed from a different String instance (plus a nil
    // range) must not trip up commandText; the post-trigger words should
    // still be recovered from the transcript.
    let transcript = "hey openclaw do thing"
    let unrelated = "do thing"
    let segments = [
        WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")),
        WakeWordSegment(text: "openclaw", start: 0.2, duration: 0.1, range: transcript.range(of: "openclaw")),
        WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: unrelated.range(of: "do")),
        WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil),
    ]
    let command = WakeWordGate.commandText(
        transcript: transcript,
        segments: segments,
        triggerEndTime: 0.3)
    #expect(command == "do thing")
}
} }

View File

@ -73,7 +73,7 @@ await web_search({
## Notes ## Notes
- OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits. - OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits.
- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. - Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
- The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service). - The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service).
- Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`). - Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`).

View File

@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. For wr
- if encoded option values exceed Slack limits, the flow falls back to buttons - if encoded option values exceed Slack limits, the flow falls back to buttons
- For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value. - For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value.
## Interactive replies
Slack can render agent-authored interactive reply controls, but this feature is disabled by default.
Enable it globally:
```json5
{
channels: {
slack: {
capabilities: {
interactiveReplies: true,
},
},
},
}
```
Or enable it for one Slack account only:
```json5
{
channels: {
slack: {
accounts: {
ops: {
capabilities: {
interactiveReplies: true,
},
},
},
},
},
}
```
When enabled, agents can emit Slack-only reply directives:
- `[[slack_buttons: Approve:approve, Reject:reject]]`
- `[[slack_select: Choose a target | Canary:canary, Production:production]]`
These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path.
Notes:
- This is Slack-specific UI. Other channels do not translate Slack Block Kit directives into their own button systems.
- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values.
- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload.
Default slash command settings: Default slash command settings:
- `enabled: false` - `enabled: false`

View File

@ -9,32 +9,32 @@ read_when:
# CI Pipeline # CI Pipeline
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed. The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed.
## Job Overview ## Job Overview
| Job | Purpose | When it runs | | Job | Purpose | When it runs |
| ----------------- | ------------------------------------------------------- | ------------------------------------------------- | | ----------------- | ------------------------------------------------------- | ---------------------------------- |
| `docs-scope` | Detect docs-only changes | Always | | `docs-scope` | Detect docs-only changes | Always |
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs | | `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes | | `check` | TypeScript types, lint, format | Non-docs, node changes |
| `check-docs` | Markdown lint + broken link check | Docs changed | | `check-docs` | Markdown lint + broken link check | Docs changed |
| `code-analysis` | LOC threshold check (1000 lines) | PRs only | | `secrets` | Detect leaked secrets | Always |
| `secrets` | Detect leaked secrets | Always | | `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes |
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes | | `release-check` | Validate npm pack contents | Pushes to `main` after build |
| `release-check` | Validate npm pack contents | After build | | `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes |
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes | | `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | | `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | | `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes | | `android` | Gradle build + tests | Non-docs, android changes |
## Fail-Fast Order ## Fail-Fast Order
Jobs are ordered so cheap checks fail before expensive ones run: Jobs are ordered so cheap checks fail before expensive ones run:
1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min) 1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
2. `build-artifacts` (blocked on above) 2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
3. `checks`, `checks-windows`, `macos`, `android` (blocked on build) 3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22`
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`. Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.

View File

@ -27,7 +27,7 @@ Related:
## Quick start (local) ## Quick start (local)
```bash ```bash
openclaw browser --browser-profile chrome tabs openclaw browser profiles
openclaw browser --browser-profile openclaw start openclaw browser --browser-profile openclaw start
openclaw browser --browser-profile openclaw open https://example.com openclaw browser --browser-profile openclaw open https://example.com
openclaw browser --browser-profile openclaw snapshot openclaw browser --browser-profile openclaw snapshot
@ -38,7 +38,8 @@ openclaw browser --browser-profile openclaw snapshot
Profiles are named browser routing configs. In practice: Profiles are named browser routing configs. In practice:
- `openclaw`: launches/attaches to a dedicated OpenClaw-managed Chrome instance (isolated user data dir). - `openclaw`: launches/attaches to a dedicated OpenClaw-managed Chrome instance (isolated user data dir).
- `chrome`: controls your existing Chrome tab(s) via the Chrome extension relay. - `user`: controls your existing signed-in Chrome session via Chrome DevTools MCP.
- `chrome-relay`: controls your existing Chrome tab(s) via the Chrome extension relay.
```bash ```bash
openclaw browser profiles openclaw browser profiles

View File

@ -126,6 +126,23 @@ openclaw gateway probe
openclaw gateway probe --json openclaw gateway probe --json
``` ```
Interpretation:
- `Reachable: yes` means at least one target accepted a WebSocket connect.
- `RPC: ok` means detail RPC calls (`health`/`status`/`system-presence`/`config.get`) also succeeded.
- `RPC: limited - missing scope: operator.read` means connect succeeded but detail RPC is scope-limited. This is reported as **degraded** reachability, not full failure.
- Exit code is non-zero only when no probed target is reachable.
JSON notes (`--json`):
- Top level:
- `ok`: at least one target is reachable.
- `degraded`: at least one target had scope-limited detail RPC.
- Per target (`targets[].connect`):
- `ok`: reachability after connect + degraded classification.
- `rpcOk`: full detail RPC success.
- `scopeLimited`: detail RPC failed due to missing operator scope.
#### Remote over SSH (Mac app parity) #### Remote over SSH (Mac app parity)
The macOS app “Remote over SSH” mode uses a local port-forward so the remote gateway (which may be bound to loopback only) becomes reachable at `ws://127.0.0.1:<port>`. The macOS app “Remote over SSH” mode uses a local port-forward so the remote gateway (which may be bound to loopback only) becomes reachable at `ws://127.0.0.1:<port>`.

View File

@ -2342,7 +2342,7 @@ See [Plugins](/tools/plugin).
browser: { browser: {
enabled: true, enabled: true,
evaluateEnabled: true, evaluateEnabled: true,
defaultProfile: "chrome", defaultProfile: "user",
ssrfPolicy: { ssrfPolicy: {
dangerouslyAllowPrivateNetwork: true, // default trusted-network mode dangerouslyAllowPrivateNetwork: true, // default trusted-network mode
// allowPrivateNetwork: true, // legacy alias // allowPrivateNetwork: true, // legacy alias

View File

@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first.
Under the hood, requests are executed as a normal Gateway agent run (same codepath as Under the hood, requests are executed as a normal Gateway agent run (same codepath as
`openclaw agent`), so routing/permissions/config match your Gateway. `openclaw agent`), so routing/permissions/config match your Gateway.
## Authentication ## Authentication, security, and routing
Uses the Gateway auth configuration. Send a bearer token: Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api):
- `Authorization: Bearer <token>` - use `Authorization: Bearer <token>` with the normal Gateway auth config
- treat the endpoint as full operator access for the gateway instance
- select agents with `model: "openclaw:<agentId>"`, `model: "agent:<agentId>"`, or `x-openclaw-agent-id`
- use `x-openclaw-session-key` for explicit session routing
Notes: Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`.
- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`).
- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`).
- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`.
## Security boundary (important)
Treat this endpoint as a **full operator-access** surface for the gateway instance.
- HTTP bearer auth here is not a narrow per-user scope model.
- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential.
- Requests run through the same control-plane agent path as trusted operator actions.
- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway.
- If the target agent policy allows sensitive tools, this endpoint can use them.
- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet.
See [Security](/gateway/security) and [Remote access](/gateway/remote).
## Choosing an agent
No custom headers required: encode the agent id in the OpenResponses `model` field:
- `model: "openclaw:<agentId>"` (example: `"openclaw:main"`, `"openclaw:beta"`)
- `model: "agent:<agentId>"` (alias)
Or target a specific OpenClaw agent by header:
- `x-openclaw-agent-id: <agentId>` (default: `main`)
Advanced:
- `x-openclaw-session-key: <sessionKey>` to fully control session routing.
## Enabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `true`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: true },
},
},
},
}
```
## Disabling the endpoint
Set `gateway.http.endpoints.responses.enabled` to `false`:
```json5
{
gateway: {
http: {
endpoints: {
responses: { enabled: false },
},
},
},
}
```
## Session behavior ## Session behavior

View File

@ -289,7 +289,7 @@ Look for:
- Valid browser executable path. - Valid browser executable path.
- CDP profile reachability. - CDP profile reachability.
- Extension relay tab attachment for `profile="chrome"`. - Extension relay tab attachment for `profile="chrome-relay"`.
Common signatures: Common signatures:

View File

@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost):
- No real keys required - No real keys required
- Should be fast and stable - Should be fast and stable
- Pool note: - Pool note:
- OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards. - OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards.
- On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`). - On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there.
- Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`). - Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`).
### E2E (gateway smoke) ### E2E (gateway smoke)

View File

@ -28,7 +28,7 @@ Good output in one line:
- `openclaw status` → shows configured channels and no obvious auth errors. - `openclaw status` → shows configured channels and no obvious auth errors.
- `openclaw status --all` → full report is present and shareable. - `openclaw status --all` → full report is present and shareable.
- `openclaw gateway probe` → expected gateway target is reachable. - `openclaw gateway probe` → expected gateway target is reachable (`Reachable: yes`). `RPC: limited - missing scope: operator.read` is degraded diagnostics, not a connect failure.
- `openclaw gateway status``Runtime: running` and `RPC probe: ok`. - `openclaw gateway status``Runtime: running` and `RPC probe: ok`.
- `openclaw doctor` → no blocking config/service errors. - `openclaw doctor` → no blocking config/service errors.
- `openclaw channels status --probe` → channels report `connected` or `ready`. - `openclaw channels status --probe` → channels report `connected` or `ready`.

View File

@ -0,0 +1,138 @@
---
summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts"
read_when:
- You are deploying OpenClaw on a cloud VM with Docker
- You need the shared binary bake, persistence, and update flow
title: "Docker VM Runtime"
---
# Docker VM Runtime
Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers.
## Bake required binaries into the image
Installing binaries inside a running container is a trap.
Anything installed at runtime will be lost on restart.
All external binaries required by skills must be installed at image build time.
The examples below show three common binaries only:
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
## Build and launch
```bash
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory.
Use a larger machine class before retrying.
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
Verify Gateway:
```bash
docker compose logs -f openclaw-gateway
```
Expected output:
```
[gateway] listening on ws://0.0.0.0:18789
```
## What persists where
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
## Updates
To update OpenClaw on the VM:
```bash
git pull
docker compose build
docker compose up -d
```

View File

@ -281,77 +281,20 @@ services:
--- ---
## 10) Bake required binaries into the image (critical) ## 10) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap. Use the shared runtime guide for the common Docker host flow:
Anything installed at runtime will be lost on restart.
All external binaries required by skills must be installed at image build time. - [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
The examples below show three common binaries only: - [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
--- ---
## 11) Build and launch ## 11) GCP-specific launch notes
```bash On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
docker compose build
docker compose up -d openclaw-gateway
```
If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing: When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing:
@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins
If you changed the gateway port, replace `18789` with your configured port. If you changed the gateway port, replace `18789` with your configured port.
Verify binaries: ## 12) Access from your laptop
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 12) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
---
## 13) Access from your laptop
Create an SSH tunnel to forward the Gateway port: Create an SSH tunnel to forward the Gateway port:
@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list
docker compose run --rm openclaw-cli devices approve <requestId> docker compose run --rm openclaw-cli devices approve <requestId>
``` ```
--- Need the shared persistence and update reference again?
See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates).
## What persists where (source of truth)
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
## Updates
To update OpenClaw on the VM:
```bash
cd ~/openclaw
git pull
docker compose build
docker compose up -d
```
--- ---

View File

@ -202,107 +202,20 @@ services:
--- ---
## 7) Bake required binaries into the image (critical) ## 7) Shared Docker VM runtime steps
Installing binaries inside a running container is a trap. Use the shared runtime guide for the common Docker host flow:
Anything installed at runtime will be lost on restart.
All external binaries required by skills must be installed at image build time. - [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
The examples below show three common binaries only: - [What persists where](/install/docker-vm-runtime#what-persists-where)
- [Updates](/install/docker-vm-runtime#updates)
- `gog` for Gmail access
- `goplaces` for Google Places
- `wacli` for WhatsApp
These are examples, not a complete list.
You may install as many binaries as needed using the same pattern.
If you add new skills later that depend on additional binaries, you must:
1. Update the Dockerfile
2. Rebuild the image
3. Restart the containers
**Example Dockerfile**
```dockerfile
FROM node:24-bookworm
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
# Example binary 1: Gmail CLI
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
# Example binary 2: Google Places CLI
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
# Example binary 3: WhatsApp CLI
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
# Add more binaries below using the same pattern
WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
COPY ui/package.json ./ui/package.json
COPY scripts ./scripts
RUN corepack enable
RUN pnpm install --frozen-lockfile
COPY . .
RUN pnpm build
RUN pnpm ui:install
RUN pnpm ui:build
ENV NODE_ENV=production
CMD ["node","dist/index.js"]
```
--- ---
## 8) Build and launch ## 8) Hetzner-specific access
```bash After the shared build and launch steps, tunnel from your laptop:
docker compose build
docker compose up -d openclaw-gateway
```
Verify binaries:
```bash
docker compose exec openclaw-gateway which gog
docker compose exec openclaw-gateway which goplaces
docker compose exec openclaw-gateway which wacli
```
Expected output:
```
/usr/local/bin/gog
/usr/local/bin/goplaces
/usr/local/bin/wacli
```
---
## 9) Verify Gateway
```bash
docker compose logs -f openclaw-gateway
```
Success:
```
[gateway] listening on ws://0.0.0.0:18789
```
From your laptop:
```bash ```bash
ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP
@ -316,25 +229,7 @@ Paste your gateway token.
--- ---
## What persists where (source of truth) The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where).
OpenClaw runs in Docker, but Docker is not the source of truth.
All long-lived state must survive restarts, rebuilds, and reboots.
| Component | Location | Persistence mechanism | Notes |
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
| OS packages | Container filesystem | Docker image | Do not install at runtime |
| Docker container | Ephemeral | Restartable | Safe to destroy |
---
## Infrastructure as Code (Terraform) ## Infrastructure as Code (Terraform)

View File

@ -9,6 +9,8 @@ title: "Android App"
# Android App (Node) # Android App (Node)
> **Note:** The Android app has not been publicly released yet. The source code is available in the [OpenClaw repository](https://github.com/openclaw/openclaw) under `apps/android`. You can build it yourself using Java 17 and the Android SDK (`./gradlew :app:assembleDebug`). See [apps/android/README.md](https://github.com/openclaw/openclaw/blob/main/apps/android/README.md) for build instructions.
## Support snapshot ## Support snapshot
- Role: companion node app (Android does not host the Gateway). - Role: companion node app (Android does not host the Gateway).

View File

@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set:
} }
``` ```
`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin
normalizes the provider-supplied `From` value and compares it to `allowFrom`.
Webhook verification authenticates provider delivery and payload integrity, but
it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as
caller-ID filtering, not strong caller identity.
Auto-responses use the agent system. Tune with: Auto-responses use the agent system. Tune with:
- `responseModel` - `responseModel`

View File

@ -85,8 +85,8 @@ See [Memory](/concepts/memory).
- **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey` - **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey`
- **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey` - **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey`
**Brave Search free credit:** Each Brave plan includes \$5/month in renewing
free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers
1,000 requests/month at no charge. Set your usage limit in the Brave dashboard
to avoid unexpected charges.

View File

@ -11,7 +11,7 @@ title: "Tests"
- `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests dont collide with a running instance. Use this when a prior gateway run left port 18789 occupied. - `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests dont collide with a running instance. Use this when a prior gateway run left port 18789 occupied.
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic. - `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic.
- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. - `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test`: runs the fast core unit lane by default for quick local feedback. - `pnpm test`: runs the fast core unit lane by default for quick local feedback.
- `pnpm test:channels`: runs channel-heavy suites. - `pnpm test:channels`: runs channel-heavy suites.
- `pnpm test:extensions`: runs extension/plugin suites. - `pnpm test:extensions`: runs extension/plugin suites.

View File

@ -167,93 +167,8 @@ openclaw onboard --non-interactive \
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts. `--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
</Note> </Note>
<AccordionGroup> Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples).
<Accordion title="Gemini example"> Use this reference page for flag semantics and step ordering.
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice gemini-api-key \
--gemini-api-key "$GEMINI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Z.AI example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice zai-api-key \
--zai-api-key "$ZAI_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Vercel AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ai-gateway-api-key \
--ai-gateway-api-key "$AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Cloudflare AI Gateway example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice cloudflare-ai-gateway-api-key \
--cloudflare-ai-gateway-account-id "your-account-id" \
--cloudflare-ai-gateway-gateway-id "your-gateway-id" \
--cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Moonshot example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice moonshot-api-key \
--moonshot-api-key "$MOONSHOT_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="Synthetic example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice synthetic-api-key \
--synthetic-api-key "$SYNTHETIC_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
</Accordion>
<Accordion title="OpenCode example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice opencode-zen \
--opencode-zen-api-key "$OPENCODE_API_KEY" \
--gateway-port 18789 \
--gateway-bind loopback
```
Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog.
</Accordion>
<Accordion title="Ollama example">
```bash
openclaw onboard --non-interactive \
--mode local \
--auth-choice ollama \
--custom-model-id "qwen3.5:27b" \
--accept-risk \
--gateway-port 18789 \
--gateway-bind loopback
```
Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance.
</Accordion>
</AccordionGroup>
### Add agent (non-interactive) ### Add agent (non-interactive)

View File

@ -123,7 +123,7 @@ curl -s http://127.0.0.1:18791/tabs
### Problem: "Chrome extension relay is running, but no tab is connected" ### Problem: "Chrome extension relay is running, but no tab is connected"
Youre using the `chrome` profile (extension relay). It expects the OpenClaw Youre using the `chrome-relay` profile (extension relay). It expects the OpenClaw
browser extension to be attached to a live tab. browser extension to be attached to a live tab.
Fix options: Fix options:
@ -135,5 +135,5 @@ Fix options:
Notes: Notes:
- The `chrome` profile uses your **system default Chromium browser** when possible. - The `chrome-relay` profile uses your **system default Chromium browser** when possible.
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl`; only set those for remote CDP. - Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl`; only set those for remote CDP.

View File

@ -20,6 +20,13 @@ Back to the main browser docs: [Browser](/tools/browser).
OpenClaw controls a **dedicated Chrome profile** (named `openclaw`, orangetinted UI). This is separate from your daily browser profile. OpenClaw controls a **dedicated Chrome profile** (named `openclaw`, orangetinted UI). This is separate from your daily browser profile.
For agent browser tool calls:
- Default choice: the agent should use its isolated `openclaw` browser.
- Use `profile="user"` only when existing logged-in sessions matter and the user is at the computer to click/approve any attach prompt.
- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow.
- If you have multiple user-browser profiles, specify the profile explicitly instead of guessing.
Two easy ways to access it: Two easy ways to access it:
1. **Ask the agent to open the browser** and then log in yourself. 1. **Ask the agent to open the browser** and then log in yourself.

View File

@ -33,7 +33,7 @@ Choose this when:
### Option 2: Chrome extension relay ### Option 2: Chrome extension relay
Use the built-in `chrome` profile plus the OpenClaw Chrome extension. Use the built-in `chrome-relay` profile plus the OpenClaw Chrome extension.
Choose this when: Choose this when:
@ -155,7 +155,7 @@ Example:
{ {
browser: { browser: {
enabled: true, enabled: true,
defaultProfile: "chrome", defaultProfile: "chrome-relay",
relayBindHost: "0.0.0.0", relayBindHost: "0.0.0.0",
}, },
} }
@ -197,7 +197,7 @@ openclaw browser tabs --browser-profile remote
For the extension relay: For the extension relay:
```bash ```bash
openclaw browser tabs --browser-profile chrome openclaw browser tabs --browser-profile chrome-relay
``` ```
Good result: Good result:

View File

@ -18,8 +18,8 @@ Beginner view:
- Think of it as a **separate, agent-only browser**. - Think of it as a **separate, agent-only browser**.
- The `openclaw` profile does **not** touch your personal browser profile. - The `openclaw` profile does **not** touch your personal browser profile.
- The agent can **open tabs, read pages, click, and type** in a safe lane. - The agent can **open tabs, read pages, click, and type** in a safe lane.
- The default `chrome` profile uses the **system default Chromium browser** via the - The built-in `user` profile attaches to your real signed-in Chrome session;
extension relay; switch to `openclaw` for the isolated managed browser. `chrome-relay` is the explicit extension-relay profile.
## What you get ## What you get
@ -43,11 +43,22 @@ openclaw browser --browser-profile openclaw snapshot
If you get “Browser disabled”, enable it in config (see below) and restart the If you get “Browser disabled”, enable it in config (see below) and restart the
Gateway. Gateway.
## Profiles: `openclaw` vs `chrome` ## Profiles: `openclaw` vs `user` vs `chrome-relay`
- `openclaw`: managed, isolated browser (no extension required). - `openclaw`: managed, isolated browser (no extension required).
- `chrome`: extension relay to your **system browser** (requires the OpenClaw - `user`: built-in Chrome MCP attach profile for your **real signed-in Chrome**
extension to be attached to a tab). session.
- `chrome-relay`: extension relay to your **system browser** (requires the
OpenClaw extension to be attached to a tab).
For agent browser tool calls:
- Default: use the isolated `openclaw` browser.
- Prefer `profile="user"` when existing logged-in sessions matter and the user
is at the computer to click/approve any attach prompt.
- Use `profile="chrome-relay"` only when the user explicitly wants the Chrome
extension / toolbar-button attach flow.
- `profile` is the explicit override when you want a specific browser mode.
Set `browser.defaultProfile: "openclaw"` if you want managed mode by default. Set `browser.defaultProfile: "openclaw"` if you want managed mode by default.
@ -68,7 +79,7 @@ Browser settings live in `~/.openclaw/openclaw.json`.
// cdpUrl: "http://127.0.0.1:18792", // legacy single-profile override // cdpUrl: "http://127.0.0.1:18792", // legacy single-profile override
remoteCdpTimeoutMs: 1500, // remote CDP HTTP timeout (ms) remoteCdpTimeoutMs: 1500, // remote CDP HTTP timeout (ms)
remoteCdpHandshakeTimeoutMs: 3000, // remote CDP WebSocket handshake timeout (ms) remoteCdpHandshakeTimeoutMs: 3000, // remote CDP WebSocket handshake timeout (ms)
defaultProfile: "chrome", defaultProfile: "openclaw",
color: "#FF4500", color: "#FF4500",
headless: false, headless: false,
noSandbox: false, noSandbox: false,
@ -77,6 +88,16 @@ Browser settings live in `~/.openclaw/openclaw.json`.
profiles: { profiles: {
openclaw: { cdpPort: 18800, color: "#FF4500" }, openclaw: { cdpPort: 18800, color: "#FF4500" },
work: { cdpPort: 18801, color: "#0066CC" }, work: { cdpPort: 18801, color: "#0066CC" },
user: {
driver: "existing-session",
attachOnly: true,
color: "#00AA00",
},
"chrome-relay": {
driver: "extension",
cdpUrl: "http://127.0.0.1:18792",
color: "#00AA00",
},
remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" }, remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" },
}, },
}, },
@ -97,9 +118,11 @@ Notes:
- `browser.ssrfPolicy.allowPrivateNetwork` remains supported as a legacy alias for compatibility. - `browser.ssrfPolicy.allowPrivateNetwork` remains supported as a legacy alias for compatibility.
- `attachOnly: true` means “never launch a local browser; only attach if it is already running.” - `attachOnly: true` means “never launch a local browser; only attach if it is already running.”
- `color` + per-profile `color` tint the browser UI so you can see which profile is active. - `color` + per-profile `color` tint the browser UI so you can see which profile is active.
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay. - Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "user"` to opt into the signed-in user browser, or `defaultProfile: "chrome-relay"` for the extension relay.
- Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary. - Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary.
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP. - Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP.
- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do
not set `cdpUrl` for that driver.
## Use Brave (or another Chromium-based browser) ## Use Brave (or another Chromium-based browser)
@ -264,11 +287,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be:
- **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port - **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port
- **remote**: an explicit CDP URL (Chromium-based browser running elsewhere) - **remote**: an explicit CDP URL (Chromium-based browser running elsewhere)
- **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension - **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension
- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect
Defaults: Defaults:
- The `openclaw` profile is auto-created if missing. - The `openclaw` profile is auto-created if missing.
- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). - The `chrome-relay` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
- Existing-session profiles are opt-in; create them with `--driver existing-session`.
- Local CDP ports allocate from **1880018899** by default. - Local CDP ports allocate from **1880018899** by default.
- Deleting a profile moves its local data directory to Trash. - Deleting a profile moves its local data directory to Trash.
@ -311,8 +336,8 @@ openclaw browser extension install
2. Use it: 2. Use it:
- CLI: `openclaw browser --browser-profile chrome tabs` - CLI: `openclaw browser --browser-profile chrome-relay tabs`
- Agent tool: `browser` with `profile="chrome"` - Agent tool: `browser` with `profile="chrome-relay"`
Optional: if you want a different name or relay port, create your own profile: Optional: if you want a different name or relay port, create your own profile:
@ -328,6 +353,81 @@ Notes:
- This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions). - This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions).
- Detach by clicking the extension icon again. - Detach by clicking the extension icon again.
- Agent use: prefer `profile="user"` for logged-in sites. Use `profile="chrome-relay"`
only when you specifically want the extension flow. The user must be present
to click the extension and attach the tab.
## Chrome existing-session via MCP
OpenClaw can also attach to a running Chrome profile through the official
Chrome DevTools MCP server. This reuses the tabs and login state already open in
that Chrome profile.
Official background and setup references:
- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
Built-in profile:
- `user`
Optional: create your own custom existing-session profile if you want a
different name or color.
Then in Chrome:
1. Open `chrome://inspect/#remote-debugging`
2. Enable remote debugging
3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
Live attach smoke test:
```bash
openclaw browser --browser-profile user start
openclaw browser --browser-profile user status
openclaw browser --browser-profile user tabs
openclaw browser --browser-profile user snapshot --format ai
```
What success looks like:
- `status` shows `driver: existing-session`
- `status` shows `transport: chrome-mcp`
- `status` shows `running: true`
- `tabs` lists your already-open Chrome tabs
- `snapshot` returns refs from the selected live tab
What to check if attach does not work:
- Chrome is version `144+`
- remote debugging is enabled at `chrome://inspect/#remote-debugging`
- Chrome showed and you accepted the attach consent prompt
Agent use:
- Use `profile="user"` when you need the users logged-in browser state.
- If you use a custom existing-session profile, pass that explicit profile name.
- Prefer `profile="user"` over `profile="chrome-relay"` unless the user
explicitly wants the extension / attach-tab flow.
- Only choose this mode when the user is at the computer to approve the attach
prompt.
- The Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect` to attach.
Notes:
- This path is higher-risk than the isolated `openclaw` profile because it can
act inside your signed-in browser session.
- OpenClaw does not launch Chrome for this driver; it attaches to an existing
session only.
- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not
the legacy default-profile remote debugging port workflow.
- Existing-session screenshots support page captures and `--ref` element
captures from snapshots, but not CSS `--element` selectors.
- Existing-session `wait --url` supports exact, substring, and glob patterns
like other browser drivers. `wait --load networkidle` is not supported yet.
- Some features still require the extension relay or managed browser path, such
as PDF export and download interception.
- Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated. - Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated.
WSL2 / cross-namespace example: WSL2 / cross-namespace example:
@ -337,7 +437,7 @@ WSL2 / cross-namespace example:
browser: { browser: {
enabled: true, enabled: true,
relayBindHost: "0.0.0.0", relayBindHost: "0.0.0.0",
defaultProfile: "chrome", defaultProfile: "chrome-relay",
}, },
} }
``` ```

View File

@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs
Attach/detach happens via a **single Chrome toolbar button**. Attach/detach happens via a **single Chrome toolbar button**.
If you want Chromes official DevTools MCP attach flow instead of the OpenClaw
extension relay, use an `existing-session` browser profile instead. See
[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chromes own
setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your
browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp).
## What it is (concept) ## What it is (concept)
There are three parts: There are three parts:
@ -55,7 +62,7 @@ After upgrading OpenClaw:
## Use it (set gateway token once) ## Use it (set gateway token once)
OpenClaw ships with a built-in browser profile named `chrome` that targets the extension relay on the default port. OpenClaw ships with a built-in browser profile named `chrome-relay` that targets the extension relay on the default port.
Before first attach, open extension Options and set: Before first attach, open extension Options and set:
@ -64,8 +71,8 @@ Before first attach, open extension Options and set:
Use it: Use it:
- CLI: `openclaw browser --browser-profile chrome tabs` - CLI: `openclaw browser --browser-profile chrome-relay tabs`
- Agent tool: `browser` with `profile="chrome"` - Agent tool: `browser` with `profile="chrome-relay"`
If you want a different name or a different relay port, create your own profile: If you want a different name or a different relay port, create your own profile:

View File

@ -316,7 +316,11 @@ Common parameters:
Notes: Notes:
- Requires `browser.enabled=true` (default is `true`; set `false` to disable). - Requires `browser.enabled=true` (default is `true`; set `false` to disable).
- All actions accept optional `profile` parameter for multi-instance support. - All actions accept optional `profile` parameter for multi-instance support.
- When `profile` is omitted, uses `browser.defaultProfile` (defaults to "chrome"). - Omit `profile` for the safe default: isolated OpenClaw-managed browser (`openclaw`).
- Use `profile="user"` for the real local host browser when existing logins/cookies matter and the user is present to click/approve any attach prompt.
- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow.
- `profile="user"` and `profile="chrome-relay"` are host-only; do not combine them with sandbox/node targets.
- When `profile` is omitted, uses `browser.defaultProfile` (defaults to `openclaw`).
- Profile names: lowercase alphanumeric + hyphens only (max 64 chars). - Profile names: lowercase alphanumeric + hyphens only (max 64 chars).
- Port range: 18800-18899 (~100 profiles max). - Port range: 18800-18899 (~100 profiles max).
- Remote profiles are attach-only (no start/stop/reset). - Remote profiles are attach-only (no start/stop/reset).

View File

@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi
2. In the dashboard, choose the **Search** plan and generate an API key. 2. In the dashboard, choose the **Search** plan and generate an API key.
3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment. 3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment.
Each Brave plan includes **\$5/month in free credit** (renewing). The Search
plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
your usage limit in the Brave dashboard to avoid unexpected charges. See the your usage limit in the Brave dashboard to avoid unexpected charges. See the
[Brave API portal](https://brave.com/search/api/) for current plans and [Brave API portal](https://brave.com/search/api/) for current plans and
pricing. pricing.

View File

@ -54,6 +54,49 @@ describe("acpx ensure", () => {
} }
}); });
/**
 * Queues the three spawn results that make up the ensure/install happy path:
 * 1. precheck `acpx --version` reporting an outdated version,
 * 2. `npm install` of the pinned acpx version succeeding,
 * 3. postcheck `acpx --version` reporting the pinned version.
 */
function mockEnsureInstallFlow() {
  const stdouts = [
    "acpx 0.0.9\n",
    "added 1 package\n",
    `acpx ${ACPX_PINNED_VERSION}\n`,
  ];
  for (const stdout of stdouts) {
    spawnAndCollectMock.mockResolvedValueOnce({
      stdout,
      stderr: "",
      code: 0,
      error: null,
    });
  }
}
function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) {
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars,
});
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
stripProviderAuthEnvVars,
});
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars,
});
}
it("accepts the pinned acpx version", async () => { it("accepts the pinned acpx version", async () => {
spawnAndCollectMock.mockResolvedValueOnce({ spawnAndCollectMock.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`, stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
@ -177,25 +220,7 @@ describe("acpx ensure", () => {
}); });
it("installs and verifies pinned acpx when precheck fails", async () => { it("installs and verifies pinned acpx when precheck fails", async () => {
spawnAndCollectMock mockEnsureInstallFlow();
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
await ensureAcpx({ await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx", command: "/plugin/node_modules/.bin/acpx",
@ -204,33 +229,11 @@ describe("acpx ensure", () => {
}); });
expect(spawnAndCollectMock).toHaveBeenCalledTimes(3); expect(spawnAndCollectMock).toHaveBeenCalledTimes(3);
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ expectEnsureInstallCalls();
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
});
}); });
it("threads stripProviderAuthEnvVars through version probes and install", async () => { it("threads stripProviderAuthEnvVars through version probes and install", async () => {
spawnAndCollectMock mockEnsureInstallFlow();
.mockResolvedValueOnce({
stdout: "acpx 0.0.9\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: "added 1 package\n",
stderr: "",
code: 0,
error: null,
})
.mockResolvedValueOnce({
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
stderr: "",
code: 0,
error: null,
});
await ensureAcpx({ await ensureAcpx({
command: "/plugin/node_modules/.bin/acpx", command: "/plugin/node_modules/.bin/acpx",
@ -239,24 +242,7 @@ describe("acpx ensure", () => {
stripProviderAuthEnvVars: true, stripProviderAuthEnvVars: true,
}); });
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ expectEnsureInstallCalls(true);
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
command: "npm",
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
command: "/plugin/node_modules/.bin/acpx",
args: ["--version"],
cwd: "/plugin",
stripProviderAuthEnvVars: true,
});
}); });
it("fails with actionable error when npm install fails", async () => { it("fails with actionable error when npm install fails", async () => {

View File

@ -254,6 +254,44 @@ describe("waitForExit", () => {
}); });
describe("spawnAndCollect", () => { describe("spawnAndCollect", () => {
type SpawnedEnvSnapshot = {
openai?: string;
github?: string;
hf?: string;
openclaw?: string;
shell?: string;
};
function stubProviderAuthEnv(env: Record<string, string>) {
for (const [key, value] of Object.entries(env)) {
vi.stubEnv(key, value);
}
}
async function collectSpawnedEnvSnapshot(options?: {
stripProviderAuthEnvVars?: boolean;
openAiEnvKey?: string;
githubEnvKey?: string;
hfEnvKey?: string;
}): Promise<SpawnedEnvSnapshot> {
const openAiEnvKey = options?.openAiEnvKey ?? "OPENAI_API_KEY";
const githubEnvKey = options?.githubEnvKey ?? "GITHUB_TOKEN";
const hfEnvKey = options?.hfEnvKey ?? "HF_TOKEN";
const result = await spawnAndCollect({
command: process.execPath,
args: [
"-e",
`process.stdout.write(JSON.stringify({openai:process.env.${openAiEnvKey},github:process.env.${githubEnvKey},hf:process.env.${hfEnvKey},openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))`,
],
cwd: process.cwd(),
stripProviderAuthEnvVars: options?.stripProviderAuthEnvVars,
});
expect(result.code).toBe(0);
expect(result.error).toBeNull();
return JSON.parse(result.stdout) as SpawnedEnvSnapshot;
}
it("returns abort error immediately when signal is already aborted", async () => { it("returns abort error immediately when signal is already aborted", async () => {
const controller = new AbortController(); const controller = new AbortController();
controller.abort(); controller.abort();
@ -292,31 +330,15 @@ describe("spawnAndCollect", () => {
}); });
it("strips shared provider auth env vars from spawned acpx children", async () => { it("strips shared provider auth env vars from spawned acpx children", async () => {
vi.stubEnv("OPENAI_API_KEY", "openai-secret"); stubProviderAuthEnv({
vi.stubEnv("GITHUB_TOKEN", "gh-secret"); OPENAI_API_KEY: "openai-secret",
vi.stubEnv("HF_TOKEN", "hf-secret"); GITHUB_TOKEN: "gh-secret",
vi.stubEnv("OPENCLAW_API_KEY", "keep-me"); HF_TOKEN: "hf-secret",
OPENCLAW_API_KEY: "keep-me",
const result = await spawnAndCollect({ });
command: process.execPath, const parsed = await collectSpawnedEnvSnapshot({
args: [
"-e",
"process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
],
cwd: process.cwd(),
stripProviderAuthEnvVars: true, stripProviderAuthEnvVars: true,
}); });
expect(result.code).toBe(0);
expect(result.error).toBeNull();
const parsed = JSON.parse(result.stdout) as {
openai?: string;
github?: string;
hf?: string;
openclaw?: string;
shell?: string;
};
expect(parsed.openai).toBeUndefined(); expect(parsed.openai).toBeUndefined();
expect(parsed.github).toBeUndefined(); expect(parsed.github).toBeUndefined();
expect(parsed.hf).toBeUndefined(); expect(parsed.hf).toBeUndefined();
@ -325,29 +347,16 @@ describe("spawnAndCollect", () => {
}); });
it("strips provider auth env vars case-insensitively", async () => { it("strips provider auth env vars case-insensitively", async () => {
vi.stubEnv("OpenAI_Api_Key", "openai-secret"); stubProviderAuthEnv({
vi.stubEnv("Github_Token", "gh-secret"); OpenAI_Api_Key: "openai-secret",
vi.stubEnv("OPENCLAW_API_KEY", "keep-me"); Github_Token: "gh-secret",
OPENCLAW_API_KEY: "keep-me",
const result = await spawnAndCollect({ });
command: process.execPath, const parsed = await collectSpawnedEnvSnapshot({
args: [ stripProviderAuthEnvVars: true,
"-e", openAiEnvKey: "OpenAI_Api_Key",
"process.stdout.write(JSON.stringify({openai:process.env.OpenAI_Api_Key,github:process.env.Github_Token,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))", githubEnvKey: "Github_Token",
],
cwd: process.cwd(),
stripProviderAuthEnvVars: true,
}); });
expect(result.code).toBe(0);
expect(result.error).toBeNull();
const parsed = JSON.parse(result.stdout) as {
openai?: string;
github?: string;
openclaw?: string;
shell?: string;
};
expect(parsed.openai).toBeUndefined(); expect(parsed.openai).toBeUndefined();
expect(parsed.github).toBeUndefined(); expect(parsed.github).toBeUndefined();
expect(parsed.openclaw).toBe("keep-me"); expect(parsed.openclaw).toBe("keep-me");
@ -355,30 +364,13 @@ describe("spawnAndCollect", () => {
}); });
it("preserves provider auth env vars for explicit custom commands by default", async () => { it("preserves provider auth env vars for explicit custom commands by default", async () => {
vi.stubEnv("OPENAI_API_KEY", "openai-secret"); stubProviderAuthEnv({
vi.stubEnv("GITHUB_TOKEN", "gh-secret"); OPENAI_API_KEY: "openai-secret",
vi.stubEnv("HF_TOKEN", "hf-secret"); GITHUB_TOKEN: "gh-secret",
vi.stubEnv("OPENCLAW_API_KEY", "keep-me"); HF_TOKEN: "hf-secret",
OPENCLAW_API_KEY: "keep-me",
const result = await spawnAndCollect({
command: process.execPath,
args: [
"-e",
"process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
],
cwd: process.cwd(),
}); });
const parsed = await collectSpawnedEnvSnapshot();
expect(result.code).toBe(0);
expect(result.error).toBeNull();
const parsed = JSON.parse(result.stdout) as {
openai?: string;
github?: string;
hf?: string;
openclaw?: string;
shell?: string;
};
expect(parsed.openai).toBe("openai-secret"); expect(parsed.openai).toBe("openai-secret");
expect(parsed.github).toBe("gh-secret"); expect(parsed.github).toBe("gh-secret");
expect(parsed.hf).toBe("hf-secret"); expect(parsed.hf).toBe("hf-secret");

View File

@ -82,6 +82,15 @@ describe("downloadBlueBubblesAttachment", () => {
).rejects.toThrow("too large"); ).rejects.toThrow("too large");
} }
function mockSuccessfulAttachmentDownload(buffer = new Uint8Array([1])) {
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(buffer.buffer),
});
return buffer;
}
it("throws when guid is missing", async () => { it("throws when guid is missing", async () => {
const attachment: BlueBubblesAttachment = {}; const attachment: BlueBubblesAttachment = {};
await expect( await expect(
@ -159,12 +168,7 @@ describe("downloadBlueBubblesAttachment", () => {
}); });
it("encodes guid in URL", async () => { it("encodes guid in URL", async () => {
const mockBuffer = new Uint8Array([1]); mockSuccessfulAttachmentDownload();
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
const attachment: BlueBubblesAttachment = { guid: "att/with/special chars" }; const attachment: BlueBubblesAttachment = { guid: "att/with/special chars" };
await downloadBlueBubblesAttachment(attachment, { await downloadBlueBubblesAttachment(attachment, {
@ -244,12 +248,7 @@ describe("downloadBlueBubblesAttachment", () => {
}); });
it("resolves credentials from config when opts not provided", async () => { it("resolves credentials from config when opts not provided", async () => {
const mockBuffer = new Uint8Array([1]); mockSuccessfulAttachmentDownload();
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
const attachment: BlueBubblesAttachment = { guid: "att-config" }; const attachment: BlueBubblesAttachment = { guid: "att-config" };
const result = await downloadBlueBubblesAttachment(attachment, { const result = await downloadBlueBubblesAttachment(attachment, {
@ -270,12 +269,7 @@ describe("downloadBlueBubblesAttachment", () => {
}); });
it("passes ssrfPolicy with allowPrivateNetwork when config enables it", async () => { it("passes ssrfPolicy with allowPrivateNetwork when config enables it", async () => {
const mockBuffer = new Uint8Array([1]); mockSuccessfulAttachmentDownload();
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
const attachment: BlueBubblesAttachment = { guid: "att-ssrf" }; const attachment: BlueBubblesAttachment = { guid: "att-ssrf" };
await downloadBlueBubblesAttachment(attachment, { await downloadBlueBubblesAttachment(attachment, {
@ -295,12 +289,7 @@ describe("downloadBlueBubblesAttachment", () => {
}); });
it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => { it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => {
const mockBuffer = new Uint8Array([1]); mockSuccessfulAttachmentDownload();
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
const attachment: BlueBubblesAttachment = { guid: "att-no-ssrf" }; const attachment: BlueBubblesAttachment = { guid: "att-no-ssrf" };
await downloadBlueBubblesAttachment(attachment, { await downloadBlueBubblesAttachment(attachment, {
@ -313,12 +302,7 @@ describe("downloadBlueBubblesAttachment", () => {
}); });
it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => { it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => {
const mockBuffer = new Uint8Array([1]); mockSuccessfulAttachmentDownload();
mockFetch.mockResolvedValueOnce({
ok: true,
headers: new Headers(),
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
});
const attachment: BlueBubblesAttachment = { guid: "att-private-ip" }; const attachment: BlueBubblesAttachment = { guid: "att-private-ip" };
await downloadBlueBubblesAttachment(attachment, { await downloadBlueBubblesAttachment(attachment, {
@ -352,6 +336,14 @@ describe("sendBlueBubblesAttachment", () => {
return Buffer.from(body).toString("utf8"); return Buffer.from(body).toString("utf8");
} }
function expectVoiceAttachmentBody() {
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array;
const bodyText = decodeBody(body);
expect(bodyText).toContain('name="isAudioMessage"');
expect(bodyText).toContain("true");
return bodyText;
}
it("marks voice memos when asVoice is true and mp3 is provided", async () => { it("marks voice memos when asVoice is true and mp3 is provided", async () => {
mockFetch.mockResolvedValueOnce({ mockFetch.mockResolvedValueOnce({
ok: true, ok: true,
@ -367,10 +359,7 @@ describe("sendBlueBubblesAttachment", () => {
opts: { serverUrl: "http://localhost:1234", password: "test" }, opts: { serverUrl: "http://localhost:1234", password: "test" },
}); });
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; const bodyText = expectVoiceAttachmentBody();
const bodyText = decodeBody(body);
expect(bodyText).toContain('name="isAudioMessage"');
expect(bodyText).toContain("true");
expect(bodyText).toContain('filename="voice.mp3"'); expect(bodyText).toContain('filename="voice.mp3"');
}); });
@ -389,8 +378,7 @@ describe("sendBlueBubblesAttachment", () => {
opts: { serverUrl: "http://localhost:1234", password: "test" }, opts: { serverUrl: "http://localhost:1234", password: "test" },
}); });
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; const bodyText = expectVoiceAttachmentBody();
const bodyText = decodeBody(body);
expect(bodyText).toContain('filename="voice.mp3"'); expect(bodyText).toContain('filename="voice.mp3"');
expect(bodyText).toContain('name="voice.mp3"'); expect(bodyText).toContain('name="voice.mp3"');
}); });

View File

@ -2,7 +2,7 @@ import crypto from "node:crypto";
import path from "node:path"; import path from "node:path";
import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles"; import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles";
import { resolveBlueBubblesServerAccount } from "./account-resolve.js"; import { resolveBlueBubblesServerAccount } from "./account-resolve.js";
import { postMultipartFormData } from "./multipart.js"; import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js";
import { import {
getCachedBlueBubblesPrivateApiStatus, getCachedBlueBubblesPrivateApiStatus,
isBlueBubblesPrivateApiStatusEnabled, isBlueBubblesPrivateApiStatusEnabled,
@ -262,12 +262,7 @@ export async function sendBlueBubblesAttachment(params: {
timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads
}); });
if (!res.ok) { await assertMultipartActionOk(res, "attachment send");
const errorText = await res.text();
throw new Error(
`BlueBubbles attachment send failed (${res.status}): ${errorText || "unknown"}`,
);
}
const responseBody = await res.text(); const responseBody = await res.text();
if (!responseBody) { if (!responseBody) {

View File

@ -29,6 +29,11 @@ describe("chat", () => {
}); });
} }
function mockTwoOkTextResponses() {
mockOkTextResponse();
mockOkTextResponse();
}
async function expectCalledUrlIncludesPassword(params: { async function expectCalledUrlIncludesPassword(params: {
password: string; password: string;
invoke: () => Promise<void>; invoke: () => Promise<void>;
@ -198,15 +203,7 @@ describe("chat", () => {
}); });
it("uses POST for start and DELETE for stop", async () => { it("uses POST for start and DELETE for stop", async () => {
mockFetch mockTwoOkTextResponses();
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
})
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
});
await sendBlueBubblesTyping("iMessage;-;+15551234567", true, { await sendBlueBubblesTyping("iMessage;-;+15551234567", true, {
serverUrl: "http://localhost:1234", serverUrl: "http://localhost:1234",
@ -442,15 +439,7 @@ describe("chat", () => {
}); });
it("adds and removes participant using matching endpoint", async () => { it("adds and removes participant using matching endpoint", async () => {
mockFetch mockTwoOkTextResponses();
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
})
.mockResolvedValueOnce({
ok: true,
text: () => Promise.resolve(""),
});
await addBlueBubblesParticipant("chat-guid", "+15551234567", { await addBlueBubblesParticipant("chat-guid", "+15551234567", {
serverUrl: "http://localhost:1234", serverUrl: "http://localhost:1234",

View File

@ -2,7 +2,7 @@ import crypto from "node:crypto";
import path from "node:path"; import path from "node:path";
import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles"; import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles";
import { resolveBlueBubblesServerAccount } from "./account-resolve.js"; import { resolveBlueBubblesServerAccount } from "./account-resolve.js";
import { postMultipartFormData } from "./multipart.js"; import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js";
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js"; import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
import { blueBubblesFetchWithTimeout, buildBlueBubblesApiUrl } from "./types.js"; import { blueBubblesFetchWithTimeout, buildBlueBubblesApiUrl } from "./types.js";
@ -55,12 +55,7 @@ async function sendBlueBubblesChatEndpointRequest(params: {
{ method: params.method }, { method: params.method },
params.opts.timeoutMs, params.opts.timeoutMs,
); );
if (!res.ok) { await assertMultipartActionOk(res, params.action);
const errorText = await res.text().catch(() => "");
throw new Error(
`BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`,
);
}
} }
async function sendPrivateApiJsonRequest(params: { async function sendPrivateApiJsonRequest(params: {
@ -86,12 +81,7 @@ async function sendPrivateApiJsonRequest(params: {
} }
const res = await blueBubblesFetchWithTimeout(url, request, params.opts.timeoutMs); const res = await blueBubblesFetchWithTimeout(url, request, params.opts.timeoutMs);
if (!res.ok) { await assertMultipartActionOk(res, params.action);
const errorText = await res.text().catch(() => "");
throw new Error(
`BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`,
);
}
} }
export async function markBlueBubblesChatRead( export async function markBlueBubblesChatRead(
@ -329,8 +319,5 @@ export async function setGroupIconBlueBubbles(
timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads
}); });
if (!res.ok) { await assertMultipartActionOk(res, "setGroupIcon");
const errorText = await res.text().catch(() => "");
throw new Error(`BlueBubbles setGroupIcon failed (${res.status}): ${errorText || "unknown"}`);
}
} }

View File

@ -70,6 +70,70 @@ async function makeTempDir(): Promise<string> {
return dir; return dir;
} }
async function makeTempFile(
fileName: string,
contents: string,
dir?: string,
): Promise<{ dir: string; filePath: string }> {
const resolvedDir = dir ?? (await makeTempDir());
const filePath = path.join(resolvedDir, fileName);
await fs.writeFile(filePath, contents, "utf8");
return { dir: resolvedDir, filePath };
}
async function sendLocalMedia(params: {
cfg: OpenClawConfig;
mediaPath: string;
accountId?: string;
}) {
return sendBlueBubblesMedia({
cfg: params.cfg,
to: "chat:123",
accountId: params.accountId,
mediaPath: params.mediaPath,
});
}
async function expectRejectedLocalMedia(params: {
cfg: OpenClawConfig;
mediaPath: string;
error: RegExp;
accountId?: string;
}) {
await expect(
sendLocalMedia({
cfg: params.cfg,
mediaPath: params.mediaPath,
accountId: params.accountId,
}),
).rejects.toThrow(params.error);
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
}
async function expectAllowedLocalMedia(params: {
cfg: OpenClawConfig;
mediaPath: string;
expectedAttachment: Record<string, unknown>;
accountId?: string;
expectMimeDetection?: boolean;
}) {
const result = await sendLocalMedia({
cfg: params.cfg,
mediaPath: params.mediaPath,
accountId: params.accountId,
});
expect(result).toEqual({ messageId: "msg-1" });
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
expect.objectContaining(params.expectedAttachment),
);
if (params.expectMimeDetection) {
expect(runtimeMocks.detectMime).toHaveBeenCalled();
}
}
beforeEach(() => { beforeEach(() => {
const runtime = createMockRuntime(); const runtime = createMockRuntime();
runtimeMocks = runtime.mocks; runtimeMocks = runtime.mocks;
@ -110,57 +174,43 @@ describe("sendBlueBubblesMedia local-path hardening", () => {
const outsideFile = path.join(outsideDir, "outside.txt"); const outsideFile = path.join(outsideDir, "outside.txt");
await fs.writeFile(outsideFile, "not allowed", "utf8"); await fs.writeFile(outsideFile, "not allowed", "utf8");
await expect( await expectRejectedLocalMedia({
sendBlueBubblesMedia({ cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), mediaPath: outsideFile,
to: "chat:123", error: /not under any configured mediaLocalRoots/i,
mediaPath: outsideFile, });
}),
).rejects.toThrow(/not under any configured mediaLocalRoots/i);
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
}); });
it("allows local paths that are explicitly configured", async () => { it("allows local paths that are explicitly configured", async () => {
const allowedRoot = await makeTempDir(); const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile(
const allowedFile = path.join(allowedRoot, "allowed.txt"); "allowed.txt",
await fs.writeFile(allowedFile, "allowed", "utf8"); "allowed",
);
const result = await sendBlueBubblesMedia({ await expectAllowedLocalMedia({
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
to: "chat:123",
mediaPath: allowedFile, mediaPath: allowedFile,
}); expectedAttachment: {
expect(result).toEqual({ messageId: "msg-1" });
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
expect.objectContaining({
filename: "allowed.txt", filename: "allowed.txt",
contentType: "text/plain", contentType: "text/plain",
}), },
); expectMimeDetection: true,
expect(runtimeMocks.detectMime).toHaveBeenCalled(); });
}); });
it("allows file:// media paths and file:// local roots", async () => { it("allows file:// media paths and file:// local roots", async () => {
const allowedRoot = await makeTempDir(); const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile(
const allowedFile = path.join(allowedRoot, "allowed.txt"); "allowed.txt",
await fs.writeFile(allowedFile, "allowed", "utf8"); "allowed",
const result = await sendBlueBubblesMedia({
cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }),
to: "chat:123",
mediaPath: pathToFileURL(allowedFile).toString(),
});
expect(result).toEqual({ messageId: "msg-1" });
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
expect.objectContaining({
filename: "allowed.txt",
}),
); );
await expectAllowedLocalMedia({
cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }),
mediaPath: pathToFileURL(allowedFile).toString(),
expectedAttachment: {
filename: "allowed.txt",
},
});
}); });
it("uses account-specific mediaLocalRoots over top-level roots", async () => { it("uses account-specific mediaLocalRoots over top-level roots", async () => {
@ -213,15 +263,11 @@ describe("sendBlueBubblesMedia local-path hardening", () => {
return; return;
} }
await expect( await expectRejectedLocalMedia({
sendBlueBubblesMedia({ cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), mediaPath: linkPath,
to: "chat:123", error: /not under any configured mediaLocalRoots/i,
mediaPath: linkPath, });
}),
).rejects.toThrow(/not under any configured mediaLocalRoots/i);
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
}); });
it("rejects relative mediaLocalRoots entries", async () => { it("rejects relative mediaLocalRoots entries", async () => {

View File

@ -1,18 +1,24 @@
import { describe, expect, it } from "vitest"; import { describe, expect, it } from "vitest";
import { normalizeWebhookMessage, normalizeWebhookReaction } from "./monitor-normalize.js"; import { normalizeWebhookMessage, normalizeWebhookReaction } from "./monitor-normalize.js";
function createFallbackDmPayload(overrides: Record<string, unknown> = {}) {
return {
guid: "msg-1",
isGroup: false,
isFromMe: false,
handle: null,
chatGuid: "iMessage;-;+15551234567",
...overrides,
};
}
describe("normalizeWebhookMessage", () => { describe("normalizeWebhookMessage", () => {
it("falls back to DM chatGuid handle when sender handle is missing", () => { it("falls back to DM chatGuid handle when sender handle is missing", () => {
const result = normalizeWebhookMessage({ const result = normalizeWebhookMessage({
type: "new-message", type: "new-message",
data: { data: createFallbackDmPayload({
guid: "msg-1",
text: "hello", text: "hello",
isGroup: false, }),
isFromMe: false,
handle: null,
chatGuid: "iMessage;-;+15551234567",
},
}); });
expect(result).not.toBeNull(); expect(result).not.toBeNull();
@ -78,15 +84,11 @@ describe("normalizeWebhookReaction", () => {
it("falls back to DM chatGuid handle when reaction sender handle is missing", () => { it("falls back to DM chatGuid handle when reaction sender handle is missing", () => {
const result = normalizeWebhookReaction({ const result = normalizeWebhookReaction({
type: "updated-message", type: "updated-message",
data: { data: createFallbackDmPayload({
guid: "msg-2", guid: "msg-2",
associatedMessageGuid: "p:0/msg-1", associatedMessageGuid: "p:0/msg-1",
associatedMessageType: 2000, associatedMessageType: 2000,
isGroup: false, }),
isFromMe: false,
handle: null,
chatGuid: "iMessage;-;+15551234567",
},
}); });
expect(result).not.toBeNull(); expect(result).not.toBeNull();

View File

@ -582,6 +582,29 @@ export function parseTapbackText(params: {
return null; return null;
} }
const parseLeadingReactionAction = (
prefix: "reacted" | "removed",
defaultAction: "added" | "removed",
) => {
if (!lower.startsWith(prefix)) {
return null;
}
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
if (!emoji) {
return null;
}
const quotedText = extractQuotedTapbackText(trimmed);
if (params.requireQuoted && !quotedText) {
return null;
}
const fallback = trimmed.slice(prefix.length).trim();
return {
emoji,
action: params.actionHint ?? defaultAction,
quotedText: quotedText ?? fallback,
};
};
for (const [pattern, { emoji, action }] of TAPBACK_TEXT_MAP) { for (const [pattern, { emoji, action }] of TAPBACK_TEXT_MAP) {
if (lower.startsWith(pattern)) { if (lower.startsWith(pattern)) {
// Extract quoted text if present (e.g., 'Loved "hello"' -> "hello") // Extract quoted text if present (e.g., 'Loved "hello"' -> "hello")
@ -599,30 +622,14 @@ export function parseTapbackText(params: {
} }
} }
if (lower.startsWith("reacted")) { const reacted = parseLeadingReactionAction("reacted", "added");
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; if (reacted) {
if (!emoji) { return reacted;
return null;
}
const quotedText = extractQuotedTapbackText(trimmed);
if (params.requireQuoted && !quotedText) {
return null;
}
const fallback = trimmed.slice("reacted".length).trim();
return { emoji, action: params.actionHint ?? "added", quotedText: quotedText ?? fallback };
} }
if (lower.startsWith("removed")) { const removed = parseLeadingReactionAction("removed", "removed");
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; if (removed) {
if (!emoji) { return removed;
return null;
}
const quotedText = extractQuotedTapbackText(trimmed);
if (params.requireQuoted && !quotedText) {
return null;
}
const fallback = trimmed.slice("removed".length).trim();
return { emoji, action: params.actionHint ?? "removed", quotedText: quotedText ?? fallback };
} }
return null; return null;
} }

View File

@ -302,65 +302,102 @@ describe("BlueBubbles webhook monitor", () => {
}; };
} }
describe("webhook parsing + auth handling", () => { async function dispatchWebhook(req: IncomingMessage) {
it("rejects non-POST requests", async () => { const res = createMockResponse();
const account = createMockAccount(); const handled = await handleBlueBubblesWebhookRequest(req, res);
const config: OpenClawConfig = {}; return { handled, res };
const core = createMockRuntime(); }
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({ function createWebhookRequestForTest(params?: {
method?: string;
url?: string;
body?: unknown;
headers?: Record<string, string>;
remoteAddress?: string;
}) {
const req = createMockRequest(
params?.method ?? "POST",
params?.url ?? "/bluebubbles-webhook",
params?.body ?? {},
params?.headers,
);
if (params?.remoteAddress) {
setRequestRemoteAddress(req, params.remoteAddress);
}
return req;
}
function createHangingWebhookRequest(url = "/bluebubbles-webhook?password=test-password") {
const req = new EventEmitter() as IncomingMessage;
const destroyMock = vi.fn();
req.method = "POST";
req.url = url;
req.headers = {};
req.destroy = destroyMock as unknown as IncomingMessage["destroy"];
setRequestRemoteAddress(req, "127.0.0.1");
return { req, destroyMock };
}
function registerWebhookTargets(
params: Array<{
account: ResolvedBlueBubblesAccount;
statusSink?: (event: unknown) => void;
}>,
) {
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const unregisterFns = params.map(({ account, statusSink }) =>
registerBlueBubblesWebhookTarget({
account, account,
config, config,
runtime: { log: vi.fn(), error: vi.fn() }, runtime: { log: vi.fn(), error: vi.fn() },
core, core,
path: "/bluebubbles-webhook", path: "/bluebubbles-webhook",
}); statusSink,
}),
);
const req = createMockRequest("GET", "/bluebubbles-webhook", {}); unregister = () => {
const res = createMockResponse(); for (const unregisterFn of unregisterFns) {
unregisterFn();
}
};
}
const handled = await handleBlueBubblesWebhookRequest(req, res); async function expectWebhookStatus(
req: IncomingMessage,
expectedStatus: number,
expectedBody?: string,
) {
const { handled, res } = await dispatchWebhook(req);
expect(handled).toBe(true);
expect(res.statusCode).toBe(expectedStatus);
if (expectedBody !== undefined) {
expect(res.body).toBe(expectedBody);
}
return res;
}
expect(handled).toBe(true); describe("webhook parsing + auth handling", () => {
expect(res.statusCode).toBe(405); it("rejects non-POST requests", async () => {
setupWebhookTarget();
const req = createWebhookRequestForTest({ method: "GET" });
await expectWebhookStatus(req, 405);
}); });
it("accepts POST requests with valid JSON payload", async () => { it("accepts POST requests with valid JSON payload", async () => {
setupWebhookTarget(); setupWebhookTarget();
const payload = createNewMessagePayload({ date: Date.now() }); const payload = createNewMessagePayload({ date: Date.now() });
const req = createWebhookRequestForTest({ body: payload });
const req = createMockRequest("POST", "/bluebubbles-webhook", payload); await expectWebhookStatus(req, 200, "ok");
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(res.body).toBe("ok");
}); });
it("rejects requests with invalid JSON", async () => { it("rejects requests with invalid JSON", async () => {
const account = createMockAccount(); setupWebhookTarget();
const config: OpenClawConfig = {}; const req = createWebhookRequestForTest({ body: "invalid json {{" });
const core = createMockRuntime(); await expectWebhookStatus(req, 400);
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const req = createMockRequest("POST", "/bluebubbles-webhook", "invalid json {{");
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(400);
}); });
it("accepts URL-encoded payload wrappers", async () => { it("accepts URL-encoded payload wrappers", async () => {
@ -369,42 +406,17 @@ describe("BlueBubbles webhook monitor", () => {
const encodedBody = new URLSearchParams({ const encodedBody = new URLSearchParams({
payload: JSON.stringify(payload), payload: JSON.stringify(payload),
}).toString(); }).toString();
const req = createWebhookRequestForTest({ body: encodedBody });
const req = createMockRequest("POST", "/bluebubbles-webhook", encodedBody); await expectWebhookStatus(req, 200, "ok");
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(res.body).toBe("ok");
}); });
it("returns 408 when request body times out (Slow-Loris protection)", async () => { it("returns 408 when request body times out (Slow-Loris protection)", async () => {
vi.useFakeTimers(); vi.useFakeTimers();
try { try {
const account = createMockAccount(); setupWebhookTarget();
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
// Create a request that never sends data or ends (simulates slow-loris) // Create a request that never sends data or ends (simulates slow-loris)
const req = new EventEmitter() as IncomingMessage; const { req, destroyMock } = createHangingWebhookRequest();
req.method = "POST";
req.url = "/bluebubbles-webhook?password=test-password";
req.headers = {};
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "127.0.0.1",
};
req.destroy = vi.fn();
const res = createMockResponse(); const res = createMockResponse();
@ -416,7 +428,7 @@ describe("BlueBubbles webhook monitor", () => {
const handled = await handledPromise; const handled = await handledPromise;
expect(handled).toBe(true); expect(handled).toBe(true);
expect(res.statusCode).toBe(408); expect(res.statusCode).toBe(408);
expect(req.destroy).toHaveBeenCalled(); expect(destroyMock).toHaveBeenCalled();
} finally { } finally {
vi.useRealTimers(); vi.useRealTimers();
} }
@ -424,140 +436,62 @@ describe("BlueBubbles webhook monitor", () => {
it("rejects unauthorized requests before reading the body", async () => { it("rejects unauthorized requests before reading the body", async () => {
const account = createMockAccount({ password: "secret-token" }); const account = createMockAccount({ password: "secret-token" });
const config: OpenClawConfig = {}; setupWebhookTarget({ account });
const core = createMockRuntime(); const { req } = createHangingWebhookRequest("/bluebubbles-webhook?password=wrong-token");
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const req = new EventEmitter() as IncomingMessage;
req.method = "POST";
req.url = "/bluebubbles-webhook?password=wrong-token";
req.headers = {};
const onSpy = vi.spyOn(req, "on"); const onSpy = vi.spyOn(req, "on");
(req as unknown as { socket: { remoteAddress: string } }).socket = { await expectWebhookStatus(req, 401);
remoteAddress: "127.0.0.1",
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function)); expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function));
}); });
it("authenticates via password query parameter", async () => { it("authenticates via password query parameter", async () => {
const account = createMockAccount({ password: "secret-token" }); const account = createMockAccount({ password: "secret-token" });
// Mock non-localhost request
const req = createMockRequest(
"POST",
"/bluebubbles-webhook?password=secret-token",
createNewMessagePayload(),
);
setRequestRemoteAddress(req, "192.168.1.100");
setupWebhookTarget({ account }); setupWebhookTarget({ account });
const req = createWebhookRequestForTest({
const res = createMockResponse(); url: "/bluebubbles-webhook?password=secret-token",
const handled = await handleBlueBubblesWebhookRequest(req, res); body: createNewMessagePayload(),
remoteAddress: "192.168.1.100",
expect(handled).toBe(true); });
expect(res.statusCode).toBe(200); await expectWebhookStatus(req, 200);
}); });
it("authenticates via x-password header", async () => { it("authenticates via x-password header", async () => {
const account = createMockAccount({ password: "secret-token" }); const account = createMockAccount({ password: "secret-token" });
const req = createMockRequest(
"POST",
"/bluebubbles-webhook",
createNewMessagePayload(),
{ "x-password": "secret-token" }, // pragma: allowlist secret
);
setRequestRemoteAddress(req, "192.168.1.100");
setupWebhookTarget({ account }); setupWebhookTarget({ account });
const req = createWebhookRequestForTest({
const res = createMockResponse(); body: createNewMessagePayload(),
const handled = await handleBlueBubblesWebhookRequest(req, res); headers: { "x-password": "secret-token" }, // pragma: allowlist secret
remoteAddress: "192.168.1.100",
expect(handled).toBe(true); });
expect(res.statusCode).toBe(200); await expectWebhookStatus(req, 200);
}); });
it("rejects unauthorized requests with wrong password", async () => { it("rejects unauthorized requests with wrong password", async () => {
const account = createMockAccount({ password: "secret-token" }); const account = createMockAccount({ password: "secret-token" });
const req = createMockRequest(
"POST",
"/bluebubbles-webhook?password=wrong-token",
createNewMessagePayload(),
);
setRequestRemoteAddress(req, "192.168.1.100");
setupWebhookTarget({ account }); setupWebhookTarget({ account });
const req = createWebhookRequestForTest({
const res = createMockResponse(); url: "/bluebubbles-webhook?password=wrong-token",
const handled = await handleBlueBubblesWebhookRequest(req, res); body: createNewMessagePayload(),
remoteAddress: "192.168.1.100",
expect(handled).toBe(true); });
expect(res.statusCode).toBe(401); await expectWebhookStatus(req, 401);
}); });
it("rejects ambiguous routing when multiple targets match the same password", async () => { it("rejects ambiguous routing when multiple targets match the same password", async () => {
const accountA = createMockAccount({ password: "secret-token" }); const accountA = createMockAccount({ password: "secret-token" });
const accountB = createMockAccount({ password: "secret-token" }); const accountB = createMockAccount({ password: "secret-token" });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const sinkA = vi.fn(); const sinkA = vi.fn();
const sinkB = vi.fn(); const sinkB = vi.fn();
registerWebhookTargets([
{ account: accountA, statusSink: sinkA },
{ account: accountB, statusSink: sinkB },
]);
const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", { const req = createWebhookRequestForTest({
type: "new-message", url: "/bluebubbles-webhook?password=secret-token",
data: { body: createNewMessagePayload(),
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
});
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "192.168.1.100", remoteAddress: "192.168.1.100",
};
const unregisterA = registerBlueBubblesWebhookTarget({
account: accountA,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkA,
}); });
const unregisterB = registerBlueBubblesWebhookTarget({ await expectWebhookStatus(req, 401);
account: accountB,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkB,
});
unregister = () => {
unregisterA();
unregisterB();
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
expect(sinkA).not.toHaveBeenCalled(); expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).not.toHaveBeenCalled(); expect(sinkB).not.toHaveBeenCalled();
}); });
@ -565,107 +499,38 @@ describe("BlueBubbles webhook monitor", () => {
it("ignores targets without passwords when a password-authenticated target matches", async () => { it("ignores targets without passwords when a password-authenticated target matches", async () => {
const accountStrict = createMockAccount({ password: "secret-token" }); const accountStrict = createMockAccount({ password: "secret-token" });
const accountWithoutPassword = createMockAccount({ password: undefined }); const accountWithoutPassword = createMockAccount({ password: undefined });
const config: OpenClawConfig = {};
const core = createMockRuntime();
setBlueBubblesRuntime(core);
const sinkStrict = vi.fn(); const sinkStrict = vi.fn();
const sinkWithoutPassword = vi.fn(); const sinkWithoutPassword = vi.fn();
registerWebhookTargets([
{ account: accountStrict, statusSink: sinkStrict },
{ account: accountWithoutPassword, statusSink: sinkWithoutPassword },
]);
const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", { const req = createWebhookRequestForTest({
type: "new-message", url: "/bluebubbles-webhook?password=secret-token",
data: { body: createNewMessagePayload(),
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
});
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "192.168.1.100", remoteAddress: "192.168.1.100",
};
const unregisterStrict = registerBlueBubblesWebhookTarget({
account: accountStrict,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkStrict,
}); });
const unregisterNoPassword = registerBlueBubblesWebhookTarget({ await expectWebhookStatus(req, 200);
account: accountWithoutPassword,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
statusSink: sinkWithoutPassword,
});
unregister = () => {
unregisterStrict();
unregisterNoPassword();
};
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(200);
expect(sinkStrict).toHaveBeenCalledTimes(1); expect(sinkStrict).toHaveBeenCalledTimes(1);
expect(sinkWithoutPassword).not.toHaveBeenCalled(); expect(sinkWithoutPassword).not.toHaveBeenCalled();
}); });
it("requires authentication for loopback requests when password is configured", async () => { it("requires authentication for loopback requests when password is configured", async () => {
const account = createMockAccount({ password: "secret-token" }); const account = createMockAccount({ password: "secret-token" });
const config: OpenClawConfig = {}; setupWebhookTarget({ account });
const core = createMockRuntime();
setBlueBubblesRuntime(core);
for (const remoteAddress of ["127.0.0.1", "::1", "::ffff:127.0.0.1"]) { for (const remoteAddress of ["127.0.0.1", "::1", "::ffff:127.0.0.1"]) {
const req = createMockRequest("POST", "/bluebubbles-webhook", { const req = createWebhookRequestForTest({
type: "new-message", body: createNewMessagePayload(),
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
});
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress, remoteAddress,
};
const loopbackUnregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
}); });
await expectWebhookStatus(req, 401);
const res = createMockResponse();
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
loopbackUnregister();
} }
}); });
it("rejects targets without passwords for loopback and proxied-looking requests", async () => { it("rejects targets without passwords for loopback and proxied-looking requests", async () => {
const account = createMockAccount({ password: undefined }); const account = createMockAccount({ password: undefined });
const config: OpenClawConfig = {}; setupWebhookTarget({ account });
const core = createMockRuntime();
setBlueBubblesRuntime(core);
unregister = registerBlueBubblesWebhookTarget({
account,
config,
runtime: { log: vi.fn(), error: vi.fn() },
core,
path: "/bluebubbles-webhook",
});
const headerVariants: Record<string, string>[] = [ const headerVariants: Record<string, string>[] = [
{ host: "localhost" }, { host: "localhost" },
@ -673,28 +538,12 @@ describe("BlueBubbles webhook monitor", () => {
{ host: "localhost", forwarded: "for=203.0.113.10;proto=https;host=example.com" }, { host: "localhost", forwarded: "for=203.0.113.10;proto=https;host=example.com" },
]; ];
for (const headers of headerVariants) { for (const headers of headerVariants) {
const req = createMockRequest( const req = createWebhookRequestForTest({
"POST", body: createNewMessagePayload(),
"/bluebubbles-webhook",
{
type: "new-message",
data: {
text: "hello",
handle: { address: "+15551234567" },
isGroup: false,
isFromMe: false,
guid: "msg-1",
},
},
headers, headers,
);
(req as unknown as { socket: { remoteAddress: string } }).socket = {
remoteAddress: "127.0.0.1", remoteAddress: "127.0.0.1",
}; });
const res = createMockResponse(); await expectWebhookStatus(req, 401);
const handled = await handleBlueBubblesWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401);
} }
}); });

View File

@ -30,3 +30,11 @@ export async function postMultipartFormData(params: {
params.timeoutMs, params.timeoutMs,
); );
} }
export async function assertMultipartActionOk(response: Response, action: string): Promise<void> {
if (response.ok) {
return;
}
const errorText = await response.text().catch(() => "");
throw new Error(`BlueBubbles ${action} failed (${response.status}): ${errorText || "unknown"}`);
}

View File

@ -19,7 +19,7 @@ describe("reactions", () => {
}); });
describe("sendBlueBubblesReaction", () => { describe("sendBlueBubblesReaction", () => {
async function expectRemovedReaction(emoji: string) { async function expectRemovedReaction(emoji: string, expectedReaction = "-love") {
mockFetch.mockResolvedValueOnce({ mockFetch.mockResolvedValueOnce({
ok: true, ok: true,
text: () => Promise.resolve(""), text: () => Promise.resolve(""),
@ -37,7 +37,7 @@ describe("reactions", () => {
}); });
const body = JSON.parse(mockFetch.mock.calls[0][1].body); const body = JSON.parse(mockFetch.mock.calls[0][1].body);
expect(body.reaction).toBe("-love"); expect(body.reaction).toBe(expectedReaction);
} }
it("throws when chatGuid is empty", async () => { it("throws when chatGuid is empty", async () => {
@ -327,45 +327,11 @@ describe("reactions", () => {
describe("reaction removal aliases", () => { describe("reaction removal aliases", () => {
it("handles emoji-based removal", async () => { it("handles emoji-based removal", async () => {
mockFetch.mockResolvedValueOnce({ await expectRemovedReaction("👍", "-like");
ok: true,
text: () => Promise.resolve(""),
});
await sendBlueBubblesReaction({
chatGuid: "chat-123",
messageGuid: "msg-123",
emoji: "👍",
remove: true,
opts: {
serverUrl: "http://localhost:1234",
password: "test",
},
});
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
expect(body.reaction).toBe("-like");
}); });
it("handles text alias removal", async () => { it("handles text alias removal", async () => {
mockFetch.mockResolvedValueOnce({ await expectRemovedReaction("haha", "-laugh");
ok: true,
text: () => Promise.resolve(""),
});
await sendBlueBubblesReaction({
chatGuid: "chat-123",
messageGuid: "msg-123",
emoji: "haha",
remove: true,
opts: {
serverUrl: "http://localhost:1234",
password: "test",
},
});
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
expect(body.reaction).toBe("-laugh");
}); });
}); });
}); });

View File

@ -108,13 +108,21 @@ function resolveScheme(
return cfg.gateway?.tls?.enabled === true ? "wss" : "ws"; return cfg.gateway?.tls?.enabled === true ? "wss" : "ws";
} }
function isPrivateIPv4(address: string): boolean { function parseIPv4Octets(address: string): [number, number, number, number] | null {
const parts = address.split("."); const parts = address.split(".");
if (parts.length != 4) { if (parts.length !== 4) {
return false; return null;
} }
const octets = parts.map((part) => Number.parseInt(part, 10)); const octets = parts.map((part) => Number.parseInt(part, 10));
if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) { if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) {
return null;
}
return octets as [number, number, number, number];
}
function isPrivateIPv4(address: string): boolean {
const octets = parseIPv4Octets(address);
if (!octets) {
return false; return false;
} }
const [a, b] = octets; const [a, b] = octets;
@ -131,12 +139,8 @@ function isPrivateIPv4(address: string): boolean {
} }
function isTailnetIPv4(address: string): boolean { function isTailnetIPv4(address: string): boolean {
const parts = address.split("."); const octets = parseIPv4Octets(address);
if (parts.length !== 4) { if (!octets) {
return false;
}
const octets = parts.map((part) => Number.parseInt(part, 10));
if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) {
return false; return false;
} }
const [a, b] = octets; const [a, b] = octets;

View File

@ -1,6 +1,8 @@
import type { IncomingMessage } from "node:http"; import type { IncomingMessage } from "node:http";
import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs";
import { describe, expect, it, vi } from "vitest"; import { describe, expect, it, vi } from "vitest";
import { createMockServerResponse } from "../../src/test-utils/mock-http-response.js"; import { createMockServerResponse } from "../../src/test-utils/mock-http-response.js";
import { createTestPluginApi } from "../test-utils/plugin-api.js";
import plugin from "./index.js"; import plugin from "./index.js";
describe("diffs plugin registration", () => { describe("diffs plugin registration", () => {
@ -9,33 +11,19 @@ describe("diffs plugin registration", () => {
const registerHttpRoute = vi.fn(); const registerHttpRoute = vi.fn();
const on = vi.fn(); const on = vi.fn();
plugin.register?.({ plugin.register?.(
id: "diffs", createTestPluginApi({
name: "Diffs", id: "diffs",
description: "Diffs", name: "Diffs",
source: "test", description: "Diffs",
config: {}, source: "test",
runtime: {} as never, config: {},
logger: { runtime: {} as never,
info() {}, registerTool,
warn() {}, registerHttpRoute,
error() {}, on,
}, }),
registerTool, );
registerHook() {},
registerHttpRoute,
registerChannel() {},
registerGatewayMethod() {},
registerCli() {},
registerService() {},
registerProvider() {},
registerCommand() {},
registerContextEngine() {},
resolvePath(input: string) {
return input;
},
on,
});
expect(registerTool).toHaveBeenCalledTimes(1); expect(registerTool).toHaveBeenCalledTimes(1);
expect(registerHttpRoute).toHaveBeenCalledTimes(1); expect(registerHttpRoute).toHaveBeenCalledTimes(1);
@ -55,17 +43,15 @@ describe("diffs plugin registration", () => {
}); });
it("applies plugin-config defaults through registered tool and viewer handler", async () => { it("applies plugin-config defaults through registered tool and viewer handler", async () => {
let registeredTool: type RegisteredTool = {
| { execute?: (toolCallId: string, params: Record<string, unknown>) => Promise<unknown> } execute?: (toolCallId: string, params: Record<string, unknown>) => Promise<unknown>;
| undefined; };
let registeredHttpRouteHandler: type RegisteredHttpRouteParams = Parameters<OpenClawPluginApi["registerHttpRoute"]>[0];
| ((
req: IncomingMessage,
res: ReturnType<typeof createMockServerResponse>,
) => Promise<boolean>)
| undefined;
plugin.register?.({ let registeredTool: RegisteredTool | undefined;
let registeredHttpRouteHandler: RegisteredHttpRouteParams["handler"] | undefined;
const api = createTestPluginApi({
id: "diffs", id: "diffs",
name: "Diffs", name: "Diffs",
description: "Diffs", description: "Diffs",
@ -88,31 +74,16 @@ describe("diffs plugin registration", () => {
}, },
}, },
runtime: {} as never, runtime: {} as never,
logger: { registerTool(tool: Parameters<OpenClawPluginApi["registerTool"]>[0]) {
info() {},
warn() {},
error() {},
},
registerTool(tool) {
registeredTool = typeof tool === "function" ? undefined : tool; registeredTool = typeof tool === "function" ? undefined : tool;
}, },
registerHook() {}, registerHttpRoute(params: RegisteredHttpRouteParams) {
registerHttpRoute(params) { registeredHttpRouteHandler = params.handler;
registeredHttpRouteHandler = params.handler as typeof registeredHttpRouteHandler;
}, },
registerChannel() {},
registerGatewayMethod() {},
registerCli() {},
registerService() {},
registerProvider() {},
registerCommand() {},
registerContextEngine() {},
resolvePath(input: string) {
return input;
},
on() {},
}); });
plugin.register?.(api as unknown as OpenClawPluginApi);
const result = await registeredTool?.execute?.("tool-1", { const result = await registeredTool?.execute?.("tool-1", {
before: "one\n", before: "one\n",
after: "two\n", after: "two\n",

View File

@ -8,7 +8,7 @@
"build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js" "build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js"
}, },
"dependencies": { "dependencies": {
"@pierre/diffs": "1.0.11", "@pierre/diffs": "1.1.0",
"@sinclair/typebox": "0.34.48", "@sinclair/typebox": "0.34.48",
"playwright-core": "1.58.2" "playwright-core": "1.58.2"
}, },

View File

@ -9,6 +9,19 @@ describe("createDiffsHttpHandler", () => {
let store: DiffArtifactStore; let store: DiffArtifactStore;
let cleanupRootDir: () => Promise<void>; let cleanupRootDir: () => Promise<void>;
async function handleLocalGet(url: string) {
const handler = createDiffsHttpHandler({ store });
const res = createMockServerResponse();
const handled = await handler(
localReq({
method: "GET",
url,
}),
res,
);
return { handled, res };
}
beforeEach(async () => { beforeEach(async () => {
({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-http-")); ({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-http-"));
}); });
@ -19,16 +32,7 @@ describe("createDiffsHttpHandler", () => {
it("serves a stored diff document", async () => { it("serves a stored diff document", async () => {
const artifact = await createViewerArtifact(store); const artifact = await createViewerArtifact(store);
const { handled, res } = await handleLocalGet(artifact.viewerPath);
const handler = createDiffsHttpHandler({ store });
const res = createMockServerResponse();
const handled = await handler(
localReq({
method: "GET",
url: artifact.viewerPath,
}),
res,
);
expect(handled).toBe(true); expect(handled).toBe(true);
expect(res.statusCode).toBe(200); expect(res.statusCode).toBe(200);
@ -38,15 +42,8 @@ describe("createDiffsHttpHandler", () => {
it("rejects invalid tokens", async () => { it("rejects invalid tokens", async () => {
const artifact = await createViewerArtifact(store); const artifact = await createViewerArtifact(store);
const { handled, res } = await handleLocalGet(
const handler = createDiffsHttpHandler({ store }); artifact.viewerPath.replace(artifact.token, "bad-token"),
const res = createMockServerResponse();
const handled = await handler(
localReq({
method: "GET",
url: artifact.viewerPath.replace(artifact.token, "bad-token"),
}),
res,
); );
expect(handled).toBe(true); expect(handled).toBe(true);

View File

@ -1,5 +1,12 @@
import type { FileContents, FileDiffMetadata, SupportedLanguages } from "@pierre/diffs"; import fs from "node:fs/promises";
import { parsePatchFiles } from "@pierre/diffs"; import { createRequire } from "node:module";
import type {
FileContents,
FileDiffMetadata,
SupportedLanguages,
ThemeRegistrationResolved,
} from "@pierre/diffs";
import { RegisteredCustomThemes, parsePatchFiles } from "@pierre/diffs";
import { preloadFileDiff, preloadMultiFileDiff } from "@pierre/diffs/ssr"; import { preloadFileDiff, preloadMultiFileDiff } from "@pierre/diffs/ssr";
import type { import type {
DiffInput, DiffInput,
@ -13,6 +20,45 @@ import { VIEWER_LOADER_PATH } from "./viewer-assets.js";
const DEFAULT_FILE_NAME = "diff.txt"; const DEFAULT_FILE_NAME = "diff.txt";
const MAX_PATCH_FILE_COUNT = 128; const MAX_PATCH_FILE_COUNT = 128;
const MAX_PATCH_TOTAL_LINES = 120_000; const MAX_PATCH_TOTAL_LINES = 120_000;
const diffsRequire = createRequire(import.meta.resolve("@pierre/diffs"));
let pierreThemesPatched = false;
function createThemeLoader(
themeName: "pierre-dark" | "pierre-light",
themePath: string,
): () => Promise<ThemeRegistrationResolved> {
let cachedTheme: ThemeRegistrationResolved | undefined;
return async () => {
if (cachedTheme) {
return cachedTheme;
}
const raw = await fs.readFile(themePath, "utf8");
const parsed = JSON.parse(raw) as Record<string, unknown>;
cachedTheme = {
...parsed,
name: themeName,
} as ThemeRegistrationResolved;
return cachedTheme;
};
}
function patchPierreThemeLoadersForNode24(): void {
if (pierreThemesPatched) {
return;
}
try {
const darkThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-dark.json");
const lightThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-light.json");
RegisteredCustomThemes.set("pierre-dark", createThemeLoader("pierre-dark", darkThemePath));
RegisteredCustomThemes.set("pierre-light", createThemeLoader("pierre-light", lightThemePath));
pierreThemesPatched = true;
} catch {
// Keep upstream loaders if theme files cannot be resolved.
}
}
patchPierreThemeLoadersForNode24();
function escapeCssString(value: string): string { function escapeCssString(value: string): string {
return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"');

View File

@ -2,6 +2,7 @@ import fs from "node:fs/promises";
import path from "node:path"; import path from "node:path";
import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createTestPluginApi } from "../../test-utils/plugin-api.js";
import type { DiffScreenshotter } from "./browser.js"; import type { DiffScreenshotter } from "./browser.js";
import { DEFAULT_DIFFS_TOOL_DEFAULTS } from "./config.js"; import { DEFAULT_DIFFS_TOOL_DEFAULTS } from "./config.js";
import { DiffArtifactStore } from "./store.js"; import { DiffArtifactStore } from "./store.js";
@ -135,9 +136,7 @@ describe("diffs tool", () => {
mode: "file", mode: "file",
}); });
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); expectArtifactOnlyFileResult(screenshotter, result);
expect((result?.details as Record<string, unknown>).mode).toBe("file");
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
}); });
it("honors ttlSeconds for artifact-only file output", async () => { it("honors ttlSeconds for artifact-only file output", async () => {
@ -227,9 +226,7 @@ describe("diffs tool", () => {
after: "two\n", after: "two\n",
}); });
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); expectArtifactOnlyFileResult(screenshotter, result);
expect((result?.details as Record<string, unknown>).mode).toBe("file");
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
}); });
it("falls back to view output when both mode cannot render an image", async () => { it("falls back to view output when both mode cannot render an image", async () => {
@ -387,7 +384,7 @@ describe("diffs tool", () => {
}); });
function createApi(): OpenClawPluginApi { function createApi(): OpenClawPluginApi {
return { return createTestPluginApi({
id: "diffs", id: "diffs",
name: "Diffs", name: "Diffs",
description: "Diffs", description: "Diffs",
@ -399,26 +396,7 @@ function createApi(): OpenClawPluginApi {
}, },
}, },
runtime: {} as OpenClawPluginApi["runtime"], runtime: {} as OpenClawPluginApi["runtime"],
logger: { }) as OpenClawPluginApi;
info() {},
warn() {},
error() {},
},
registerTool() {},
registerHook() {},
registerHttpRoute() {},
registerChannel() {},
registerGatewayMethod() {},
registerCli() {},
registerService() {},
registerProvider() {},
registerCommand() {},
registerContextEngine() {},
resolvePath(input: string) {
return input;
},
on() {},
};
} }
function createToolWithScreenshotter( function createToolWithScreenshotter(
@ -434,6 +412,15 @@ function createToolWithScreenshotter(
}); });
} }
function expectArtifactOnlyFileResult(
screenshotter: DiffScreenshotter,
result: { details?: unknown } | null | undefined,
) {
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
expect((result?.details as Record<string, unknown>).mode).toBe("file");
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
}
function createPngScreenshotter( function createPngScreenshotter(
params: { params: {
assertHtml?: (html: string) => void; assertHtml?: (html: string) => void;

View File

@ -75,6 +75,27 @@ function getRequiredHandler(
return handler; return handler;
} }
function resolveSubagentDeliveryTargetForTest(requesterOrigin: {
channel: string;
accountId: string;
to: string;
threadId?: string;
}) {
const handlers = registerHandlersForTest();
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
return handler(
{
childSessionKey: "agent:main:subagent:child",
requesterSessionKey: "agent:main:main",
requesterOrigin,
childRunId: "run-1",
spawnMode: "session",
expectsCompletionMessage: true,
},
{},
);
}
function createSpawnEvent(overrides?: { function createSpawnEvent(overrides?: {
childSessionKey?: string; childSessionKey?: string;
agentId?: string; agentId?: string;
@ -324,25 +345,12 @@ describe("discord subagent hook handlers", () => {
hookMocks.listThreadBindingsBySessionKey.mockReturnValueOnce([ hookMocks.listThreadBindingsBySessionKey.mockReturnValueOnce([
{ accountId: "work", threadId: "777" }, { accountId: "work", threadId: "777" },
]); ]);
const handlers = registerHandlersForTest(); const result = resolveSubagentDeliveryTargetForTest({
const handler = getRequiredHandler(handlers, "subagent_delivery_target"); channel: "discord",
accountId: "work",
const result = handler( to: "channel:123",
{ threadId: "777",
childSessionKey: "agent:main:subagent:child", });
requesterSessionKey: "agent:main:main",
requesterOrigin: {
channel: "discord",
accountId: "work",
to: "channel:123",
threadId: "777",
},
childRunId: "run-1",
spawnMode: "session",
expectsCompletionMessage: true,
},
{},
);
expect(hookMocks.listThreadBindingsBySessionKey).toHaveBeenCalledWith({ expect(hookMocks.listThreadBindingsBySessionKey).toHaveBeenCalledWith({
targetSessionKey: "agent:main:subagent:child", targetSessionKey: "agent:main:subagent:child",
@ -364,24 +372,11 @@ describe("discord subagent hook handlers", () => {
{ accountId: "work", threadId: "777" }, { accountId: "work", threadId: "777" },
{ accountId: "work", threadId: "888" }, { accountId: "work", threadId: "888" },
]); ]);
const handlers = registerHandlersForTest(); const result = resolveSubagentDeliveryTargetForTest({
const handler = getRequiredHandler(handlers, "subagent_delivery_target"); channel: "discord",
accountId: "work",
const result = handler( to: "channel:123",
{ });
childSessionKey: "agent:main:subagent:child",
requesterSessionKey: "agent:main:main",
requesterOrigin: {
channel: "discord",
accountId: "work",
to: "channel:123",
},
childRunId: "run-1",
spawnMode: "session",
expectsCompletionMessage: true,
},
{},
);
expect(result).toBeUndefined(); expect(result).toBeUndefined();
}); });

View File

@ -9,6 +9,23 @@ import type { FeishuConfig } from "./types.js";
const asConfig = (value: Partial<FeishuConfig>) => value as FeishuConfig; const asConfig = (value: Partial<FeishuConfig>) => value as FeishuConfig;
function makeDefaultAndRouterAccounts() {
return {
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
};
}
function expectExplicitDefaultAccountSelection(
account: ReturnType<typeof resolveFeishuAccount>,
appId: string,
) {
expect(account.accountId).toBe("router-d");
expect(account.selectionSource).toBe("explicit-default");
expect(account.configured).toBe(true);
expect(account.appId).toBe(appId);
}
function withEnvVar(key: string, value: string | undefined, run: () => void) { function withEnvVar(key: string, value: string | undefined, run: () => void) {
const prev = process.env[key]; const prev = process.env[key];
if (value === undefined) { if (value === undefined) {
@ -44,10 +61,7 @@ describe("resolveDefaultFeishuAccountId", () => {
channels: { channels: {
feishu: { feishu: {
defaultAccount: "router-d", defaultAccount: "router-d",
accounts: { accounts: makeDefaultAndRouterAccounts(),
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
},
}, },
}, },
}; };
@ -278,10 +292,7 @@ describe("resolveFeishuAccount", () => {
}; };
const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined }); const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined });
expect(account.accountId).toBe("router-d"); expectExplicitDefaultAccountSelection(account, "top_level_app");
expect(account.selectionSource).toBe("explicit-default");
expect(account.configured).toBe(true);
expect(account.appId).toBe("top_level_app");
}); });
it("uses configured default account when accountId is omitted", () => { it("uses configured default account when accountId is omitted", () => {
@ -298,10 +309,7 @@ describe("resolveFeishuAccount", () => {
}; };
const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined }); const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined });
expect(account.accountId).toBe("router-d"); expectExplicitDefaultAccountSelection(account, "cli_router");
expect(account.selectionSource).toBe("explicit-default");
expect(account.configured).toBe(true);
expect(account.appId).toBe("cli_router");
}); });
it("keeps explicit accountId selection", () => { it("keeps explicit accountId selection", () => {
@ -309,10 +317,7 @@ describe("resolveFeishuAccount", () => {
channels: { channels: {
feishu: { feishu: {
defaultAccount: "router-d", defaultAccount: "router-d",
accounts: { accounts: makeDefaultAndRouterAccounts(),
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
},
}, },
}, },
}; };

View File

@ -15,7 +15,7 @@ import {
} from "openclaw/plugin-sdk/feishu"; } from "openclaw/plugin-sdk/feishu";
import { resolveFeishuAccount } from "./accounts.js"; import { resolveFeishuAccount } from "./accounts.js";
import { createFeishuClient } from "./client.js"; import { createFeishuClient } from "./client.js";
import { tryRecordMessage, tryRecordMessagePersistent } from "./dedup.js"; import { finalizeFeishuMessageProcessing, tryRecordMessagePersistent } from "./dedup.js";
import { maybeCreateDynamicAgent } from "./dynamic-agent.js"; import { maybeCreateDynamicAgent } from "./dynamic-agent.js";
import { normalizeFeishuExternalKey } from "./external-keys.js"; import { normalizeFeishuExternalKey } from "./external-keys.js";
import { downloadMessageResourceFeishu } from "./media.js"; import { downloadMessageResourceFeishu } from "./media.js";
@ -867,8 +867,18 @@ export async function handleFeishuMessage(params: {
runtime?: RuntimeEnv; runtime?: RuntimeEnv;
chatHistories?: Map<string, HistoryEntry[]>; chatHistories?: Map<string, HistoryEntry[]>;
accountId?: string; accountId?: string;
processingClaimHeld?: boolean;
}): Promise<void> { }): Promise<void> {
const { cfg, event, botOpenId, botName, runtime, chatHistories, accountId } = params; const {
cfg,
event,
botOpenId,
botName,
runtime,
chatHistories,
accountId,
processingClaimHeld = false,
} = params;
// Resolve account with merged config // Resolve account with merged config
const account = resolveFeishuAccount({ cfg, accountId }); const account = resolveFeishuAccount({ cfg, accountId });
@ -877,16 +887,15 @@ export async function handleFeishuMessage(params: {
const log = runtime?.log ?? console.log; const log = runtime?.log ?? console.log;
const error = runtime?.error ?? console.error; const error = runtime?.error ?? console.error;
// Dedup: synchronous memory guard prevents concurrent duplicate dispatch
// before the async persistent check completes.
const messageId = event.message.message_id; const messageId = event.message.message_id;
const memoryDedupeKey = `${account.accountId}:${messageId}`; if (
if (!tryRecordMessage(memoryDedupeKey)) { !(await finalizeFeishuMessageProcessing({
log(`feishu: skipping duplicate message ${messageId} (memory dedup)`); messageId,
return; namespace: account.accountId,
} log,
// Persistent dedup survives restarts and reconnects. claimHeld: processingClaimHeld,
if (!(await tryRecordMessagePersistent(messageId, account.accountId, log))) { }))
) {
log(`feishu: skipping duplicate message ${messageId}`); log(`feishu: skipping duplicate message ${messageId}`);
return; return;
} }

View File

@ -1,6 +1,16 @@
import { describe, expect, it } from "vitest"; import { describe, expect, it } from "vitest";
import { FeishuConfigSchema, FeishuGroupSchema } from "./config-schema.js"; import { FeishuConfigSchema, FeishuGroupSchema } from "./config-schema.js";
function expectSchemaIssue(
result: ReturnType<typeof FeishuConfigSchema.safeParse>,
issuePath: string,
) {
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues.some((issue) => issue.path.join(".") === issuePath)).toBe(true);
}
}
describe("FeishuConfigSchema webhook validation", () => { describe("FeishuConfigSchema webhook validation", () => {
it("applies top-level defaults", () => { it("applies top-level defaults", () => {
const result = FeishuConfigSchema.parse({}); const result = FeishuConfigSchema.parse({});
@ -39,12 +49,7 @@ describe("FeishuConfigSchema webhook validation", () => {
appSecret: "secret_top", // pragma: allowlist secret appSecret: "secret_top", // pragma: allowlist secret
}); });
expect(result.success).toBe(false); expectSchemaIssue(result, "verificationToken");
if (!result.success) {
expect(
result.error.issues.some((issue) => issue.path.join(".") === "verificationToken"),
).toBe(true);
}
}); });
it("rejects top-level webhook mode without encryptKey", () => { it("rejects top-level webhook mode without encryptKey", () => {
@ -55,10 +60,7 @@ describe("FeishuConfigSchema webhook validation", () => {
appSecret: "secret_top", // pragma: allowlist secret appSecret: "secret_top", // pragma: allowlist secret
}); });
expect(result.success).toBe(false); expectSchemaIssue(result, "encryptKey");
if (!result.success) {
expect(result.error.issues.some((issue) => issue.path.join(".") === "encryptKey")).toBe(true);
}
}); });
it("accepts top-level webhook mode with verificationToken and encryptKey", () => { it("accepts top-level webhook mode with verificationToken and encryptKey", () => {
@ -84,14 +86,7 @@ describe("FeishuConfigSchema webhook validation", () => {
}, },
}); });
expect(result.success).toBe(false); expectSchemaIssue(result, "accounts.main.verificationToken");
if (!result.success) {
expect(
result.error.issues.some(
(issue) => issue.path.join(".") === "accounts.main.verificationToken",
),
).toBe(true);
}
}); });
it("rejects account webhook mode without encryptKey", () => { it("rejects account webhook mode without encryptKey", () => {
@ -106,12 +101,7 @@ describe("FeishuConfigSchema webhook validation", () => {
}, },
}); });
expect(result.success).toBe(false); expectSchemaIssue(result, "accounts.main.encryptKey");
if (!result.success) {
expect(
result.error.issues.some((issue) => issue.path.join(".") === "accounts.main.encryptKey"),
).toBe(true);
}
}); });
it("accepts account webhook mode inheriting top-level verificationToken and encryptKey", () => { it("accepts account webhook mode inheriting top-level verificationToken and encryptKey", () => {

View File

@ -10,9 +10,15 @@ import {
const DEDUP_TTL_MS = 24 * 60 * 60 * 1000; const DEDUP_TTL_MS = 24 * 60 * 60 * 1000;
const MEMORY_MAX_SIZE = 1_000; const MEMORY_MAX_SIZE = 1_000;
const FILE_MAX_ENTRIES = 10_000; const FILE_MAX_ENTRIES = 10_000;
const EVENT_DEDUP_TTL_MS = 5 * 60 * 1000;
const EVENT_MEMORY_MAX_SIZE = 2_000;
type PersistentDedupeData = Record<string, number>; type PersistentDedupeData = Record<string, number>;
const memoryDedupe = createDedupeCache({ ttlMs: DEDUP_TTL_MS, maxSize: MEMORY_MAX_SIZE }); const memoryDedupe = createDedupeCache({ ttlMs: DEDUP_TTL_MS, maxSize: MEMORY_MAX_SIZE });
const processingClaims = createDedupeCache({
ttlMs: EVENT_DEDUP_TTL_MS,
maxSize: EVENT_MEMORY_MAX_SIZE,
});
function resolveStateDirFromEnv(env: NodeJS.ProcessEnv = process.env): string { function resolveStateDirFromEnv(env: NodeJS.ProcessEnv = process.env): string {
const stateOverride = env.OPENCLAW_STATE_DIR?.trim() || env.CLAWDBOT_STATE_DIR?.trim(); const stateOverride = env.OPENCLAW_STATE_DIR?.trim() || env.CLAWDBOT_STATE_DIR?.trim();
@ -37,6 +43,103 @@ const persistentDedupe = createPersistentDedupe({
resolveFilePath: resolveNamespaceFilePath, resolveFilePath: resolveNamespaceFilePath,
}); });
function resolveEventDedupeKey(
namespace: string,
messageId: string | undefined | null,
): string | null {
const trimmed = messageId?.trim();
if (!trimmed) {
return null;
}
return `${namespace}:${trimmed}`;
}
function normalizeMessageId(messageId: string | undefined | null): string | null {
const trimmed = messageId?.trim();
return trimmed ? trimmed : null;
}
function resolveMemoryDedupeKey(
namespace: string,
messageId: string | undefined | null,
): string | null {
const trimmed = normalizeMessageId(messageId);
if (!trimmed) {
return null;
}
return `${namespace}:${trimmed}`;
}
export function tryBeginFeishuMessageProcessing(
messageId: string | undefined | null,
namespace = "global",
): boolean {
return !processingClaims.check(resolveEventDedupeKey(namespace, messageId));
}
export function releaseFeishuMessageProcessing(
messageId: string | undefined | null,
namespace = "global",
): void {
processingClaims.delete(resolveEventDedupeKey(namespace, messageId));
}
export async function finalizeFeishuMessageProcessing(params: {
messageId: string | undefined | null;
namespace?: string;
log?: (...args: unknown[]) => void;
claimHeld?: boolean;
}): Promise<boolean> {
const { messageId, namespace = "global", log, claimHeld = false } = params;
const normalizedMessageId = normalizeMessageId(messageId);
const memoryKey = resolveMemoryDedupeKey(namespace, messageId);
if (!memoryKey || !normalizedMessageId) {
return false;
}
if (!claimHeld && !tryBeginFeishuMessageProcessing(normalizedMessageId, namespace)) {
return false;
}
if (!tryRecordMessage(memoryKey)) {
releaseFeishuMessageProcessing(normalizedMessageId, namespace);
return false;
}
if (!(await tryRecordMessagePersistent(normalizedMessageId, namespace, log))) {
releaseFeishuMessageProcessing(normalizedMessageId, namespace);
return false;
}
return true;
}
export async function recordProcessedFeishuMessage(
messageId: string | undefined | null,
namespace = "global",
log?: (...args: unknown[]) => void,
): Promise<boolean> {
const normalizedMessageId = normalizeMessageId(messageId);
const memoryKey = resolveMemoryDedupeKey(namespace, messageId);
if (!memoryKey || !normalizedMessageId) {
return false;
}
tryRecordMessage(memoryKey);
return await tryRecordMessagePersistent(normalizedMessageId, namespace, log);
}
export async function hasProcessedFeishuMessage(
messageId: string | undefined | null,
namespace = "global",
log?: (...args: unknown[]) => void,
): Promise<boolean> {
const normalizedMessageId = normalizeMessageId(messageId);
const memoryKey = resolveMemoryDedupeKey(namespace, messageId);
if (!memoryKey || !normalizedMessageId) {
return false;
}
if (hasRecordedMessage(memoryKey)) {
return true;
}
return hasRecordedMessagePersistent(normalizedMessageId, namespace, log);
}
/** /**
* Synchronous dedup memory only. * Synchronous dedup memory only.
* Kept for backward compatibility; prefer {@link tryRecordMessagePersistent}. * Kept for backward compatibility; prefer {@link tryRecordMessagePersistent}.

View File

@ -64,18 +64,21 @@ function expectMediaTimeoutClientConfigured(): void {
); );
} }
function mockResolvedFeishuAccount() {
resolveFeishuAccountMock.mockReturnValue({
configured: true,
accountId: "main",
config: {},
appId: "app_id",
appSecret: "app_secret",
domain: "feishu",
});
}
describe("sendMediaFeishu msg_type routing", () => { describe("sendMediaFeishu msg_type routing", () => {
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); vi.clearAllMocks();
mockResolvedFeishuAccount();
resolveFeishuAccountMock.mockReturnValue({
configured: true,
accountId: "main",
config: {},
appId: "app_id",
appSecret: "app_secret",
domain: "feishu",
});
normalizeFeishuTargetMock.mockReturnValue("ou_target"); normalizeFeishuTargetMock.mockReturnValue("ou_target");
resolveReceiveIdTypeMock.mockReturnValue("open_id"); resolveReceiveIdTypeMock.mockReturnValue("open_id");
@ -381,7 +384,7 @@ describe("sendMediaFeishu msg_type routing", () => {
expect(messageResourceGetMock).not.toHaveBeenCalled(); expect(messageResourceGetMock).not.toHaveBeenCalled();
}); });
it("encodes Chinese filenames for file uploads", async () => { it("preserves Chinese filenames for file uploads", async () => {
await sendMediaFeishu({ await sendMediaFeishu({
cfg: {} as any, cfg: {} as any,
to: "user:ou_target", to: "user:ou_target",
@ -390,8 +393,7 @@ describe("sendMediaFeishu msg_type routing", () => {
}); });
const createCall = fileCreateMock.mock.calls[0][0]; const createCall = fileCreateMock.mock.calls[0][0];
expect(createCall.data.file_name).not.toBe("测试文档.pdf"); expect(createCall.data.file_name).toBe("测试文档.pdf");
expect(createCall.data.file_name).toBe(encodeURIComponent("测试文档") + ".pdf");
}); });
it("preserves ASCII filenames unchanged for file uploads", async () => { it("preserves ASCII filenames unchanged for file uploads", async () => {
@ -406,7 +408,7 @@ describe("sendMediaFeishu msg_type routing", () => {
expect(createCall.data.file_name).toBe("report-2026.pdf"); expect(createCall.data.file_name).toBe("report-2026.pdf");
}); });
it("encodes special characters (em-dash, full-width brackets) in filenames", async () => { it("preserves special Unicode characters (em-dash, full-width brackets) in filenames", async () => {
await sendMediaFeishu({ await sendMediaFeishu({
cfg: {} as any, cfg: {} as any,
to: "user:ou_target", to: "user:ou_target",
@ -415,9 +417,7 @@ describe("sendMediaFeishu msg_type routing", () => {
}); });
const createCall = fileCreateMock.mock.calls[0][0]; const createCall = fileCreateMock.mock.calls[0][0];
expect(createCall.data.file_name).toMatch(/\.md$/); expect(createCall.data.file_name).toBe("报告—详情2026.md");
expect(createCall.data.file_name).not.toContain("—");
expect(createCall.data.file_name).not.toContain("");
}); });
}); });
@ -427,71 +427,48 @@ describe("sanitizeFileNameForUpload", () => {
expect(sanitizeFileNameForUpload("my-file_v2.txt")).toBe("my-file_v2.txt"); expect(sanitizeFileNameForUpload("my-file_v2.txt")).toBe("my-file_v2.txt");
}); });
it("encodes Chinese characters in basename, preserves extension", () => { it("preserves Chinese characters", () => {
const result = sanitizeFileNameForUpload("测试文件.md"); expect(sanitizeFileNameForUpload("测试文件.md")).toBe("测试文件.md");
expect(result).toBe(encodeURIComponent("测试文件") + ".md"); expect(sanitizeFileNameForUpload("武汉15座山登山信息汇总.csv")).toBe(
expect(result).toMatch(/\.md$/); "武汉15座山登山信息汇总.csv",
);
}); });
it("encodes em-dash and full-width brackets", () => { it("preserves em-dash and full-width brackets", () => {
const result = sanitizeFileNameForUpload("文件—说明v2.pdf"); expect(sanitizeFileNameForUpload("文件—说明v2.pdf")).toBe("文件—说明v2.pdf");
expect(result).toMatch(/\.pdf$/);
expect(result).not.toContain("—");
expect(result).not.toContain("");
expect(result).not.toContain("");
}); });
it("encodes single quotes and parentheses per RFC 5987", () => { it("preserves single quotes and parentheses", () => {
const result = sanitizeFileNameForUpload("文件'(test).txt"); expect(sanitizeFileNameForUpload("文件'(test).txt")).toBe("文件'(test).txt");
expect(result).toContain("%27");
expect(result).toContain("%28");
expect(result).toContain("%29");
expect(result).toMatch(/\.txt$/);
}); });
it("handles filenames without extension", () => { it("preserves filenames without extension", () => {
const result = sanitizeFileNameForUpload("测试文件"); expect(sanitizeFileNameForUpload("测试文件")).toBe("测试文件");
expect(result).toBe(encodeURIComponent("测试文件"));
}); });
it("handles mixed ASCII and non-ASCII", () => { it("preserves mixed ASCII and non-ASCII", () => {
const result = sanitizeFileNameForUpload("Report_报告_2026.xlsx"); expect(sanitizeFileNameForUpload("Report_报告_2026.xlsx")).toBe("Report_报告_2026.xlsx");
expect(result).toMatch(/\.xlsx$/);
expect(result).not.toContain("报告");
}); });
it("encodes non-ASCII extensions", () => { it("preserves emoji filenames", () => {
const result = sanitizeFileNameForUpload("报告.文档"); expect(sanitizeFileNameForUpload("report_😀.txt")).toBe("report_😀.txt");
expect(result).toContain("%E6%96%87%E6%A1%A3");
expect(result).not.toContain("文档");
}); });
it("encodes emoji filenames", () => { it("strips control characters", () => {
const result = sanitizeFileNameForUpload("report_😀.txt"); expect(sanitizeFileNameForUpload("bad\x00file.txt")).toBe("bad_file.txt");
expect(result).toContain("%F0%9F%98%80"); expect(sanitizeFileNameForUpload("inject\r\nheader.txt")).toBe("inject__header.txt");
expect(result).toMatch(/\.txt$/);
}); });
it("encodes mixed ASCII and non-ASCII extensions", () => { it("strips quotes and backslashes to prevent header injection", () => {
const result = sanitizeFileNameForUpload("notes_总结.v测试"); expect(sanitizeFileNameForUpload('file"name.txt')).toBe("file_name.txt");
expect(result).toContain("notes_"); expect(sanitizeFileNameForUpload("file\\name.txt")).toBe("file_name.txt");
expect(result).toContain("%E6%B5%8B%E8%AF%95");
expect(result).not.toContain("测试");
}); });
}); });
describe("downloadMessageResourceFeishu", () => { describe("downloadMessageResourceFeishu", () => {
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); vi.clearAllMocks();
mockResolvedFeishuAccount();
resolveFeishuAccountMock.mockReturnValue({
configured: true,
accountId: "main",
config: {},
appId: "app_id",
appSecret: "app_secret",
domain: "feishu",
});
createFeishuClientMock.mockReturnValue({ createFeishuClientMock.mockReturnValue({
im: { im: {

View File

@ -226,21 +226,17 @@ export async function uploadImageFeishu(params: {
} }
/** /**
* Encode a filename for safe use in Feishu multipart/form-data uploads. * Sanitize a filename for safe use in Feishu multipart/form-data uploads.
* Non-ASCII characters (Chinese, em-dash, full-width brackets, etc.) cause * Strips control characters and multipart-injection vectors (CWE-93) while
* the upload to silently fail when passed raw through the SDK's form-data * preserving the original UTF-8 display name (Chinese, emoji, etc.).
* serialization. RFC 5987 percent-encoding keeps headers 7-bit clean while *
* Feishu's server decodes and preserves the original display name. * Previous versions percent-encoded non-ASCII characters, but the Feishu
* `im.file.create` API uses `file_name` as a literal display name it does
* NOT decode percent-encoding so encoded filenames appeared as garbled text
* in chat (regression in v2026.3.2).
*/ */
export function sanitizeFileNameForUpload(fileName: string): string { export function sanitizeFileNameForUpload(fileName: string): string {
const ASCII_ONLY = /^[\x20-\x7E]+$/; return fileName.replace(/[\x00-\x1F\x7F\r\n"\\]/g, "_");
if (ASCII_ONLY.test(fileName)) {
return fileName;
}
return encodeURIComponent(fileName)
.replace(/'/g, "%27")
.replace(/\(/g, "%28")
.replace(/\)/g, "%29");
} }
/** /**

View File

@ -12,10 +12,10 @@ import {
import { handleFeishuCardAction, type FeishuCardActionEvent } from "./card-action.js"; import { handleFeishuCardAction, type FeishuCardActionEvent } from "./card-action.js";
import { createEventDispatcher } from "./client.js"; import { createEventDispatcher } from "./client.js";
import { import {
hasRecordedMessage, hasProcessedFeishuMessage,
hasRecordedMessagePersistent, recordProcessedFeishuMessage,
tryRecordMessage, releaseFeishuMessageProcessing,
tryRecordMessagePersistent, tryBeginFeishuMessageProcessing,
warmupDedupFromDisk, warmupDedupFromDisk,
} from "./dedup.js"; } from "./dedup.js";
import { isMentionForwardRequest } from "./mention.js"; import { isMentionForwardRequest } from "./mention.js";
@ -264,6 +264,7 @@ function registerEventHandlers(
runtime, runtime,
chatHistories, chatHistories,
accountId, accountId,
processingClaimHeld: true,
}); });
await enqueue(chatId, task); await enqueue(chatId, task);
}; };
@ -291,10 +292,8 @@ function registerEventHandlers(
return; return;
} }
for (const messageId of suppressedIds) { for (const messageId of suppressedIds) {
// Keep in-memory dedupe in sync with handleFeishuMessage's keying.
tryRecordMessage(`${accountId}:${messageId}`);
try { try {
await tryRecordMessagePersistent(messageId, accountId, log); await recordProcessedFeishuMessage(messageId, accountId, log);
} catch (err) { } catch (err) {
error( error(
`feishu[${accountId}]: failed to record merged dedupe id ${messageId}: ${String(err)}`, `feishu[${accountId}]: failed to record merged dedupe id ${messageId}: ${String(err)}`,
@ -303,15 +302,7 @@ function registerEventHandlers(
} }
}; };
const isMessageAlreadyProcessed = async (entry: FeishuMessageEvent): Promise<boolean> => { const isMessageAlreadyProcessed = async (entry: FeishuMessageEvent): Promise<boolean> => {
const messageId = entry.message.message_id?.trim(); return await hasProcessedFeishuMessage(entry.message.message_id, accountId, log);
if (!messageId) {
return false;
}
const memoryKey = `${accountId}:${messageId}`;
if (hasRecordedMessage(memoryKey)) {
return true;
}
return hasRecordedMessagePersistent(messageId, accountId, log);
}; };
const inboundDebouncer = core.channel.debounce.createInboundDebouncer<FeishuMessageEvent>({ const inboundDebouncer = core.channel.debounce.createInboundDebouncer<FeishuMessageEvent>({
debounceMs: inboundDebounceMs, debounceMs: inboundDebounceMs,
@ -384,19 +375,28 @@ function registerEventHandlers(
}, },
}); });
}, },
onError: (err) => { onError: (err, entries) => {
for (const entry of entries) {
releaseFeishuMessageProcessing(entry.message.message_id, accountId);
}
error(`feishu[${accountId}]: inbound debounce flush failed: ${String(err)}`); error(`feishu[${accountId}]: inbound debounce flush failed: ${String(err)}`);
}, },
}); });
eventDispatcher.register({ eventDispatcher.register({
"im.message.receive_v1": async (data) => { "im.message.receive_v1": async (data) => {
const event = data as unknown as FeishuMessageEvent;
const messageId = event.message?.message_id?.trim();
if (!tryBeginFeishuMessageProcessing(messageId, accountId)) {
log(`feishu[${accountId}]: dropping duplicate event for message ${messageId}`);
return;
}
const processMessage = async () => { const processMessage = async () => {
const event = data as unknown as FeishuMessageEvent;
await inboundDebouncer.enqueue(event); await inboundDebouncer.enqueue(event);
}; };
if (fireAndForget) { if (fireAndForget) {
void processMessage().catch((err) => { void processMessage().catch((err) => {
releaseFeishuMessageProcessing(messageId, accountId);
error(`feishu[${accountId}]: error handling message: ${String(err)}`); error(`feishu[${accountId}]: error handling message: ${String(err)}`);
}); });
return; return;
@ -404,6 +404,7 @@ function registerEventHandlers(
try { try {
await processMessage(); await processMessage();
} catch (err) { } catch (err) {
releaseFeishuMessageProcessing(messageId, accountId);
error(`feishu[${accountId}]: error handling message: ${String(err)}`); error(`feishu[${accountId}]: error handling message: ${String(err)}`);
} }
}, },

View File

@ -78,6 +78,25 @@ async function resolveReactionWithLookup(params: {
}); });
} }
async function resolveNonBotReaction(params?: { cfg?: ClawdbotConfig; uuid?: () => string }) {
return await resolveReactionSyntheticEvent({
cfg: params?.cfg ?? cfg,
accountId: "default",
event: makeReactionEvent(),
botOpenId: "ou_bot",
fetchMessage: async () => ({
messageId: "om_msg1",
chatId: "oc_group",
chatType: "group",
senderOpenId: "ou_other",
senderType: "user",
content: "hello",
contentType: "text",
}),
...(params?.uuid ? { uuid: params.uuid } : {}),
});
}
type FeishuMention = NonNullable<FeishuMessageEvent["message"]["mentions"]>[number]; type FeishuMention = NonNullable<FeishuMessageEvent["message"]["mentions"]>[number];
function buildDebounceConfig(): ClawdbotConfig { function buildDebounceConfig(): ClawdbotConfig {
@ -179,11 +198,23 @@ function getFirstDispatchedEvent(): FeishuMessageEvent {
return firstParams.event; return firstParams.event;
} }
function expectSingleDispatchedEvent(): FeishuMessageEvent {
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
return getFirstDispatchedEvent();
}
function expectParsedFirstDispatchedEvent(botOpenId = "ou_bot") {
const dispatched = expectSingleDispatchedEvent();
return {
dispatched,
parsed: parseFeishuMessageEvent(dispatched, botOpenId),
};
}
function setDedupPassThroughMocks(): void { function setDedupPassThroughMocks(): void {
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false); vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false);
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false);
} }
function createMention(params: { openId: string; name: string; key?: string }): FeishuMention { function createMention(params: { openId: string; name: string; key?: string }): FeishuMention {
@ -203,6 +234,12 @@ async function enqueueDebouncedMessage(
await Promise.resolve(); await Promise.resolve();
} }
function setStaleRetryMocks(messageId = "om_old") {
vi.spyOn(dedup, "hasProcessedFeishuMessage").mockImplementation(
async (currentMessageId) => currentMessageId === messageId,
);
}
describe("resolveReactionSyntheticEvent", () => { describe("resolveReactionSyntheticEvent", () => {
it("filters app self-reactions", async () => { it("filters app self-reactions", async () => {
const event = makeReactionEvent({ operator_type: "app" }); const event = makeReactionEvent({ operator_type: "app" });
@ -262,28 +299,12 @@ describe("resolveReactionSyntheticEvent", () => {
}); });
it("filters reactions on non-bot messages", async () => { it("filters reactions on non-bot messages", async () => {
const event = makeReactionEvent(); const result = await resolveNonBotReaction();
const result = await resolveReactionSyntheticEvent({
cfg,
accountId: "default",
event,
botOpenId: "ou_bot",
fetchMessage: async () => ({
messageId: "om_msg1",
chatId: "oc_group",
chatType: "group",
senderOpenId: "ou_other",
senderType: "user",
content: "hello",
contentType: "text",
}),
});
expect(result).toBeNull(); expect(result).toBeNull();
}); });
it("allows non-bot reactions when reactionNotifications is all", async () => { it("allows non-bot reactions when reactionNotifications is all", async () => {
const event = makeReactionEvent(); const result = await resolveNonBotReaction({
const result = await resolveReactionSyntheticEvent({
cfg: { cfg: {
channels: { channels: {
feishu: { feishu: {
@ -291,18 +312,6 @@ describe("resolveReactionSyntheticEvent", () => {
}, },
}, },
} as ClawdbotConfig, } as ClawdbotConfig,
accountId: "default",
event,
botOpenId: "ou_bot",
fetchMessage: async () => ({
messageId: "om_msg1",
chatId: "oc_group",
chatType: "group",
senderOpenId: "ou_other",
senderType: "user",
content: "hello",
contentType: "text",
}),
uuid: () => "fixed-uuid", uuid: () => "fixed-uuid",
}); });
expect(result?.message.message_id).toBe("om_msg1:reaction:THUMBSUP:fixed-uuid"); expect(result?.message.message_id).toBe("om_msg1:reaction:THUMBSUP:fixed-uuid");
@ -457,18 +466,16 @@ describe("Feishu inbound debounce regressions", () => {
); );
await vi.advanceTimersByTimeAsync(25); await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); const dispatched = expectSingleDispatchedEvent();
const dispatched = getFirstDispatchedEvent();
const mergedMentions = dispatched.message.mentions ?? []; const mergedMentions = dispatched.message.mentions ?? [];
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_bot")).toBe(true); expect(mergedMentions.some((mention) => mention.id.open_id === "ou_bot")).toBe(true);
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false); expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false);
}); });
it("passes prefetched botName through to handleFeishuMessage", async () => { it("passes prefetched botName through to handleFeishuMessage", async () => {
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false); vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false);
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false);
const onMessage = await setupDebounceMonitor({ botName: "OpenClaw Bot" }); const onMessage = await setupDebounceMonitor({ botName: "OpenClaw Bot" });
await onMessage( await onMessage(
@ -517,9 +524,7 @@ describe("Feishu inbound debounce regressions", () => {
); );
await vi.advanceTimersByTimeAsync(25); await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); const { dispatched, parsed } = expectParsedFirstDispatchedEvent();
const dispatched = getFirstDispatchedEvent();
const parsed = parseFeishuMessageEvent(dispatched, "ou_bot");
expect(parsed.mentionedBot).toBe(true); expect(parsed.mentionedBot).toBe(true);
expect(parsed.mentionTargets).toBeUndefined(); expect(parsed.mentionTargets).toBeUndefined();
const mergedMentions = dispatched.message.mentions ?? []; const mergedMentions = dispatched.message.mentions ?? [];
@ -547,19 +552,14 @@ describe("Feishu inbound debounce regressions", () => {
); );
await vi.advanceTimersByTimeAsync(25); await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); const { parsed } = expectParsedFirstDispatchedEvent();
const dispatched = getFirstDispatchedEvent();
const parsed = parseFeishuMessageEvent(dispatched, "ou_bot");
expect(parsed.mentionedBot).toBe(true); expect(parsed.mentionedBot).toBe(true);
}); });
it("excludes previously processed retries from combined debounce text", async () => { it("excludes previously processed retries from combined debounce text", async () => {
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old")); setStaleRetryMocks();
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
async (messageId) => messageId === "om_old",
);
const onMessage = await setupDebounceMonitor(); const onMessage = await setupDebounceMonitor();
await onMessage(createTextEvent({ messageId: "om_old", text: "stale" })); await onMessage(createTextEvent({ messageId: "om_old", text: "stale" }));
@ -576,20 +576,16 @@ describe("Feishu inbound debounce regressions", () => {
await Promise.resolve(); await Promise.resolve();
await vi.advanceTimersByTimeAsync(25); await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); const dispatched = expectSingleDispatchedEvent();
const dispatched = getFirstDispatchedEvent();
expect(dispatched.message.message_id).toBe("om_new_2"); expect(dispatched.message.message_id).toBe("om_new_2");
const combined = JSON.parse(dispatched.message.content) as { text?: string }; const combined = JSON.parse(dispatched.message.content) as { text?: string };
expect(combined.text).toBe("first\nsecond"); expect(combined.text).toBe("first\nsecond");
}); });
it("uses latest fresh message id when debounce batch ends with stale retry", async () => { it("uses latest fresh message id when debounce batch ends with stale retry", async () => {
const recordSpy = vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); const recordSpy = vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old")); setStaleRetryMocks();
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
async (messageId) => messageId === "om_old",
);
const onMessage = await setupDebounceMonitor(); const onMessage = await setupDebounceMonitor();
await onMessage(createTextEvent({ messageId: "om_new", text: "fresh" })); await onMessage(createTextEvent({ messageId: "om_new", text: "fresh" }));
@ -600,12 +596,58 @@ describe("Feishu inbound debounce regressions", () => {
await Promise.resolve(); await Promise.resolve();
await vi.advanceTimersByTimeAsync(25); await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); const dispatched = expectSingleDispatchedEvent();
const dispatched = getFirstDispatchedEvent();
expect(dispatched.message.message_id).toBe("om_new"); expect(dispatched.message.message_id).toBe("om_new");
const combined = JSON.parse(dispatched.message.content) as { text?: string }; const combined = JSON.parse(dispatched.message.content) as { text?: string };
expect(combined.text).toBe("fresh"); expect(combined.text).toBe("fresh");
expect(recordSpy).toHaveBeenCalledWith("default:om_old"); expect(recordSpy).toHaveBeenCalledWith("om_old", "default", expect.any(Function));
expect(recordSpy).not.toHaveBeenCalledWith("default:om_new"); expect(recordSpy).not.toHaveBeenCalledWith("om_new", "default", expect.any(Function));
});
it("releases early event dedupe when debounced dispatch fails", async () => {
setDedupPassThroughMocks();
const enqueueMock = vi.fn();
setFeishuRuntime(
createPluginRuntimeMock({
channel: {
debounce: {
createInboundDebouncer: <T>(params: {
onError?: (err: unknown, items: T[]) => void;
}) => ({
enqueue: async (item: T) => {
enqueueMock(item);
params.onError?.(new Error("dispatch failed"), [item]);
},
flushKey: async () => {},
}),
resolveInboundDebounceMs,
},
text: {
hasControlCommand,
},
},
}),
);
const onMessage = await setupDebounceMonitor();
const event = createTextEvent({ messageId: "om_retryable", text: "hello" });
await enqueueDebouncedMessage(onMessage, event);
expect(enqueueMock).toHaveBeenCalledTimes(1);
await enqueueDebouncedMessage(onMessage, event);
expect(enqueueMock).toHaveBeenCalledTimes(2);
expect(handleFeishuMessageMock).not.toHaveBeenCalled();
});
it("drops duplicate inbound events before they re-enter the debounce pipeline", async () => {
const onMessage = await setupDebounceMonitor();
const event = createTextEvent({ messageId: "om_duplicate", text: "hello" });
await enqueueDebouncedMessage(onMessage, event);
await vi.advanceTimersByTimeAsync(25);
await enqueueDebouncedMessage(onMessage, event);
await vi.advanceTimersByTimeAsync(25);
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
}); });
}); });

View File

@ -3,33 +3,19 @@ import { afterEach, describe, expect, it, vi } from "vitest";
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
const probeFeishuMock = vi.hoisted(() => vi.fn()); const probeFeishuMock = vi.hoisted(() => vi.fn());
const feishuClientMockModule = vi.hoisted(() => ({
createFeishuWSClient: vi.fn(() => ({ start: vi.fn() })),
createEventDispatcher: vi.fn(() => ({ register: vi.fn() })),
}));
const feishuRuntimeMockModule = vi.hoisted(() => ({
getFeishuRuntime: () => ({
channel: {
debounce: {
resolveInboundDebounceMs: () => 0,
createInboundDebouncer: () => ({
enqueue: async () => {},
flushKey: async () => {},
}),
},
text: {
hasControlCommand: () => false,
},
},
}),
}));
vi.mock("./probe.js", () => ({ vi.mock("./probe.js", () => ({
probeFeishu: probeFeishuMock, probeFeishu: probeFeishuMock,
})); }));
vi.mock("./client.js", () => feishuClientMockModule); vi.mock("./client.js", async () => {
vi.mock("./runtime.js", () => feishuRuntimeMockModule); const { createFeishuClientMockModule } = await import("./monitor.test-mocks.js");
return createFeishuClientMockModule();
});
vi.mock("./runtime.js", async () => {
const { createFeishuRuntimeMockModule } = await import("./monitor.test-mocks.js");
return createFeishuRuntimeMockModule();
});
function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig { function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig {
return { return {
@ -52,6 +38,12 @@ function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig
} as ClawdbotConfig; } as ClawdbotConfig;
} }
async function waitForStartedAccount(started: string[], accountId: string) {
for (let i = 0; i < 10 && !started.includes(accountId); i += 1) {
await Promise.resolve();
}
}
afterEach(() => { afterEach(() => {
stopFeishuMonitor(); stopFeishuMonitor();
}); });
@ -116,10 +108,7 @@ describe("Feishu monitor startup preflight", () => {
}); });
try { try {
for (let i = 0; i < 10 && !started.includes("beta"); i += 1) { await waitForStartedAccount(started, "beta");
await Promise.resolve();
}
expect(started).toEqual(["alpha", "beta"]); expect(started).toEqual(["alpha", "beta"]);
expect(started.filter((accountId) => accountId === "alpha")).toHaveLength(1); expect(started.filter((accountId) => accountId === "alpha")).toHaveLength(1);
} finally { } finally {
@ -153,10 +142,7 @@ describe("Feishu monitor startup preflight", () => {
}); });
try { try {
for (let i = 0; i < 10 && !started.includes("beta"); i += 1) { await waitForStartedAccount(started, "beta");
await Promise.resolve();
}
expect(started).toEqual(["alpha", "beta"]); expect(started).toEqual(["alpha", "beta"]);
expect(runtime.error).toHaveBeenCalledWith( expect(runtime.error).toHaveBeenCalledWith(
expect.stringContaining("bot info probe timed out"), expect.stringContaining("bot info probe timed out"),

View File

@ -1,9 +1,7 @@
import crypto from "node:crypto"; import crypto from "node:crypto";
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { afterEach, describe, expect, it, vi } from "vitest"; import { afterEach, describe, expect, it, vi } from "vitest";
import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js"; import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js";
import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js";
const probeFeishuMock = vi.hoisted(() => vi.fn()); const probeFeishuMock = vi.hoisted(() => vi.fn());
@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule());
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
function buildConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
function signFeishuPayload(params: { function signFeishuPayload(params: {
encryptKey: string; encryptKey: string;
payload: Record<string, unknown>; payload: Record<string, unknown>;
@ -107,41 +50,12 @@ function encryptFeishuPayload(encryptKey: string, payload: Record<string, unknow
return Buffer.concat([iv, encrypted]).toString("base64"); return Buffer.concat([iv, encrypted]).toString("base64");
} }
async function withRunningWebhookMonitor( async function postSignedPayload(url: string, payload: Record<string, unknown>) {
params: { return await fetch(url, {
accountId: string; method: "POST",
path: string; headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
verificationToken: string; body: JSON.stringify(payload),
encryptKey: string;
},
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
}); });
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitorFeishuProvider({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
} }
afterEach(() => { afterEach(() => {
@ -159,6 +73,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const payload = { type: "url_verification", challenge: "challenge-token" }; const payload = { type: "url_verification", challenge: "challenge-token" };
const response = await fetch(url, { const response = await fetch(url, {
@ -185,6 +100,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const response = await fetch(url, { const response = await fetch(url, {
method: "POST", method: "POST",
@ -208,6 +124,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const response = await fetch(url, { const response = await fetch(url, {
method: "POST", method: "POST",
@ -231,13 +148,10 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const payload = { type: "url_verification", challenge: "challenge-token" }; const payload = { type: "url_verification", challenge: "challenge-token" };
const response = await fetch(url, { const response = await postSignedPayload(url, payload);
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
expect(response.status).toBe(200); expect(response.status).toBe(200);
await expect(response.json()).resolves.toEqual({ challenge: "challenge-token" }); await expect(response.json()).resolves.toEqual({ challenge: "challenge-token" });
@ -255,17 +169,14 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const payload = { const payload = {
schema: "2.0", schema: "2.0",
header: { event_type: "unknown.event" }, header: { event_type: "unknown.event" },
event: {}, event: {},
}; };
const response = await fetch(url, { const response = await postSignedPayload(url, payload);
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
expect(response.status).toBe(200); expect(response.status).toBe(200);
expect(await response.text()).toContain("no unknown.event event handle"); expect(await response.text()).toContain("no unknown.event event handle");
@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const payload = { const payload = {
encrypt: encryptFeishuPayload("encrypt_key", { encrypt: encryptFeishuPayload("encrypt_key", {
@ -290,11 +202,7 @@ describe("Feishu webhook signed-request e2e", () => {
challenge: "encrypted-challenge-token", challenge: "encrypted-challenge-token",
}), }),
}; };
const response = await fetch(url, { const response = await postSignedPayload(url, payload);
method: "POST",
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
body: JSON.stringify(payload),
});
expect(response.status).toBe(200); expect(response.status).toBe(200);
await expect(response.json()).resolves.toEqual({ await expect(response.json()).resolves.toEqual({

View File

@ -1,11 +1,13 @@
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { afterEach, describe, expect, it, vi } from "vitest"; import { afterEach, describe, expect, it, vi } from "vitest";
import { import {
createFeishuClientMockModule, createFeishuClientMockModule,
createFeishuRuntimeMockModule, createFeishuRuntimeMockModule,
} from "./monitor.test-mocks.js"; } from "./monitor.test-mocks.js";
import {
buildWebhookConfig,
getFreePort,
withRunningWebhookMonitor,
} from "./monitor.webhook.test-helpers.js";
const probeFeishuMock = vi.hoisted(() => vi.fn()); const probeFeishuMock = vi.hoisted(() => vi.fn());
@ -33,98 +35,6 @@ import {
stopFeishuMonitor, stopFeishuMonitor,
} from "./monitor.js"; } from "./monitor.js";
async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
function buildConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
async function withRunningWebhookMonitor(
params: {
accountId: string;
path: string;
verificationToken: string;
encryptKey: string;
},
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
});
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitorFeishuProvider({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
}
afterEach(() => { afterEach(() => {
clearFeishuWebhookRateLimitStateForTest(); clearFeishuWebhookRateLimitStateForTest();
stopFeishuMonitor(); stopFeishuMonitor();
@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => {
it("rejects webhook mode without verificationToken", async () => { it("rejects webhook mode without verificationToken", async () => {
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
const cfg = buildConfig({ const cfg = buildWebhookConfig({
accountId: "missing-token", accountId: "missing-token",
path: "/hook-missing-token", path: "/hook-missing-token",
port: await getFreePort(), port: await getFreePort(),
@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => {
it("rejects webhook mode without encryptKey", async () => { it("rejects webhook mode without encryptKey", async () => {
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
const cfg = buildConfig({ const cfg = buildWebhookConfig({
accountId: "missing-encrypt-key", accountId: "missing-encrypt-key",
path: "/hook-missing-encrypt", path: "/hook-missing-encrypt",
port: await getFreePort(), port: await getFreePort(),
@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
const response = await fetch(url, { const response = await fetch(url, {
method: "POST", method: "POST",
@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => {
verificationToken: "verify_token", verificationToken: "verify_token",
encryptKey: "encrypt_key", encryptKey: "encrypt_key",
}, },
monitorFeishuProvider,
async (url) => { async (url) => {
let saw429 = false; let saw429 = false;
for (let i = 0; i < 130; i += 1) { for (let i = 0; i < 130; i += 1) {

View File

@ -0,0 +1,98 @@
import { createServer } from "node:http";
import type { AddressInfo } from "node:net";
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
import { vi } from "vitest";
import type { monitorFeishuProvider } from "./monitor.js";
export async function getFreePort(): Promise<number> {
const server = createServer();
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
const address = server.address() as AddressInfo | null;
if (!address) {
throw new Error("missing server address");
}
await new Promise<void>((resolve) => server.close(() => resolve()));
return address.port;
}
async function waitUntilServerReady(url: string): Promise<void> {
for (let i = 0; i < 50; i += 1) {
try {
const response = await fetch(url, { method: "GET" });
if (response.status >= 200 && response.status < 500) {
return;
}
} catch {
// retry
}
await new Promise((resolve) => setTimeout(resolve, 20));
}
throw new Error(`server did not start: ${url}`);
}
export function buildWebhookConfig(params: {
accountId: string;
path: string;
port: number;
verificationToken?: string;
encryptKey?: string;
}): ClawdbotConfig {
return {
channels: {
feishu: {
enabled: true,
accounts: {
[params.accountId]: {
enabled: true,
appId: "cli_test",
appSecret: "secret_test", // pragma: allowlist secret
connectionMode: "webhook",
webhookHost: "127.0.0.1",
webhookPort: params.port,
webhookPath: params.path,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
},
},
},
},
} as ClawdbotConfig;
}
export async function withRunningWebhookMonitor(
params: {
accountId: string;
path: string;
verificationToken: string;
encryptKey: string;
},
monitor: typeof monitorFeishuProvider,
run: (url: string) => Promise<void>,
) {
const port = await getFreePort();
const cfg = buildWebhookConfig({
accountId: params.accountId,
path: params.path,
port,
encryptKey: params.encryptKey,
verificationToken: params.verificationToken,
});
const abortController = new AbortController();
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
const monitorPromise = monitor({
config: cfg,
runtime,
abortSignal: abortController.signal,
});
const url = `http://127.0.0.1:${port}${params.path}`;
await waitUntilServerReady(url);
try {
await run(url);
} finally {
abortController.abort();
await monitorPromise;
}
}

View File

@ -29,12 +29,16 @@ vi.mock("./runtime.js", () => ({
import { feishuOutbound } from "./outbound.js"; import { feishuOutbound } from "./outbound.js";
const sendText = feishuOutbound.sendText!; const sendText = feishuOutbound.sendText!;
function resetOutboundMocks() {
vi.clearAllMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
}
describe("feishuOutbound.sendText local-image auto-convert", () => { describe("feishuOutbound.sendText local-image auto-convert", () => {
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); resetOutboundMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
}); });
async function createTmpImage(ext = ".png"): Promise<{ dir: string; file: string }> { async function createTmpImage(ext = ".png"): Promise<{ dir: string; file: string }> {
@ -181,10 +185,7 @@ describe("feishuOutbound.sendText local-image auto-convert", () => {
describe("feishuOutbound.sendText replyToId forwarding", () => { describe("feishuOutbound.sendText replyToId forwarding", () => {
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); resetOutboundMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
}); });
it("forwards replyToId as replyToMessageId to sendMessageFeishu", async () => { it("forwards replyToId as replyToMessageId to sendMessageFeishu", async () => {
@ -249,10 +250,7 @@ describe("feishuOutbound.sendText replyToId forwarding", () => {
describe("feishuOutbound.sendMedia replyToId forwarding", () => { describe("feishuOutbound.sendMedia replyToId forwarding", () => {
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); resetOutboundMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
}); });
it("forwards replyToId to sendMediaFeishu", async () => { it("forwards replyToId to sendMediaFeishu", async () => {
@ -292,10 +290,7 @@ describe("feishuOutbound.sendMedia replyToId forwarding", () => {
describe("feishuOutbound.sendMedia renderMode", () => { describe("feishuOutbound.sendMedia renderMode", () => {
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); resetOutboundMocks();
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
}); });
it("uses markdown cards for captions when renderMode=card", async () => { it("uses markdown cards for captions when renderMode=card", async () => {

View File

@ -8,6 +8,22 @@ vi.mock("./client.js", () => ({
import { FEISHU_PROBE_REQUEST_TIMEOUT_MS, probeFeishu, clearProbeCache } from "./probe.js"; import { FEISHU_PROBE_REQUEST_TIMEOUT_MS, probeFeishu, clearProbeCache } from "./probe.js";
const DEFAULT_CREDS = { appId: "cli_123", appSecret: "secret" } as const; // pragma: allowlist secret
const DEFAULT_SUCCESS_RESPONSE = {
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
} as const;
const DEFAULT_SUCCESS_RESULT = {
ok: true,
appId: "cli_123",
botName: "TestBot",
botOpenId: "ou_abc123",
} as const;
const BOT1_RESPONSE = {
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
} as const;
function makeRequestFn(response: Record<string, unknown>) { function makeRequestFn(response: Record<string, unknown>) {
return vi.fn().mockResolvedValue(response); return vi.fn().mockResolvedValue(response);
} }
@ -18,6 +34,64 @@ function setupClient(response: Record<string, unknown>) {
return requestFn; return requestFn;
} }
function setupSuccessClient() {
return setupClient(DEFAULT_SUCCESS_RESPONSE);
}
async function expectDefaultSuccessResult(
creds = DEFAULT_CREDS,
expected: Awaited<ReturnType<typeof probeFeishu>> = DEFAULT_SUCCESS_RESULT,
) {
const result = await probeFeishu(creds);
expect(result).toEqual(expected);
}
async function withFakeTimers(run: () => Promise<void>) {
vi.useFakeTimers();
try {
await run();
} finally {
vi.useRealTimers();
}
}
async function expectErrorResultCached(params: {
requestFn: ReturnType<typeof vi.fn>;
expectedError: string;
ttlMs: number;
}) {
createFeishuClientMock.mockReturnValue({ request: params.requestFn });
const first = await probeFeishu(DEFAULT_CREDS);
const second = await probeFeishu(DEFAULT_CREDS);
expect(first).toMatchObject({ ok: false, error: params.expectedError });
expect(second).toMatchObject({ ok: false, error: params.expectedError });
expect(params.requestFn).toHaveBeenCalledTimes(1);
vi.advanceTimersByTime(params.ttlMs + 1);
await probeFeishu(DEFAULT_CREDS);
expect(params.requestFn).toHaveBeenCalledTimes(2);
}
async function expectFreshDefaultProbeAfter(
requestFn: ReturnType<typeof vi.fn>,
invalidate: () => void,
) {
await probeFeishu(DEFAULT_CREDS);
expect(requestFn).toHaveBeenCalledTimes(1);
invalidate();
await probeFeishu(DEFAULT_CREDS);
expect(requestFn).toHaveBeenCalledTimes(2);
}
async function readSequentialDefaultProbePair() {
const first = await probeFeishu(DEFAULT_CREDS);
return { first, second: await probeFeishu(DEFAULT_CREDS) };
}
describe("probeFeishu", () => { describe("probeFeishu", () => {
beforeEach(() => { beforeEach(() => {
clearProbeCache(); clearProbeCache();
@ -44,28 +118,16 @@ describe("probeFeishu", () => {
}); });
it("returns bot info on successful probe", async () => { it("returns bot info on successful probe", async () => {
const requestFn = setupClient({ const requestFn = setupSuccessClient();
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
});
const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret await expectDefaultSuccessResult();
expect(result).toEqual({
ok: true,
appId: "cli_123",
botName: "TestBot",
botOpenId: "ou_abc123",
});
expect(requestFn).toHaveBeenCalledTimes(1); expect(requestFn).toHaveBeenCalledTimes(1);
}); });
it("passes the probe timeout to the Feishu request", async () => { it("passes the probe timeout to the Feishu request", async () => {
const requestFn = setupClient({ const requestFn = setupSuccessClient();
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
});
await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret await probeFeishu(DEFAULT_CREDS);
expect(requestFn).toHaveBeenCalledWith( expect(requestFn).toHaveBeenCalledWith(
expect.objectContaining({ expect.objectContaining({
@ -77,19 +139,16 @@ describe("probeFeishu", () => {
}); });
it("returns timeout error when request exceeds timeout", async () => { it("returns timeout error when request exceeds timeout", async () => {
vi.useFakeTimers(); await withFakeTimers(async () => {
try {
const requestFn = vi.fn().mockImplementation(() => new Promise(() => {})); const requestFn = vi.fn().mockImplementation(() => new Promise(() => {}));
createFeishuClientMock.mockReturnValue({ request: requestFn }); createFeishuClientMock.mockReturnValue({ request: requestFn });
const promise = probeFeishu({ appId: "cli_123", appSecret: "secret" }, { timeoutMs: 1_000 }); const promise = probeFeishu(DEFAULT_CREDS, { timeoutMs: 1_000 });
await vi.advanceTimersByTimeAsync(1_000); await vi.advanceTimersByTimeAsync(1_000);
const result = await promise; const result = await promise;
expect(result).toMatchObject({ ok: false, error: "probe timed out after 1000ms" }); expect(result).toMatchObject({ ok: false, error: "probe timed out after 1000ms" });
} finally { });
vi.useRealTimers();
}
}); });
it("returns aborted when abort signal is already aborted", async () => { it("returns aborted when abort signal is already aborted", async () => {
@ -106,14 +165,9 @@ describe("probeFeishu", () => {
expect(createFeishuClientMock).not.toHaveBeenCalled(); expect(createFeishuClientMock).not.toHaveBeenCalled();
}); });
it("returns cached result on subsequent calls within TTL", async () => { it("returns cached result on subsequent calls within TTL", async () => {
const requestFn = setupClient({ const requestFn = setupSuccessClient();
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
});
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret const { first, second } = await readSequentialDefaultProbePair();
const first = await probeFeishu(creds);
const second = await probeFeishu(creds);
expect(first).toEqual(second); expect(first).toEqual(second);
// Only one API call should have been made // Only one API call should have been made
@ -121,76 +175,37 @@ describe("probeFeishu", () => {
}); });
it("makes a fresh API call after cache expires", async () => { it("makes a fresh API call after cache expires", async () => {
vi.useFakeTimers(); await withFakeTimers(async () => {
try { const requestFn = setupSuccessClient();
const requestFn = setupClient({
code: 0, await expectFreshDefaultProbeAfter(requestFn, () => {
bot: { bot_name: "TestBot", open_id: "ou_abc123" }, vi.advanceTimersByTime(10 * 60 * 1000 + 1);
}); });
});
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(1);
// Advance time past the success TTL
vi.advanceTimersByTime(10 * 60 * 1000 + 1);
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
} finally {
vi.useRealTimers();
}
}); });
it("caches failed probe results (API error) for the error TTL", async () => { it("caches failed probe results (API error) for the error TTL", async () => {
vi.useFakeTimers(); await withFakeTimers(async () => {
try { await expectErrorResultCached({
const requestFn = makeRequestFn({ code: 99, msg: "token expired" }); requestFn: makeRequestFn({ code: 99, msg: "token expired" }),
createFeishuClientMock.mockReturnValue({ request: requestFn }); expectedError: "API error: token expired",
ttlMs: 60 * 1000,
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret });
const first = await probeFeishu(creds); });
const second = await probeFeishu(creds);
expect(first).toMatchObject({ ok: false, error: "API error: token expired" });
expect(second).toMatchObject({ ok: false, error: "API error: token expired" });
expect(requestFn).toHaveBeenCalledTimes(1);
vi.advanceTimersByTime(60 * 1000 + 1);
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
} finally {
vi.useRealTimers();
}
}); });
it("caches thrown request errors for the error TTL", async () => { it("caches thrown request errors for the error TTL", async () => {
vi.useFakeTimers(); await withFakeTimers(async () => {
try { await expectErrorResultCached({
const requestFn = vi.fn().mockRejectedValue(new Error("network error")); requestFn: vi.fn().mockRejectedValue(new Error("network error")),
createFeishuClientMock.mockReturnValue({ request: requestFn }); expectedError: "network error",
ttlMs: 60 * 1000,
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret });
const first = await probeFeishu(creds); });
const second = await probeFeishu(creds);
expect(first).toMatchObject({ ok: false, error: "network error" });
expect(second).toMatchObject({ ok: false, error: "network error" });
expect(requestFn).toHaveBeenCalledTimes(1);
vi.advanceTimersByTime(60 * 1000 + 1);
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
} finally {
vi.useRealTimers();
}
}); });
it("caches per account independently", async () => { it("caches per account independently", async () => {
const requestFn = setupClient({ const requestFn = setupClient(BOT1_RESPONSE);
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
});
await probeFeishu({ appId: "cli_aaa", appSecret: "s1" }); // pragma: allowlist secret await probeFeishu({ appId: "cli_aaa", appSecret: "s1" }); // pragma: allowlist secret
expect(requestFn).toHaveBeenCalledTimes(1); expect(requestFn).toHaveBeenCalledTimes(1);
@ -205,10 +220,7 @@ describe("probeFeishu", () => {
}); });
it("does not share cache between accounts with same appId but different appSecret", async () => { it("does not share cache between accounts with same appId but different appSecret", async () => {
const requestFn = setupClient({ const requestFn = setupClient(BOT1_RESPONSE);
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
});
// First account with appId + secret A // First account with appId + secret A
await probeFeishu({ appId: "cli_shared", appSecret: "secret_aaa" }); // pragma: allowlist secret await probeFeishu({ appId: "cli_shared", appSecret: "secret_aaa" }); // pragma: allowlist secret
@ -221,10 +233,7 @@ describe("probeFeishu", () => {
}); });
it("uses accountId for cache key when available", async () => { it("uses accountId for cache key when available", async () => {
const requestFn = setupClient({ const requestFn = setupClient(BOT1_RESPONSE);
code: 0,
bot: { bot_name: "Bot1", open_id: "ou_1" },
});
// Two accounts with same appId+appSecret but different accountIds are cached separately // Two accounts with same appId+appSecret but different accountIds are cached separately
await probeFeishu({ accountId: "acct-1", appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret await probeFeishu({ accountId: "acct-1", appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
@ -239,19 +248,11 @@ describe("probeFeishu", () => {
}); });
it("clearProbeCache forces fresh API call", async () => { it("clearProbeCache forces fresh API call", async () => {
const requestFn = setupClient({ const requestFn = setupSuccessClient();
code: 0,
bot: { bot_name: "TestBot", open_id: "ou_abc123" }, await expectFreshDefaultProbeAfter(requestFn, () => {
clearProbeCache();
}); });
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(1);
clearProbeCache();
await probeFeishu(creds);
expect(requestFn).toHaveBeenCalledTimes(2);
}); });
it("handles response.data.bot fallback path", async () => { it("handles response.data.bot fallback path", async () => {
@ -260,10 +261,8 @@ describe("probeFeishu", () => {
data: { bot: { bot_name: "DataBot", open_id: "ou_data" } }, data: { bot: { bot_name: "DataBot", open_id: "ou_data" } },
}); });
const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret await expectDefaultSuccessResult(DEFAULT_CREDS, {
expect(result).toEqual({ ...DEFAULT_SUCCESS_RESULT,
ok: true,
appId: "cli_123",
botName: "DataBot", botName: "DataBot",
botOpenId: "ou_data", botOpenId: "ou_data",
}); });

View File

@ -25,44 +25,33 @@ vi.mock("./typing.js", () => ({
addTypingIndicator: addTypingIndicatorMock, addTypingIndicator: addTypingIndicatorMock,
removeTypingIndicator: removeTypingIndicatorMock, removeTypingIndicator: removeTypingIndicatorMock,
})); }));
vi.mock("./streaming-card.js", () => ({ vi.mock("./streaming-card.js", async () => {
mergeStreamingText: (previousText: string | undefined, nextText: string | undefined) => { const actual = await vi.importActual<typeof import("./streaming-card.js")>("./streaming-card.js");
const previous = typeof previousText === "string" ? previousText : ""; return {
const next = typeof nextText === "string" ? nextText : ""; mergeStreamingText: actual.mergeStreamingText,
if (!next) { FeishuStreamingSession: class {
return previous; active = false;
} start = vi.fn(async () => {
if (!previous || next === previous) { this.active = true;
return next; });
} update = vi.fn(async () => {});
if (next.startsWith(previous)) { close = vi.fn(async () => {
return next; this.active = false;
} });
if (previous.startsWith(next)) { isActive = vi.fn(() => this.active);
return previous;
}
return `${previous}${next}`;
},
FeishuStreamingSession: class {
active = false;
start = vi.fn(async () => {
this.active = true;
});
update = vi.fn(async () => {});
close = vi.fn(async () => {
this.active = false;
});
isActive = vi.fn(() => this.active);
constructor() { constructor() {
streamingInstances.push(this); streamingInstances.push(this);
} }
}, },
})); };
});
import { createFeishuReplyDispatcher } from "./reply-dispatcher.js"; import { createFeishuReplyDispatcher } from "./reply-dispatcher.js";
describe("createFeishuReplyDispatcher streaming behavior", () => { describe("createFeishuReplyDispatcher streaming behavior", () => {
type ReplyDispatcherArgs = Parameters<typeof createFeishuReplyDispatcher>[0];
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); vi.clearAllMocks();
streamingInstances.length = 0; streamingInstances.length = 0;
@ -128,6 +117,25 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
return createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; return createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
} }
function createRuntimeLogger() {
return { log: vi.fn(), error: vi.fn() } as never;
}
function createDispatcherHarness(overrides: Partial<ReplyDispatcherArgs> = {}) {
const result = createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
...overrides,
});
return {
result,
options: createReplyDispatcherWithTypingMock.mock.calls.at(-1)?.[0],
};
}
it("skips typing indicator when account typingIndicator is disabled", async () => { it("skips typing indicator when account typingIndicator is disabled", async () => {
resolveFeishuAccountMock.mockReturnValue({ resolveFeishuAccountMock.mockReturnValue({
accountId: "main", accountId: "main",
@ -209,14 +217,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("keeps auto mode plain text on non-streaming send path", async () => { it("keeps auto mode plain text on non-streaming send path", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness();
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "plain text" }, { kind: "final" }); await options.deliver({ text: "plain text" }, { kind: "final" });
expect(streamingInstances).toHaveLength(0); expect(streamingInstances).toHaveLength(0);
@ -225,14 +226,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("suppresses internal block payload delivery", async () => { it("suppresses internal block payload delivery", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness();
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "internal reasoning chunk" }, { kind: "block" }); await options.deliver({ text: "internal reasoning chunk" }, { kind: "block" });
expect(streamingInstances).toHaveLength(0); expect(streamingInstances).toHaveLength(0);
@ -253,15 +247,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("uses streaming session for auto mode markdown payloads", async () => { it("uses streaming session for auto mode markdown payloads", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
rootId: "om_root_topic", rootId: "om_root_topic",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
expect(streamingInstances).toHaveLength(1); expect(streamingInstances).toHaveLength(1);
@ -277,14 +266,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("closes streaming with block text when final reply is missing", async () => { it("closes streaming with block text when final reply is missing", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```md\npartial answer\n```" }, { kind: "block" }); await options.deliver({ text: "```md\npartial answer\n```" }, { kind: "block" });
await options.onIdle?.(); await options.onIdle?.();
@ -295,14 +279,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("delivers distinct final payloads after streaming close", async () => { it("delivers distinct final payloads after streaming close", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```md\n完整回复第一段\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n完整回复第一段\n```" }, { kind: "final" });
await options.deliver({ text: "```md\n完整回复第一段 + 第二段\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n完整回复第一段 + 第二段\n```" }, { kind: "final" });
@ -316,14 +295,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("skips exact duplicate final text after streaming close", async () => { it("skips exact duplicate final text after streaming close", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" });
await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" });
@ -383,14 +357,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}, },
}); });
const result = createFeishuReplyDispatcher({ const { result, options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.onReplyStart?.(); await options.onReplyStart?.();
await result.replyOptions.onPartialReply?.({ text: "hello" }); await result.replyOptions.onPartialReply?.({ text: "hello" });
await options.deliver({ text: "lo world" }, { kind: "block" }); await options.deliver({ text: "lo world" }, { kind: "block" });
@ -402,14 +371,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("sends media-only payloads as attachments", async () => { it("sends media-only payloads as attachments", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness();
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" }); await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" });
expect(sendMediaFeishuMock).toHaveBeenCalledTimes(1); expect(sendMediaFeishuMock).toHaveBeenCalledTimes(1);
@ -424,14 +386,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("falls back to legacy mediaUrl when mediaUrls is an empty array", async () => { it("falls back to legacy mediaUrl when mediaUrls is an empty array", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness();
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
});
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver( await options.deliver(
{ text: "caption", mediaUrl: "https://example.com/a.png", mediaUrls: [] }, { text: "caption", mediaUrl: "https://example.com/a.png", mediaUrls: [] },
{ kind: "final" }, { kind: "final" },
@ -447,14 +402,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("sends attachments after streaming final markdown replies", async () => { it("sends attachments after streaming final markdown replies", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver( await options.deliver(
{ text: "```ts\nconst x = 1\n```", mediaUrls: ["https://example.com/a.png"] }, { text: "```ts\nconst x = 1\n```", mediaUrls: ["https://example.com/a.png"] },
{ kind: "final" }, { kind: "final" },
@ -472,16 +422,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("passes replyInThread to sendMessageFeishu for plain text", async () => { it("passes replyInThread to sendMessageFeishu for plain text", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
replyToMessageId: "om_msg", replyToMessageId: "om_msg",
replyInThread: true, replyInThread: true,
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "plain text" }, { kind: "final" }); await options.deliver({ text: "plain text" }, { kind: "final" });
expect(sendMessageFeishuMock).toHaveBeenCalledWith( expect(sendMessageFeishuMock).toHaveBeenCalledWith(
@ -504,16 +448,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}, },
}); });
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
replyToMessageId: "om_msg", replyToMessageId: "om_msg",
replyInThread: true, replyInThread: true,
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "card text" }, { kind: "final" }); await options.deliver({ text: "card text" }, { kind: "final" });
expect(sendMarkdownCardFeishuMock).toHaveBeenCalledWith( expect(sendMarkdownCardFeishuMock).toHaveBeenCalledWith(
@ -525,16 +463,11 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("passes replyToMessageId and replyInThread to streaming.start()", async () => { it("passes replyToMessageId and replyInThread to streaming.start()", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
replyToMessageId: "om_msg", replyToMessageId: "om_msg",
replyInThread: true, replyInThread: true,
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
expect(streamingInstances).toHaveLength(1); expect(streamingInstances).toHaveLength(1);
@ -545,18 +478,13 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("disables streaming for thread replies and keeps reply metadata", async () => { it("disables streaming for thread replies and keeps reply metadata", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never, runtime: createRuntimeLogger(),
agentId: "agent",
runtime: { log: vi.fn(), error: vi.fn() } as never,
chatId: "oc_chat",
replyToMessageId: "om_msg", replyToMessageId: "om_msg",
replyInThread: false, replyInThread: false,
threadReply: true, threadReply: true,
rootId: "om_root_topic", rootId: "om_root_topic",
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
expect(streamingInstances).toHaveLength(0); expect(streamingInstances).toHaveLength(0);
@ -569,16 +497,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
}); });
it("passes replyInThread to media attachments", async () => { it("passes replyInThread to media attachments", async () => {
createFeishuReplyDispatcher({ const { options } = createDispatcherHarness({
cfg: {} as never,
agentId: "agent",
runtime: {} as never,
chatId: "oc_chat",
replyToMessageId: "om_msg", replyToMessageId: "om_msg",
replyInThread: true, replyInThread: true,
}); });
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" }); await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" });
expect(sendMediaFeishuMock).toHaveBeenCalledWith( expect(sendMediaFeishuMock).toHaveBeenCalledWith(

View File

@ -224,6 +224,41 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
lastPartial = ""; lastPartial = "";
}; };
const sendChunkedTextReply = async (params: {
text: string;
useCard: boolean;
infoKind?: string;
}) => {
let first = true;
const chunkSource = params.useCard
? params.text
: core.channel.text.convertMarkdownTables(params.text, tableMode);
for (const chunk of core.channel.text.chunkTextWithMode(
chunkSource,
textChunkLimit,
chunkMode,
)) {
const message = {
cfg,
to: chatId,
text: chunk,
replyToMessageId: sendReplyToMessageId,
replyInThread: effectiveReplyInThread,
mentions: first ? mentionTargets : undefined,
accountId,
};
if (params.useCard) {
await sendMarkdownCardFeishu(message);
} else {
await sendMessageFeishu(message);
}
first = false;
}
if (params.infoKind === "final") {
deliveredFinalTexts.add(params.text);
}
};
const { dispatcher, replyOptions, markDispatchIdle } = const { dispatcher, replyOptions, markDispatchIdle } =
core.channel.reply.createReplyDispatcherWithTyping({ core.channel.reply.createReplyDispatcherWithTyping({
responsePrefix: prefixContext.responsePrefix, responsePrefix: prefixContext.responsePrefix,
@ -303,48 +338,10 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
return; return;
} }
let first = true;
if (useCard) { if (useCard) {
for (const chunk of core.channel.text.chunkTextWithMode( await sendChunkedTextReply({ text, useCard: true, infoKind: info?.kind });
text,
textChunkLimit,
chunkMode,
)) {
await sendMarkdownCardFeishu({
cfg,
to: chatId,
text: chunk,
replyToMessageId: sendReplyToMessageId,
replyInThread: effectiveReplyInThread,
mentions: first ? mentionTargets : undefined,
accountId,
});
first = false;
}
if (info?.kind === "final") {
deliveredFinalTexts.add(text);
}
} else { } else {
const converted = core.channel.text.convertMarkdownTables(text, tableMode); await sendChunkedTextReply({ text, useCard: false, infoKind: info?.kind });
for (const chunk of core.channel.text.chunkTextWithMode(
converted,
textChunkLimit,
chunkMode,
)) {
await sendMessageFeishu({
cfg,
to: chatId,
text: chunk,
replyToMessageId: sendReplyToMessageId,
replyInThread: effectiveReplyInThread,
mentions: first ? mentionTargets : undefined,
accountId,
});
first = false;
}
if (info?.kind === "final") {
deliveredFinalTexts.add(text);
}
} }
} }

View File

@ -25,6 +25,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
const replyMock = vi.fn(); const replyMock = vi.fn();
const createMock = vi.fn(); const createMock = vi.fn();
async function expectFallbackResult(
send: () => Promise<{ messageId?: string }>,
expectedMessageId: string,
) {
const result = await send();
expect(replyMock).toHaveBeenCalledTimes(1);
expect(createMock).toHaveBeenCalledTimes(1);
expect(result.messageId).toBe(expectedMessageId);
}
beforeEach(() => { beforeEach(() => {
vi.clearAllMocks(); vi.clearAllMocks();
resolveFeishuSendTargetMock.mockReturnValue({ resolveFeishuSendTargetMock.mockReturnValue({
@ -51,16 +61,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_new" }, data: { message_id: "om_new" },
}); });
const result = await sendMessageFeishu({ await expectFallbackResult(
cfg: {} as never, () =>
to: "user:ou_target", sendMessageFeishu({
text: "hello", cfg: {} as never,
replyToMessageId: "om_parent", to: "user:ou_target",
}); text: "hello",
replyToMessageId: "om_parent",
expect(replyMock).toHaveBeenCalledTimes(1); }),
expect(createMock).toHaveBeenCalledTimes(1); "om_new",
expect(result.messageId).toBe("om_new"); );
}); });
it("falls back to create for withdrawn card replies", async () => { it("falls back to create for withdrawn card replies", async () => {
@ -73,16 +83,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_card_new" }, data: { message_id: "om_card_new" },
}); });
const result = await sendCardFeishu({ await expectFallbackResult(
cfg: {} as never, () =>
to: "user:ou_target", sendCardFeishu({
card: { schema: "2.0" }, cfg: {} as never,
replyToMessageId: "om_parent", to: "user:ou_target",
}); card: { schema: "2.0" },
replyToMessageId: "om_parent",
expect(replyMock).toHaveBeenCalledTimes(1); }),
expect(createMock).toHaveBeenCalledTimes(1); "om_card_new",
expect(result.messageId).toBe("om_card_new"); );
}); });
it("still throws for non-withdrawn reply failures", async () => { it("still throws for non-withdrawn reply failures", async () => {
@ -111,16 +121,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_thrown_fallback" }, data: { message_id: "om_thrown_fallback" },
}); });
const result = await sendMessageFeishu({ await expectFallbackResult(
cfg: {} as never, () =>
to: "user:ou_target", sendMessageFeishu({
text: "hello", cfg: {} as never,
replyToMessageId: "om_parent", to: "user:ou_target",
}); text: "hello",
replyToMessageId: "om_parent",
expect(replyMock).toHaveBeenCalledTimes(1); }),
expect(createMock).toHaveBeenCalledTimes(1); "om_thrown_fallback",
expect(result.messageId).toBe("om_thrown_fallback"); );
}); });
it("falls back to create when card reply throws a not-found AxiosError", async () => { it("falls back to create when card reply throws a not-found AxiosError", async () => {
@ -133,16 +143,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
data: { message_id: "om_axios_fallback" }, data: { message_id: "om_axios_fallback" },
}); });
const result = await sendCardFeishu({ await expectFallbackResult(
cfg: {} as never, () =>
to: "user:ou_target", sendCardFeishu({
card: { schema: "2.0" }, cfg: {} as never,
replyToMessageId: "om_parent", to: "user:ou_target",
}); card: { schema: "2.0" },
replyToMessageId: "om_parent",
expect(replyMock).toHaveBeenCalledTimes(1); }),
expect(createMock).toHaveBeenCalledTimes(1); "om_axios_fallback",
expect(result.messageId).toBe("om_axios_fallback"); );
}); });
it("re-throws non-withdrawn thrown errors for text messages", async () => { it("re-throws non-withdrawn thrown errors for text messages", async () => {

View File

@ -55,6 +55,30 @@ type FeishuCreateMessageClient = {
}; };
}; };
type FeishuMessageSender = {
id?: string;
id_type?: string;
sender_type?: string;
};
type FeishuMessageGetItem = {
message_id?: string;
chat_id?: string;
chat_type?: FeishuChatType;
msg_type?: string;
body?: { content?: string };
sender?: FeishuMessageSender;
create_time?: string;
};
type FeishuGetMessageResponse = {
code?: number;
msg?: string;
data?: FeishuMessageGetItem & {
items?: FeishuMessageGetItem[];
};
};
/** Send a direct message as a fallback when a reply target is unavailable. */ /** Send a direct message as a fallback when a reply target is unavailable. */
async function sendFallbackDirect( async function sendFallbackDirect(
client: FeishuCreateMessageClient, client: FeishuCreateMessageClient,
@ -214,36 +238,7 @@ export async function getMessageFeishu(params: {
try { try {
const response = (await client.im.message.get({ const response = (await client.im.message.get({
path: { message_id: messageId }, path: { message_id: messageId },
})) as { })) as FeishuGetMessageResponse;
code?: number;
msg?: string;
data?: {
items?: Array<{
message_id?: string;
chat_id?: string;
chat_type?: FeishuChatType;
msg_type?: string;
body?: { content?: string };
sender?: {
id?: string;
id_type?: string;
sender_type?: string;
};
create_time?: string;
}>;
message_id?: string;
chat_id?: string;
chat_type?: FeishuChatType;
msg_type?: string;
body?: { content?: string };
sender?: {
id?: string;
id_type?: string;
sender_type?: string;
};
create_time?: string;
};
};
if (response.code !== 0) { if (response.code !== 0) {
return null; return null;

View File

@ -144,6 +144,13 @@ describe("extractGeminiCliCredentials", () => {
} }
} }
function expectFakeCliCredentials(result: unknown) {
expect(result).toEqual({
clientId: FAKE_CLIENT_ID,
clientSecret: FAKE_CLIENT_SECRET,
});
}
beforeEach(async () => { beforeEach(async () => {
vi.clearAllMocks(); vi.clearAllMocks();
originalPath = process.env.PATH; originalPath = process.env.PATH;
@ -169,10 +176,7 @@ describe("extractGeminiCliCredentials", () => {
clearCredentialsCache(); clearCredentialsCache();
const result = extractGeminiCliCredentials(); const result = extractGeminiCliCredentials();
expect(result).toEqual({ expectFakeCliCredentials(result);
clientId: FAKE_CLIENT_ID,
clientSecret: FAKE_CLIENT_SECRET,
});
}); });
it("extracts credentials when PATH entry is an npm global shim", async () => { it("extracts credentials when PATH entry is an npm global shim", async () => {
@ -182,10 +186,7 @@ describe("extractGeminiCliCredentials", () => {
clearCredentialsCache(); clearCredentialsCache();
const result = extractGeminiCliCredentials(); const result = extractGeminiCliCredentials();
expect(result).toEqual({ expectFakeCliCredentials(result);
clientId: FAKE_CLIENT_ID,
clientSecret: FAKE_CLIENT_SECRET,
});
}); });
it("returns null when oauth2.js cannot be found", async () => { it("returns null when oauth2.js cannot be found", async () => {
@ -274,16 +275,16 @@ describe("loginGeminiCliOAuth", () => {
}); });
} }
async function runRemoteLoginWithCapturedAuthUrl( type LoginGeminiCliOAuthFn = (options: {
loginGeminiCliOAuth: (options: { isRemote: boolean;
isRemote: boolean; openUrl: () => Promise<void>;
openUrl: () => Promise<void>; log: (msg: string) => void;
log: (msg: string) => void; note: () => Promise<void>;
note: () => Promise<void>; prompt: () => Promise<string>;
prompt: () => Promise<string>; progress: { update: () => void; stop: () => void };
progress: { update: () => void; stop: () => void }; }) => Promise<{ projectId: string }>;
}) => Promise<{ projectId: string }>,
) { async function runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth: LoginGeminiCliOAuthFn) {
let authUrl = ""; let authUrl = "";
const result = await loginGeminiCliOAuth({ const result = await loginGeminiCliOAuth({
isRemote: true, isRemote: true,
@ -304,6 +305,14 @@ describe("loginGeminiCliOAuth", () => {
return { result, authUrl }; return { result, authUrl };
} }
async function runRemoteLoginExpectingProjectId(
loginGeminiCliOAuth: LoginGeminiCliOAuthFn,
projectId: string,
) {
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
expect(result.projectId).toBe(projectId);
}
let envSnapshot: Partial<Record<(typeof ENV_KEYS)[number], string>>; let envSnapshot: Partial<Record<(typeof ENV_KEYS)[number], string>>;
beforeEach(() => { beforeEach(() => {
envSnapshot = Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]])); envSnapshot = Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]]));
@ -357,9 +366,7 @@ describe("loginGeminiCliOAuth", () => {
vi.stubGlobal("fetch", fetchMock); vi.stubGlobal("fetch", fetchMock);
const { loginGeminiCliOAuth } = await import("./oauth.js"); const { loginGeminiCliOAuth } = await import("./oauth.js");
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "daily-project");
expect(result.projectId).toBe("daily-project");
const loadRequests = requests.filter((request) => const loadRequests = requests.filter((request) =>
request.url.includes("v1internal:loadCodeAssist"), request.url.includes("v1internal:loadCodeAssist"),
); );
@ -414,9 +421,7 @@ describe("loginGeminiCliOAuth", () => {
vi.stubGlobal("fetch", fetchMock); vi.stubGlobal("fetch", fetchMock);
const { loginGeminiCliOAuth } = await import("./oauth.js"); const { loginGeminiCliOAuth } = await import("./oauth.js");
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "env-project");
expect(result.projectId).toBe("env-project");
expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3); expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3);
expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false); expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false);
}); });

View File

@ -7,6 +7,9 @@
"dependencies": { "dependencies": {
"google-auth-library": "^10.6.1" "google-auth-library": "^10.6.1"
}, },
"devDependencies": {
"openclaw": "workspace:*"
},
"peerDependencies": { "peerDependencies": {
"openclaw": ">=2026.3.11" "openclaw": ">=2026.3.11"
}, },

View File

@ -13,6 +13,21 @@ const account = {
config: {}, config: {},
} as ResolvedGoogleChatAccount; } as ResolvedGoogleChatAccount;
function stubSuccessfulSend(name: string) {
const fetchMock = vi
.fn()
.mockResolvedValue(new Response(JSON.stringify({ name }), { status: 200 }));
vi.stubGlobal("fetch", fetchMock);
return fetchMock;
}
async function expectDownloadToRejectForResponse(response: Response) {
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
await expect(
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
).rejects.toThrow(/max bytes/i);
}
describe("downloadGoogleChatMedia", () => { describe("downloadGoogleChatMedia", () => {
afterEach(() => { afterEach(() => {
vi.unstubAllGlobals(); vi.unstubAllGlobals();
@ -29,11 +44,7 @@ describe("downloadGoogleChatMedia", () => {
status: 200, status: 200,
headers: { "content-length": "50", "content-type": "application/octet-stream" }, headers: { "content-length": "50", "content-type": "application/octet-stream" },
}); });
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); await expectDownloadToRejectForResponse(response);
await expect(
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
).rejects.toThrow(/max bytes/i);
}); });
it("rejects when streamed payload exceeds max bytes", async () => { it("rejects when streamed payload exceeds max bytes", async () => {
@ -52,11 +63,7 @@ describe("downloadGoogleChatMedia", () => {
status: 200, status: 200,
headers: { "content-type": "application/octet-stream" }, headers: { "content-type": "application/octet-stream" },
}); });
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); await expectDownloadToRejectForResponse(response);
await expect(
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
).rejects.toThrow(/max bytes/i);
}); });
}); });
@ -66,12 +73,7 @@ describe("sendGoogleChatMessage", () => {
}); });
it("adds messageReplyOption when sending to an existing thread", async () => { it("adds messageReplyOption when sending to an existing thread", async () => {
const fetchMock = vi const fetchMock = stubSuccessfulSend("spaces/AAA/messages/123");
.fn()
.mockResolvedValue(
new Response(JSON.stringify({ name: "spaces/AAA/messages/123" }), { status: 200 }),
);
vi.stubGlobal("fetch", fetchMock);
await sendGoogleChatMessage({ await sendGoogleChatMessage({
account, account,
@ -89,12 +91,7 @@ describe("sendGoogleChatMessage", () => {
}); });
it("does not set messageReplyOption for non-thread sends", async () => { it("does not set messageReplyOption for non-thread sends", async () => {
const fetchMock = vi const fetchMock = stubSuccessfulSend("spaces/AAA/messages/124");
.fn()
.mockResolvedValue(
new Response(JSON.stringify({ name: "spaces/AAA/messages/124" }), { status: 200 }),
);
vi.stubGlobal("fetch", fetchMock);
await sendGoogleChatMessage({ await sendGoogleChatMessage({
account, account,

View File

@ -14,70 +14,24 @@ const headersToObject = (headers?: HeadersInit): Record<string, string> =>
? Object.fromEntries(headers) ? Object.fromEntries(headers)
: headers || {}; : headers || {};
async function fetchJson<T>( async function withGoogleChatResponse<T>(params: {
account: ResolvedGoogleChatAccount, account: ResolvedGoogleChatAccount;
url: string, url: string;
init: RequestInit, init?: RequestInit;
): Promise<T> { auditContext: string;
const token = await getGoogleChatAccessToken(account); errorPrefix?: string;
const { response: res, release } = await fetchWithSsrFGuard({ handleResponse: (response: Response) => Promise<T>;
}): Promise<T> {
const {
account,
url, url,
init: { init,
...init, auditContext,
headers: { errorPrefix = "Google Chat API",
...headersToObject(init.headers), handleResponse,
Authorization: `Bearer ${token}`, } = params;
"Content-Type": "application/json",
},
},
auditContext: "googlechat.api.json",
});
try {
if (!res.ok) {
const text = await res.text().catch(() => "");
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
}
return (await res.json()) as T;
} finally {
await release();
}
}
async function fetchOk(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<void> {
const token = await getGoogleChatAccessToken(account); const token = await getGoogleChatAccessToken(account);
const { response: res, release } = await fetchWithSsrFGuard({ const { response, release } = await fetchWithSsrFGuard({
url,
init: {
...init,
headers: {
...headersToObject(init.headers),
Authorization: `Bearer ${token}`,
},
},
auditContext: "googlechat.api.ok",
});
try {
if (!res.ok) {
const text = await res.text().catch(() => "");
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
}
} finally {
await release();
}
}
async function fetchBuffer(
account: ResolvedGoogleChatAccount,
url: string,
init?: RequestInit,
options?: { maxBytes?: number },
): Promise<{ buffer: Buffer; contentType?: string }> {
const token = await getGoogleChatAccessToken(account);
const { response: res, release } = await fetchWithSsrFGuard({
url, url,
init: { init: {
...init, ...init,
@ -86,52 +40,103 @@ async function fetchBuffer(
Authorization: `Bearer ${token}`, Authorization: `Bearer ${token}`,
}, },
}, },
auditContext: "googlechat.api.buffer", auditContext,
}); });
try { try {
if (!res.ok) { if (!response.ok) {
const text = await res.text().catch(() => ""); const text = await response.text().catch(() => "");
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); throw new Error(`${errorPrefix} ${response.status}: ${text || response.statusText}`);
} }
const maxBytes = options?.maxBytes; return await handleResponse(response);
const lengthHeader = res.headers.get("content-length");
if (maxBytes && lengthHeader) {
const length = Number(lengthHeader);
if (Number.isFinite(length) && length > maxBytes) {
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
}
if (!maxBytes || !res.body) {
const buffer = Buffer.from(await res.arrayBuffer());
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
}
const reader = res.body.getReader();
const chunks: Buffer[] = [];
let total = 0;
while (true) {
const { done, value } = await reader.read();
if (done) {
break;
}
if (!value) {
continue;
}
total += value.length;
if (total > maxBytes) {
await reader.cancel();
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
chunks.push(Buffer.from(value));
}
const buffer = Buffer.concat(chunks, total);
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
} finally { } finally {
await release(); await release();
} }
} }
async function fetchJson<T>(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<T> {
return await withGoogleChatResponse({
account,
url,
init: {
...init,
headers: {
...headersToObject(init.headers),
"Content-Type": "application/json",
},
},
auditContext: "googlechat.api.json",
handleResponse: async (response) => (await response.json()) as T,
});
}
async function fetchOk(
account: ResolvedGoogleChatAccount,
url: string,
init: RequestInit,
): Promise<void> {
await withGoogleChatResponse({
account,
url,
init,
auditContext: "googlechat.api.ok",
handleResponse: async () => undefined,
});
}
async function fetchBuffer(
account: ResolvedGoogleChatAccount,
url: string,
init?: RequestInit,
options?: { maxBytes?: number },
): Promise<{ buffer: Buffer; contentType?: string }> {
return await withGoogleChatResponse({
account,
url,
init,
auditContext: "googlechat.api.buffer",
handleResponse: async (res) => {
const maxBytes = options?.maxBytes;
const lengthHeader = res.headers.get("content-length");
if (maxBytes && lengthHeader) {
const length = Number(lengthHeader);
if (Number.isFinite(length) && length > maxBytes) {
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
}
if (!maxBytes || !res.body) {
const buffer = Buffer.from(await res.arrayBuffer());
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
}
const reader = res.body.getReader();
const chunks: Buffer[] = [];
let total = 0;
while (true) {
const { done, value } = await reader.read();
if (done) {
break;
}
if (!value) {
continue;
}
total += value.length;
if (total > maxBytes) {
await reader.cancel();
throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`);
}
chunks.push(Buffer.from(value));
}
const buffer = Buffer.concat(chunks, total);
const contentType = res.headers.get("content-type") ?? undefined;
return { buffer, contentType };
},
});
}
export async function sendGoogleChatMessage(params: { export async function sendGoogleChatMessage(params: {
account: ResolvedGoogleChatAccount; account: ResolvedGoogleChatAccount;
space: string; space: string;
@ -208,34 +213,29 @@ export async function uploadGoogleChatAttachment(params: {
Buffer.from(footer, "utf8"), Buffer.from(footer, "utf8"),
]); ]);
const token = await getGoogleChatAccessToken(account);
const url = `${CHAT_UPLOAD_BASE}/${space}/attachments:upload?uploadType=multipart`; const url = `${CHAT_UPLOAD_BASE}/${space}/attachments:upload?uploadType=multipart`;
const { response: res, release } = await fetchWithSsrFGuard({ const payload = await withGoogleChatResponse<{
attachmentDataRef?: { attachmentUploadToken?: string };
}>({
account,
url, url,
init: { init: {
method: "POST", method: "POST",
headers: { headers: {
Authorization: `Bearer ${token}`,
"Content-Type": `multipart/related; boundary=${boundary}`, "Content-Type": `multipart/related; boundary=${boundary}`,
}, },
body, body,
}, },
auditContext: "googlechat.upload", auditContext: "googlechat.upload",
errorPrefix: "Google Chat upload",
handleResponse: async (response) =>
(await response.json()) as {
attachmentDataRef?: { attachmentUploadToken?: string };
},
}); });
try { return {
if (!res.ok) { attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken,
const text = await res.text().catch(() => ""); };
throw new Error(`Google Chat upload ${res.status}: ${text || res.statusText}`);
}
const payload = (await res.json()) as {
attachmentDataRef?: { attachmentUploadToken?: string };
};
return {
attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken,
};
} finally {
await release();
}
} }
export async function downloadGoogleChatMedia(params: { export async function downloadGoogleChatMedia(params: {

View File

@ -1,6 +1,10 @@
import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/googlechat"; import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/googlechat";
import { afterEach, describe, expect, it, vi } from "vitest"; import { afterEach, describe, expect, it, vi } from "vitest";
import { createStartAccountContext } from "../../test-utils/start-account-context.js"; import {
abortStartedAccount,
expectPendingUntilAbort,
startAccountAndTrackLifecycle,
} from "../../test-utils/start-account-lifecycle.js";
import type { ResolvedGoogleChatAccount } from "./accounts.js"; import type { ResolvedGoogleChatAccount } from "./accounts.js";
const hoisted = vi.hoisted(() => ({ const hoisted = vi.hoisted(() => ({
@ -39,29 +43,25 @@ describe("googlechatPlugin gateway.startAccount", () => {
}, },
}; };
const patches: ChannelAccountSnapshot[] = []; const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({
const abort = new AbortController(); startAccount: googlechatPlugin.gateway!.startAccount!,
const task = googlechatPlugin.gateway!.startAccount!( account,
createStartAccountContext({
account,
abortSignal: abort.signal,
statusPatchSink: (next) => patches.push({ ...next }),
}),
);
let settled = false;
void task.then(() => {
settled = true;
}); });
await vi.waitFor(() => { await expectPendingUntilAbort({
expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce(); waitForStarted: () =>
vi.waitFor(() => {
expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce();
}),
isSettled,
abort,
task,
assertBeforeAbort: () => {
expect(unregister).not.toHaveBeenCalled();
},
assertAfterAbort: () => {
expect(unregister).toHaveBeenCalledOnce();
},
}); });
expect(settled).toBe(false);
expect(unregister).not.toHaveBeenCalled();
abort.abort();
await task;
expect(unregister).toHaveBeenCalledOnce();
expect(patches.some((entry) => entry.running === true)).toBe(true); expect(patches.some((entry) => entry.running === true)).toBe(true);
expect(patches.some((entry) => entry.running === false)).toBe(true); expect(patches.some((entry) => entry.running === false)).toBe(true);
}); });

View File

@ -30,6 +30,7 @@ import {
type OpenClawConfig, type OpenClawConfig,
} from "openclaw/plugin-sdk/googlechat"; } from "openclaw/plugin-sdk/googlechat";
import { GoogleChatConfigSchema } from "openclaw/plugin-sdk/googlechat"; import { GoogleChatConfigSchema } from "openclaw/plugin-sdk/googlechat";
import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js";
import { import {
listGoogleChatAccountIds, listGoogleChatAccountIds,
resolveDefaultGoogleChatAccountId, resolveDefaultGoogleChatAccountId,
@ -473,20 +474,14 @@ export const googlechatPlugin: ChannelPlugin<ResolvedGoogleChatAccount> = {
} }
return issues; return issues;
}), }),
buildChannelSummary: ({ snapshot }) => ({ buildChannelSummary: ({ snapshot }) =>
configured: snapshot.configured ?? false, buildPassiveProbedChannelStatusSummary(snapshot, {
credentialSource: snapshot.credentialSource ?? "none", credentialSource: snapshot.credentialSource ?? "none",
audienceType: snapshot.audienceType ?? null, audienceType: snapshot.audienceType ?? null,
audience: snapshot.audience ?? null, audience: snapshot.audience ?? null,
webhookPath: snapshot.webhookPath ?? null, webhookPath: snapshot.webhookPath ?? null,
webhookUrl: snapshot.webhookUrl ?? null, webhookUrl: snapshot.webhookUrl ?? null,
running: snapshot.running ?? false, }),
lastStartAt: snapshot.lastStartAt ?? null,
lastStopAt: snapshot.lastStopAt ?? null,
lastError: snapshot.lastError ?? null,
probe: snapshot.probe,
lastProbeAt: snapshot.lastProbeAt ?? null,
}),
probeAccount: async ({ account }) => probeGoogleChat(account), probeAccount: async ({ account }) => probeGoogleChat(account),
buildAccountSnapshot: ({ account, runtime, probe }) => { buildAccountSnapshot: ({ account, runtime, probe }) => {
const base = buildComputedAccountStatusSnapshot({ const base = buildComputedAccountStatusSnapshot({

View File

@ -117,6 +117,34 @@ function registerTwoTargets() {
}; };
} }
async function dispatchWebhookRequest(req: IncomingMessage) {
const res = createMockServerResponse();
const handled = await handleGoogleChatWebhookRequest(req, res);
expect(handled).toBe(true);
return res;
}
async function expectVerifiedRoute(params: {
request: IncomingMessage;
expectedStatus: number;
sinkA: ReturnType<typeof vi.fn>;
sinkB: ReturnType<typeof vi.fn>;
expectedSink: "none" | "A" | "B";
}) {
const res = await dispatchWebhookRequest(params.request);
expect(res.statusCode).toBe(params.expectedStatus);
const expectedCounts =
params.expectedSink === "A" ? [1, 0] : params.expectedSink === "B" ? [0, 1] : [0, 0];
expect(params.sinkA).toHaveBeenCalledTimes(expectedCounts[0]);
expect(params.sinkB).toHaveBeenCalledTimes(expectedCounts[1]);
}
function mockSecondVerifierSuccess() {
vi.mocked(verifyGoogleChatRequest)
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
.mockResolvedValueOnce({ ok: true });
}
describe("Google Chat webhook routing", () => { describe("Google Chat webhook routing", () => {
afterEach(() => { afterEach(() => {
setActivePluginRegistry(createEmptyPluginRegistry()); setActivePluginRegistry(createEmptyPluginRegistry());
@ -165,45 +193,37 @@ describe("Google Chat webhook routing", () => {
const { sinkA, sinkB, unregister } = registerTwoTargets(); const { sinkA, sinkB, unregister } = registerTwoTargets();
try { try {
const res = createMockServerResponse(); await expectVerifiedRoute({
const handled = await handleGoogleChatWebhookRequest( request: createWebhookRequest({
createWebhookRequest({
authorization: "Bearer test-token", authorization: "Bearer test-token",
payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/AAA" } }, payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/AAA" } },
}), }),
res, expectedStatus: 401,
); sinkA,
sinkB,
expect(handled).toBe(true); expectedSink: "none",
expect(res.statusCode).toBe(401); });
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).not.toHaveBeenCalled();
} finally { } finally {
unregister(); unregister();
} }
}); });
it("routes to the single verified target when earlier targets fail verification", async () => { it("routes to the single verified target when earlier targets fail verification", async () => {
vi.mocked(verifyGoogleChatRequest) mockSecondVerifierSuccess();
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
.mockResolvedValueOnce({ ok: true });
const { sinkA, sinkB, unregister } = registerTwoTargets(); const { sinkA, sinkB, unregister } = registerTwoTargets();
try { try {
const res = createMockServerResponse(); await expectVerifiedRoute({
const handled = await handleGoogleChatWebhookRequest( request: createWebhookRequest({
createWebhookRequest({
authorization: "Bearer test-token", authorization: "Bearer test-token",
payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/BBB" } }, payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/BBB" } },
}), }),
res, expectedStatus: 200,
); sinkA,
sinkB,
expect(handled).toBe(true); expectedSink: "B",
expect(res.statusCode).toBe(200); });
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).toHaveBeenCalledTimes(1);
} finally { } finally {
unregister(); unregister();
} }
@ -218,10 +238,7 @@ describe("Google Chat webhook routing", () => {
authorization: "Bearer invalid-token", authorization: "Bearer invalid-token",
}); });
const onSpy = vi.spyOn(req, "on"); const onSpy = vi.spyOn(req, "on");
const res = createMockServerResponse(); const res = await dispatchWebhookRequest(req);
const handled = await handleGoogleChatWebhookRequest(req, res);
expect(handled).toBe(true);
expect(res.statusCode).toBe(401); expect(res.statusCode).toBe(401);
expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function)); expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function));
} finally { } finally {
@ -230,15 +247,12 @@ describe("Google Chat webhook routing", () => {
}); });
it("supports add-on requests that provide systemIdToken in the body", async () => { it("supports add-on requests that provide systemIdToken in the body", async () => {
vi.mocked(verifyGoogleChatRequest) mockSecondVerifierSuccess();
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
.mockResolvedValueOnce({ ok: true });
const { sinkA, sinkB, unregister } = registerTwoTargets(); const { sinkA, sinkB, unregister } = registerTwoTargets();
try { try {
const res = createMockServerResponse(); await expectVerifiedRoute({
const handled = await handleGoogleChatWebhookRequest( request: createWebhookRequest({
createWebhookRequest({
payload: { payload: {
commonEventObject: { hostApp: "CHAT" }, commonEventObject: { hostApp: "CHAT" },
authorizationEventObject: { systemIdToken: "addon-token" }, authorizationEventObject: { systemIdToken: "addon-token" },
@ -252,13 +266,11 @@ describe("Google Chat webhook routing", () => {
}, },
}, },
}), }),
res, expectedStatus: 200,
); sinkA,
sinkB,
expect(handled).toBe(true); expectedSink: "B",
expect(res.statusCode).toBe(200); });
expect(sinkA).not.toHaveBeenCalled();
expect(sinkB).toHaveBeenCalledTimes(1);
} finally { } finally {
unregister(); unregister();
} }

View File

@ -29,6 +29,7 @@ import {
type ChannelPlugin, type ChannelPlugin,
type ResolvedIMessageAccount, type ResolvedIMessageAccount,
} from "openclaw/plugin-sdk/imessage"; } from "openclaw/plugin-sdk/imessage";
import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js";
import { getIMessageRuntime } from "./runtime.js"; import { getIMessageRuntime } from "./runtime.js";
const meta = getChatChannelMeta("imessage"); const meta = getChatChannelMeta("imessage");
@ -264,17 +265,11 @@ export const imessagePlugin: ChannelPlugin<ResolvedIMessageAccount> = {
dbPath: null, dbPath: null,
}, },
collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("imessage", accounts), collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("imessage", accounts),
buildChannelSummary: ({ snapshot }) => ({ buildChannelSummary: ({ snapshot }) =>
configured: snapshot.configured ?? false, buildPassiveProbedChannelStatusSummary(snapshot, {
running: snapshot.running ?? false, cliPath: snapshot.cliPath ?? null,
lastStartAt: snapshot.lastStartAt ?? null, dbPath: snapshot.dbPath ?? null,
lastStopAt: snapshot.lastStopAt ?? null, }),
lastError: snapshot.lastError ?? null,
cliPath: snapshot.cliPath ?? null,
dbPath: snapshot.dbPath ?? null,
probe: snapshot.probe,
lastProbeAt: snapshot.lastProbeAt ?? null,
}),
probeAccount: async ({ timeoutMs }) => probeAccount: async ({ timeoutMs }) =>
getIMessageRuntime().channel.imessage.probeIMessage(timeoutMs), getIMessageRuntime().channel.imessage.probeIMessage(timeoutMs),
buildAccountSnapshot: ({ account, runtime, probe }) => ({ buildAccountSnapshot: ({ account, runtime, probe }) => ({

Some files were not shown because too many files have changed in this diff Show More