mirror of https://github.com/openclaw/openclaw.git
Merge branch 'main' of https://github.com/openclaw/openclaw
* 'main' of https://github.com/openclaw/openclaw: (640 commits) ci: add npm token fallback for npm releases build: prepare 2026.3.13-beta.1 docs: reorder unreleased changelog by impact fix: keep windows onboarding logs ascii-safe test: harden parallels all-os smoke harness chore: bump pi to 0.58.0 fix(browser): prefer user profile over chrome relay build: upload Android native debug symbols Gateway: treat scope-limited probe RPC as degraded reachability (#45622) build: shrink Android app release bundle fix: keep exec summaries inline docs: fix changelog formatting test(discord): align rate limit error mock with carbon build(android): strip unused dnsjava resolver service before R8 build(android): add auto-bump signed aab release script fix(browser): add browser session selection fix(models): apply Gemini model-id normalization to google-vertex provider (#42435) fix(feishu): add early event-level dedup to prevent duplicate replies (#43762) fix: unblock discord startup on deploy rate limits fix: default Android TLS setup codes to port 443 ... # Conflicts: # src/browser/pw-tools-core.interactions.batch.test.ts # src/browser/pw-tools-core.interactions.ts
This commit is contained in:
commit
81ecae9d7a
|
|
@ -1,5 +1,11 @@
|
|||
.git
|
||||
.worktrees
|
||||
|
||||
# Sensitive files – docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN
|
||||
# into the project root; keep it out of the build context.
|
||||
.env
|
||||
.env.*
|
||||
|
||||
.bun-cache
|
||||
.bun
|
||||
.tmp
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ on:
|
|||
|
||||
concurrency:
|
||||
group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
|
||||
|
|
@ -38,9 +38,8 @@ jobs:
|
|||
id: check
|
||||
uses: ./.github/actions/detect-docs-changes
|
||||
|
||||
# Detect which heavy areas are touched so PRs can skip unrelated expensive jobs.
|
||||
# Push to main keeps broad coverage, but this job still needs to run so
|
||||
# downstream jobs that list it in `needs` are not skipped.
|
||||
# Detect which heavy areas are touched so CI can skip unrelated expensive jobs.
|
||||
# Fail-safe: if detection fails, downstream jobs run.
|
||||
changed-scope:
|
||||
needs: [docs-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true'
|
||||
|
|
@ -82,7 +81,7 @@ jobs:
|
|||
# Build dist once for Node-relevant changes and share it with downstream jobs.
|
||||
build-artifacts:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
@ -141,7 +140,7 @@ jobs:
|
|||
|
||||
checks:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
|
@ -149,6 +148,13 @@ jobs:
|
|||
include:
|
||||
- runtime: node
|
||||
task: test
|
||||
shard_index: 1
|
||||
shard_count: 2
|
||||
command: pnpm canvas:a2ui:bundle && pnpm test
|
||||
- runtime: node
|
||||
task: test
|
||||
shard_index: 2
|
||||
shard_count: 2
|
||||
command: pnpm canvas:a2ui:bundle && pnpm test
|
||||
- runtime: node
|
||||
task: extensions
|
||||
|
|
@ -160,40 +166,47 @@ jobs:
|
|||
task: test
|
||||
command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts
|
||||
steps:
|
||||
- name: Skip bun lane on push
|
||||
if: github.event_name == 'push' && matrix.runtime == 'bun'
|
||||
run: echo "Skipping bun test lane on push events."
|
||||
- name: Skip bun lane on pull requests
|
||||
if: github.event_name == 'pull_request' && matrix.runtime == 'bun'
|
||||
run: echo "Skipping Bun compatibility lane on pull requests."
|
||||
|
||||
- name: Checkout
|
||||
if: github.event_name != 'push' || matrix.runtime != 'bun'
|
||||
if: github.event_name != 'pull_request' || matrix.runtime != 'bun'
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: false
|
||||
|
||||
- name: Setup Node environment
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'push'
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
|
||||
uses: ./.github/actions/setup-node-env
|
||||
with:
|
||||
install-bun: "${{ matrix.runtime == 'bun' }}"
|
||||
use-sticky-disk: "false"
|
||||
|
||||
- name: Configure Node test resources
|
||||
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
|
||||
if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
|
||||
env:
|
||||
SHARD_COUNT: ${{ matrix.shard_count || '' }}
|
||||
SHARD_INDEX: ${{ matrix.shard_index || '' }}
|
||||
run: |
|
||||
# `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes.
|
||||
# Default heap limits have been too low on Linux CI (V8 OOM near 4GB).
|
||||
echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV"
|
||||
echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV"
|
||||
if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then
|
||||
echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV"
|
||||
echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV"
|
||||
fi
|
||||
|
||||
- name: Run ${{ matrix.task }} (${{ matrix.runtime }})
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'push'
|
||||
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
|
||||
run: ${{ matrix.command }}
|
||||
|
||||
# Types, lint, and format check.
|
||||
check:
|
||||
name: "check"
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
@ -239,7 +252,7 @@ jobs:
|
|||
compat-node22:
|
||||
name: "compat-node22"
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true')
|
||||
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
@ -272,7 +285,7 @@ jobs:
|
|||
|
||||
skills-python:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
@ -365,7 +378,7 @@ jobs:
|
|||
|
||||
checks-windows:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true'
|
||||
runs-on: blacksmith-32vcpu-windows-2025
|
||||
timeout-minutes: 45
|
||||
env:
|
||||
|
|
@ -727,7 +740,7 @@ jobs:
|
|||
|
||||
android:
|
||||
needs: [docs-scope, changed-scope]
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true')
|
||||
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true'
|
||||
runs-on: blacksmith-16vcpu-ubuntu-2404
|
||||
strategy:
|
||||
fail-fast: false
|
||||
|
|
|
|||
|
|
@ -69,8 +69,13 @@ jobs:
|
|||
run: pnpm release:check
|
||||
|
||||
- name: Publish
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [[ -n "${NODE_AUTH_TOKEN:-}" ]]; then
|
||||
printf '//registry.npmjs.org/:_authToken=%s\n' "$NODE_AUTH_TOKEN" > "$HOME/.npmrc"
|
||||
fi
|
||||
PACKAGE_VERSION=$(node -p "require('./package.json').version")
|
||||
|
||||
if [[ "$PACKAGE_VERSION" == *-beta.* ]]; then
|
||||
|
|
|
|||
|
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"gitignore": true,
|
||||
"noSymlinks": true,
|
||||
"ignore": [
|
||||
"**/node_modules/**",
|
||||
"**/dist/**",
|
||||
"dist/**",
|
||||
"**/.git/**",
|
||||
"**/coverage/**",
|
||||
"**/build/**",
|
||||
"**/.build/**",
|
||||
"**/.artifacts/**",
|
||||
"docs/zh-CN/**",
|
||||
"**/CHANGELOG.md"
|
||||
]
|
||||
}
|
||||
37
AGENTS.md
37
AGENTS.md
|
|
@ -132,6 +132,7 @@
|
|||
- Framework: Vitest with V8 coverage thresholds (70% lines/branches/functions/statements).
|
||||
- Naming: match source names with `*.test.ts`; e2e in `*.e2e.test.ts`.
|
||||
- Run `pnpm test` (or `pnpm test:coverage`) before pushing when you touch logic.
|
||||
- For targeted/local debugging, keep using the wrapper: `pnpm test -- <path-or-filter> [vitest args...]` (for example `pnpm test -- src/commands/onboard-search.test.ts -t "shows registered plugin providers"`); do not default to raw `pnpm vitest run ...` because it bypasses wrapper config/profile/pool routing.
|
||||
- Do not set test workers above 16; tried already.
|
||||
- If local Vitest runs cause memory pressure (common on non-Mac-Studio hosts), use `OPENCLAW_TEST_PROFILE=low OPENCLAW_TEST_SERIAL_GATEWAY=1 pnpm test` for land/gate runs.
|
||||
- Live tests (real keys): `CLAWDBOT_LIVE_TEST=1 pnpm test:live` (OpenClaw-only) or `LIVE=1 pnpm test:live` (includes provider live tests). Docker: `pnpm test:docker:live-models`, `pnpm test:docker:live-gateway`. Onboarding Docker E2E: `pnpm test:docker:onboard`.
|
||||
|
|
@ -201,6 +202,42 @@
|
|||
## Agent-Specific Notes
|
||||
|
||||
- Vocabulary: "makeup" = "mac app".
|
||||
- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested.
|
||||
- Parallels macOS smoke playbook:
|
||||
- `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`.
|
||||
- Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed.
|
||||
- Preferred automation entrypoint: `pnpm test:parallels:macos`. It restores the snapshot most closely matching `macOS 26.3.1 fresh`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
|
||||
- Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero.
|
||||
- Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded.
|
||||
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-smoke.*`.
|
||||
- All-OS parallel runs should share the host `dist` build via `/tmp/openclaw-parallels-build.lock` instead of rebuilding three times.
|
||||
- Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails.
|
||||
- Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`.
|
||||
- For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green.
|
||||
- Don’t run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially.
|
||||
- Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading.
|
||||
- Parallels Windows smoke playbook:
|
||||
- Preferred automation entrypoint: `pnpm test:parallels:windows`. It restores the snapshot most closely matching `pre-openclaw-native-e2e-2026-03-12`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
|
||||
- Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero.
|
||||
- Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded.
|
||||
- Always use `prlctl exec --current-user` for Windows guest runs; plain `prlctl exec` lands in `NT AUTHORITY\SYSTEM` and does not match the real desktop-user install path.
|
||||
- Prefer explicit `npm.cmd` / `openclaw.cmd`. Bare `npm` / `openclaw` in PowerShell can hit the `.ps1` shim and fail under restrictive execution policy.
|
||||
- Use PowerShell only as the transport (`powershell.exe -NoProfile -ExecutionPolicy Bypass`) and call the `.cmd` shims explicitly from inside it.
|
||||
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-windows.*`.
|
||||
- Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails.
|
||||
- Keep Windows onboarding/status text ASCII-clean in logs. Fancy punctuation in banners shows up as mojibake through the current guest PowerShell capture path.
|
||||
- Parallels Linux smoke playbook:
|
||||
- Preferred automation entrypoint: `pnpm test:parallels:linux`. It restores the snapshot most closely matching `fresh` on `Ubuntu 24.04.3 ARM64`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes.
|
||||
- Use plain `prlctl exec` on this snapshot. `--current-user` is not the right transport there.
|
||||
- Fresh snapshot reality: `curl` is missing and `apt-get update` can fail on clock skew. Bootstrap with `apt-get -o Acquire::Check-Date=false update` and install `curl ca-certificates` before testing installer paths.
|
||||
- Fresh `main` tgz smoke on Linux still needs the latest-release installer first, because this snapshot has no Node/npm before bootstrap. The harness does stable bootstrap first, then overlays current `main`.
|
||||
- This snapshot does not have a usable `systemd --user` session. Treat managed daemon install as unsupported here; use `--skip-health`, then verify with direct `openclaw gateway run --bind loopback --port 18789 --force`.
|
||||
- Env-backed auth refs are still fine, but any direct shell launch (`openclaw gateway run`, `openclaw agent --local`, Linux `gateway status --deep` against that direct run) must inherit the referenced env vars in the same shell.
|
||||
- `prlctl exec` reaps detached Linux child processes on this snapshot, so a background `openclaw gateway run` launched from automation is not a trustworthy smoke path. The harness verifies installer + `agent --local`; do direct gateway checks only from an interactive guest shell when needed.
|
||||
- When you do run Linux gateway checks manually from an interactive guest shell, use `openclaw gateway status --deep --require-rpc` so an RPC miss is a hard failure.
|
||||
- Prefer direct argv guest commands for fetch/install steps (`curl`, `npm install -g`, `openclaw ...`) over nested `bash -lc` quoting; Linux guest quoting through Parallels was the flaky part.
|
||||
- Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-linux.*`.
|
||||
- Current expected outcome on Linux smoke: fresh + upgrade should pass installer and `agent --local`; gateway remains `skipped-no-detached-linux-gateway` on this snapshot and should not be treated as a regression by itself.
|
||||
- Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`.
|
||||
- When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`).
|
||||
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`.
|
||||
|
|
|
|||
69
CHANGELOG.md
69
CHANGELOG.md
|
|
@ -7,32 +7,64 @@ Docs: https://docs.openclaw.ai
|
|||
### Changes
|
||||
|
||||
- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
|
||||
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
|
||||
- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman.
|
||||
- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome’s own setup guides.
|
||||
- Browser/agents: add built-in `profile="user"` for the logged-in host browser and `profile="chrome-relay"` for the extension relay, so agent browser calls can prefer the real signed-in browser without the extra `browserSession` selector.
|
||||
- Browser/act automation: add batched actions, selector targeting, and delayed clicks for browser act requests with normalized batch dispatch. Thanks @vincentkoc.
|
||||
- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
|
||||
- Dependencies/pi: bump `@mariozechner/pi-agent-core`, `@mariozechner/pi-ai`, `@mariozechner/pi-coding-agent`, and `@mariozechner/pi-tui` to `0.58.0`.
|
||||
|
||||
### Fixes
|
||||
|
||||
- Dashboard/chat UI: stop reloading full chat history on every live tool result in dashboard v2 so tool-heavy runs no longer trigger UI freeze/re-render storms while the final event still refreshes persisted history. (#45541) Thanks @BunsDev.
|
||||
- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
|
||||
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
|
||||
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
|
||||
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
|
||||
- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv.
|
||||
- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman.
|
||||
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
|
||||
- Browser/existing-session: harden driver validation and session lifecycle so transport errors trigger reconnects while tool-level errors preserve the session, and extract shared ARIA role sets to deduplicate Playwright and Chrome MCP snapshot paths. (#45682) Thanks @odysseus0.
|
||||
- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
|
||||
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
|
||||
- Gateway/session reset: preserve `lastAccountId` and `lastThreadId` across gateway session resets so replies keep routing back to the same account and thread after `/reset`. (#44773) Thanks @Lanfei.
|
||||
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
|
||||
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
|
||||
- Gateway/status: add `openclaw gateway status --require-rpc` and clearer Linux non-interactive daemon-install failure reporting so automation can fail hard on probe misses instead of treating a printed RPC error as green.
|
||||
- macOS/exec approvals: respect per-agent exec approval settings in the gateway prompter, including allowlist fallback when the native prompt cannot be shown, so gateway-triggered `system.run` requests follow configured policy instead of always prompting or denying unexpectedly. (#13707) Thanks @sliekens.
|
||||
- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
|
||||
- Telegram/inbound media IPv4 fallback: retry SSRF-guarded Telegram file downloads once with the same IPv4 fallback policy as Bot API calls so fresh installs on IPv6-broken hosts no longer fail to download inbound images.
|
||||
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups.
|
||||
- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart.
|
||||
- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`.
|
||||
- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding.
|
||||
- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman.
|
||||
- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
|
||||
- Dashboard/chat UI: render oversized plain-text replies as normal paragraphs instead of capped gray code blocks, so long desktop chat responses stay readable without tab-switching refreshes.
|
||||
- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
|
||||
- Gateway/Control UI: restore the operator-only device-auth bypass and classify browser connect failures so origin and device-identity problems no longer show up as auth errors in the Control UI and web chat. (#45512) thanks @sallyom.
|
||||
- macOS/voice wake: stop crashing wake-word command extraction when speech segment ranges come from a different transcript instance.
|
||||
- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks.
|
||||
- macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo.
|
||||
- Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu.
|
||||
- Models/google-vertex Gemini flash-lite normalization: apply existing bare-ID preview normalization to `google-vertex` model refs and provider configs so `google-vertex/gemini-3.1-flash-lite` resolves as `gemini-3.1-flash-lite-preview`. (#42435) thanks @scoootscooob.
|
||||
- iMessage/remote attachments: reject unsafe remote attachment paths before spawning SCP, so sender-controlled filenames can no longer inject shell metacharacters into remote media staging. Thanks @lintsinghua.
|
||||
- Telegram/webhook auth: validate the Telegram webhook secret before reading or parsing request bodies, so unauthenticated requests are rejected immediately instead of consuming up to 1 MB first. Thanks @space08.
|
||||
- Security/device pairing: make bootstrap setup codes single-use so pending device pairing requests cannot be silently replayed and widened to admin before approval. Thanks @tdjackey.
|
||||
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
|
||||
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
|
||||
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
|
||||
- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants.
|
||||
- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token.
|
||||
- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks.
|
||||
- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins.
|
||||
- Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn.
|
||||
- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc.
|
||||
- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
|
||||
- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
|
||||
- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
|
||||
- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv.
|
||||
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
|
||||
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
|
||||
- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179.
|
||||
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
|
||||
- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
|
||||
- Signal/config validation: add `channels.signal.groups` schema support so per-group `requireMention`, `tools`, and `toolsBySender` overrides no longer get rejected during config validation. (#27199) Thanks @unisone.
|
||||
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
|
||||
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
|
||||
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
|
||||
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
|
||||
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.
|
||||
- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
|
||||
- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
|
||||
- Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08.
|
||||
|
||||
## 2026.3.12
|
||||
|
||||
|
|
@ -45,6 +77,7 @@ Docs: https://docs.openclaw.ai
|
|||
- Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom @dzianisv @egkristi
|
||||
- Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff
|
||||
- Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc.
|
||||
- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc.
|
||||
|
||||
### Fixes
|
||||
|
||||
|
|
@ -101,13 +134,16 @@ Docs: https://docs.openclaw.ai
|
|||
- Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) thanks @jalehman.
|
||||
- Context engine/session routing: forward optional `sessionKey` through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) thanks @jalehman.
|
||||
- Agents/failover: classify z.ai `network_error` stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev.
|
||||
- Config/Anthropic startup: inline Anthropic alias normalization during config load so gateway startup no longer crashes on dated Anthropic model refs like `anthropic/claude-sonnet-4-20250514`. (#45520) Thanks @BunsDev.
|
||||
- Memory/session sync: add mode-aware post-compaction session reindexing with `agents.defaults.compaction.postIndexSync` plus `agents.defaults.memorySearch.sync.sessions.postCompactionForce`, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) thanks @rodrigouroz.
|
||||
- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb.
|
||||
- Telegram/native command sync: suppress expected `BOT_COMMANDS_TOO_MUCH` retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures.
|
||||
- Mattermost/reply media delivery: pass agent-scoped `mediaLocalRoots` through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666.
|
||||
- Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process `HOME`/`OPENCLAW_HOME` changes no longer reuse stale plugin state or misreport `~/...` plugins as untracked. (#44046) thanks @gumadeiras.
|
||||
- Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated `session.store` roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) thanks @gumadeiras.
|
||||
- Browser/existing-session: stop reporting fake CDP ports/URLs for live attached Chrome sessions, render `transport: chrome-mcp` in CLI/status output instead of `port: 0`, and keep timeout diagnostics transport-aware when no direct CDP URL exists.
|
||||
- Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and `models list --plain`, and migrate legacy duplicated `openrouter/openrouter/...` config entries forward on write.
|
||||
- Feishu/event dedupe: keep early duplicate suppression aligned with the shared Feishu message-id contract and release the pre-queue dedupe marker after failed dispatch so retried events can recover instead of being dropped until the short TTL expires. (#43762) Thanks @yunweibang.
|
||||
- Gateway/hooks: bucket hook auth failures by forwarded client IP behind trusted proxies and warn when `hooks.allowedAgentIds` leaves hook routing unrestricted.
|
||||
- Agents/compaction: skip the post-compaction `cache-ttl` marker write when a compaction completed in the same attempt, preventing the next turn from immediately triggering a second tiny compaction. (#28548) thanks @MoerAI.
|
||||
- Native chat/macOS: add `/new`, `/reset`, and `/clear` reset triggers, keep shared main-session aliases aligned, and ignore stale model-selection completions so native chat state stays in sync across reset and fast model changes. (#10898) Thanks @Nachx639.
|
||||
|
|
@ -118,6 +154,8 @@ Docs: https://docs.openclaw.ai
|
|||
- Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn.
|
||||
- CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621.
|
||||
- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte.
|
||||
- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh.
|
||||
- Feishu/file uploads: preserve literal UTF-8 filenames in `im.file.create` so Chinese and other non-ASCII filenames no longer appear percent-encoded in chat. (#34262) Thanks @fabiaodemianyang and @KangShuaiFu.
|
||||
|
||||
## 2026.3.11
|
||||
|
||||
|
|
@ -258,6 +296,7 @@ Docs: https://docs.openclaw.ai
|
|||
- Agents/failover: classify ZenMux quota-refresh `402` responses as `rate_limit` so model fallback retries continue instead of stopping on a temporary subscription window. (#43917) thanks @bwjoke.
|
||||
- Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode.
|
||||
- Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows.
|
||||
- macOS/remote gateway: stop PortGuardian from killing Docker Desktop and other external listeners on the gateway port in remote mode, so containerized and tunneled gateway setups no longer lose their port-forward owner on app startup. (#6755) Thanks @teslamint.
|
||||
|
||||
## 2026.3.8
|
||||
|
||||
|
|
@ -3261,7 +3300,7 @@ Docs: https://docs.openclaw.ai
|
|||
- Agents: add CLI log hint to "agent failed before reply" messages. (#1550) Thanks @sweepies.
|
||||
- Agents: warn and ignore tool allowlists that only reference unknown or unloaded plugin tools. (#1566)
|
||||
- Agents: treat plugin-only tool allowlists as opt-ins; keep core tools enabled. (#1467)
|
||||
- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (commit 084002998)
|
||||
- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (#45459) Thanks @LyttonFeng and @vincentkoc.
|
||||
- Slack: honor open groupPolicy for unlisted channels in message + slash gating. (#1563) Thanks @itsjaydesu.
|
||||
- Discord: limit autoThread mention bypass to bot-owned threads; keep ack reactions mention-gated. (#1511) Thanks @pvoo.
|
||||
- Discord: retry rate-limited allowlist resolution + command deploy to avoid gateway crashes. (commit f70ac0c7c)
|
||||
|
|
|
|||
|
|
@ -132,6 +132,7 @@ WORKDIR /app
|
|||
RUN --mount=type=cache,id=openclaw-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,id=openclaw-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends && \
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
procps hostname curl git openssl
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
|
|||
RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update \
|
||||
&& apt-get upgrade -y --no-install-recommends \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
bash \
|
||||
ca-certificates \
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive
|
|||
RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update \
|
||||
&& apt-get upgrade -y --no-install-recommends \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
bash \
|
||||
ca-certificates \
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ ENV PATH=${BUN_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/sbin
|
|||
RUN --mount=type=cache,id=openclaw-sandbox-common-apt-cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,id=openclaw-sandbox-common-apt-lists,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update \
|
||||
&& apt-get upgrade -y --no-install-recommends \
|
||||
&& apt-get install -y --no-install-recommends ${PACKAGES}
|
||||
|
||||
RUN if [ "${INSTALL_PNPM}" = "1" ]; then npm install -g pnpm; fi
|
||||
|
|
|
|||
|
|
@ -101,25 +101,19 @@ public enum WakeWordGate {
|
|||
}
|
||||
|
||||
public static func commandText(
|
||||
transcript: String,
|
||||
transcript _: String,
|
||||
segments: [WakeWordSegment],
|
||||
triggerEndTime: TimeInterval)
|
||||
-> String {
|
||||
let threshold = triggerEndTime + 0.001
|
||||
var commandWords: [String] = []
|
||||
commandWords.reserveCapacity(segments.count)
|
||||
for segment in segments where segment.start >= threshold {
|
||||
if normalizeToken(segment.text).isEmpty { continue }
|
||||
if let range = segment.range {
|
||||
let slice = transcript[range.lowerBound...]
|
||||
return String(slice).trimmingCharacters(in: Self.whitespaceAndPunctuation)
|
||||
let normalized = normalizeToken(segment.text)
|
||||
if normalized.isEmpty { continue }
|
||||
commandWords.append(segment.text)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
let text = segments
|
||||
.filter { $0.start >= threshold && !normalizeToken($0.text).isEmpty }
|
||||
.map(\.text)
|
||||
.joined(separator: " ")
|
||||
return text.trimmingCharacters(in: Self.whitespaceAndPunctuation)
|
||||
return commandWords.joined(separator: " ").trimmingCharacters(in: Self.whitespaceAndPunctuation)
|
||||
}
|
||||
|
||||
public static func matchesTextOnly(text: String, triggers: [String]) -> Bool {
|
||||
|
|
|
|||
|
|
@ -46,6 +46,25 @@ import Testing
|
|||
let match = WakeWordGate.match(transcript: transcript, segments: segments, config: config)
|
||||
#expect(match?.command == "do it")
|
||||
}
|
||||
|
||||
@Test func commandTextHandlesForeignRangeIndices() {
|
||||
let transcript = "hey clawd do thing"
|
||||
let other = "do thing"
|
||||
let foreignRange = other.range(of: "do")
|
||||
let segments = [
|
||||
WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")),
|
||||
WakeWordSegment(text: "clawd", start: 0.2, duration: 0.1, range: transcript.range(of: "clawd")),
|
||||
WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange),
|
||||
WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil),
|
||||
]
|
||||
|
||||
let command = WakeWordGate.commandText(
|
||||
transcript: transcript,
|
||||
segments: segments,
|
||||
triggerEndTime: 0.3)
|
||||
|
||||
#expect(command == "do thing")
|
||||
}
|
||||
}
|
||||
|
||||
private func makeSegments(
|
||||
|
|
|
|||
|
|
@ -30,8 +30,12 @@ cd apps/android
|
|||
./gradlew :app:assembleDebug
|
||||
./gradlew :app:installDebug
|
||||
./gradlew :app:testDebugUnitTest
|
||||
cd ../..
|
||||
bun run android:bundle:release
|
||||
```
|
||||
|
||||
`bun run android:bundle:release` auto-bumps Android `versionName`/`versionCode` in `apps/android/app/build.gradle.kts`, then builds a signed release `.aab`.
|
||||
|
||||
## Kotlin Lint + Format
|
||||
|
||||
```bash
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
import com.android.build.api.variant.impl.VariantOutputImpl
|
||||
|
||||
val dnsjavaInetAddressResolverService = "META-INF/services/java.net.spi.InetAddressResolverProvider"
|
||||
|
||||
val androidStoreFile = providers.gradleProperty("OPENCLAW_ANDROID_STORE_FILE").orNull?.takeIf { it.isNotBlank() }
|
||||
val androidStorePassword = providers.gradleProperty("OPENCLAW_ANDROID_STORE_PASSWORD").orNull?.takeIf { it.isNotBlank() }
|
||||
val androidKeyAlias = providers.gradleProperty("OPENCLAW_ANDROID_KEY_ALIAS").orNull?.takeIf { it.isNotBlank() }
|
||||
|
|
@ -63,8 +65,8 @@ android {
|
|||
applicationId = "ai.openclaw.app"
|
||||
minSdk = 31
|
||||
targetSdk = 36
|
||||
versionCode = 202603130
|
||||
versionName = "2026.3.13"
|
||||
versionCode = 2026031400
|
||||
versionName = "2026.3.14"
|
||||
ndk {
|
||||
// Support all major ABIs — native libs are tiny (~47 KB per ABI)
|
||||
abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64")
|
||||
|
|
@ -78,6 +80,9 @@ android {
|
|||
}
|
||||
isMinifyEnabled = true
|
||||
isShrinkResources = true
|
||||
ndk {
|
||||
debugSymbolLevel = "SYMBOL_TABLE"
|
||||
}
|
||||
proguardFiles(getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro")
|
||||
}
|
||||
debug {
|
||||
|
|
@ -104,6 +109,10 @@ android {
|
|||
"/META-INF/LICENSE*.txt",
|
||||
"DebugProbesKt.bin",
|
||||
"kotlin-tooling-metadata.json",
|
||||
"org/bouncycastle/pqc/crypto/picnic/lowmcL1.bin.properties",
|
||||
"org/bouncycastle/pqc/crypto/picnic/lowmcL3.bin.properties",
|
||||
"org/bouncycastle/pqc/crypto/picnic/lowmcL5.bin.properties",
|
||||
"org/bouncycastle/x509/CertPathReviewerMessages*.properties",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
@ -168,7 +177,6 @@ dependencies {
|
|||
// material-icons-extended pulled in full icon set (~20 MB DEX). Only ~18 icons used.
|
||||
// R8 will tree-shake unused icons when minify is enabled on release builds.
|
||||
implementation("androidx.compose.material:material-icons-extended")
|
||||
implementation("androidx.navigation:navigation-compose:2.9.7")
|
||||
|
||||
debugImplementation("androidx.compose.ui:ui-tooling")
|
||||
|
||||
|
|
@ -193,7 +201,6 @@ dependencies {
|
|||
implementation("androidx.camera:camera-camera2:1.5.2")
|
||||
implementation("androidx.camera:camera-lifecycle:1.5.2")
|
||||
implementation("androidx.camera:camera-video:1.5.2")
|
||||
implementation("androidx.camera:camera-view:1.5.2")
|
||||
implementation("com.google.android.gms:play-services-code-scanner:16.1.0")
|
||||
|
||||
// Unicast DNS-SD (Wide-Area Bonjour) for tailnet discovery domains.
|
||||
|
|
@ -211,3 +218,45 @@ dependencies {
|
|||
tasks.withType<Test>().configureEach {
|
||||
useJUnitPlatform()
|
||||
}
|
||||
|
||||
val stripReleaseDnsjavaServiceDescriptor =
|
||||
tasks.register("stripReleaseDnsjavaServiceDescriptor") {
|
||||
val mergedJar =
|
||||
layout.buildDirectory.file(
|
||||
"intermediates/merged_java_res/release/mergeReleaseJavaResource/base.jar",
|
||||
)
|
||||
|
||||
inputs.file(mergedJar)
|
||||
outputs.file(mergedJar)
|
||||
|
||||
doLast {
|
||||
val jarFile = mergedJar.get().asFile
|
||||
if (!jarFile.exists()) {
|
||||
return@doLast
|
||||
}
|
||||
|
||||
val unpackDir = temporaryDir.resolve("merged-java-res")
|
||||
delete(unpackDir)
|
||||
copy {
|
||||
from(zipTree(jarFile))
|
||||
into(unpackDir)
|
||||
exclude(dnsjavaInetAddressResolverService)
|
||||
}
|
||||
delete(jarFile)
|
||||
ant.invokeMethod(
|
||||
"zip",
|
||||
mapOf(
|
||||
"destfile" to jarFile.absolutePath,
|
||||
"basedir" to unpackDir.absolutePath,
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
tasks.matching { it.name == "stripReleaseDnsjavaServiceDescriptor" }.configureEach {
|
||||
dependsOn("mergeReleaseJavaResource")
|
||||
}
|
||||
|
||||
tasks.matching { it.name == "minifyReleaseWithR8" }.configureEach {
|
||||
dependsOn(stripReleaseDnsjavaServiceDescriptor)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,26 +1,6 @@
|
|||
# ── App classes ───────────────────────────────────────────────────
|
||||
-keep class ai.openclaw.app.** { *; }
|
||||
|
||||
# ── Bouncy Castle ─────────────────────────────────────────────────
|
||||
-keep class org.bouncycastle.** { *; }
|
||||
-dontwarn org.bouncycastle.**
|
||||
|
||||
# ── CameraX ───────────────────────────────────────────────────────
|
||||
-keep class androidx.camera.** { *; }
|
||||
|
||||
# ── kotlinx.serialization ────────────────────────────────────────
|
||||
-keep class kotlinx.serialization.** { *; }
|
||||
-keepclassmembers class * {
|
||||
@kotlinx.serialization.Serializable *;
|
||||
}
|
||||
-keepattributes *Annotation*, InnerClasses
|
||||
|
||||
# ── OkHttp ────────────────────────────────────────────────────────
|
||||
-dontwarn okhttp3.**
|
||||
-dontwarn okio.**
|
||||
-keep class okhttp3.internal.platform.** { *; }
|
||||
|
||||
# ── Misc suppressions ────────────────────────────────────────────
|
||||
-dontwarn com.sun.jna.**
|
||||
-dontwarn javax.naming.**
|
||||
-dontwarn lombok.Generated
|
||||
|
|
|
|||
|
|
@ -97,7 +97,7 @@ internal fun parseGatewayEndpoint(rawInput: String): GatewayEndpointConfig? {
|
|||
"wss", "https" -> true
|
||||
else -> true
|
||||
}
|
||||
val port = uri.port.takeIf { it in 1..65535 } ?: 18789
|
||||
val port = uri.port.takeIf { it in 1..65535 } ?: if (tls) 443 else 18789
|
||||
val displayUrl = "${if (tls) "https" else "http"}://$host:$port"
|
||||
|
||||
return GatewayEndpointConfig(host = host, port = port, tls = tls, displayUrl = displayUrl)
|
||||
|
|
|
|||
|
|
@ -92,6 +92,30 @@ class GatewayConfigResolverTest {
|
|||
assertNull(resolved?.password?.takeIf { it.isNotEmpty() })
|
||||
}
|
||||
|
||||
@Test
|
||||
fun resolveGatewayConnectConfigDefaultsPortlessWssSetupCodeTo443() {
|
||||
val setupCode =
|
||||
encodeSetupCode("""{"url":"wss://gateway.example","bootstrapToken":"bootstrap-1"}""")
|
||||
|
||||
val resolved =
|
||||
resolveGatewayConnectConfig(
|
||||
useSetupCode = true,
|
||||
setupCode = setupCode,
|
||||
manualHost = "",
|
||||
manualPort = "",
|
||||
manualTls = true,
|
||||
fallbackToken = "shared-token",
|
||||
fallbackPassword = "shared-password",
|
||||
)
|
||||
|
||||
assertEquals("gateway.example", resolved?.host)
|
||||
assertEquals(443, resolved?.port)
|
||||
assertEquals(true, resolved?.tls)
|
||||
assertEquals("bootstrap-1", resolved?.bootstrapToken)
|
||||
assertNull(resolved?.token?.takeIf { it.isNotEmpty() })
|
||||
assertNull(resolved?.password?.takeIf { it.isNotEmpty() })
|
||||
}
|
||||
|
||||
private fun encodeSetupCode(payloadJson: String): String {
|
||||
return Base64.getUrlEncoder().withoutPadding().encodeToString(payloadJson.toByteArray(Charsets.UTF_8))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,125 @@
|
|||
#!/usr/bin/env bun
|
||||
|
||||
import { $ } from "bun";
|
||||
import { dirname, join } from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
// Directory containing this script; androidDir is its parent
// (presumably apps/android — verify against repo layout).
const scriptDir = dirname(fileURLToPath(import.meta.url));
const androidDir = join(scriptDir, "..");
// Gradle file whose versionName/versionCode assignments get bumped.
const buildGradlePath = join(androidDir, "app", "build.gradle.kts");
// Conventional Gradle output path for the signed release bundle.
const bundlePath = join(androidDir, "app", "build", "outputs", "bundle", "release", "app-release.aab");
||||
|
||||
/** Android version identity as written in build.gradle.kts. */
type VersionState = {
  // Human-readable version, e.g. "2026.3.14" (year.month.day, unpadded).
  versionName: string;
  // Integer Play version code, e.g. 2026031401 (YYYYMMDD + 2-digit suffix).
  versionCode: number;
};

/** Regex matches for the versionName/versionCode lines in build.gradle.kts. */
type ParsedVersionMatches = {
  versionNameMatch: RegExpMatchArray;
  versionCodeMatch: RegExpMatchArray;
};
|
||||
|
||||
function formatVersionName(date: Date): string {
|
||||
const year = date.getFullYear();
|
||||
const month = date.getMonth() + 1;
|
||||
const day = date.getDate();
|
||||
return `${year}.${month}.${day}`;
|
||||
}
|
||||
|
||||
function formatVersionCodePrefix(date: Date): string {
|
||||
const year = date.getFullYear().toString();
|
||||
const month = (date.getMonth() + 1).toString().padStart(2, "0");
|
||||
const day = date.getDate().toString().padStart(2, "0");
|
||||
return `${year}${month}${day}`;
|
||||
}
|
||||
|
||||
function parseVersionMatches(buildGradleText: string): ParsedVersionMatches {
|
||||
const versionCodeMatch = buildGradleText.match(/versionCode = (\d+)/);
|
||||
const versionNameMatch = buildGradleText.match(/versionName = "([^"]+)"/);
|
||||
if (!versionCodeMatch || !versionNameMatch) {
|
||||
throw new Error(`Couldn't parse versionName/versionCode from ${buildGradlePath}`);
|
||||
}
|
||||
return { versionCodeMatch, versionNameMatch };
|
||||
}
|
||||
|
||||
function resolveNextVersionCode(currentVersionCode: number, todayPrefix: string): number {
|
||||
const currentRaw = currentVersionCode.toString();
|
||||
let nextSuffix = 0;
|
||||
|
||||
if (currentRaw.startsWith(todayPrefix)) {
|
||||
const suffixRaw = currentRaw.slice(todayPrefix.length);
|
||||
nextSuffix = (suffixRaw ? Number.parseInt(suffixRaw, 10) : 0) + 1;
|
||||
}
|
||||
|
||||
if (!Number.isInteger(nextSuffix) || nextSuffix < 0 || nextSuffix > 99) {
|
||||
throw new Error(
|
||||
`Can't auto-bump Android versionCode for ${todayPrefix}: next suffix ${nextSuffix} is invalid`,
|
||||
);
|
||||
}
|
||||
|
||||
return Number.parseInt(`${todayPrefix}${nextSuffix.toString().padStart(2, "0")}`, 10);
|
||||
}
|
||||
|
||||
function resolveNextVersion(buildGradleText: string, date: Date): VersionState {
|
||||
const { versionCodeMatch } = parseVersionMatches(buildGradleText);
|
||||
const currentVersionCode = Number.parseInt(versionCodeMatch[1] ?? "", 10);
|
||||
if (!Number.isInteger(currentVersionCode)) {
|
||||
throw new Error(`Invalid Android versionCode in ${buildGradlePath}`);
|
||||
}
|
||||
|
||||
const versionName = formatVersionName(date);
|
||||
const versionCode = resolveNextVersionCode(currentVersionCode, formatVersionCodePrefix(date));
|
||||
return { versionName, versionCode };
|
||||
}
|
||||
|
||||
function updateBuildGradleVersions(buildGradleText: string, nextVersion: VersionState): string {
|
||||
return buildGradleText
|
||||
.replace(/versionCode = \d+/, `versionCode = ${nextVersion.versionCode}`)
|
||||
.replace(/versionName = "[^"]+"/, `versionName = "${nextVersion.versionName}"`);
|
||||
}
|
||||
|
||||
async function sha256Hex(path: string): Promise<string> {
|
||||
const buffer = await Bun.file(path).arrayBuffer();
|
||||
const digest = await crypto.subtle.digest("SHA-256", buffer);
|
||||
return Array.from(new Uint8Array(digest), (byte) => byte.toString(16).padStart(2, "0")).join("");
|
||||
}
|
||||
|
||||
/**
 * Verify the bundle's jar signature by shelling out to `jarsigner -verify`.
 * `.quiet()` suppresses the subprocess's output.
 * NOTE(review): assumes Bun's `$` rejects on a non-zero exit status, which
 * would be the only failure signal surfaced to the caller — confirm against
 * Bun Shell semantics.
 */
async function verifyBundleSignature(path: string): Promise<void> {
  await $`jarsigner -verify ${path}`.quiet();
}
|
||||
|
||||
/**
 * Auto-bump the Android versionName/versionCode in build.gradle.kts, build
 * the signed release .aab via Gradle, verify its signature, and print its
 * path and SHA-256. The version bump is rolled back if the Gradle build
 * fails.
 */
async function main() {
  // Read the current Gradle file and compute the bumped version pair for today.
  const buildGradleFile = Bun.file(buildGradlePath);
  const originalText = await buildGradleFile.text();
  const nextVersion = resolveNextVersion(originalText, new Date());
  const updatedText = updateBuildGradleVersions(originalText, nextVersion);

  // A no-op rewrite means the regex replacement failed; bail before building.
  if (updatedText === originalText) {
    throw new Error("Android version bump produced no change");
  }

  console.log(`Android versionName -> ${nextVersion.versionName}`);
  console.log(`Android versionCode -> ${nextVersion.versionCode}`);

  // Persist the bump first so the Gradle build picks it up.
  await Bun.write(buildGradlePath, updatedText);

  try {
    await $`./gradlew :app:bundleRelease`.cwd(androidDir);
  } catch (error) {
    // Build failed: restore the original versions, then rethrow.
    await Bun.write(buildGradlePath, originalText);
    throw error;
  }

  // The signed bundle must exist at Gradle's conventional output path.
  const bundleFile = Bun.file(bundlePath);
  if (!(await bundleFile.exists())) {
    throw new Error(`Signed bundle missing at ${bundlePath}`);
  }

  // Check the signature and report an integrity hash for release notes.
  await verifyBundleSignature(bundlePath);
  const hash = await sha256Hex(bundlePath);

  console.log(`Signed AAB: ${bundlePath}`);
  console.log(`SHA-256: ${hash}`);
}

await main();
|
||||
|
|
@ -45,8 +45,8 @@ enum ExecApprovalEvaluator {
|
|||
|
||||
let skillAllow: Bool
|
||||
if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty {
|
||||
let bins = await SkillBinsCache.shared.currentBins()
|
||||
skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) }
|
||||
let bins = await SkillBinsCache.shared.currentTrust()
|
||||
skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins)
|
||||
} else {
|
||||
skillAllow = false
|
||||
}
|
||||
|
|
@ -65,4 +65,26 @@ enum ExecApprovalEvaluator {
|
|||
allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil,
|
||||
skillAllow: skillAllow)
|
||||
}
|
||||
|
||||
/// Returns `true` only when every resolved command maps to a trusted skill
/// binary: the executable name must normalize via
/// `SkillBinsCache.normalizeSkillBinName`, its resolved path must normalize
/// via `SkillBinsCache.normalizeResolvedPath`, and that path must appear in
/// the trust index entry for that name. Empty resolutions or an empty trust
/// index fail closed (`false`).
static func isSkillAutoAllowed(
    _ resolutions: [ExecCommandResolution],
    trustedBinsByName: [String: Set<String>]) -> Bool
{
    guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false }
    return resolutions.allSatisfy { resolution in
        guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName),
            let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath)
        else {
            return false
        }
        return trustedBinsByName[executableName]?.contains(resolvedPath) == true
    }
}

/// Test-only shim exposing `isSkillAutoAllowed` to the test target.
static func _testIsSkillAutoAllowed(
    _ resolutions: [ExecCommandResolution],
    trustedBinsByName: [String: Set<String>]) -> Bool
{
    self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName)
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -370,6 +370,17 @@ enum ExecApprovalsStore {
|
|||
|
||||
static func resolve(agentId: String?) -> ExecApprovalsResolved {
|
||||
let file = self.ensureFile()
|
||||
return self.resolveFromFile(file, agentId: agentId)
|
||||
}
|
||||
|
||||
/// Read-only resolve: loads file without writing (no ensureFile side effects).
|
||||
/// Safe to call from background threads / off MainActor.
|
||||
static func resolveReadOnly(agentId: String?) -> ExecApprovalsResolved {
|
||||
let file = self.loadFile()
|
||||
return self.resolveFromFile(file, agentId: agentId)
|
||||
}
|
||||
|
||||
private static func resolveFromFile(_ file: ExecApprovalsFile, agentId: String?) -> ExecApprovalsResolved {
|
||||
let defaults = file.defaults ?? ExecApprovalsDefaults()
|
||||
let resolvedDefaults = ExecApprovalsResolvedDefaults(
|
||||
security: defaults.security ?? self.defaultSecurity,
|
||||
|
|
@ -777,6 +788,7 @@ actor SkillBinsCache {
|
|||
static let shared = SkillBinsCache()
|
||||
|
||||
private var bins: Set<String> = []
|
||||
private var trustByName: [String: Set<String>] = [:]
|
||||
private var lastRefresh: Date?
|
||||
private let refreshInterval: TimeInterval = 90
|
||||
|
||||
|
|
@ -787,27 +799,90 @@ actor SkillBinsCache {
|
|||
return self.bins
|
||||
}
|
||||
|
||||
func currentTrust(force: Bool = false) async -> [String: Set<String>] {
|
||||
if force || self.isStale() {
|
||||
await self.refresh()
|
||||
}
|
||||
return self.trustByName
|
||||
}
|
||||
|
||||
func refresh() async {
|
||||
do {
|
||||
let report = try await GatewayConnection.shared.skillsStatus()
|
||||
var next = Set<String>()
|
||||
for skill in report.skills {
|
||||
for bin in skill.requirements.bins {
|
||||
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
if !trimmed.isEmpty { next.insert(trimmed) }
|
||||
}
|
||||
}
|
||||
self.bins = next
|
||||
let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths())
|
||||
self.bins = trust.names
|
||||
self.trustByName = trust.pathsByName
|
||||
self.lastRefresh = Date()
|
||||
} catch {
|
||||
if self.lastRefresh == nil {
|
||||
self.bins = []
|
||||
self.trustByName = [:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static func normalizeSkillBinName(_ value: String) -> String? {
|
||||
let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased()
|
||||
return trimmed.isEmpty ? nil : trimmed
|
||||
}
|
||||
|
||||
static func normalizeResolvedPath(_ value: String?) -> String? {
|
||||
let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
|
||||
guard !trimmed.isEmpty else { return nil }
|
||||
return URL(fileURLWithPath: trimmed).standardizedFileURL.path
|
||||
}
|
||||
|
||||
static func buildTrustIndex(
|
||||
report: SkillsStatusReport,
|
||||
searchPaths: [String]) -> SkillBinTrustIndex
|
||||
{
|
||||
var names = Set<String>()
|
||||
var pathsByName: [String: Set<String>] = [:]
|
||||
|
||||
for skill in report.skills {
|
||||
for bin in skill.requirements.bins {
|
||||
let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { continue }
|
||||
names.insert(trimmed)
|
||||
|
||||
guard let name = self.normalizeSkillBinName(trimmed),
|
||||
let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths),
|
||||
let normalizedPath = self.normalizeResolvedPath(resolvedPath)
|
||||
else {
|
||||
continue
|
||||
}
|
||||
|
||||
var paths = pathsByName[name] ?? Set<String>()
|
||||
paths.insert(normalizedPath)
|
||||
pathsByName[name] = paths
|
||||
}
|
||||
}
|
||||
|
||||
return SkillBinTrustIndex(names: names, pathsByName: pathsByName)
|
||||
}
|
||||
|
||||
private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? {
|
||||
let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin
|
||||
if expanded.contains("/") || expanded.contains("\\") {
|
||||
return FileManager().isExecutableFile(atPath: expanded) ? expanded : nil
|
||||
}
|
||||
return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths)
|
||||
}
|
||||
|
||||
private func isStale() -> Bool {
|
||||
guard let lastRefresh else { return true }
|
||||
return Date().timeIntervalSince(lastRefresh) > self.refreshInterval
|
||||
}
|
||||
|
||||
static func _testBuildTrustIndex(
|
||||
report: SkillsStatusReport,
|
||||
searchPaths: [String]) -> SkillBinTrustIndex
|
||||
{
|
||||
self.buildTrustIndex(report: report, searchPaths: searchPaths)
|
||||
}
|
||||
}
|
||||
|
||||
struct SkillBinTrustIndex {
|
||||
let names: Set<String>
|
||||
let pathsByName: [String: Set<String>]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -43,7 +43,33 @@ final class ExecApprovalsGatewayPrompter {
|
|||
do {
|
||||
let data = try JSONEncoder().encode(payload)
|
||||
let request = try JSONDecoder().decode(GatewayApprovalRequest.self, from: data)
|
||||
guard self.shouldPresent(request: request) else { return }
|
||||
let presentation = self.shouldPresent(request: request)
|
||||
guard presentation.shouldAsk else {
|
||||
// Ask policy says no prompt needed – resolve based on security policy
|
||||
let decision: ExecApprovalDecision = presentation.security == .full ? .allowOnce : .deny
|
||||
try await GatewayConnection.shared.requestVoid(
|
||||
method: .execApprovalResolve,
|
||||
params: [
|
||||
"id": AnyCodable(request.id),
|
||||
"decision": AnyCodable(decision.rawValue),
|
||||
],
|
||||
timeoutMs: 10000)
|
||||
return
|
||||
}
|
||||
guard presentation.canPresent else {
|
||||
let decision = Self.fallbackDecision(
|
||||
request: request.request,
|
||||
askFallback: presentation.askFallback,
|
||||
allowlist: presentation.allowlist)
|
||||
try await GatewayConnection.shared.requestVoid(
|
||||
method: .execApprovalResolve,
|
||||
params: [
|
||||
"id": AnyCodable(request.id),
|
||||
"decision": AnyCodable(decision.rawValue),
|
||||
],
|
||||
timeoutMs: 10000)
|
||||
return
|
||||
}
|
||||
let decision = ExecApprovalsPromptPresenter.prompt(request.request)
|
||||
try await GatewayConnection.shared.requestVoid(
|
||||
method: .execApprovalResolve,
|
||||
|
|
@ -57,16 +83,89 @@ final class ExecApprovalsGatewayPrompter {
|
|||
}
|
||||
}
|
||||
|
||||
private func shouldPresent(request: GatewayApprovalRequest) -> Bool {
|
||||
/// Whether the ask policy requires prompting the user.
|
||||
/// Note: this only determines if a prompt is shown, not whether the action is allowed.
|
||||
/// The security policy (full/deny/allowlist) decides the actual outcome.
|
||||
private static func shouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool {
|
||||
switch ask {
|
||||
case .always:
|
||||
return true
|
||||
case .onMiss:
|
||||
return security == .allowlist
|
||||
case .off:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
struct PresentationDecision {
|
||||
/// Whether the ask policy requires prompting the user (not whether the action is allowed).
|
||||
var shouldAsk: Bool
|
||||
/// Whether the prompt can actually be shown (session match, recent activity, etc.).
|
||||
var canPresent: Bool
|
||||
/// The resolved security policy, used to determine allow/deny when no prompt is shown.
|
||||
var security: ExecSecurity
|
||||
/// Fallback security policy when a prompt is needed but can't be presented.
|
||||
var askFallback: ExecSecurity
|
||||
var allowlist: [ExecAllowlistEntry]
|
||||
}
|
||||
|
||||
private func shouldPresent(request: GatewayApprovalRequest) -> PresentationDecision {
|
||||
let mode = AppStateStore.shared.connectionMode
|
||||
let activeSession = WebChatManager.shared.activeSessionKey?.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
let requestSession = request.request.sessionKey?.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
return Self.shouldPresent(
|
||||
|
||||
// Read-only resolve to avoid disk writes on the MainActor
|
||||
let approvals = ExecApprovalsStore.resolveReadOnly(agentId: request.request.agentId)
|
||||
let security = approvals.agent.security
|
||||
let ask = approvals.agent.ask
|
||||
|
||||
let shouldAsk = Self.shouldAsk(security: security, ask: ask)
|
||||
|
||||
let canPresent = shouldAsk && Self.shouldPresent(
|
||||
mode: mode,
|
||||
activeSession: activeSession,
|
||||
requestSession: requestSession,
|
||||
lastInputSeconds: Self.lastInputSeconds(),
|
||||
thresholdSeconds: 120)
|
||||
|
||||
return PresentationDecision(
|
||||
shouldAsk: shouldAsk,
|
||||
canPresent: canPresent,
|
||||
security: security,
|
||||
askFallback: approvals.agent.askFallback,
|
||||
allowlist: approvals.allowlist)
|
||||
}
|
||||
|
||||
private static func fallbackDecision(
|
||||
request: ExecApprovalPromptRequest,
|
||||
askFallback: ExecSecurity,
|
||||
allowlist: [ExecAllowlistEntry]) -> ExecApprovalDecision
|
||||
{
|
||||
guard askFallback == .allowlist else {
|
||||
return askFallback == .full ? .allowOnce : .deny
|
||||
}
|
||||
let resolution = self.fallbackResolution(for: request)
|
||||
let match = ExecAllowlistMatcher.match(entries: allowlist, resolution: resolution)
|
||||
return match == nil ? .deny : .allowOnce
|
||||
}
|
||||
|
||||
private static func fallbackResolution(for request: ExecApprovalPromptRequest) -> ExecCommandResolution? {
|
||||
let resolvedPath = request.resolvedPath?.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
let trimmedResolvedPath = (resolvedPath?.isEmpty == false) ? resolvedPath : nil
|
||||
let rawExecutable = self.firstToken(from: request.command) ?? trimmedResolvedPath ?? ""
|
||||
guard !rawExecutable.isEmpty || trimmedResolvedPath != nil else { return nil }
|
||||
let executableName = trimmedResolvedPath.map { URL(fileURLWithPath: $0).lastPathComponent } ?? rawExecutable
|
||||
return ExecCommandResolution(
|
||||
rawExecutable: rawExecutable,
|
||||
resolvedPath: trimmedResolvedPath,
|
||||
executableName: executableName,
|
||||
cwd: request.cwd)
|
||||
}
|
||||
|
||||
private static func firstToken(from command: String) -> String? {
|
||||
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { return nil }
|
||||
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
|
||||
}
|
||||
|
||||
private static func shouldPresent(
|
||||
|
|
@ -117,5 +216,29 @@ extension ExecApprovalsGatewayPrompter {
|
|||
lastInputSeconds: lastInputSeconds,
|
||||
thresholdSeconds: thresholdSeconds)
|
||||
}
|
||||
|
||||
static func _testShouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool {
|
||||
self.shouldAsk(security: security, ask: ask)
|
||||
}
|
||||
|
||||
static func _testFallbackDecision(
|
||||
command: String,
|
||||
resolvedPath: String?,
|
||||
askFallback: ExecSecurity,
|
||||
allowlistPatterns: [String]) -> ExecApprovalDecision
|
||||
{
|
||||
self.fallbackDecision(
|
||||
request: ExecApprovalPromptRequest(
|
||||
command: command,
|
||||
cwd: nil,
|
||||
host: nil,
|
||||
security: nil,
|
||||
ask: nil,
|
||||
agentId: nil,
|
||||
resolvedPath: resolvedPath,
|
||||
sessionKey: nil),
|
||||
askFallback: askFallback,
|
||||
allowlist: allowlistPatterns.map { ExecAllowlistEntry(pattern: $0) })
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -37,8 +37,7 @@ struct ExecCommandResolution {
|
|||
var resolutions: [ExecCommandResolution] = []
|
||||
resolutions.reserveCapacity(segments.count)
|
||||
for segment in segments {
|
||||
guard let token = self.parseFirstToken(segment),
|
||||
let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env)
|
||||
guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env)
|
||||
else {
|
||||
return []
|
||||
}
|
||||
|
|
@ -88,6 +87,20 @@ struct ExecCommandResolution {
|
|||
cwd: cwd)
|
||||
}
|
||||
|
||||
private static func resolveShellSegmentExecutable(
|
||||
_ segment: String,
|
||||
cwd: String?,
|
||||
env: [String: String]?) -> ExecCommandResolution?
|
||||
{
|
||||
let tokens = self.tokenizeShellWords(segment)
|
||||
guard !tokens.isEmpty else { return nil }
|
||||
let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens)
|
||||
guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else {
|
||||
return nil
|
||||
}
|
||||
return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env)
|
||||
}
|
||||
|
||||
private static func parseFirstToken(_ command: String) -> String? {
|
||||
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { return nil }
|
||||
|
|
@ -102,6 +115,59 @@ struct ExecCommandResolution {
|
|||
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
|
||||
}
|
||||
|
||||
private static func tokenizeShellWords(_ command: String) -> [String] {
|
||||
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
|
||||
guard !trimmed.isEmpty else { return [] }
|
||||
|
||||
var tokens: [String] = []
|
||||
var current = ""
|
||||
var inSingle = false
|
||||
var inDouble = false
|
||||
var escaped = false
|
||||
|
||||
func appendCurrent() {
|
||||
guard !current.isEmpty else { return }
|
||||
tokens.append(current)
|
||||
current.removeAll(keepingCapacity: true)
|
||||
}
|
||||
|
||||
for ch in trimmed {
|
||||
if escaped {
|
||||
current.append(ch)
|
||||
escaped = false
|
||||
continue
|
||||
}
|
||||
|
||||
if ch == "\\", !inSingle {
|
||||
escaped = true
|
||||
continue
|
||||
}
|
||||
|
||||
if ch == "'", !inDouble {
|
||||
inSingle.toggle()
|
||||
continue
|
||||
}
|
||||
|
||||
if ch == "\"", !inSingle {
|
||||
inDouble.toggle()
|
||||
continue
|
||||
}
|
||||
|
||||
if ch.isWhitespace, !inSingle, !inDouble {
|
||||
appendCurrent()
|
||||
continue
|
||||
}
|
||||
|
||||
current.append(ch)
|
||||
}
|
||||
|
||||
if escaped {
|
||||
current.append("\\")
|
||||
}
|
||||
appendCurrent()
|
||||
return tokens
|
||||
}
|
||||
|
||||
private enum ShellTokenContext {
|
||||
case unquoted
|
||||
case doubleQuoted
|
||||
|
|
@ -148,8 +214,14 @@ struct ExecCommandResolution {
|
|||
while idx < chars.count {
|
||||
let ch = chars[idx]
|
||||
let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil
|
||||
let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle)
|
||||
|
||||
if escaped {
|
||||
if ch == "\n" {
|
||||
escaped = false
|
||||
idx += 1
|
||||
continue
|
||||
}
|
||||
current.append(ch)
|
||||
escaped = false
|
||||
idx += 1
|
||||
|
|
@ -157,6 +229,10 @@ struct ExecCommandResolution {
|
|||
}
|
||||
|
||||
if ch == "\\", !inSingle {
|
||||
if next == "\n" {
|
||||
idx += 2
|
||||
continue
|
||||
}
|
||||
current.append(ch)
|
||||
escaped = true
|
||||
idx += 1
|
||||
|
|
@ -177,7 +253,7 @@ struct ExecCommandResolution {
|
|||
continue
|
||||
}
|
||||
|
||||
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) {
|
||||
if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) {
|
||||
// Fail closed on command/process substitution in allowlist mode,
|
||||
// including command substitution inside double-quoted shell strings.
|
||||
return nil
|
||||
|
|
@ -201,6 +277,25 @@ struct ExecCommandResolution {
|
|||
return segments
|
||||
}
|
||||
|
||||
private static func nextShellSignificantCharacter(
|
||||
chars: [Character],
|
||||
after idx: Int,
|
||||
inSingle: Bool) -> Character?
|
||||
{
|
||||
guard !inSingle else {
|
||||
return idx + 1 < chars.count ? chars[idx + 1] : nil
|
||||
}
|
||||
var cursor = idx + 1
|
||||
while cursor < chars.count {
|
||||
if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" {
|
||||
cursor += 2
|
||||
continue
|
||||
}
|
||||
return chars[cursor]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool {
|
||||
let context: ShellTokenContext = inDouble ? .doubleQuoted : .unquoted
|
||||
guard let rules = self.shellFailClosedRules[context] else {
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ actor PortGuardian {
|
|||
let listeners = await self.listeners(on: port)
|
||||
guard !listeners.isEmpty else { continue }
|
||||
for listener in listeners {
|
||||
if self.isExpected(listener, port: port, mode: mode) {
|
||||
if Self.isExpected(listener, port: port, mode: mode) {
|
||||
let message = """
|
||||
port \(port) already served by expected \(listener.command)
|
||||
(pid \(listener.pid)) — keeping
|
||||
|
|
@ -55,6 +55,14 @@ actor PortGuardian {
|
|||
self.logger.info("\(message, privacy: .public)")
|
||||
continue
|
||||
}
|
||||
if mode == .remote {
|
||||
let message = """
|
||||
port \(port) held by \(listener.command)
|
||||
(pid \(listener.pid)) in remote mode — not killing
|
||||
"""
|
||||
self.logger.warning(message)
|
||||
continue
|
||||
}
|
||||
let killed = await self.kill(listener.pid)
|
||||
if killed {
|
||||
let message = """
|
||||
|
|
@ -271,8 +279,8 @@ actor PortGuardian {
|
|||
|
||||
switch mode {
|
||||
case .remote:
|
||||
expectedDesc = "SSH tunnel to remote gateway"
|
||||
okPredicate = { $0.command.lowercased().contains("ssh") }
|
||||
expectedDesc = "Remote gateway (SSH tunnel, Docker, or direct)"
|
||||
okPredicate = { _ in true }
|
||||
case .local:
|
||||
expectedDesc = "Gateway websocket (node/tsx)"
|
||||
okPredicate = { listener in
|
||||
|
|
@ -352,13 +360,12 @@ actor PortGuardian {
|
|||
return sigkill.ok
|
||||
}
|
||||
|
||||
private func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool {
|
||||
private static func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool {
|
||||
let cmd = listener.command.lowercased()
|
||||
let full = listener.fullCommand.lowercased()
|
||||
switch mode {
|
||||
case .remote:
|
||||
// Remote mode expects an SSH tunnel for the gateway WebSocket port.
|
||||
if port == GatewayEnvironment.gatewayPort() { return cmd.contains("ssh") }
|
||||
if port == GatewayEnvironment.gatewayPort() { return true }
|
||||
return false
|
||||
case .local:
|
||||
// The gateway daemon may listen as `openclaw` or as its runtime (`node`, `bun`, etc).
|
||||
|
|
@ -406,6 +413,16 @@ extension PortGuardian {
|
|||
self.parseListeners(from: text).map { ($0.pid, $0.command, $0.fullCommand, $0.user) }
|
||||
}
|
||||
|
||||
static func _testIsExpected(
|
||||
command: String,
|
||||
fullCommand: String,
|
||||
port: Int,
|
||||
mode: AppState.ConnectionMode) -> Bool
|
||||
{
|
||||
let listener = Listener(pid: 0, command: command, fullCommand: fullCommand, user: nil)
|
||||
return Self.isExpected(listener, port: port, mode: mode)
|
||||
}
|
||||
|
||||
static func _testBuildReport(
|
||||
port: Int,
|
||||
mode: AppState.ConnectionMode,
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ enum RuntimeResolutionError: Error {
|
|||
|
||||
enum RuntimeLocator {
|
||||
private static let logger = Logger(subsystem: "ai.openclaw", category: "runtime")
|
||||
private static let minNode = RuntimeVersion(major: 22, minor: 0, patch: 0)
|
||||
private static let minNode = RuntimeVersion(major: 22, minor: 16, patch: 0)
|
||||
|
||||
static func resolve(
|
||||
searchPaths: [String] = CommandResolver.preferredPaths()) -> Result<RuntimeResolution, RuntimeResolutionError>
|
||||
|
|
@ -91,7 +91,7 @@ enum RuntimeLocator {
|
|||
switch error {
|
||||
case let .notFound(searchPaths):
|
||||
[
|
||||
"openclaw needs Node >=22.0.0 but found no runtime.",
|
||||
"openclaw needs Node >=22.16.0 but found no runtime.",
|
||||
"PATH searched: \(searchPaths.joined(separator: ":"))",
|
||||
"Install Node: https://nodejs.org/en/download",
|
||||
].joined(separator: "\n")
|
||||
|
|
@ -105,7 +105,7 @@ enum RuntimeLocator {
|
|||
[
|
||||
"Could not parse \(kind.rawValue) version output \"\(raw)\" from \(path).",
|
||||
"PATH searched: \(searchPaths.joined(separator: ":"))",
|
||||
"Try reinstalling or pinning a supported version (Node >=22.0.0).",
|
||||
"Try reinstalling or pinning a supported version (Node >=22.16.0).",
|
||||
].joined(separator: "\n")
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -141,6 +141,26 @@ struct ExecAllowlistTests {
|
|||
#expect(resolutions.isEmpty)
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist fails closed on line-continued command substitution`() {
|
||||
let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.isEmpty)
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist fails closed on chained line-continued command substitution`() {
|
||||
let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.isEmpty)
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist fails closed on quoted backticks`() {
|
||||
let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
|
|
@ -208,6 +228,30 @@ struct ExecAllowlistTests {
|
|||
#expect(resolutions[1].executableName == "touch")
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() {
|
||||
let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "env /usr/bin/touch /tmp/openclaw-allowlist-test",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.count == 1)
|
||||
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
|
||||
#expect(resolutions[0].executableName == "touch")
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist unwraps env assignments inside shell segments`() {
|
||||
let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
command: command,
|
||||
rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test",
|
||||
cwd: nil,
|
||||
env: ["PATH": "/usr/bin:/bin"])
|
||||
#expect(resolutions.count == 1)
|
||||
#expect(resolutions[0].resolvedPath == "/usr/bin/touch")
|
||||
#expect(resolutions[0].executableName == "touch")
|
||||
}
|
||||
|
||||
@Test func `resolve for allowlist unwraps env to effective direct executable`() {
|
||||
let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"]
|
||||
let resolutions = ExecCommandResolution.resolveForAllowlist(
|
||||
|
|
|
|||
|
|
@ -52,4 +52,51 @@ struct ExecApprovalsGatewayPrompterTests {
|
|||
lastInputSeconds: 400)
|
||||
#expect(!remote)
|
||||
}
|
||||
|
||||
// MARK: - shouldAsk
|
||||
|
||||
@Test func askAlwaysPromptsRegardlessOfSecurity() {
|
||||
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .always))
|
||||
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .always))
|
||||
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .always))
|
||||
}
|
||||
|
||||
@Test func askOnMissPromptsOnlyForAllowlist() {
|
||||
#expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .onMiss))
|
||||
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .onMiss))
|
||||
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .onMiss))
|
||||
}
|
||||
|
||||
@Test func askOffNeverPrompts() {
|
||||
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .off))
|
||||
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .off))
|
||||
#expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .off))
|
||||
}
|
||||
|
||||
@Test func fallbackAllowlistAllowsMatchingResolvedPath() {
|
||||
let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
|
||||
command: "git status",
|
||||
resolvedPath: "/usr/bin/git",
|
||||
askFallback: .allowlist,
|
||||
allowlistPatterns: ["/usr/bin/git"])
|
||||
#expect(decision == .allowOnce)
|
||||
}
|
||||
|
||||
@Test func fallbackAllowlistDeniesAllowlistMiss() {
|
||||
let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
|
||||
command: "git status",
|
||||
resolvedPath: "/usr/bin/git",
|
||||
askFallback: .allowlist,
|
||||
allowlistPatterns: ["/usr/bin/rg"])
|
||||
#expect(decision == .deny)
|
||||
}
|
||||
|
||||
@Test func fallbackFullAllowsWhenPromptCannotBeShown() {
|
||||
let decision = ExecApprovalsGatewayPrompter._testFallbackDecision(
|
||||
command: "git status",
|
||||
resolvedPath: "/usr/bin/git",
|
||||
askFallback: .full,
|
||||
allowlistPatterns: [])
|
||||
#expect(decision == .allowOnce)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,90 @@
|
|||
import Foundation
|
||||
import Testing
|
||||
@testable import OpenClaw
|
||||
|
||||
struct ExecSkillBinTrustTests {
|
||||
@Test func `build trust index resolves skill bin paths`() throws {
|
||||
let fixture = try Self.makeExecutable(named: "jq")
|
||||
defer { try? FileManager.default.removeItem(at: fixture.root) }
|
||||
|
||||
let trust = SkillBinsCache._testBuildTrustIndex(
|
||||
report: Self.makeReport(bins: ["jq"]),
|
||||
searchPaths: [fixture.root.path])
|
||||
|
||||
#expect(trust.names == ["jq"])
|
||||
#expect(trust.pathsByName["jq"] == [fixture.path])
|
||||
}
|
||||
|
||||
@Test func `skill auto allow accepts trusted resolved skill bin path`() throws {
|
||||
let fixture = try Self.makeExecutable(named: "jq")
|
||||
defer { try? FileManager.default.removeItem(at: fixture.root) }
|
||||
|
||||
let trust = SkillBinsCache._testBuildTrustIndex(
|
||||
report: Self.makeReport(bins: ["jq"]),
|
||||
searchPaths: [fixture.root.path])
|
||||
let resolution = ExecCommandResolution(
|
||||
rawExecutable: "jq",
|
||||
resolvedPath: fixture.path,
|
||||
executableName: "jq",
|
||||
cwd: nil)
|
||||
|
||||
#expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
|
||||
}
|
||||
|
||||
@Test func `skill auto allow rejects same basename at different path`() throws {
|
||||
let trusted = try Self.makeExecutable(named: "jq")
|
||||
let untrusted = try Self.makeExecutable(named: "jq")
|
||||
defer {
|
||||
try? FileManager.default.removeItem(at: trusted.root)
|
||||
try? FileManager.default.removeItem(at: untrusted.root)
|
||||
}
|
||||
|
||||
let trust = SkillBinsCache._testBuildTrustIndex(
|
||||
report: Self.makeReport(bins: ["jq"]),
|
||||
searchPaths: [trusted.root.path])
|
||||
let resolution = ExecCommandResolution(
|
||||
rawExecutable: "jq",
|
||||
resolvedPath: untrusted.path,
|
||||
executableName: "jq",
|
||||
cwd: nil)
|
||||
|
||||
#expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName))
|
||||
}
|
||||
|
||||
private static func makeExecutable(named name: String) throws -> (root: URL, path: String) {
|
||||
let root = FileManager.default.temporaryDirectory
|
||||
.appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true)
|
||||
try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true)
|
||||
let file = root.appendingPathComponent(name)
|
||||
try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8)
|
||||
try FileManager.default.setAttributes(
|
||||
[.posixPermissions: NSNumber(value: Int16(0o755))],
|
||||
ofItemAtPath: file.path)
|
||||
return (root, file.path)
|
||||
}
|
||||
|
||||
private static func makeReport(bins: [String]) -> SkillsStatusReport {
|
||||
SkillsStatusReport(
|
||||
workspaceDir: "/tmp/workspace",
|
||||
managedSkillsDir: "/tmp/skills",
|
||||
skills: [
|
||||
SkillStatus(
|
||||
name: "test-skill",
|
||||
description: "test",
|
||||
source: "local",
|
||||
filePath: "/tmp/skills/test-skill/SKILL.md",
|
||||
baseDir: "/tmp/skills/test-skill",
|
||||
skillKey: "test-skill",
|
||||
primaryEnv: nil,
|
||||
emoji: nil,
|
||||
homepage: nil,
|
||||
always: false,
|
||||
disabled: false,
|
||||
eligible: true,
|
||||
requirements: SkillRequirements(bins: bins, env: [], config: []),
|
||||
missing: SkillMissing(bins: [], env: [], config: []),
|
||||
configChecks: [],
|
||||
install: [])
|
||||
])
|
||||
}
|
||||
}
|
||||
|
|
@ -139,6 +139,54 @@ struct LowCoverageHelperTests {
|
|||
#expect(emptyReport.summary.contains("Nothing is listening"))
|
||||
}
|
||||
|
||||
@Test func `port guardian remote mode does not kill docker`() {
|
||||
#expect(PortGuardian._testIsExpected(
|
||||
command: "com.docker.backend",
|
||||
fullCommand: "com.docker.backend",
|
||||
port: 18789, mode: .remote) == true)
|
||||
|
||||
#expect(PortGuardian._testIsExpected(
|
||||
command: "ssh",
|
||||
fullCommand: "ssh -L 18789:localhost:18789 user@host",
|
||||
port: 18789, mode: .remote) == true)
|
||||
|
||||
#expect(PortGuardian._testIsExpected(
|
||||
command: "podman",
|
||||
fullCommand: "podman",
|
||||
port: 18789, mode: .remote) == true)
|
||||
}
|
||||
|
||||
@Test func `port guardian local mode still rejects unexpected`() {
|
||||
#expect(PortGuardian._testIsExpected(
|
||||
command: "com.docker.backend",
|
||||
fullCommand: "com.docker.backend",
|
||||
port: 18789, mode: .local) == false)
|
||||
|
||||
#expect(PortGuardian._testIsExpected(
|
||||
command: "python",
|
||||
fullCommand: "python server.py",
|
||||
port: 18789, mode: .local) == false)
|
||||
|
||||
#expect(PortGuardian._testIsExpected(
|
||||
command: "node",
|
||||
fullCommand: "node /path/to/gateway-daemon",
|
||||
port: 18789, mode: .local) == true)
|
||||
}
|
||||
|
||||
@Test func `port guardian remote mode report accepts any listener`() {
|
||||
let dockerReport = PortGuardian._testBuildReport(
|
||||
port: 18789, mode: .remote,
|
||||
listeners: [(pid: 99, command: "com.docker.backend",
|
||||
fullCommand: "com.docker.backend", user: "me")])
|
||||
#expect(dockerReport.offenders.isEmpty)
|
||||
|
||||
let localDockerReport = PortGuardian._testBuildReport(
|
||||
port: 18789, mode: .local,
|
||||
listeners: [(pid: 99, command: "com.docker.backend",
|
||||
fullCommand: "com.docker.backend", user: "me")])
|
||||
#expect(!localDockerReport.offenders.isEmpty)
|
||||
}
|
||||
|
||||
@Test @MainActor func `canvas scheme handler resolves files and errors`() throws {
|
||||
let root = FileManager().temporaryDirectory
|
||||
.appendingPathComponent("canvas-\(UUID().uuidString)", isDirectory: true)
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ struct RuntimeLocatorTests {
|
|||
@Test func `resolve succeeds with valid node`() throws {
|
||||
let script = """
|
||||
#!/bin/sh
|
||||
echo v22.5.0
|
||||
echo v22.16.0
|
||||
"""
|
||||
let node = try self.makeTempExecutable(contents: script)
|
||||
let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path])
|
||||
|
|
@ -25,7 +25,23 @@ struct RuntimeLocatorTests {
|
|||
return
|
||||
}
|
||||
#expect(res.path == node.path)
|
||||
#expect(res.version == RuntimeVersion(major: 22, minor: 5, patch: 0))
|
||||
#expect(res.version == RuntimeVersion(major: 22, minor: 16, patch: 0))
|
||||
}
|
||||
|
||||
@Test func `resolve fails on boundary below minimum`() throws {
|
||||
let script = """
|
||||
#!/bin/sh
|
||||
echo v22.15.9
|
||||
"""
|
||||
let node = try self.makeTempExecutable(contents: script)
|
||||
let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path])
|
||||
guard case let .failure(.unsupported(_, found, required, path, _)) = result else {
|
||||
Issue.record("Expected unsupported error, got \(result)")
|
||||
return
|
||||
}
|
||||
#expect(found == RuntimeVersion(major: 22, minor: 15, patch: 9))
|
||||
#expect(required == RuntimeVersion(major: 22, minor: 16, patch: 0))
|
||||
#expect(path == node.path)
|
||||
}
|
||||
|
||||
@Test func `resolve fails when too old`() throws {
|
||||
|
|
@ -60,7 +76,17 @@ struct RuntimeLocatorTests {
|
|||
|
||||
@Test func `describe failure includes paths`() {
|
||||
let msg = RuntimeLocator.describeFailure(.notFound(searchPaths: ["/tmp/a", "/tmp/b"]))
|
||||
#expect(msg.contains("Node >=22.16.0"))
|
||||
#expect(msg.contains("PATH searched: /tmp/a:/tmp/b"))
|
||||
|
||||
let parseMsg = RuntimeLocator.describeFailure(
|
||||
.versionParse(
|
||||
kind: .node,
|
||||
raw: "garbage",
|
||||
path: "/usr/local/bin/node",
|
||||
searchPaths: ["/usr/local/bin"],
|
||||
))
|
||||
#expect(parseMsg.contains("Node >=22.16.0"))
|
||||
}
|
||||
|
||||
@Test func `runtime version parses with leading V and metadata`() {
|
||||
|
|
|
|||
|
|
@ -74,4 +74,22 @@ struct VoiceWakeRuntimeTests {
|
|||
let config = WakeWordGateConfig(triggers: ["openclaw"], minPostTriggerGap: 0.3)
|
||||
#expect(WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command == "do thing")
|
||||
}
|
||||
|
||||
@Test func `gate command text handles foreign string ranges`() {
|
||||
let transcript = "hey openclaw do thing"
|
||||
let other = "do thing"
|
||||
let foreignRange = other.range(of: "do")
|
||||
let segments = [
|
||||
WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")),
|
||||
WakeWordSegment(text: "openclaw", start: 0.2, duration: 0.1, range: transcript.range(of: "openclaw")),
|
||||
WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange),
|
||||
WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil),
|
||||
]
|
||||
|
||||
#expect(
|
||||
WakeWordGate.commandText(
|
||||
transcript: transcript,
|
||||
segments: segments,
|
||||
triggerEndTime: 0.3) == "do thing")
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -73,7 +73,7 @@ await web_search({
|
|||
## Notes
|
||||
|
||||
- OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits.
|
||||
- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
|
||||
- Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans.
|
||||
- The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service).
|
||||
- Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`).
|
||||
|
||||
|
|
|
|||
|
|
@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. For wr
|
|||
- if encoded option values exceed Slack limits, the flow falls back to buttons
|
||||
- For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value.
|
||||
|
||||
## Interactive replies
|
||||
|
||||
Slack can render agent-authored interactive reply controls, but this feature is disabled by default.
|
||||
|
||||
Enable it globally:
|
||||
|
||||
```json5
|
||||
{
|
||||
channels: {
|
||||
slack: {
|
||||
capabilities: {
|
||||
interactiveReplies: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Or enable it for one Slack account only:
|
||||
|
||||
```json5
|
||||
{
|
||||
channels: {
|
||||
slack: {
|
||||
accounts: {
|
||||
ops: {
|
||||
capabilities: {
|
||||
interactiveReplies: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
When enabled, agents can emit Slack-only reply directives:
|
||||
|
||||
- `[[slack_buttons: Approve:approve, Reject:reject]]`
|
||||
- `[[slack_select: Choose a target | Canary:canary, Production:production]]`
|
||||
|
||||
These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path.
|
||||
|
||||
Notes:
|
||||
|
||||
- This is Slack-specific UI. Other channels do not translate Slack Block Kit directives into their own button systems.
|
||||
- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values.
|
||||
- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload.
|
||||
|
||||
Default slash command settings:
|
||||
|
||||
- `enabled: false`
|
||||
|
|
|
|||
22
docs/ci.md
22
docs/ci.md
|
|
@ -9,21 +9,21 @@ read_when:
|
|||
|
||||
# CI Pipeline
|
||||
|
||||
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed.
|
||||
The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed.
|
||||
|
||||
## Job Overview
|
||||
|
||||
| Job | Purpose | When it runs |
|
||||
| ----------------- | ------------------------------------------------------- | ------------------------------------------------- |
|
||||
| ----------------- | ------------------------------------------------------- | ---------------------------------- |
|
||||
| `docs-scope` | Detect docs-only changes | Always |
|
||||
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs |
|
||||
| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes |
|
||||
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
|
||||
| `check` | TypeScript types, lint, format | Non-docs, node changes |
|
||||
| `check-docs` | Markdown lint + broken link check | Docs changed |
|
||||
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
|
||||
| `secrets` | Detect leaked secrets | Always |
|
||||
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
|
||||
| `release-check` | Validate npm pack contents | After build |
|
||||
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
|
||||
| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes |
|
||||
| `release-check` | Validate npm pack contents | Pushes to `main` after build |
|
||||
| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes |
|
||||
| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes |
|
||||
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
|
||||
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
|
||||
| `android` | Gradle build + tests | Non-docs, android changes |
|
||||
|
|
@ -32,9 +32,9 @@ The CI runs on every push to `main` and every pull request. It uses smart scopin
|
|||
|
||||
Jobs are ordered so cheap checks fail before expensive ones run:
|
||||
|
||||
1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min)
|
||||
2. `build-artifacts` (blocked on above)
|
||||
3. `checks`, `checks-windows`, `macos`, `android` (blocked on build)
|
||||
1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
|
||||
2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
|
||||
3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22`
|
||||
|
||||
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.
|
||||
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ Related:
|
|||
## Quick start (local)
|
||||
|
||||
```bash
|
||||
openclaw browser --browser-profile chrome tabs
|
||||
openclaw browser profiles
|
||||
openclaw browser --browser-profile openclaw start
|
||||
openclaw browser --browser-profile openclaw open https://example.com
|
||||
openclaw browser --browser-profile openclaw snapshot
|
||||
|
|
@ -38,7 +38,8 @@ openclaw browser --browser-profile openclaw snapshot
|
|||
Profiles are named browser routing configs. In practice:
|
||||
|
||||
- `openclaw`: launches/attaches to a dedicated OpenClaw-managed Chrome instance (isolated user data dir).
|
||||
- `chrome`: controls your existing Chrome tab(s) via the Chrome extension relay.
|
||||
- `user`: controls your existing signed-in Chrome session via Chrome DevTools MCP.
|
||||
- `chrome-relay`: controls your existing Chrome tab(s) via the Chrome extension relay.
|
||||
|
||||
```bash
|
||||
openclaw browser profiles
|
||||
|
|
|
|||
|
|
@ -126,6 +126,23 @@ openclaw gateway probe
|
|||
openclaw gateway probe --json
|
||||
```
|
||||
|
||||
Interpretation:
|
||||
|
||||
- `Reachable: yes` means at least one target accepted a WebSocket connect.
|
||||
- `RPC: ok` means detail RPC calls (`health`/`status`/`system-presence`/`config.get`) also succeeded.
|
||||
- `RPC: limited - missing scope: operator.read` means connect succeeded but detail RPC is scope-limited. This is reported as **degraded** reachability, not full failure.
|
||||
- Exit code is non-zero only when no probed target is reachable.
|
||||
|
||||
JSON notes (`--json`):
|
||||
|
||||
- Top level:
|
||||
- `ok`: at least one target is reachable.
|
||||
- `degraded`: at least one target had scope-limited detail RPC.
|
||||
- Per target (`targets[].connect`):
|
||||
- `ok`: reachability after connect + degraded classification.
|
||||
- `rpcOk`: full detail RPC success.
|
||||
- `scopeLimited`: detail RPC failed due to missing operator scope.
|
||||
|
||||
#### Remote over SSH (Mac app parity)
|
||||
|
||||
The macOS app “Remote over SSH” mode uses a local port-forward so the remote gateway (which may be bound to loopback only) becomes reachable at `ws://127.0.0.1:<port>`.
|
||||
|
|
|
|||
|
|
@ -2342,7 +2342,7 @@ See [Plugins](/tools/plugin).
|
|||
browser: {
|
||||
enabled: true,
|
||||
evaluateEnabled: true,
|
||||
defaultProfile: "chrome",
|
||||
defaultProfile: "user",
|
||||
ssrfPolicy: {
|
||||
dangerouslyAllowPrivateNetwork: true, // default trusted-network mode
|
||||
// allowPrivateNetwork: true, // legacy alias
|
||||
|
|
|
|||
|
|
@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first.
|
|||
Under the hood, requests are executed as a normal Gateway agent run (same codepath as
|
||||
`openclaw agent`), so routing/permissions/config match your Gateway.
|
||||
|
||||
## Authentication
|
||||
## Authentication, security, and routing
|
||||
|
||||
Uses the Gateway auth configuration. Send a bearer token:
|
||||
Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api):
|
||||
|
||||
- `Authorization: Bearer <token>`
|
||||
- use `Authorization: Bearer <token>` with the normal Gateway auth config
|
||||
- treat the endpoint as full operator access for the gateway instance
|
||||
- select agents with `model: "openclaw:<agentId>"`, `model: "agent:<agentId>"`, or `x-openclaw-agent-id`
|
||||
- use `x-openclaw-session-key` for explicit session routing
|
||||
|
||||
Notes:
|
||||
|
||||
- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`).
|
||||
- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`).
|
||||
- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`.
|
||||
|
||||
## Security boundary (important)
|
||||
|
||||
Treat this endpoint as a **full operator-access** surface for the gateway instance.
|
||||
|
||||
- HTTP bearer auth here is not a narrow per-user scope model.
|
||||
- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential.
|
||||
- Requests run through the same control-plane agent path as trusted operator actions.
|
||||
- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway.
|
||||
- If the target agent policy allows sensitive tools, this endpoint can use them.
|
||||
- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet.
|
||||
|
||||
See [Security](/gateway/security) and [Remote access](/gateway/remote).
|
||||
|
||||
## Choosing an agent
|
||||
|
||||
No custom headers required: encode the agent id in the OpenResponses `model` field:
|
||||
|
||||
- `model: "openclaw:<agentId>"` (example: `"openclaw:main"`, `"openclaw:beta"`)
|
||||
- `model: "agent:<agentId>"` (alias)
|
||||
|
||||
Or target a specific OpenClaw agent by header:
|
||||
|
||||
- `x-openclaw-agent-id: <agentId>` (default: `main`)
|
||||
|
||||
Advanced:
|
||||
|
||||
- `x-openclaw-session-key: <sessionKey>` to fully control session routing.
|
||||
|
||||
## Enabling the endpoint
|
||||
|
||||
Set `gateway.http.endpoints.responses.enabled` to `true`:
|
||||
|
||||
```json5
|
||||
{
|
||||
gateway: {
|
||||
http: {
|
||||
endpoints: {
|
||||
responses: { enabled: true },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
## Disabling the endpoint
|
||||
|
||||
Set `gateway.http.endpoints.responses.enabled` to `false`:
|
||||
|
||||
```json5
|
||||
{
|
||||
gateway: {
|
||||
http: {
|
||||
endpoints: {
|
||||
responses: { enabled: false },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`.
|
||||
|
||||
## Session behavior
|
||||
|
||||
|
|
|
|||
|
|
@ -289,7 +289,7 @@ Look for:
|
|||
|
||||
- Valid browser executable path.
|
||||
- CDP profile reachability.
|
||||
- Extension relay tab attachment for `profile="chrome"`.
|
||||
- Extension relay tab attachment for `profile="chrome-relay"`.
|
||||
|
||||
Common signatures:
|
||||
|
||||
|
|
|
|||
|
|
@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost):
|
|||
- No real keys required
|
||||
- Should be fast and stable
|
||||
- Pool note:
|
||||
- OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards.
|
||||
- On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`).
|
||||
- OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards.
|
||||
- On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there.
|
||||
- Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`).
|
||||
|
||||
### E2E (gateway smoke)
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ Good output in one line:
|
|||
|
||||
- `openclaw status` → shows configured channels and no obvious auth errors.
|
||||
- `openclaw status --all` → full report is present and shareable.
|
||||
- `openclaw gateway probe` → expected gateway target is reachable.
|
||||
- `openclaw gateway probe` → expected gateway target is reachable (`Reachable: yes`). `RPC: limited - missing scope: operator.read` is degraded diagnostics, not a connect failure.
|
||||
- `openclaw gateway status` → `Runtime: running` and `RPC probe: ok`.
|
||||
- `openclaw doctor` → no blocking config/service errors.
|
||||
- `openclaw channels status --probe` → channels report `connected` or `ready`.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,138 @@
|
|||
---
|
||||
summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts"
|
||||
read_when:
|
||||
- You are deploying OpenClaw on a cloud VM with Docker
|
||||
- You need the shared binary bake, persistence, and update flow
|
||||
title: "Docker VM Runtime"
|
||||
---
|
||||
|
||||
# Docker VM Runtime
|
||||
|
||||
Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers.
|
||||
|
||||
## Bake required binaries into the image
|
||||
|
||||
Installing binaries inside a running container is a trap.
|
||||
Anything installed at runtime will be lost on restart.
|
||||
|
||||
All external binaries required by skills must be installed at image build time.
|
||||
|
||||
The examples below show three common binaries only:
|
||||
|
||||
- `gog` for Gmail access
|
||||
- `goplaces` for Google Places
|
||||
- `wacli` for WhatsApp
|
||||
|
||||
These are examples, not a complete list.
|
||||
You may install as many binaries as needed using the same pattern.
|
||||
|
||||
If you add new skills later that depend on additional binaries, you must:
|
||||
|
||||
1. Update the Dockerfile
|
||||
2. Rebuild the image
|
||||
3. Restart the containers
|
||||
|
||||
**Example Dockerfile**
|
||||
|
||||
```dockerfile
|
||||
FROM node:24-bookworm
|
||||
|
||||
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Example binary 1: Gmail CLI
|
||||
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
|
||||
|
||||
# Example binary 2: Google Places CLI
|
||||
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
|
||||
|
||||
# Example binary 3: WhatsApp CLI
|
||||
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
|
||||
|
||||
# Add more binaries below using the same pattern
|
||||
|
||||
WORKDIR /app
|
||||
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
|
||||
COPY ui/package.json ./ui/package.json
|
||||
COPY scripts ./scripts
|
||||
|
||||
RUN corepack enable
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
COPY . .
|
||||
RUN pnpm build
|
||||
RUN pnpm ui:install
|
||||
RUN pnpm ui:build
|
||||
|
||||
ENV NODE_ENV=production
|
||||
|
||||
CMD ["node","dist/index.js"]
|
||||
```
|
||||
|
||||
## Build and launch
|
||||
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up -d openclaw-gateway
|
||||
```
|
||||
|
||||
If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory.
|
||||
Use a larger machine class before retrying.
|
||||
|
||||
Verify binaries:
|
||||
|
||||
```bash
|
||||
docker compose exec openclaw-gateway which gog
|
||||
docker compose exec openclaw-gateway which goplaces
|
||||
docker compose exec openclaw-gateway which wacli
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
/usr/local/bin/gog
|
||||
/usr/local/bin/goplaces
|
||||
/usr/local/bin/wacli
|
||||
```
|
||||
|
||||
Verify Gateway:
|
||||
|
||||
```bash
|
||||
docker compose logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
[gateway] listening on ws://0.0.0.0:18789
|
||||
```
|
||||
|
||||
## What persists where
|
||||
|
||||
OpenClaw runs in Docker, but Docker is not the source of truth.
|
||||
All long-lived state must survive restarts, rebuilds, and reboots.
|
||||
|
||||
| Component | Location | Persistence mechanism | Notes |
|
||||
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
|
||||
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
|
||||
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
|
||||
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
|
||||
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
|
||||
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
|
||||
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
|
||||
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
|
||||
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
|
||||
| OS packages | Container filesystem | Docker image | Do not install at runtime |
|
||||
| Docker container | Ephemeral | Restartable | Safe to destroy |
|
||||
|
||||
## Updates
|
||||
|
||||
To update OpenClaw on the VM:
|
||||
|
||||
```bash
|
||||
git pull
|
||||
docker compose build
|
||||
docker compose up -d
|
||||
```
|
||||
|
|
@ -281,77 +281,20 @@ services:
|
|||
|
||||
---
|
||||
|
||||
## 10) Bake required binaries into the image (critical)
|
||||
## 10) Shared Docker VM runtime steps
|
||||
|
||||
Installing binaries inside a running container is a trap.
|
||||
Anything installed at runtime will be lost on restart.
|
||||
Use the shared runtime guide for the common Docker host flow:
|
||||
|
||||
All external binaries required by skills must be installed at image build time.
|
||||
|
||||
The examples below show three common binaries only:
|
||||
|
||||
- `gog` for Gmail access
|
||||
- `goplaces` for Google Places
|
||||
- `wacli` for WhatsApp
|
||||
|
||||
These are examples, not a complete list.
|
||||
You may install as many binaries as needed using the same pattern.
|
||||
|
||||
If you add new skills later that depend on additional binaries, you must:
|
||||
|
||||
1. Update the Dockerfile
|
||||
2. Rebuild the image
|
||||
3. Restart the containers
|
||||
|
||||
**Example Dockerfile**
|
||||
|
||||
```dockerfile
|
||||
FROM node:24-bookworm
|
||||
|
||||
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Example binary 1: Gmail CLI
|
||||
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
|
||||
|
||||
# Example binary 2: Google Places CLI
|
||||
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
|
||||
|
||||
# Example binary 3: WhatsApp CLI
|
||||
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
|
||||
|
||||
# Add more binaries below using the same pattern
|
||||
|
||||
WORKDIR /app
|
||||
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
|
||||
COPY ui/package.json ./ui/package.json
|
||||
COPY scripts ./scripts
|
||||
|
||||
RUN corepack enable
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
COPY . .
|
||||
RUN pnpm build
|
||||
RUN pnpm ui:install
|
||||
RUN pnpm ui:build
|
||||
|
||||
ENV NODE_ENV=production
|
||||
|
||||
CMD ["node","dist/index.js"]
|
||||
```
|
||||
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
|
||||
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
|
||||
- [What persists where](/install/docker-vm-runtime#what-persists-where)
|
||||
- [Updates](/install/docker-vm-runtime#updates)
|
||||
|
||||
---
|
||||
|
||||
## 11) Build and launch
|
||||
## 11) GCP-specific launch notes
|
||||
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up -d openclaw-gateway
|
||||
```
|
||||
|
||||
If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
|
||||
On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds.
|
||||
|
||||
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing:
|
||||
|
||||
|
|
@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins
|
|||
|
||||
If you changed the gateway port, replace `18789` with your configured port.
|
||||
|
||||
Verify binaries:
|
||||
|
||||
```bash
|
||||
docker compose exec openclaw-gateway which gog
|
||||
docker compose exec openclaw-gateway which goplaces
|
||||
docker compose exec openclaw-gateway which wacli
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
/usr/local/bin/gog
|
||||
/usr/local/bin/goplaces
|
||||
/usr/local/bin/wacli
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 12) Verify Gateway
|
||||
|
||||
```bash
|
||||
docker compose logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
Success:
|
||||
|
||||
```
|
||||
[gateway] listening on ws://0.0.0.0:18789
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 13) Access from your laptop
|
||||
## 12) Access from your laptop
|
||||
|
||||
Create an SSH tunnel to forward the Gateway port:
|
||||
|
||||
|
|
@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list
|
|||
docker compose run --rm openclaw-cli devices approve <requestId>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## What persists where (source of truth)
|
||||
|
||||
OpenClaw runs in Docker, but Docker is not the source of truth.
|
||||
All long-lived state must survive restarts, rebuilds, and reboots.
|
||||
|
||||
| Component | Location | Persistence mechanism | Notes |
|
||||
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
|
||||
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
|
||||
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
|
||||
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
|
||||
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
|
||||
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
|
||||
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
|
||||
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
|
||||
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
|
||||
| OS packages | Container filesystem | Docker image | Do not install at runtime |
|
||||
| Docker container | Ephemeral | Restartable | Safe to destroy |
|
||||
|
||||
---
|
||||
|
||||
## Updates
|
||||
|
||||
To update OpenClaw on the VM:
|
||||
|
||||
```bash
|
||||
cd ~/openclaw
|
||||
git pull
|
||||
docker compose build
|
||||
docker compose up -d
|
||||
```
|
||||
Need the shared persistence and update reference again?
|
||||
See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates).
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -202,107 +202,20 @@ services:
|
|||
|
||||
---
|
||||
|
||||
## 7) Bake required binaries into the image (critical)
|
||||
## 7) Shared Docker VM runtime steps
|
||||
|
||||
Installing binaries inside a running container is a trap.
|
||||
Anything installed at runtime will be lost on restart.
|
||||
Use the shared runtime guide for the common Docker host flow:
|
||||
|
||||
All external binaries required by skills must be installed at image build time.
|
||||
|
||||
The examples below show three common binaries only:
|
||||
|
||||
- `gog` for Gmail access
|
||||
- `goplaces` for Google Places
|
||||
- `wacli` for WhatsApp
|
||||
|
||||
These are examples, not a complete list.
|
||||
You may install as many binaries as needed using the same pattern.
|
||||
|
||||
If you add new skills later that depend on additional binaries, you must:
|
||||
|
||||
1. Update the Dockerfile
|
||||
2. Rebuild the image
|
||||
3. Restart the containers
|
||||
|
||||
**Example Dockerfile**
|
||||
|
||||
```dockerfile
|
||||
FROM node:24-bookworm
|
||||
|
||||
RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Example binary 1: Gmail CLI
|
||||
RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog
|
||||
|
||||
# Example binary 2: Google Places CLI
|
||||
RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces
|
||||
|
||||
# Example binary 3: WhatsApp CLI
|
||||
RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \
|
||||
| tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli
|
||||
|
||||
# Add more binaries below using the same pattern
|
||||
|
||||
WORKDIR /app
|
||||
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./
|
||||
COPY ui/package.json ./ui/package.json
|
||||
COPY scripts ./scripts
|
||||
|
||||
RUN corepack enable
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
COPY . .
|
||||
RUN pnpm build
|
||||
RUN pnpm ui:install
|
||||
RUN pnpm ui:build
|
||||
|
||||
ENV NODE_ENV=production
|
||||
|
||||
CMD ["node","dist/index.js"]
|
||||
```
|
||||
- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image)
|
||||
- [Build and launch](/install/docker-vm-runtime#build-and-launch)
|
||||
- [What persists where](/install/docker-vm-runtime#what-persists-where)
|
||||
- [Updates](/install/docker-vm-runtime#updates)
|
||||
|
||||
---
|
||||
|
||||
## 8) Build and launch
|
||||
## 8) Hetzner-specific access
|
||||
|
||||
```bash
|
||||
docker compose build
|
||||
docker compose up -d openclaw-gateway
|
||||
```
|
||||
|
||||
Verify binaries:
|
||||
|
||||
```bash
|
||||
docker compose exec openclaw-gateway which gog
|
||||
docker compose exec openclaw-gateway which goplaces
|
||||
docker compose exec openclaw-gateway which wacli
|
||||
```
|
||||
|
||||
Expected output:
|
||||
|
||||
```
|
||||
/usr/local/bin/gog
|
||||
/usr/local/bin/goplaces
|
||||
/usr/local/bin/wacli
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9) Verify Gateway
|
||||
|
||||
```bash
|
||||
docker compose logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
Success:
|
||||
|
||||
```
|
||||
[gateway] listening on ws://0.0.0.0:18789
|
||||
```
|
||||
|
||||
From your laptop:
|
||||
After the shared build and launch steps, tunnel from your laptop:
|
||||
|
||||
```bash
|
||||
ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP
|
||||
|
|
@ -316,25 +229,7 @@ Paste your gateway token.
|
|||
|
||||
---
|
||||
|
||||
## What persists where (source of truth)
|
||||
|
||||
OpenClaw runs in Docker, but Docker is not the source of truth.
|
||||
All long-lived state must survive restarts, rebuilds, and reboots.
|
||||
|
||||
| Component | Location | Persistence mechanism | Notes |
|
||||
| ------------------- | --------------------------------- | ---------------------- | -------------------------------- |
|
||||
| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens |
|
||||
| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys |
|
||||
| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state |
|
||||
| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts |
|
||||
| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login |
|
||||
| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` |
|
||||
| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time |
|
||||
| Node runtime | Container filesystem | Docker image | Rebuilt every image build |
|
||||
| OS packages | Container filesystem | Docker image | Do not install at runtime |
|
||||
| Docker container | Ephemeral | Restartable | Safe to destroy |
|
||||
|
||||
---
|
||||
The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where).
|
||||
|
||||
## Infrastructure as Code (Terraform)
|
||||
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@ title: "Android App"
|
|||
|
||||
# Android App (Node)
|
||||
|
||||
> **Note:** The Android app has not been publicly released yet. The source code is available in the [OpenClaw repository](https://github.com/openclaw/openclaw) under `apps/android`. You can build it yourself using Java 17 and the Android SDK (`./gradlew :app:assembleDebug`). See [apps/android/README.md](https://github.com/openclaw/openclaw/blob/main/apps/android/README.md) for build instructions.
|
||||
|
||||
## Support snapshot
|
||||
|
||||
- Role: companion node app (Android does not host the Gateway).
|
||||
|
|
|
|||
|
|
@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set:
|
|||
}
|
||||
```
|
||||
|
||||
`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin
|
||||
normalizes the provider-supplied `From` value and compares it to `allowFrom`.
|
||||
Webhook verification authenticates provider delivery and payload integrity, but
|
||||
it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as
|
||||
caller-ID filtering, not strong caller identity.
|
||||
|
||||
Auto-responses use the agent system. Tune with:
|
||||
|
||||
- `responseModel`
|
||||
|
|
|
|||
|
|
@ -85,8 +85,8 @@ See [Memory](/concepts/memory).
|
|||
- **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey`
|
||||
- **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey`
|
||||
|
||||
**Brave Search free credit:** Each Brave plan includes $5/month in renewing
|
||||
free credit. The Search plan costs $5 per 1,000 requests, so the credit covers
|
||||
**Brave Search free credit:** Each Brave plan includes \$5/month in renewing
|
||||
free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers
|
||||
1,000 requests/month at no charge. Set your usage limit in the Brave dashboard
|
||||
to avoid unexpected charges.
|
||||
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ title: "Tests"
|
|||
|
||||
- `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don’t collide with a running instance. Use this when a prior gateway run left port 18789 occupied.
|
||||
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic.
|
||||
- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
|
||||
- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
|
||||
- `pnpm test`: runs the fast core unit lane by default for quick local feedback.
|
||||
- `pnpm test:channels`: runs channel-heavy suites.
|
||||
- `pnpm test:extensions`: runs extension/plugin suites.
|
||||
|
|
|
|||
|
|
@ -167,93 +167,8 @@ openclaw onboard --non-interactive \
|
|||
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
|
||||
</Note>
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Gemini example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice gemini-api-key \
|
||||
--gemini-api-key "$GEMINI_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Z.AI example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice zai-api-key \
|
||||
--zai-api-key "$ZAI_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Vercel AI Gateway example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice ai-gateway-api-key \
|
||||
--ai-gateway-api-key "$AI_GATEWAY_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Cloudflare AI Gateway example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice cloudflare-ai-gateway-api-key \
|
||||
--cloudflare-ai-gateway-account-id "your-account-id" \
|
||||
--cloudflare-ai-gateway-gateway-id "your-gateway-id" \
|
||||
--cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Moonshot example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice moonshot-api-key \
|
||||
--moonshot-api-key "$MOONSHOT_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="Synthetic example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice synthetic-api-key \
|
||||
--synthetic-api-key "$SYNTHETIC_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
</Accordion>
|
||||
<Accordion title="OpenCode example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice opencode-zen \
|
||||
--opencode-zen-api-key "$OPENCODE_API_KEY" \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog.
|
||||
</Accordion>
|
||||
<Accordion title="Ollama example">
|
||||
```bash
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice ollama \
|
||||
--custom-model-id "qwen3.5:27b" \
|
||||
--accept-risk \
|
||||
--gateway-port 18789 \
|
||||
--gateway-bind loopback
|
||||
```
|
||||
Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance.
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples).
|
||||
Use this reference page for flag semantics and step ordering.
|
||||
|
||||
### Add agent (non-interactive)
|
||||
|
||||
|
|
|
|||
|
|
@ -123,7 +123,7 @@ curl -s http://127.0.0.1:18791/tabs
|
|||
|
||||
### Problem: "Chrome extension relay is running, but no tab is connected"
|
||||
|
||||
You’re using the `chrome` profile (extension relay). It expects the OpenClaw
|
||||
You’re using the `chrome-relay` profile (extension relay). It expects the OpenClaw
|
||||
browser extension to be attached to a live tab.
|
||||
|
||||
Fix options:
|
||||
|
|
@ -135,5 +135,5 @@ Fix options:
|
|||
|
||||
Notes:
|
||||
|
||||
- The `chrome` profile uses your **system default Chromium browser** when possible.
|
||||
- The `chrome-relay` profile uses your **system default Chromium browser** when possible.
|
||||
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl`; only set those for remote CDP.
|
||||
|
|
|
|||
|
|
@ -20,6 +20,13 @@ Back to the main browser docs: [Browser](/tools/browser).
|
|||
|
||||
OpenClaw controls a **dedicated Chrome profile** (named `openclaw`, orange‑tinted UI). This is separate from your daily browser profile.
|
||||
|
||||
For agent browser tool calls:
|
||||
|
||||
- Default choice: the agent should use its isolated `openclaw` browser.
|
||||
- Use `profile="user"` only when existing logged-in sessions matter and the user is at the computer to click/approve any attach prompt.
|
||||
- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow.
|
||||
- If you have multiple user-browser profiles, specify the profile explicitly instead of guessing.
|
||||
|
||||
Two easy ways to access it:
|
||||
|
||||
1. **Ask the agent to open the browser** and then log in yourself.
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ Choose this when:
|
|||
|
||||
### Option 2: Chrome extension relay
|
||||
|
||||
Use the built-in `chrome` profile plus the OpenClaw Chrome extension.
|
||||
Use the built-in `chrome-relay` profile plus the OpenClaw Chrome extension.
|
||||
|
||||
Choose this when:
|
||||
|
||||
|
|
@ -155,7 +155,7 @@ Example:
|
|||
{
|
||||
browser: {
|
||||
enabled: true,
|
||||
defaultProfile: "chrome",
|
||||
defaultProfile: "chrome-relay",
|
||||
relayBindHost: "0.0.0.0",
|
||||
},
|
||||
}
|
||||
|
|
@ -197,7 +197,7 @@ openclaw browser tabs --browser-profile remote
|
|||
For the extension relay:
|
||||
|
||||
```bash
|
||||
openclaw browser tabs --browser-profile chrome
|
||||
openclaw browser tabs --browser-profile chrome-relay
|
||||
```
|
||||
|
||||
Good result:
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@ Beginner view:
|
|||
- Think of it as a **separate, agent-only browser**.
|
||||
- The `openclaw` profile does **not** touch your personal browser profile.
|
||||
- The agent can **open tabs, read pages, click, and type** in a safe lane.
|
||||
- The default `chrome` profile uses the **system default Chromium browser** via the
|
||||
extension relay; switch to `openclaw` for the isolated managed browser.
|
||||
- The built-in `user` profile attaches to your real signed-in Chrome session;
|
||||
`chrome-relay` is the explicit extension-relay profile.
|
||||
|
||||
## What you get
|
||||
|
||||
|
|
@ -43,11 +43,22 @@ openclaw browser --browser-profile openclaw snapshot
|
|||
If you get “Browser disabled”, enable it in config (see below) and restart the
|
||||
Gateway.
|
||||
|
||||
## Profiles: `openclaw` vs `chrome`
|
||||
## Profiles: `openclaw` vs `user` vs `chrome-relay`
|
||||
|
||||
- `openclaw`: managed, isolated browser (no extension required).
|
||||
- `chrome`: extension relay to your **system browser** (requires the OpenClaw
|
||||
extension to be attached to a tab).
|
||||
- `user`: built-in Chrome MCP attach profile for your **real signed-in Chrome**
|
||||
session.
|
||||
- `chrome-relay`: extension relay to your **system browser** (requires the
|
||||
OpenClaw extension to be attached to a tab).
|
||||
|
||||
For agent browser tool calls:
|
||||
|
||||
- Default: use the isolated `openclaw` browser.
|
||||
- Prefer `profile="user"` when existing logged-in sessions matter and the user
|
||||
is at the computer to click/approve any attach prompt.
|
||||
- Use `profile="chrome-relay"` only when the user explicitly wants the Chrome
|
||||
extension / toolbar-button attach flow.
|
||||
- `profile` is the explicit override when you want a specific browser mode.
|
||||
|
||||
Set `browser.defaultProfile: "openclaw"` if you want managed mode by default.
|
||||
|
||||
|
|
@ -68,7 +79,7 @@ Browser settings live in `~/.openclaw/openclaw.json`.
|
|||
// cdpUrl: "http://127.0.0.1:18792", // legacy single-profile override
|
||||
remoteCdpTimeoutMs: 1500, // remote CDP HTTP timeout (ms)
|
||||
remoteCdpHandshakeTimeoutMs: 3000, // remote CDP WebSocket handshake timeout (ms)
|
||||
defaultProfile: "chrome",
|
||||
defaultProfile: "openclaw",
|
||||
color: "#FF4500",
|
||||
headless: false,
|
||||
noSandbox: false,
|
||||
|
|
@ -77,6 +88,16 @@ Browser settings live in `~/.openclaw/openclaw.json`.
|
|||
profiles: {
|
||||
openclaw: { cdpPort: 18800, color: "#FF4500" },
|
||||
work: { cdpPort: 18801, color: "#0066CC" },
|
||||
user: {
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
color: "#00AA00",
|
||||
},
|
||||
"chrome-relay": {
|
||||
driver: "extension",
|
||||
cdpUrl: "http://127.0.0.1:18792",
|
||||
color: "#00AA00",
|
||||
},
|
||||
remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" },
|
||||
},
|
||||
},
|
||||
|
|
@ -97,9 +118,11 @@ Notes:
|
|||
- `browser.ssrfPolicy.allowPrivateNetwork` remains supported as a legacy alias for compatibility.
|
||||
- `attachOnly: true` means “never launch a local browser; only attach if it is already running.”
|
||||
- `color` + per-profile `color` tint the browser UI so you can see which profile is active.
|
||||
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay.
|
||||
- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "user"` to opt into the signed-in user browser, or `defaultProfile: "chrome-relay"` for the extension relay.
|
||||
- Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary.
|
||||
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP.
|
||||
- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do
|
||||
not set `cdpUrl` for that driver.
|
||||
|
||||
## Use Brave (or another Chromium-based browser)
|
||||
|
||||
|
|
@ -264,11 +287,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be:
|
|||
- **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port
|
||||
- **remote**: an explicit CDP URL (Chromium-based browser running elsewhere)
|
||||
- **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension
|
||||
- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect
|
||||
|
||||
Defaults:
|
||||
|
||||
- The `openclaw` profile is auto-created if missing.
|
||||
- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
|
||||
- The `chrome-relay` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default).
|
||||
- Existing-session profiles are opt-in; create them with `--driver existing-session`.
|
||||
- Local CDP ports allocate from **18800–18899** by default.
|
||||
- Deleting a profile moves its local data directory to Trash.
|
||||
|
||||
|
|
@ -311,8 +336,8 @@ openclaw browser extension install
|
|||
|
||||
2. Use it:
|
||||
|
||||
- CLI: `openclaw browser --browser-profile chrome tabs`
|
||||
- Agent tool: `browser` with `profile="chrome"`
|
||||
- CLI: `openclaw browser --browser-profile chrome-relay tabs`
|
||||
- Agent tool: `browser` with `profile="chrome-relay"`
|
||||
|
||||
Optional: if you want a different name or relay port, create your own profile:
|
||||
|
||||
|
|
@ -328,6 +353,81 @@ Notes:
|
|||
|
||||
- This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions).
|
||||
- Detach by clicking the extension icon again.
|
||||
- Agent use: prefer `profile="user"` for logged-in sites. Use `profile="chrome-relay"`
|
||||
only when you specifically want the extension flow. The user must be present
|
||||
to click the extension and attach the tab.
|
||||
|
||||
## Chrome existing-session via MCP
|
||||
|
||||
OpenClaw can also attach to a running Chrome profile through the official
|
||||
Chrome DevTools MCP server. This reuses the tabs and login state already open in
|
||||
that Chrome profile.
|
||||
|
||||
Official background and setup references:
|
||||
|
||||
- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
|
||||
- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
|
||||
|
||||
Built-in profile:
|
||||
|
||||
- `user`
|
||||
|
||||
Optional: create your own custom existing-session profile if you want a
|
||||
different name or color.
|
||||
|
||||
Then in Chrome:
|
||||
|
||||
1. Open `chrome://inspect/#remote-debugging`
|
||||
2. Enable remote debugging
|
||||
3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
|
||||
|
||||
Live attach smoke test:
|
||||
|
||||
```bash
|
||||
openclaw browser --browser-profile user start
|
||||
openclaw browser --browser-profile user status
|
||||
openclaw browser --browser-profile user tabs
|
||||
openclaw browser --browser-profile user snapshot --format ai
|
||||
```
|
||||
|
||||
What success looks like:
|
||||
|
||||
- `status` shows `driver: existing-session`
|
||||
- `status` shows `transport: chrome-mcp`
|
||||
- `status` shows `running: true`
|
||||
- `tabs` lists your already-open Chrome tabs
|
||||
- `snapshot` returns refs from the selected live tab
|
||||
|
||||
What to check if attach does not work:
|
||||
|
||||
- Chrome is version `144+`
|
||||
- remote debugging is enabled at `chrome://inspect/#remote-debugging`
|
||||
- Chrome showed and you accepted the attach consent prompt
|
||||
|
||||
Agent use:
|
||||
|
||||
- Use `profile="user"` when you need the user’s logged-in browser state.
|
||||
- If you use a custom existing-session profile, pass that explicit profile name.
|
||||
- Prefer `profile="user"` over `profile="chrome-relay"` unless the user
|
||||
explicitly wants the extension / attach-tab flow.
|
||||
- Only choose this mode when the user is at the computer to approve the attach
|
||||
prompt.
|
||||
- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect`
|
||||
|
||||
Notes:
|
||||
|
||||
- This path is higher-risk than the isolated `openclaw` profile because it can
|
||||
act inside your signed-in browser session.
|
||||
- OpenClaw does not launch Chrome for this driver; it attaches to an existing
|
||||
session only.
|
||||
- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not
|
||||
the legacy default-profile remote debugging port workflow.
|
||||
- Existing-session screenshots support page captures and `--ref` element
|
||||
captures from snapshots, but not CSS `--element` selectors.
|
||||
- Existing-session `wait --url` supports exact, substring, and glob patterns
|
||||
like other browser drivers. `wait --load networkidle` is not supported yet.
|
||||
- Some features still require the extension relay or managed browser path, such
|
||||
as PDF export and download interception.
|
||||
- Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated.
|
||||
|
||||
WSL2 / cross-namespace example:
|
||||
|
|
@ -337,7 +437,7 @@ WSL2 / cross-namespace example:
|
|||
browser: {
|
||||
enabled: true,
|
||||
relayBindHost: "0.0.0.0",
|
||||
defaultProfile: "chrome",
|
||||
defaultProfile: "chrome-relay",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
|
|
|||
|
|
@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs
|
|||
|
||||
Attach/detach happens via a **single Chrome toolbar button**.
|
||||
|
||||
If you want Chrome’s official DevTools MCP attach flow instead of the OpenClaw
|
||||
extension relay, use an `existing-session` browser profile instead. See
|
||||
[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome’s own
|
||||
setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your
|
||||
browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
|
||||
and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp).
|
||||
|
||||
## What it is (concept)
|
||||
|
||||
There are three parts:
|
||||
|
|
@ -55,7 +62,7 @@ After upgrading OpenClaw:
|
|||
|
||||
## Use it (set gateway token once)
|
||||
|
||||
OpenClaw ships with a built-in browser profile named `chrome` that targets the extension relay on the default port.
|
||||
OpenClaw ships with a built-in browser profile named `chrome-relay` that targets the extension relay on the default port.
|
||||
|
||||
Before first attach, open extension Options and set:
|
||||
|
||||
|
|
@ -64,8 +71,8 @@ Before first attach, open extension Options and set:
|
|||
|
||||
Use it:
|
||||
|
||||
- CLI: `openclaw browser --browser-profile chrome tabs`
|
||||
- Agent tool: `browser` with `profile="chrome"`
|
||||
- CLI: `openclaw browser --browser-profile chrome-relay tabs`
|
||||
- Agent tool: `browser` with `profile="chrome-relay"`
|
||||
|
||||
If you want a different name or a different relay port, create your own profile:
|
||||
|
||||
|
|
|
|||
|
|
@ -316,7 +316,11 @@ Common parameters:
|
|||
Notes:
|
||||
- Requires `browser.enabled=true` (default is `true`; set `false` to disable).
|
||||
- All actions accept optional `profile` parameter for multi-instance support.
|
||||
- When `profile` is omitted, uses `browser.defaultProfile` (defaults to "chrome").
|
||||
- Omit `profile` for the safe default: isolated OpenClaw-managed browser (`openclaw`).
|
||||
- Use `profile="user"` for the real local host browser when existing logins/cookies matter and the user is present to click/approve any attach prompt.
|
||||
- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow.
|
||||
- `profile="user"` and `profile="chrome-relay"` are host-only; do not combine them with sandbox/node targets.
|
||||
- When `profile` is omitted, uses `browser.defaultProfile` (defaults to `openclaw`).
|
||||
- Profile names: lowercase alphanumeric + hyphens only (max 64 chars).
|
||||
- Port range: 18800-18899 (~100 profiles max).
|
||||
- Remote profiles are attach-only (no start/stop/reset).
|
||||
|
|
|
|||
|
|
@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi
|
|||
2. In the dashboard, choose the **Search** plan and generate an API key.
|
||||
3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment.
|
||||
|
||||
Each Brave plan includes **$5/month in free credit** (renewing). The Search
|
||||
plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
|
||||
Each Brave plan includes **\$5/month in free credit** (renewing). The Search
|
||||
plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set
|
||||
your usage limit in the Brave dashboard to avoid unexpected charges. See the
|
||||
[Brave API portal](https://brave.com/search/api/) for current plans and
|
||||
pricing.
|
||||
|
|
|
|||
|
|
@ -54,6 +54,49 @@ describe("acpx ensure", () => {
|
|||
}
|
||||
});
|
||||
|
||||
function mockEnsureInstallFlow() {
|
||||
spawnAndCollectMock
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "acpx 0.0.9\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "added 1 package\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
});
|
||||
}
|
||||
|
||||
function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) {
|
||||
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
|
||||
command: "npm",
|
||||
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars,
|
||||
});
|
||||
}
|
||||
|
||||
it("accepts the pinned acpx version", async () => {
|
||||
spawnAndCollectMock.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
|
|
@ -177,25 +220,7 @@ describe("acpx ensure", () => {
|
|||
});
|
||||
|
||||
it("installs and verifies pinned acpx when precheck fails", async () => {
|
||||
spawnAndCollectMock
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "acpx 0.0.9\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "added 1 package\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
});
|
||||
mockEnsureInstallFlow();
|
||||
|
||||
await ensureAcpx({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
|
|
@ -204,33 +229,11 @@ describe("acpx ensure", () => {
|
|||
});
|
||||
|
||||
expect(spawnAndCollectMock).toHaveBeenCalledTimes(3);
|
||||
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
|
||||
command: "npm",
|
||||
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
|
||||
cwd: "/plugin",
|
||||
});
|
||||
expectEnsureInstallCalls();
|
||||
});
|
||||
|
||||
it("threads stripProviderAuthEnvVars through version probes and install", async () => {
|
||||
spawnAndCollectMock
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "acpx 0.0.9\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: "added 1 package\n",
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
stdout: `acpx ${ACPX_PINNED_VERSION}\n`,
|
||||
stderr: "",
|
||||
code: 0,
|
||||
error: null,
|
||||
});
|
||||
mockEnsureInstallFlow();
|
||||
|
||||
await ensureAcpx({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
|
|
@ -239,24 +242,7 @@ describe("acpx ensure", () => {
|
|||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
|
||||
expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({
|
||||
command: "npm",
|
||||
args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({
|
||||
command: "/plugin/node_modules/.bin/acpx",
|
||||
args: ["--version"],
|
||||
cwd: "/plugin",
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
expectEnsureInstallCalls(true);
|
||||
});
|
||||
|
||||
it("fails with actionable error when npm install fails", async () => {
|
||||
|
|
|
|||
|
|
@ -254,6 +254,44 @@ describe("waitForExit", () => {
|
|||
});
|
||||
|
||||
describe("spawnAndCollect", () => {
|
||||
type SpawnedEnvSnapshot = {
|
||||
openai?: string;
|
||||
github?: string;
|
||||
hf?: string;
|
||||
openclaw?: string;
|
||||
shell?: string;
|
||||
};
|
||||
|
||||
function stubProviderAuthEnv(env: Record<string, string>) {
|
||||
for (const [key, value] of Object.entries(env)) {
|
||||
vi.stubEnv(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
async function collectSpawnedEnvSnapshot(options?: {
|
||||
stripProviderAuthEnvVars?: boolean;
|
||||
openAiEnvKey?: string;
|
||||
githubEnvKey?: string;
|
||||
hfEnvKey?: string;
|
||||
}): Promise<SpawnedEnvSnapshot> {
|
||||
const openAiEnvKey = options?.openAiEnvKey ?? "OPENAI_API_KEY";
|
||||
const githubEnvKey = options?.githubEnvKey ?? "GITHUB_TOKEN";
|
||||
const hfEnvKey = options?.hfEnvKey ?? "HF_TOKEN";
|
||||
const result = await spawnAndCollect({
|
||||
command: process.execPath,
|
||||
args: [
|
||||
"-e",
|
||||
`process.stdout.write(JSON.stringify({openai:process.env.${openAiEnvKey},github:process.env.${githubEnvKey},hf:process.env.${hfEnvKey},openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))`,
|
||||
],
|
||||
cwd: process.cwd(),
|
||||
stripProviderAuthEnvVars: options?.stripProviderAuthEnvVars,
|
||||
});
|
||||
|
||||
expect(result.code).toBe(0);
|
||||
expect(result.error).toBeNull();
|
||||
return JSON.parse(result.stdout) as SpawnedEnvSnapshot;
|
||||
}
|
||||
|
||||
it("returns abort error immediately when signal is already aborted", async () => {
|
||||
const controller = new AbortController();
|
||||
controller.abort();
|
||||
|
|
@ -292,31 +330,15 @@ describe("spawnAndCollect", () => {
|
|||
});
|
||||
|
||||
it("strips shared provider auth env vars from spawned acpx children", async () => {
|
||||
vi.stubEnv("OPENAI_API_KEY", "openai-secret");
|
||||
vi.stubEnv("GITHUB_TOKEN", "gh-secret");
|
||||
vi.stubEnv("HF_TOKEN", "hf-secret");
|
||||
vi.stubEnv("OPENCLAW_API_KEY", "keep-me");
|
||||
|
||||
const result = await spawnAndCollect({
|
||||
command: process.execPath,
|
||||
args: [
|
||||
"-e",
|
||||
"process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
|
||||
],
|
||||
cwd: process.cwd(),
|
||||
stubProviderAuthEnv({
|
||||
OPENAI_API_KEY: "openai-secret",
|
||||
GITHUB_TOKEN: "gh-secret",
|
||||
HF_TOKEN: "hf-secret",
|
||||
OPENCLAW_API_KEY: "keep-me",
|
||||
});
|
||||
const parsed = await collectSpawnedEnvSnapshot({
|
||||
stripProviderAuthEnvVars: true,
|
||||
});
|
||||
|
||||
expect(result.code).toBe(0);
|
||||
expect(result.error).toBeNull();
|
||||
|
||||
const parsed = JSON.parse(result.stdout) as {
|
||||
openai?: string;
|
||||
github?: string;
|
||||
hf?: string;
|
||||
openclaw?: string;
|
||||
shell?: string;
|
||||
};
|
||||
expect(parsed.openai).toBeUndefined();
|
||||
expect(parsed.github).toBeUndefined();
|
||||
expect(parsed.hf).toBeUndefined();
|
||||
|
|
@ -325,29 +347,16 @@ describe("spawnAndCollect", () => {
|
|||
});
|
||||
|
||||
it("strips provider auth env vars case-insensitively", async () => {
|
||||
vi.stubEnv("OpenAI_Api_Key", "openai-secret");
|
||||
vi.stubEnv("Github_Token", "gh-secret");
|
||||
vi.stubEnv("OPENCLAW_API_KEY", "keep-me");
|
||||
|
||||
const result = await spawnAndCollect({
|
||||
command: process.execPath,
|
||||
args: [
|
||||
"-e",
|
||||
"process.stdout.write(JSON.stringify({openai:process.env.OpenAI_Api_Key,github:process.env.Github_Token,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
|
||||
],
|
||||
cwd: process.cwd(),
|
||||
stripProviderAuthEnvVars: true,
|
||||
stubProviderAuthEnv({
|
||||
OpenAI_Api_Key: "openai-secret",
|
||||
Github_Token: "gh-secret",
|
||||
OPENCLAW_API_KEY: "keep-me",
|
||||
});
|
||||
const parsed = await collectSpawnedEnvSnapshot({
|
||||
stripProviderAuthEnvVars: true,
|
||||
openAiEnvKey: "OpenAI_Api_Key",
|
||||
githubEnvKey: "Github_Token",
|
||||
});
|
||||
|
||||
expect(result.code).toBe(0);
|
||||
expect(result.error).toBeNull();
|
||||
|
||||
const parsed = JSON.parse(result.stdout) as {
|
||||
openai?: string;
|
||||
github?: string;
|
||||
openclaw?: string;
|
||||
shell?: string;
|
||||
};
|
||||
expect(parsed.openai).toBeUndefined();
|
||||
expect(parsed.github).toBeUndefined();
|
||||
expect(parsed.openclaw).toBe("keep-me");
|
||||
|
|
@ -355,30 +364,13 @@ describe("spawnAndCollect", () => {
|
|||
});
|
||||
|
||||
it("preserves provider auth env vars for explicit custom commands by default", async () => {
|
||||
vi.stubEnv("OPENAI_API_KEY", "openai-secret");
|
||||
vi.stubEnv("GITHUB_TOKEN", "gh-secret");
|
||||
vi.stubEnv("HF_TOKEN", "hf-secret");
|
||||
vi.stubEnv("OPENCLAW_API_KEY", "keep-me");
|
||||
|
||||
const result = await spawnAndCollect({
|
||||
command: process.execPath,
|
||||
args: [
|
||||
"-e",
|
||||
"process.stdout.write(JSON.stringify({openai:process.env.OPENAI_API_KEY,github:process.env.GITHUB_TOKEN,hf:process.env.HF_TOKEN,openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))",
|
||||
],
|
||||
cwd: process.cwd(),
|
||||
stubProviderAuthEnv({
|
||||
OPENAI_API_KEY: "openai-secret",
|
||||
GITHUB_TOKEN: "gh-secret",
|
||||
HF_TOKEN: "hf-secret",
|
||||
OPENCLAW_API_KEY: "keep-me",
|
||||
});
|
||||
|
||||
expect(result.code).toBe(0);
|
||||
expect(result.error).toBeNull();
|
||||
|
||||
const parsed = JSON.parse(result.stdout) as {
|
||||
openai?: string;
|
||||
github?: string;
|
||||
hf?: string;
|
||||
openclaw?: string;
|
||||
shell?: string;
|
||||
};
|
||||
const parsed = await collectSpawnedEnvSnapshot();
|
||||
expect(parsed.openai).toBe("openai-secret");
|
||||
expect(parsed.github).toBe("gh-secret");
|
||||
expect(parsed.hf).toBe("hf-secret");
|
||||
|
|
|
|||
|
|
@ -82,6 +82,15 @@ describe("downloadBlueBubblesAttachment", () => {
|
|||
).rejects.toThrow("too large");
|
||||
}
|
||||
|
||||
function mockSuccessfulAttachmentDownload(buffer = new Uint8Array([1])) {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
headers: new Headers(),
|
||||
arrayBuffer: () => Promise.resolve(buffer.buffer),
|
||||
});
|
||||
return buffer;
|
||||
}
|
||||
|
||||
it("throws when guid is missing", async () => {
|
||||
const attachment: BlueBubblesAttachment = {};
|
||||
await expect(
|
||||
|
|
@ -159,12 +168,7 @@ describe("downloadBlueBubblesAttachment", () => {
|
|||
});
|
||||
|
||||
it("encodes guid in URL", async () => {
|
||||
const mockBuffer = new Uint8Array([1]);
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
headers: new Headers(),
|
||||
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
|
||||
});
|
||||
mockSuccessfulAttachmentDownload();
|
||||
|
||||
const attachment: BlueBubblesAttachment = { guid: "att/with/special chars" };
|
||||
await downloadBlueBubblesAttachment(attachment, {
|
||||
|
|
@ -244,12 +248,7 @@ describe("downloadBlueBubblesAttachment", () => {
|
|||
});
|
||||
|
||||
it("resolves credentials from config when opts not provided", async () => {
|
||||
const mockBuffer = new Uint8Array([1]);
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
headers: new Headers(),
|
||||
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
|
||||
});
|
||||
mockSuccessfulAttachmentDownload();
|
||||
|
||||
const attachment: BlueBubblesAttachment = { guid: "att-config" };
|
||||
const result = await downloadBlueBubblesAttachment(attachment, {
|
||||
|
|
@ -270,12 +269,7 @@ describe("downloadBlueBubblesAttachment", () => {
|
|||
});
|
||||
|
||||
it("passes ssrfPolicy with allowPrivateNetwork when config enables it", async () => {
|
||||
const mockBuffer = new Uint8Array([1]);
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
headers: new Headers(),
|
||||
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
|
||||
});
|
||||
mockSuccessfulAttachmentDownload();
|
||||
|
||||
const attachment: BlueBubblesAttachment = { guid: "att-ssrf" };
|
||||
await downloadBlueBubblesAttachment(attachment, {
|
||||
|
|
@ -295,12 +289,7 @@ describe("downloadBlueBubblesAttachment", () => {
|
|||
});
|
||||
|
||||
it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => {
|
||||
const mockBuffer = new Uint8Array([1]);
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
headers: new Headers(),
|
||||
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
|
||||
});
|
||||
mockSuccessfulAttachmentDownload();
|
||||
|
||||
const attachment: BlueBubblesAttachment = { guid: "att-no-ssrf" };
|
||||
await downloadBlueBubblesAttachment(attachment, {
|
||||
|
|
@ -313,12 +302,7 @@ describe("downloadBlueBubblesAttachment", () => {
|
|||
});
|
||||
|
||||
it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => {
|
||||
const mockBuffer = new Uint8Array([1]);
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
headers: new Headers(),
|
||||
arrayBuffer: () => Promise.resolve(mockBuffer.buffer),
|
||||
});
|
||||
mockSuccessfulAttachmentDownload();
|
||||
|
||||
const attachment: BlueBubblesAttachment = { guid: "att-private-ip" };
|
||||
await downloadBlueBubblesAttachment(attachment, {
|
||||
|
|
@ -352,6 +336,14 @@ describe("sendBlueBubblesAttachment", () => {
|
|||
return Buffer.from(body).toString("utf8");
|
||||
}
|
||||
|
||||
function expectVoiceAttachmentBody() {
|
||||
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array;
|
||||
const bodyText = decodeBody(body);
|
||||
expect(bodyText).toContain('name="isAudioMessage"');
|
||||
expect(bodyText).toContain("true");
|
||||
return bodyText;
|
||||
}
|
||||
|
||||
it("marks voice memos when asVoice is true and mp3 is provided", async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
|
|
@ -367,10 +359,7 @@ describe("sendBlueBubblesAttachment", () => {
|
|||
opts: { serverUrl: "http://localhost:1234", password: "test" },
|
||||
});
|
||||
|
||||
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array;
|
||||
const bodyText = decodeBody(body);
|
||||
expect(bodyText).toContain('name="isAudioMessage"');
|
||||
expect(bodyText).toContain("true");
|
||||
const bodyText = expectVoiceAttachmentBody();
|
||||
expect(bodyText).toContain('filename="voice.mp3"');
|
||||
});
|
||||
|
||||
|
|
@ -389,8 +378,7 @@ describe("sendBlueBubblesAttachment", () => {
|
|||
opts: { serverUrl: "http://localhost:1234", password: "test" },
|
||||
});
|
||||
|
||||
const body = mockFetch.mock.calls[0][1]?.body as Uint8Array;
|
||||
const bodyText = decodeBody(body);
|
||||
const bodyText = expectVoiceAttachmentBody();
|
||||
expect(bodyText).toContain('filename="voice.mp3"');
|
||||
expect(bodyText).toContain('name="voice.mp3"');
|
||||
});
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ import crypto from "node:crypto";
|
|||
import path from "node:path";
|
||||
import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles";
|
||||
import { resolveBlueBubblesServerAccount } from "./account-resolve.js";
|
||||
import { postMultipartFormData } from "./multipart.js";
|
||||
import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js";
|
||||
import {
|
||||
getCachedBlueBubblesPrivateApiStatus,
|
||||
isBlueBubblesPrivateApiStatusEnabled,
|
||||
|
|
@ -262,12 +262,7 @@ export async function sendBlueBubblesAttachment(params: {
|
|||
timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text();
|
||||
throw new Error(
|
||||
`BlueBubbles attachment send failed (${res.status}): ${errorText || "unknown"}`,
|
||||
);
|
||||
}
|
||||
await assertMultipartActionOk(res, "attachment send");
|
||||
|
||||
const responseBody = await res.text();
|
||||
if (!responseBody) {
|
||||
|
|
|
|||
|
|
@ -29,6 +29,11 @@ describe("chat", () => {
|
|||
});
|
||||
}
|
||||
|
||||
function mockTwoOkTextResponses() {
|
||||
mockOkTextResponse();
|
||||
mockOkTextResponse();
|
||||
}
|
||||
|
||||
async function expectCalledUrlIncludesPassword(params: {
|
||||
password: string;
|
||||
invoke: () => Promise<void>;
|
||||
|
|
@ -198,15 +203,7 @@ describe("chat", () => {
|
|||
});
|
||||
|
||||
it("uses POST for start and DELETE for stop", async () => {
|
||||
mockFetch
|
||||
.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
});
|
||||
mockTwoOkTextResponses();
|
||||
|
||||
await sendBlueBubblesTyping("iMessage;-;+15551234567", true, {
|
||||
serverUrl: "http://localhost:1234",
|
||||
|
|
@ -442,15 +439,7 @@ describe("chat", () => {
|
|||
});
|
||||
|
||||
it("adds and removes participant using matching endpoint", async () => {
|
||||
mockFetch
|
||||
.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
});
|
||||
mockTwoOkTextResponses();
|
||||
|
||||
await addBlueBubblesParticipant("chat-guid", "+15551234567", {
|
||||
serverUrl: "http://localhost:1234",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ import crypto from "node:crypto";
|
|||
import path from "node:path";
|
||||
import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles";
|
||||
import { resolveBlueBubblesServerAccount } from "./account-resolve.js";
|
||||
import { postMultipartFormData } from "./multipart.js";
|
||||
import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js";
|
||||
import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js";
|
||||
import { blueBubblesFetchWithTimeout, buildBlueBubblesApiUrl } from "./types.js";
|
||||
|
||||
|
|
@ -55,12 +55,7 @@ async function sendBlueBubblesChatEndpointRequest(params: {
|
|||
{ method: params.method },
|
||||
params.opts.timeoutMs,
|
||||
);
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => "");
|
||||
throw new Error(
|
||||
`BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`,
|
||||
);
|
||||
}
|
||||
await assertMultipartActionOk(res, params.action);
|
||||
}
|
||||
|
||||
async function sendPrivateApiJsonRequest(params: {
|
||||
|
|
@ -86,12 +81,7 @@ async function sendPrivateApiJsonRequest(params: {
|
|||
}
|
||||
|
||||
const res = await blueBubblesFetchWithTimeout(url, request, params.opts.timeoutMs);
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => "");
|
||||
throw new Error(
|
||||
`BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`,
|
||||
);
|
||||
}
|
||||
await assertMultipartActionOk(res, params.action);
|
||||
}
|
||||
|
||||
export async function markBlueBubblesChatRead(
|
||||
|
|
@ -329,8 +319,5 @@ export async function setGroupIconBlueBubbles(
|
|||
timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const errorText = await res.text().catch(() => "");
|
||||
throw new Error(`BlueBubbles setGroupIcon failed (${res.status}): ${errorText || "unknown"}`);
|
||||
}
|
||||
await assertMultipartActionOk(res, "setGroupIcon");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -70,6 +70,70 @@ async function makeTempDir(): Promise<string> {
|
|||
return dir;
|
||||
}
|
||||
|
||||
async function makeTempFile(
|
||||
fileName: string,
|
||||
contents: string,
|
||||
dir?: string,
|
||||
): Promise<{ dir: string; filePath: string }> {
|
||||
const resolvedDir = dir ?? (await makeTempDir());
|
||||
const filePath = path.join(resolvedDir, fileName);
|
||||
await fs.writeFile(filePath, contents, "utf8");
|
||||
return { dir: resolvedDir, filePath };
|
||||
}
|
||||
|
||||
async function sendLocalMedia(params: {
|
||||
cfg: OpenClawConfig;
|
||||
mediaPath: string;
|
||||
accountId?: string;
|
||||
}) {
|
||||
return sendBlueBubblesMedia({
|
||||
cfg: params.cfg,
|
||||
to: "chat:123",
|
||||
accountId: params.accountId,
|
||||
mediaPath: params.mediaPath,
|
||||
});
|
||||
}
|
||||
|
||||
async function expectRejectedLocalMedia(params: {
|
||||
cfg: OpenClawConfig;
|
||||
mediaPath: string;
|
||||
error: RegExp;
|
||||
accountId?: string;
|
||||
}) {
|
||||
await expect(
|
||||
sendLocalMedia({
|
||||
cfg: params.cfg,
|
||||
mediaPath: params.mediaPath,
|
||||
accountId: params.accountId,
|
||||
}),
|
||||
).rejects.toThrow(params.error);
|
||||
|
||||
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
|
||||
}
|
||||
|
||||
async function expectAllowedLocalMedia(params: {
|
||||
cfg: OpenClawConfig;
|
||||
mediaPath: string;
|
||||
expectedAttachment: Record<string, unknown>;
|
||||
accountId?: string;
|
||||
expectMimeDetection?: boolean;
|
||||
}) {
|
||||
const result = await sendLocalMedia({
|
||||
cfg: params.cfg,
|
||||
mediaPath: params.mediaPath,
|
||||
accountId: params.accountId,
|
||||
});
|
||||
|
||||
expect(result).toEqual({ messageId: "msg-1" });
|
||||
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
|
||||
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
|
||||
expect.objectContaining(params.expectedAttachment),
|
||||
);
|
||||
if (params.expectMimeDetection) {
|
||||
expect(runtimeMocks.detectMime).toHaveBeenCalled();
|
||||
}
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
const runtime = createMockRuntime();
|
||||
runtimeMocks = runtime.mocks;
|
||||
|
|
@ -110,57 +174,43 @@ describe("sendBlueBubblesMedia local-path hardening", () => {
|
|||
const outsideFile = path.join(outsideDir, "outside.txt");
|
||||
await fs.writeFile(outsideFile, "not allowed", "utf8");
|
||||
|
||||
await expect(
|
||||
sendBlueBubblesMedia({
|
||||
await expectRejectedLocalMedia({
|
||||
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
|
||||
to: "chat:123",
|
||||
mediaPath: outsideFile,
|
||||
}),
|
||||
).rejects.toThrow(/not under any configured mediaLocalRoots/i);
|
||||
|
||||
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
|
||||
error: /not under any configured mediaLocalRoots/i,
|
||||
});
|
||||
});
|
||||
|
||||
it("allows local paths that are explicitly configured", async () => {
|
||||
const allowedRoot = await makeTempDir();
|
||||
const allowedFile = path.join(allowedRoot, "allowed.txt");
|
||||
await fs.writeFile(allowedFile, "allowed", "utf8");
|
||||
const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile(
|
||||
"allowed.txt",
|
||||
"allowed",
|
||||
);
|
||||
|
||||
const result = await sendBlueBubblesMedia({
|
||||
await expectAllowedLocalMedia({
|
||||
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
|
||||
to: "chat:123",
|
||||
mediaPath: allowedFile,
|
||||
});
|
||||
|
||||
expect(result).toEqual({ messageId: "msg-1" });
|
||||
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
|
||||
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
|
||||
expect.objectContaining({
|
||||
expectedAttachment: {
|
||||
filename: "allowed.txt",
|
||||
contentType: "text/plain",
|
||||
}),
|
||||
);
|
||||
expect(runtimeMocks.detectMime).toHaveBeenCalled();
|
||||
},
|
||||
expectMimeDetection: true,
|
||||
});
|
||||
});
|
||||
|
||||
it("allows file:// media paths and file:// local roots", async () => {
|
||||
const allowedRoot = await makeTempDir();
|
||||
const allowedFile = path.join(allowedRoot, "allowed.txt");
|
||||
await fs.writeFile(allowedFile, "allowed", "utf8");
|
||||
|
||||
const result = await sendBlueBubblesMedia({
|
||||
cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }),
|
||||
to: "chat:123",
|
||||
mediaPath: pathToFileURL(allowedFile).toString(),
|
||||
});
|
||||
|
||||
expect(result).toEqual({ messageId: "msg-1" });
|
||||
expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1);
|
||||
expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual(
|
||||
expect.objectContaining({
|
||||
filename: "allowed.txt",
|
||||
}),
|
||||
const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile(
|
||||
"allowed.txt",
|
||||
"allowed",
|
||||
);
|
||||
|
||||
await expectAllowedLocalMedia({
|
||||
cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }),
|
||||
mediaPath: pathToFileURL(allowedFile).toString(),
|
||||
expectedAttachment: {
|
||||
filename: "allowed.txt",
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("uses account-specific mediaLocalRoots over top-level roots", async () => {
|
||||
|
|
@ -213,15 +263,11 @@ describe("sendBlueBubblesMedia local-path hardening", () => {
|
|||
return;
|
||||
}
|
||||
|
||||
await expect(
|
||||
sendBlueBubblesMedia({
|
||||
await expectRejectedLocalMedia({
|
||||
cfg: createConfig({ mediaLocalRoots: [allowedRoot] }),
|
||||
to: "chat:123",
|
||||
mediaPath: linkPath,
|
||||
}),
|
||||
).rejects.toThrow(/not under any configured mediaLocalRoots/i);
|
||||
|
||||
expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled();
|
||||
error: /not under any configured mediaLocalRoots/i,
|
||||
});
|
||||
});
|
||||
|
||||
it("rejects relative mediaLocalRoots entries", async () => {
|
||||
|
|
|
|||
|
|
@ -1,18 +1,24 @@
|
|||
import { describe, expect, it } from "vitest";
|
||||
import { normalizeWebhookMessage, normalizeWebhookReaction } from "./monitor-normalize.js";
|
||||
|
||||
function createFallbackDmPayload(overrides: Record<string, unknown> = {}) {
|
||||
return {
|
||||
guid: "msg-1",
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
handle: null,
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
describe("normalizeWebhookMessage", () => {
|
||||
it("falls back to DM chatGuid handle when sender handle is missing", () => {
|
||||
const result = normalizeWebhookMessage({
|
||||
type: "new-message",
|
||||
data: {
|
||||
guid: "msg-1",
|
||||
data: createFallbackDmPayload({
|
||||
text: "hello",
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
handle: null,
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
},
|
||||
}),
|
||||
});
|
||||
|
||||
expect(result).not.toBeNull();
|
||||
|
|
@ -78,15 +84,11 @@ describe("normalizeWebhookReaction", () => {
|
|||
it("falls back to DM chatGuid handle when reaction sender handle is missing", () => {
|
||||
const result = normalizeWebhookReaction({
|
||||
type: "updated-message",
|
||||
data: {
|
||||
data: createFallbackDmPayload({
|
||||
guid: "msg-2",
|
||||
associatedMessageGuid: "p:0/msg-1",
|
||||
associatedMessageType: 2000,
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
handle: null,
|
||||
chatGuid: "iMessage;-;+15551234567",
|
||||
},
|
||||
}),
|
||||
});
|
||||
|
||||
expect(result).not.toBeNull();
|
||||
|
|
|
|||
|
|
@ -582,6 +582,29 @@ export function parseTapbackText(params: {
|
|||
return null;
|
||||
}
|
||||
|
||||
const parseLeadingReactionAction = (
|
||||
prefix: "reacted" | "removed",
|
||||
defaultAction: "added" | "removed",
|
||||
) => {
|
||||
if (!lower.startsWith(prefix)) {
|
||||
return null;
|
||||
}
|
||||
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
|
||||
if (!emoji) {
|
||||
return null;
|
||||
}
|
||||
const quotedText = extractQuotedTapbackText(trimmed);
|
||||
if (params.requireQuoted && !quotedText) {
|
||||
return null;
|
||||
}
|
||||
const fallback = trimmed.slice(prefix.length).trim();
|
||||
return {
|
||||
emoji,
|
||||
action: params.actionHint ?? defaultAction,
|
||||
quotedText: quotedText ?? fallback,
|
||||
};
|
||||
};
|
||||
|
||||
for (const [pattern, { emoji, action }] of TAPBACK_TEXT_MAP) {
|
||||
if (lower.startsWith(pattern)) {
|
||||
// Extract quoted text if present (e.g., 'Loved "hello"' -> "hello")
|
||||
|
|
@ -599,30 +622,14 @@ export function parseTapbackText(params: {
|
|||
}
|
||||
}
|
||||
|
||||
if (lower.startsWith("reacted")) {
|
||||
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
|
||||
if (!emoji) {
|
||||
return null;
|
||||
}
|
||||
const quotedText = extractQuotedTapbackText(trimmed);
|
||||
if (params.requireQuoted && !quotedText) {
|
||||
return null;
|
||||
}
|
||||
const fallback = trimmed.slice("reacted".length).trim();
|
||||
return { emoji, action: params.actionHint ?? "added", quotedText: quotedText ?? fallback };
|
||||
const reacted = parseLeadingReactionAction("reacted", "added");
|
||||
if (reacted) {
|
||||
return reacted;
|
||||
}
|
||||
|
||||
if (lower.startsWith("removed")) {
|
||||
const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint;
|
||||
if (!emoji) {
|
||||
return null;
|
||||
}
|
||||
const quotedText = extractQuotedTapbackText(trimmed);
|
||||
if (params.requireQuoted && !quotedText) {
|
||||
return null;
|
||||
}
|
||||
const fallback = trimmed.slice("removed".length).trim();
|
||||
return { emoji, action: params.actionHint ?? "removed", quotedText: quotedText ?? fallback };
|
||||
const removed = parseLeadingReactionAction("removed", "removed");
|
||||
if (removed) {
|
||||
return removed;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -302,65 +302,102 @@ describe("BlueBubbles webhook monitor", () => {
|
|||
};
|
||||
}
|
||||
|
||||
describe("webhook parsing + auth handling", () => {
|
||||
it("rejects non-POST requests", async () => {
|
||||
const account = createMockAccount();
|
||||
async function dispatchWebhook(req: IncomingMessage) {
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
return { handled, res };
|
||||
}
|
||||
|
||||
function createWebhookRequestForTest(params?: {
|
||||
method?: string;
|
||||
url?: string;
|
||||
body?: unknown;
|
||||
headers?: Record<string, string>;
|
||||
remoteAddress?: string;
|
||||
}) {
|
||||
const req = createMockRequest(
|
||||
params?.method ?? "POST",
|
||||
params?.url ?? "/bluebubbles-webhook",
|
||||
params?.body ?? {},
|
||||
params?.headers,
|
||||
);
|
||||
if (params?.remoteAddress) {
|
||||
setRequestRemoteAddress(req, params.remoteAddress);
|
||||
}
|
||||
return req;
|
||||
}
|
||||
|
||||
function createHangingWebhookRequest(url = "/bluebubbles-webhook?password=test-password") {
|
||||
const req = new EventEmitter() as IncomingMessage;
|
||||
const destroyMock = vi.fn();
|
||||
req.method = "POST";
|
||||
req.url = url;
|
||||
req.headers = {};
|
||||
req.destroy = destroyMock as unknown as IncomingMessage["destroy"];
|
||||
setRequestRemoteAddress(req, "127.0.0.1");
|
||||
return { req, destroyMock };
|
||||
}
|
||||
|
||||
function registerWebhookTargets(
|
||||
params: Array<{
|
||||
account: ResolvedBlueBubblesAccount;
|
||||
statusSink?: (event: unknown) => void;
|
||||
}>,
|
||||
) {
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
const unregisterFns = params.map(({ account, statusSink }) =>
|
||||
registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
statusSink,
|
||||
}),
|
||||
);
|
||||
|
||||
const req = createMockRequest("GET", "/bluebubbles-webhook", {});
|
||||
const res = createMockResponse();
|
||||
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
unregister = () => {
|
||||
for (const unregisterFn of unregisterFns) {
|
||||
unregisterFn();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
async function expectWebhookStatus(
|
||||
req: IncomingMessage,
|
||||
expectedStatus: number,
|
||||
expectedBody?: string,
|
||||
) {
|
||||
const { handled, res } = await dispatchWebhook(req);
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(405);
|
||||
expect(res.statusCode).toBe(expectedStatus);
|
||||
if (expectedBody !== undefined) {
|
||||
expect(res.body).toBe(expectedBody);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
describe("webhook parsing + auth handling", () => {
|
||||
it("rejects non-POST requests", async () => {
|
||||
setupWebhookTarget();
|
||||
const req = createWebhookRequestForTest({ method: "GET" });
|
||||
await expectWebhookStatus(req, 405);
|
||||
});
|
||||
|
||||
it("accepts POST requests with valid JSON payload", async () => {
|
||||
setupWebhookTarget();
|
||||
const payload = createNewMessagePayload({ date: Date.now() });
|
||||
|
||||
const req = createMockRequest("POST", "/bluebubbles-webhook", payload);
|
||||
const res = createMockResponse();
|
||||
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(res.body).toBe("ok");
|
||||
const req = createWebhookRequestForTest({ body: payload });
|
||||
await expectWebhookStatus(req, 200, "ok");
|
||||
});
|
||||
|
||||
it("rejects requests with invalid JSON", async () => {
|
||||
const account = createMockAccount();
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const req = createMockRequest("POST", "/bluebubbles-webhook", "invalid json {{");
|
||||
const res = createMockResponse();
|
||||
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(400);
|
||||
setupWebhookTarget();
|
||||
const req = createWebhookRequestForTest({ body: "invalid json {{" });
|
||||
await expectWebhookStatus(req, 400);
|
||||
});
|
||||
|
||||
it("accepts URL-encoded payload wrappers", async () => {
|
||||
|
|
@ -369,42 +406,17 @@ describe("BlueBubbles webhook monitor", () => {
|
|||
const encodedBody = new URLSearchParams({
|
||||
payload: JSON.stringify(payload),
|
||||
}).toString();
|
||||
|
||||
const req = createMockRequest("POST", "/bluebubbles-webhook", encodedBody);
|
||||
const res = createMockResponse();
|
||||
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(res.body).toBe("ok");
|
||||
const req = createWebhookRequestForTest({ body: encodedBody });
|
||||
await expectWebhookStatus(req, 200, "ok");
|
||||
});
|
||||
|
||||
it("returns 408 when request body times out (Slow-Loris protection)", async () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
const account = createMockAccount();
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
setupWebhookTarget();
|
||||
|
||||
// Create a request that never sends data or ends (simulates slow-loris)
|
||||
const req = new EventEmitter() as IncomingMessage;
|
||||
req.method = "POST";
|
||||
req.url = "/bluebubbles-webhook?password=test-password";
|
||||
req.headers = {};
|
||||
(req as unknown as { socket: { remoteAddress: string } }).socket = {
|
||||
remoteAddress: "127.0.0.1",
|
||||
};
|
||||
req.destroy = vi.fn();
|
||||
const { req, destroyMock } = createHangingWebhookRequest();
|
||||
|
||||
const res = createMockResponse();
|
||||
|
||||
|
|
@ -416,7 +428,7 @@ describe("BlueBubbles webhook monitor", () => {
|
|||
const handled = await handledPromise;
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(408);
|
||||
expect(req.destroy).toHaveBeenCalled();
|
||||
expect(destroyMock).toHaveBeenCalled();
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
|
|
@ -424,140 +436,62 @@ describe("BlueBubbles webhook monitor", () => {
|
|||
|
||||
it("rejects unauthorized requests before reading the body", async () => {
|
||||
const account = createMockAccount({ password: "secret-token" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const req = new EventEmitter() as IncomingMessage;
|
||||
req.method = "POST";
|
||||
req.url = "/bluebubbles-webhook?password=wrong-token";
|
||||
req.headers = {};
|
||||
setupWebhookTarget({ account });
|
||||
const { req } = createHangingWebhookRequest("/bluebubbles-webhook?password=wrong-token");
|
||||
const onSpy = vi.spyOn(req, "on");
|
||||
(req as unknown as { socket: { remoteAddress: string } }).socket = {
|
||||
remoteAddress: "127.0.0.1",
|
||||
};
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(401);
|
||||
await expectWebhookStatus(req, 401);
|
||||
expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function));
|
||||
});
|
||||
|
||||
it("authenticates via password query parameter", async () => {
|
||||
const account = createMockAccount({ password: "secret-token" });
|
||||
|
||||
// Mock non-localhost request
|
||||
const req = createMockRequest(
|
||||
"POST",
|
||||
"/bluebubbles-webhook?password=secret-token",
|
||||
createNewMessagePayload(),
|
||||
);
|
||||
setRequestRemoteAddress(req, "192.168.1.100");
|
||||
setupWebhookTarget({ account });
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
const req = createWebhookRequestForTest({
|
||||
url: "/bluebubbles-webhook?password=secret-token",
|
||||
body: createNewMessagePayload(),
|
||||
remoteAddress: "192.168.1.100",
|
||||
});
|
||||
await expectWebhookStatus(req, 200);
|
||||
});
|
||||
|
||||
it("authenticates via x-password header", async () => {
|
||||
const account = createMockAccount({ password: "secret-token" });
|
||||
|
||||
const req = createMockRequest(
|
||||
"POST",
|
||||
"/bluebubbles-webhook",
|
||||
createNewMessagePayload(),
|
||||
{ "x-password": "secret-token" }, // pragma: allowlist secret
|
||||
);
|
||||
setRequestRemoteAddress(req, "192.168.1.100");
|
||||
setupWebhookTarget({ account });
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
const req = createWebhookRequestForTest({
|
||||
body: createNewMessagePayload(),
|
||||
headers: { "x-password": "secret-token" }, // pragma: allowlist secret
|
||||
remoteAddress: "192.168.1.100",
|
||||
});
|
||||
await expectWebhookStatus(req, 200);
|
||||
});
|
||||
|
||||
it("rejects unauthorized requests with wrong password", async () => {
|
||||
const account = createMockAccount({ password: "secret-token" });
|
||||
const req = createMockRequest(
|
||||
"POST",
|
||||
"/bluebubbles-webhook?password=wrong-token",
|
||||
createNewMessagePayload(),
|
||||
);
|
||||
setRequestRemoteAddress(req, "192.168.1.100");
|
||||
setupWebhookTarget({ account });
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(401);
|
||||
const req = createWebhookRequestForTest({
|
||||
url: "/bluebubbles-webhook?password=wrong-token",
|
||||
body: createNewMessagePayload(),
|
||||
remoteAddress: "192.168.1.100",
|
||||
});
|
||||
await expectWebhookStatus(req, 401);
|
||||
});
|
||||
|
||||
it("rejects ambiguous routing when multiple targets match the same password", async () => {
|
||||
const accountA = createMockAccount({ password: "secret-token" });
|
||||
const accountB = createMockAccount({ password: "secret-token" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
const sinkA = vi.fn();
|
||||
const sinkB = vi.fn();
|
||||
registerWebhookTargets([
|
||||
{ account: accountA, statusSink: sinkA },
|
||||
{ account: accountB, statusSink: sinkB },
|
||||
]);
|
||||
|
||||
const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "hello",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-1",
|
||||
},
|
||||
});
|
||||
(req as unknown as { socket: { remoteAddress: string } }).socket = {
|
||||
const req = createWebhookRequestForTest({
|
||||
url: "/bluebubbles-webhook?password=secret-token",
|
||||
body: createNewMessagePayload(),
|
||||
remoteAddress: "192.168.1.100",
|
||||
};
|
||||
|
||||
const unregisterA = registerBlueBubblesWebhookTarget({
|
||||
account: accountA,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
statusSink: sinkA,
|
||||
});
|
||||
const unregisterB = registerBlueBubblesWebhookTarget({
|
||||
account: accountB,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
statusSink: sinkB,
|
||||
});
|
||||
unregister = () => {
|
||||
unregisterA();
|
||||
unregisterB();
|
||||
};
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(401);
|
||||
await expectWebhookStatus(req, 401);
|
||||
expect(sinkA).not.toHaveBeenCalled();
|
||||
expect(sinkB).not.toHaveBeenCalled();
|
||||
});
|
||||
|
|
@ -565,107 +499,38 @@ describe("BlueBubbles webhook monitor", () => {
|
|||
it("ignores targets without passwords when a password-authenticated target matches", async () => {
|
||||
const accountStrict = createMockAccount({ password: "secret-token" });
|
||||
const accountWithoutPassword = createMockAccount({ password: undefined });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
const sinkStrict = vi.fn();
|
||||
const sinkWithoutPassword = vi.fn();
|
||||
registerWebhookTargets([
|
||||
{ account: accountStrict, statusSink: sinkStrict },
|
||||
{ account: accountWithoutPassword, statusSink: sinkWithoutPassword },
|
||||
]);
|
||||
|
||||
const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "hello",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-1",
|
||||
},
|
||||
});
|
||||
(req as unknown as { socket: { remoteAddress: string } }).socket = {
|
||||
const req = createWebhookRequestForTest({
|
||||
url: "/bluebubbles-webhook?password=secret-token",
|
||||
body: createNewMessagePayload(),
|
||||
remoteAddress: "192.168.1.100",
|
||||
};
|
||||
|
||||
const unregisterStrict = registerBlueBubblesWebhookTarget({
|
||||
account: accountStrict,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
statusSink: sinkStrict,
|
||||
});
|
||||
const unregisterNoPassword = registerBlueBubblesWebhookTarget({
|
||||
account: accountWithoutPassword,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
statusSink: sinkWithoutPassword,
|
||||
});
|
||||
unregister = () => {
|
||||
unregisterStrict();
|
||||
unregisterNoPassword();
|
||||
};
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
await expectWebhookStatus(req, 200);
|
||||
expect(sinkStrict).toHaveBeenCalledTimes(1);
|
||||
expect(sinkWithoutPassword).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("requires authentication for loopback requests when password is configured", async () => {
|
||||
const account = createMockAccount({ password: "secret-token" });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
setupWebhookTarget({ account });
|
||||
for (const remoteAddress of ["127.0.0.1", "::1", "::ffff:127.0.0.1"]) {
|
||||
const req = createMockRequest("POST", "/bluebubbles-webhook", {
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "hello",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-1",
|
||||
},
|
||||
});
|
||||
(req as unknown as { socket: { remoteAddress: string } }).socket = {
|
||||
const req = createWebhookRequestForTest({
|
||||
body: createNewMessagePayload(),
|
||||
remoteAddress,
|
||||
};
|
||||
|
||||
const loopbackUnregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(401);
|
||||
|
||||
loopbackUnregister();
|
||||
await expectWebhookStatus(req, 401);
|
||||
}
|
||||
});
|
||||
|
||||
it("rejects targets without passwords for loopback and proxied-looking requests", async () => {
|
||||
const account = createMockAccount({ password: undefined });
|
||||
const config: OpenClawConfig = {};
|
||||
const core = createMockRuntime();
|
||||
setBlueBubblesRuntime(core);
|
||||
|
||||
unregister = registerBlueBubblesWebhookTarget({
|
||||
account,
|
||||
config,
|
||||
runtime: { log: vi.fn(), error: vi.fn() },
|
||||
core,
|
||||
path: "/bluebubbles-webhook",
|
||||
});
|
||||
setupWebhookTarget({ account });
|
||||
|
||||
const headerVariants: Record<string, string>[] = [
|
||||
{ host: "localhost" },
|
||||
|
|
@ -673,28 +538,12 @@ describe("BlueBubbles webhook monitor", () => {
|
|||
{ host: "localhost", forwarded: "for=203.0.113.10;proto=https;host=example.com" },
|
||||
];
|
||||
for (const headers of headerVariants) {
|
||||
const req = createMockRequest(
|
||||
"POST",
|
||||
"/bluebubbles-webhook",
|
||||
{
|
||||
type: "new-message",
|
||||
data: {
|
||||
text: "hello",
|
||||
handle: { address: "+15551234567" },
|
||||
isGroup: false,
|
||||
isFromMe: false,
|
||||
guid: "msg-1",
|
||||
},
|
||||
},
|
||||
const req = createWebhookRequestForTest({
|
||||
body: createNewMessagePayload(),
|
||||
headers,
|
||||
);
|
||||
(req as unknown as { socket: { remoteAddress: string } }).socket = {
|
||||
remoteAddress: "127.0.0.1",
|
||||
};
|
||||
const res = createMockResponse();
|
||||
const handled = await handleBlueBubblesWebhookRequest(req, res);
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(401);
|
||||
});
|
||||
await expectWebhookStatus(req, 401);
|
||||
}
|
||||
});
|
||||
|
||||
|
|
|
|||
|
|
@ -30,3 +30,11 @@ export async function postMultipartFormData(params: {
|
|||
params.timeoutMs,
|
||||
);
|
||||
}
|
||||
|
||||
export async function assertMultipartActionOk(response: Response, action: string): Promise<void> {
|
||||
if (response.ok) {
|
||||
return;
|
||||
}
|
||||
const errorText = await response.text().catch(() => "");
|
||||
throw new Error(`BlueBubbles ${action} failed (${response.status}): ${errorText || "unknown"}`);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ describe("reactions", () => {
|
|||
});
|
||||
|
||||
describe("sendBlueBubblesReaction", () => {
|
||||
async function expectRemovedReaction(emoji: string) {
|
||||
async function expectRemovedReaction(emoji: string, expectedReaction = "-love") {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
|
|
@ -37,7 +37,7 @@ describe("reactions", () => {
|
|||
});
|
||||
|
||||
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
|
||||
expect(body.reaction).toBe("-love");
|
||||
expect(body.reaction).toBe(expectedReaction);
|
||||
}
|
||||
|
||||
it("throws when chatGuid is empty", async () => {
|
||||
|
|
@ -327,45 +327,11 @@ describe("reactions", () => {
|
|||
|
||||
describe("reaction removal aliases", () => {
|
||||
it("handles emoji-based removal", async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
});
|
||||
|
||||
await sendBlueBubblesReaction({
|
||||
chatGuid: "chat-123",
|
||||
messageGuid: "msg-123",
|
||||
emoji: "👍",
|
||||
remove: true,
|
||||
opts: {
|
||||
serverUrl: "http://localhost:1234",
|
||||
password: "test",
|
||||
},
|
||||
});
|
||||
|
||||
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
|
||||
expect(body.reaction).toBe("-like");
|
||||
await expectRemovedReaction("👍", "-like");
|
||||
});
|
||||
|
||||
it("handles text alias removal", async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
text: () => Promise.resolve(""),
|
||||
});
|
||||
|
||||
await sendBlueBubblesReaction({
|
||||
chatGuid: "chat-123",
|
||||
messageGuid: "msg-123",
|
||||
emoji: "haha",
|
||||
remove: true,
|
||||
opts: {
|
||||
serverUrl: "http://localhost:1234",
|
||||
password: "test",
|
||||
},
|
||||
});
|
||||
|
||||
const body = JSON.parse(mockFetch.mock.calls[0][1].body);
|
||||
expect(body.reaction).toBe("-laugh");
|
||||
await expectRemovedReaction("haha", "-laugh");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -108,13 +108,21 @@ function resolveScheme(
|
|||
return cfg.gateway?.tls?.enabled === true ? "wss" : "ws";
|
||||
}
|
||||
|
||||
function isPrivateIPv4(address: string): boolean {
|
||||
function parseIPv4Octets(address: string): [number, number, number, number] | null {
|
||||
const parts = address.split(".");
|
||||
if (parts.length != 4) {
|
||||
return false;
|
||||
if (parts.length !== 4) {
|
||||
return null;
|
||||
}
|
||||
const octets = parts.map((part) => Number.parseInt(part, 10));
|
||||
if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) {
|
||||
return null;
|
||||
}
|
||||
return octets as [number, number, number, number];
|
||||
}
|
||||
|
||||
function isPrivateIPv4(address: string): boolean {
|
||||
const octets = parseIPv4Octets(address);
|
||||
if (!octets) {
|
||||
return false;
|
||||
}
|
||||
const [a, b] = octets;
|
||||
|
|
@ -131,12 +139,8 @@ function isPrivateIPv4(address: string): boolean {
|
|||
}
|
||||
|
||||
function isTailnetIPv4(address: string): boolean {
|
||||
const parts = address.split(".");
|
||||
if (parts.length !== 4) {
|
||||
return false;
|
||||
}
|
||||
const octets = parts.map((part) => Number.parseInt(part, 10));
|
||||
if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) {
|
||||
const octets = parseIPv4Octets(address);
|
||||
if (!octets) {
|
||||
return false;
|
||||
}
|
||||
const [a, b] = octets;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
import type { IncomingMessage } from "node:http";
|
||||
import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { createMockServerResponse } from "../../src/test-utils/mock-http-response.js";
|
||||
import { createTestPluginApi } from "../test-utils/plugin-api.js";
|
||||
import plugin from "./index.js";
|
||||
|
||||
describe("diffs plugin registration", () => {
|
||||
|
|
@ -9,33 +11,19 @@ describe("diffs plugin registration", () => {
|
|||
const registerHttpRoute = vi.fn();
|
||||
const on = vi.fn();
|
||||
|
||||
plugin.register?.({
|
||||
plugin.register?.(
|
||||
createTestPluginApi({
|
||||
id: "diffs",
|
||||
name: "Diffs",
|
||||
description: "Diffs",
|
||||
source: "test",
|
||||
config: {},
|
||||
runtime: {} as never,
|
||||
logger: {
|
||||
info() {},
|
||||
warn() {},
|
||||
error() {},
|
||||
},
|
||||
registerTool,
|
||||
registerHook() {},
|
||||
registerHttpRoute,
|
||||
registerChannel() {},
|
||||
registerGatewayMethod() {},
|
||||
registerCli() {},
|
||||
registerService() {},
|
||||
registerProvider() {},
|
||||
registerCommand() {},
|
||||
registerContextEngine() {},
|
||||
resolvePath(input: string) {
|
||||
return input;
|
||||
},
|
||||
on,
|
||||
});
|
||||
}),
|
||||
);
|
||||
|
||||
expect(registerTool).toHaveBeenCalledTimes(1);
|
||||
expect(registerHttpRoute).toHaveBeenCalledTimes(1);
|
||||
|
|
@ -55,17 +43,15 @@ describe("diffs plugin registration", () => {
|
|||
});
|
||||
|
||||
it("applies plugin-config defaults through registered tool and viewer handler", async () => {
|
||||
let registeredTool:
|
||||
| { execute?: (toolCallId: string, params: Record<string, unknown>) => Promise<unknown> }
|
||||
| undefined;
|
||||
let registeredHttpRouteHandler:
|
||||
| ((
|
||||
req: IncomingMessage,
|
||||
res: ReturnType<typeof createMockServerResponse>,
|
||||
) => Promise<boolean>)
|
||||
| undefined;
|
||||
type RegisteredTool = {
|
||||
execute?: (toolCallId: string, params: Record<string, unknown>) => Promise<unknown>;
|
||||
};
|
||||
type RegisteredHttpRouteParams = Parameters<OpenClawPluginApi["registerHttpRoute"]>[0];
|
||||
|
||||
plugin.register?.({
|
||||
let registeredTool: RegisteredTool | undefined;
|
||||
let registeredHttpRouteHandler: RegisteredHttpRouteParams["handler"] | undefined;
|
||||
|
||||
const api = createTestPluginApi({
|
||||
id: "diffs",
|
||||
name: "Diffs",
|
||||
description: "Diffs",
|
||||
|
|
@ -88,31 +74,16 @@ describe("diffs plugin registration", () => {
|
|||
},
|
||||
},
|
||||
runtime: {} as never,
|
||||
logger: {
|
||||
info() {},
|
||||
warn() {},
|
||||
error() {},
|
||||
},
|
||||
registerTool(tool) {
|
||||
registerTool(tool: Parameters<OpenClawPluginApi["registerTool"]>[0]) {
|
||||
registeredTool = typeof tool === "function" ? undefined : tool;
|
||||
},
|
||||
registerHook() {},
|
||||
registerHttpRoute(params) {
|
||||
registeredHttpRouteHandler = params.handler as typeof registeredHttpRouteHandler;
|
||||
registerHttpRoute(params: RegisteredHttpRouteParams) {
|
||||
registeredHttpRouteHandler = params.handler;
|
||||
},
|
||||
registerChannel() {},
|
||||
registerGatewayMethod() {},
|
||||
registerCli() {},
|
||||
registerService() {},
|
||||
registerProvider() {},
|
||||
registerCommand() {},
|
||||
registerContextEngine() {},
|
||||
resolvePath(input: string) {
|
||||
return input;
|
||||
},
|
||||
on() {},
|
||||
});
|
||||
|
||||
plugin.register?.(api as unknown as OpenClawPluginApi);
|
||||
|
||||
const result = await registeredTool?.execute?.("tool-1", {
|
||||
before: "one\n",
|
||||
after: "two\n",
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@
|
|||
"build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"@pierre/diffs": "1.0.11",
|
||||
"@pierre/diffs": "1.1.0",
|
||||
"@sinclair/typebox": "0.34.48",
|
||||
"playwright-core": "1.58.2"
|
||||
},
|
||||
|
|
|
|||
|
|
@ -9,6 +9,19 @@ describe("createDiffsHttpHandler", () => {
|
|||
let store: DiffArtifactStore;
|
||||
let cleanupRootDir: () => Promise<void>;
|
||||
|
||||
async function handleLocalGet(url: string) {
|
||||
const handler = createDiffsHttpHandler({ store });
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handler(
|
||||
localReq({
|
||||
method: "GET",
|
||||
url,
|
||||
}),
|
||||
res,
|
||||
);
|
||||
return { handled, res };
|
||||
}
|
||||
|
||||
beforeEach(async () => {
|
||||
({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-http-"));
|
||||
});
|
||||
|
|
@ -19,16 +32,7 @@ describe("createDiffsHttpHandler", () => {
|
|||
|
||||
it("serves a stored diff document", async () => {
|
||||
const artifact = await createViewerArtifact(store);
|
||||
|
||||
const handler = createDiffsHttpHandler({ store });
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handler(
|
||||
localReq({
|
||||
method: "GET",
|
||||
url: artifact.viewerPath,
|
||||
}),
|
||||
res,
|
||||
);
|
||||
const { handled, res } = await handleLocalGet(artifact.viewerPath);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
|
|
@ -38,15 +42,8 @@ describe("createDiffsHttpHandler", () => {
|
|||
|
||||
it("rejects invalid tokens", async () => {
|
||||
const artifact = await createViewerArtifact(store);
|
||||
|
||||
const handler = createDiffsHttpHandler({ store });
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handler(
|
||||
localReq({
|
||||
method: "GET",
|
||||
url: artifact.viewerPath.replace(artifact.token, "bad-token"),
|
||||
}),
|
||||
res,
|
||||
const { handled, res } = await handleLocalGet(
|
||||
artifact.viewerPath.replace(artifact.token, "bad-token"),
|
||||
);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
|
|
|
|||
|
|
@ -1,5 +1,12 @@
|
|||
import type { FileContents, FileDiffMetadata, SupportedLanguages } from "@pierre/diffs";
|
||||
import { parsePatchFiles } from "@pierre/diffs";
|
||||
import fs from "node:fs/promises";
|
||||
import { createRequire } from "node:module";
|
||||
import type {
|
||||
FileContents,
|
||||
FileDiffMetadata,
|
||||
SupportedLanguages,
|
||||
ThemeRegistrationResolved,
|
||||
} from "@pierre/diffs";
|
||||
import { RegisteredCustomThemes, parsePatchFiles } from "@pierre/diffs";
|
||||
import { preloadFileDiff, preloadMultiFileDiff } from "@pierre/diffs/ssr";
|
||||
import type {
|
||||
DiffInput,
|
||||
|
|
@ -13,6 +20,45 @@ import { VIEWER_LOADER_PATH } from "./viewer-assets.js";
|
|||
const DEFAULT_FILE_NAME = "diff.txt";
|
||||
const MAX_PATCH_FILE_COUNT = 128;
|
||||
const MAX_PATCH_TOTAL_LINES = 120_000;
|
||||
const diffsRequire = createRequire(import.meta.resolve("@pierre/diffs"));
|
||||
|
||||
let pierreThemesPatched = false;
|
||||
|
||||
function createThemeLoader(
|
||||
themeName: "pierre-dark" | "pierre-light",
|
||||
themePath: string,
|
||||
): () => Promise<ThemeRegistrationResolved> {
|
||||
let cachedTheme: ThemeRegistrationResolved | undefined;
|
||||
return async () => {
|
||||
if (cachedTheme) {
|
||||
return cachedTheme;
|
||||
}
|
||||
const raw = await fs.readFile(themePath, "utf8");
|
||||
const parsed = JSON.parse(raw) as Record<string, unknown>;
|
||||
cachedTheme = {
|
||||
...parsed,
|
||||
name: themeName,
|
||||
} as ThemeRegistrationResolved;
|
||||
return cachedTheme;
|
||||
};
|
||||
}
|
||||
|
||||
function patchPierreThemeLoadersForNode24(): void {
|
||||
if (pierreThemesPatched) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const darkThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-dark.json");
|
||||
const lightThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-light.json");
|
||||
RegisteredCustomThemes.set("pierre-dark", createThemeLoader("pierre-dark", darkThemePath));
|
||||
RegisteredCustomThemes.set("pierre-light", createThemeLoader("pierre-light", lightThemePath));
|
||||
pierreThemesPatched = true;
|
||||
} catch {
|
||||
// Keep upstream loaders if theme files cannot be resolved.
|
||||
}
|
||||
}
|
||||
|
||||
patchPierreThemeLoadersForNode24();
|
||||
|
||||
function escapeCssString(value: string): string {
|
||||
return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"');
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ import fs from "node:fs/promises";
|
|||
import path from "node:path";
|
||||
import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { createTestPluginApi } from "../../test-utils/plugin-api.js";
|
||||
import type { DiffScreenshotter } from "./browser.js";
|
||||
import { DEFAULT_DIFFS_TOOL_DEFAULTS } from "./config.js";
|
||||
import { DiffArtifactStore } from "./store.js";
|
||||
|
|
@ -135,9 +136,7 @@ describe("diffs tool", () => {
|
|||
mode: "file",
|
||||
});
|
||||
|
||||
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
|
||||
expect((result?.details as Record<string, unknown>).mode).toBe("file");
|
||||
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
|
||||
expectArtifactOnlyFileResult(screenshotter, result);
|
||||
});
|
||||
|
||||
it("honors ttlSeconds for artifact-only file output", async () => {
|
||||
|
|
@ -227,9 +226,7 @@ describe("diffs tool", () => {
|
|||
after: "two\n",
|
||||
});
|
||||
|
||||
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
|
||||
expect((result?.details as Record<string, unknown>).mode).toBe("file");
|
||||
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
|
||||
expectArtifactOnlyFileResult(screenshotter, result);
|
||||
});
|
||||
|
||||
it("falls back to view output when both mode cannot render an image", async () => {
|
||||
|
|
@ -387,7 +384,7 @@ describe("diffs tool", () => {
|
|||
});
|
||||
|
||||
function createApi(): OpenClawPluginApi {
|
||||
return {
|
||||
return createTestPluginApi({
|
||||
id: "diffs",
|
||||
name: "Diffs",
|
||||
description: "Diffs",
|
||||
|
|
@ -399,26 +396,7 @@ function createApi(): OpenClawPluginApi {
|
|||
},
|
||||
},
|
||||
runtime: {} as OpenClawPluginApi["runtime"],
|
||||
logger: {
|
||||
info() {},
|
||||
warn() {},
|
||||
error() {},
|
||||
},
|
||||
registerTool() {},
|
||||
registerHook() {},
|
||||
registerHttpRoute() {},
|
||||
registerChannel() {},
|
||||
registerGatewayMethod() {},
|
||||
registerCli() {},
|
||||
registerService() {},
|
||||
registerProvider() {},
|
||||
registerCommand() {},
|
||||
registerContextEngine() {},
|
||||
resolvePath(input: string) {
|
||||
return input;
|
||||
},
|
||||
on() {},
|
||||
};
|
||||
}) as OpenClawPluginApi;
|
||||
}
|
||||
|
||||
function createToolWithScreenshotter(
|
||||
|
|
@ -434,6 +412,15 @@ function createToolWithScreenshotter(
|
|||
});
|
||||
}
|
||||
|
||||
function expectArtifactOnlyFileResult(
|
||||
screenshotter: DiffScreenshotter,
|
||||
result: { details?: unknown } | null | undefined,
|
||||
) {
|
||||
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
|
||||
expect((result?.details as Record<string, unknown>).mode).toBe("file");
|
||||
expect((result?.details as Record<string, unknown>).viewerUrl).toBeUndefined();
|
||||
}
|
||||
|
||||
function createPngScreenshotter(
|
||||
params: {
|
||||
assertHtml?: (html: string) => void;
|
||||
|
|
|
|||
|
|
@ -75,6 +75,27 @@ function getRequiredHandler(
|
|||
return handler;
|
||||
}
|
||||
|
||||
function resolveSubagentDeliveryTargetForTest(requesterOrigin: {
|
||||
channel: string;
|
||||
accountId: string;
|
||||
to: string;
|
||||
threadId?: string;
|
||||
}) {
|
||||
const handlers = registerHandlersForTest();
|
||||
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
|
||||
return handler(
|
||||
{
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
requesterSessionKey: "agent:main:main",
|
||||
requesterOrigin,
|
||||
childRunId: "run-1",
|
||||
spawnMode: "session",
|
||||
expectsCompletionMessage: true,
|
||||
},
|
||||
{},
|
||||
);
|
||||
}
|
||||
|
||||
function createSpawnEvent(overrides?: {
|
||||
childSessionKey?: string;
|
||||
agentId?: string;
|
||||
|
|
@ -324,25 +345,12 @@ describe("discord subagent hook handlers", () => {
|
|||
hookMocks.listThreadBindingsBySessionKey.mockReturnValueOnce([
|
||||
{ accountId: "work", threadId: "777" },
|
||||
]);
|
||||
const handlers = registerHandlersForTest();
|
||||
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
|
||||
|
||||
const result = handler(
|
||||
{
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
requesterSessionKey: "agent:main:main",
|
||||
requesterOrigin: {
|
||||
const result = resolveSubagentDeliveryTargetForTest({
|
||||
channel: "discord",
|
||||
accountId: "work",
|
||||
to: "channel:123",
|
||||
threadId: "777",
|
||||
},
|
||||
childRunId: "run-1",
|
||||
spawnMode: "session",
|
||||
expectsCompletionMessage: true,
|
||||
},
|
||||
{},
|
||||
);
|
||||
});
|
||||
|
||||
expect(hookMocks.listThreadBindingsBySessionKey).toHaveBeenCalledWith({
|
||||
targetSessionKey: "agent:main:subagent:child",
|
||||
|
|
@ -364,24 +372,11 @@ describe("discord subagent hook handlers", () => {
|
|||
{ accountId: "work", threadId: "777" },
|
||||
{ accountId: "work", threadId: "888" },
|
||||
]);
|
||||
const handlers = registerHandlersForTest();
|
||||
const handler = getRequiredHandler(handlers, "subagent_delivery_target");
|
||||
|
||||
const result = handler(
|
||||
{
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
requesterSessionKey: "agent:main:main",
|
||||
requesterOrigin: {
|
||||
const result = resolveSubagentDeliveryTargetForTest({
|
||||
channel: "discord",
|
||||
accountId: "work",
|
||||
to: "channel:123",
|
||||
},
|
||||
childRunId: "run-1",
|
||||
spawnMode: "session",
|
||||
expectsCompletionMessage: true,
|
||||
},
|
||||
{},
|
||||
);
|
||||
});
|
||||
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
|
|
|
|||
|
|
@ -9,6 +9,23 @@ import type { FeishuConfig } from "./types.js";
|
|||
|
||||
const asConfig = (value: Partial<FeishuConfig>) => value as FeishuConfig;
|
||||
|
||||
function makeDefaultAndRouterAccounts() {
|
||||
return {
|
||||
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
|
||||
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
|
||||
};
|
||||
}
|
||||
|
||||
function expectExplicitDefaultAccountSelection(
|
||||
account: ReturnType<typeof resolveFeishuAccount>,
|
||||
appId: string,
|
||||
) {
|
||||
expect(account.accountId).toBe("router-d");
|
||||
expect(account.selectionSource).toBe("explicit-default");
|
||||
expect(account.configured).toBe(true);
|
||||
expect(account.appId).toBe(appId);
|
||||
}
|
||||
|
||||
function withEnvVar(key: string, value: string | undefined, run: () => void) {
|
||||
const prev = process.env[key];
|
||||
if (value === undefined) {
|
||||
|
|
@ -44,10 +61,7 @@ describe("resolveDefaultFeishuAccountId", () => {
|
|||
channels: {
|
||||
feishu: {
|
||||
defaultAccount: "router-d",
|
||||
accounts: {
|
||||
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
|
||||
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
|
||||
},
|
||||
accounts: makeDefaultAndRouterAccounts(),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
|
@ -278,10 +292,7 @@ describe("resolveFeishuAccount", () => {
|
|||
};
|
||||
|
||||
const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined });
|
||||
expect(account.accountId).toBe("router-d");
|
||||
expect(account.selectionSource).toBe("explicit-default");
|
||||
expect(account.configured).toBe(true);
|
||||
expect(account.appId).toBe("top_level_app");
|
||||
expectExplicitDefaultAccountSelection(account, "top_level_app");
|
||||
});
|
||||
|
||||
it("uses configured default account when accountId is omitted", () => {
|
||||
|
|
@ -298,10 +309,7 @@ describe("resolveFeishuAccount", () => {
|
|||
};
|
||||
|
||||
const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined });
|
||||
expect(account.accountId).toBe("router-d");
|
||||
expect(account.selectionSource).toBe("explicit-default");
|
||||
expect(account.configured).toBe(true);
|
||||
expect(account.appId).toBe("cli_router");
|
||||
expectExplicitDefaultAccountSelection(account, "cli_router");
|
||||
});
|
||||
|
||||
it("keeps explicit accountId selection", () => {
|
||||
|
|
@ -309,10 +317,7 @@ describe("resolveFeishuAccount", () => {
|
|||
channels: {
|
||||
feishu: {
|
||||
defaultAccount: "router-d",
|
||||
accounts: {
|
||||
default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret
|
||||
"router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret
|
||||
},
|
||||
accounts: makeDefaultAndRouterAccounts(),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ import {
|
|||
} from "openclaw/plugin-sdk/feishu";
|
||||
import { resolveFeishuAccount } from "./accounts.js";
|
||||
import { createFeishuClient } from "./client.js";
|
||||
import { tryRecordMessage, tryRecordMessagePersistent } from "./dedup.js";
|
||||
import { finalizeFeishuMessageProcessing, tryRecordMessagePersistent } from "./dedup.js";
|
||||
import { maybeCreateDynamicAgent } from "./dynamic-agent.js";
|
||||
import { normalizeFeishuExternalKey } from "./external-keys.js";
|
||||
import { downloadMessageResourceFeishu } from "./media.js";
|
||||
|
|
@ -867,8 +867,18 @@ export async function handleFeishuMessage(params: {
|
|||
runtime?: RuntimeEnv;
|
||||
chatHistories?: Map<string, HistoryEntry[]>;
|
||||
accountId?: string;
|
||||
processingClaimHeld?: boolean;
|
||||
}): Promise<void> {
|
||||
const { cfg, event, botOpenId, botName, runtime, chatHistories, accountId } = params;
|
||||
const {
|
||||
cfg,
|
||||
event,
|
||||
botOpenId,
|
||||
botName,
|
||||
runtime,
|
||||
chatHistories,
|
||||
accountId,
|
||||
processingClaimHeld = false,
|
||||
} = params;
|
||||
|
||||
// Resolve account with merged config
|
||||
const account = resolveFeishuAccount({ cfg, accountId });
|
||||
|
|
@ -877,16 +887,15 @@ export async function handleFeishuMessage(params: {
|
|||
const log = runtime?.log ?? console.log;
|
||||
const error = runtime?.error ?? console.error;
|
||||
|
||||
// Dedup: synchronous memory guard prevents concurrent duplicate dispatch
|
||||
// before the async persistent check completes.
|
||||
const messageId = event.message.message_id;
|
||||
const memoryDedupeKey = `${account.accountId}:${messageId}`;
|
||||
if (!tryRecordMessage(memoryDedupeKey)) {
|
||||
log(`feishu: skipping duplicate message ${messageId} (memory dedup)`);
|
||||
return;
|
||||
}
|
||||
// Persistent dedup survives restarts and reconnects.
|
||||
if (!(await tryRecordMessagePersistent(messageId, account.accountId, log))) {
|
||||
if (
|
||||
!(await finalizeFeishuMessageProcessing({
|
||||
messageId,
|
||||
namespace: account.accountId,
|
||||
log,
|
||||
claimHeld: processingClaimHeld,
|
||||
}))
|
||||
) {
|
||||
log(`feishu: skipping duplicate message ${messageId}`);
|
||||
return;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,16 @@
|
|||
import { describe, expect, it } from "vitest";
|
||||
import { FeishuConfigSchema, FeishuGroupSchema } from "./config-schema.js";
|
||||
|
||||
function expectSchemaIssue(
|
||||
result: ReturnType<typeof FeishuConfigSchema.safeParse>,
|
||||
issuePath: string,
|
||||
) {
|
||||
expect(result.success).toBe(false);
|
||||
if (!result.success) {
|
||||
expect(result.error.issues.some((issue) => issue.path.join(".") === issuePath)).toBe(true);
|
||||
}
|
||||
}
|
||||
|
||||
describe("FeishuConfigSchema webhook validation", () => {
|
||||
it("applies top-level defaults", () => {
|
||||
const result = FeishuConfigSchema.parse({});
|
||||
|
|
@ -39,12 +49,7 @@ describe("FeishuConfigSchema webhook validation", () => {
|
|||
appSecret: "secret_top", // pragma: allowlist secret
|
||||
});
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
if (!result.success) {
|
||||
expect(
|
||||
result.error.issues.some((issue) => issue.path.join(".") === "verificationToken"),
|
||||
).toBe(true);
|
||||
}
|
||||
expectSchemaIssue(result, "verificationToken");
|
||||
});
|
||||
|
||||
it("rejects top-level webhook mode without encryptKey", () => {
|
||||
|
|
@ -55,10 +60,7 @@ describe("FeishuConfigSchema webhook validation", () => {
|
|||
appSecret: "secret_top", // pragma: allowlist secret
|
||||
});
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
if (!result.success) {
|
||||
expect(result.error.issues.some((issue) => issue.path.join(".") === "encryptKey")).toBe(true);
|
||||
}
|
||||
expectSchemaIssue(result, "encryptKey");
|
||||
});
|
||||
|
||||
it("accepts top-level webhook mode with verificationToken and encryptKey", () => {
|
||||
|
|
@ -84,14 +86,7 @@ describe("FeishuConfigSchema webhook validation", () => {
|
|||
},
|
||||
});
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
if (!result.success) {
|
||||
expect(
|
||||
result.error.issues.some(
|
||||
(issue) => issue.path.join(".") === "accounts.main.verificationToken",
|
||||
),
|
||||
).toBe(true);
|
||||
}
|
||||
expectSchemaIssue(result, "accounts.main.verificationToken");
|
||||
});
|
||||
|
||||
it("rejects account webhook mode without encryptKey", () => {
|
||||
|
|
@ -106,12 +101,7 @@ describe("FeishuConfigSchema webhook validation", () => {
|
|||
},
|
||||
});
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
if (!result.success) {
|
||||
expect(
|
||||
result.error.issues.some((issue) => issue.path.join(".") === "accounts.main.encryptKey"),
|
||||
).toBe(true);
|
||||
}
|
||||
expectSchemaIssue(result, "accounts.main.encryptKey");
|
||||
});
|
||||
|
||||
it("accepts account webhook mode inheriting top-level verificationToken and encryptKey", () => {
|
||||
|
|
|
|||
|
|
@ -10,9 +10,15 @@ import {
|
|||
const DEDUP_TTL_MS = 24 * 60 * 60 * 1000;
|
||||
const MEMORY_MAX_SIZE = 1_000;
|
||||
const FILE_MAX_ENTRIES = 10_000;
|
||||
const EVENT_DEDUP_TTL_MS = 5 * 60 * 1000;
|
||||
const EVENT_MEMORY_MAX_SIZE = 2_000;
|
||||
type PersistentDedupeData = Record<string, number>;
|
||||
|
||||
const memoryDedupe = createDedupeCache({ ttlMs: DEDUP_TTL_MS, maxSize: MEMORY_MAX_SIZE });
|
||||
const processingClaims = createDedupeCache({
|
||||
ttlMs: EVENT_DEDUP_TTL_MS,
|
||||
maxSize: EVENT_MEMORY_MAX_SIZE,
|
||||
});
|
||||
|
||||
function resolveStateDirFromEnv(env: NodeJS.ProcessEnv = process.env): string {
|
||||
const stateOverride = env.OPENCLAW_STATE_DIR?.trim() || env.CLAWDBOT_STATE_DIR?.trim();
|
||||
|
|
@ -37,6 +43,103 @@ const persistentDedupe = createPersistentDedupe({
|
|||
resolveFilePath: resolveNamespaceFilePath,
|
||||
});
|
||||
|
||||
function resolveEventDedupeKey(
|
||||
namespace: string,
|
||||
messageId: string | undefined | null,
|
||||
): string | null {
|
||||
const trimmed = messageId?.trim();
|
||||
if (!trimmed) {
|
||||
return null;
|
||||
}
|
||||
return `${namespace}:${trimmed}`;
|
||||
}
|
||||
|
||||
function normalizeMessageId(messageId: string | undefined | null): string | null {
|
||||
const trimmed = messageId?.trim();
|
||||
return trimmed ? trimmed : null;
|
||||
}
|
||||
|
||||
function resolveMemoryDedupeKey(
|
||||
namespace: string,
|
||||
messageId: string | undefined | null,
|
||||
): string | null {
|
||||
const trimmed = normalizeMessageId(messageId);
|
||||
if (!trimmed) {
|
||||
return null;
|
||||
}
|
||||
return `${namespace}:${trimmed}`;
|
||||
}
|
||||
|
||||
export function tryBeginFeishuMessageProcessing(
|
||||
messageId: string | undefined | null,
|
||||
namespace = "global",
|
||||
): boolean {
|
||||
return !processingClaims.check(resolveEventDedupeKey(namespace, messageId));
|
||||
}
|
||||
|
||||
export function releaseFeishuMessageProcessing(
|
||||
messageId: string | undefined | null,
|
||||
namespace = "global",
|
||||
): void {
|
||||
processingClaims.delete(resolveEventDedupeKey(namespace, messageId));
|
||||
}
|
||||
|
||||
export async function finalizeFeishuMessageProcessing(params: {
|
||||
messageId: string | undefined | null;
|
||||
namespace?: string;
|
||||
log?: (...args: unknown[]) => void;
|
||||
claimHeld?: boolean;
|
||||
}): Promise<boolean> {
|
||||
const { messageId, namespace = "global", log, claimHeld = false } = params;
|
||||
const normalizedMessageId = normalizeMessageId(messageId);
|
||||
const memoryKey = resolveMemoryDedupeKey(namespace, messageId);
|
||||
if (!memoryKey || !normalizedMessageId) {
|
||||
return false;
|
||||
}
|
||||
if (!claimHeld && !tryBeginFeishuMessageProcessing(normalizedMessageId, namespace)) {
|
||||
return false;
|
||||
}
|
||||
if (!tryRecordMessage(memoryKey)) {
|
||||
releaseFeishuMessageProcessing(normalizedMessageId, namespace);
|
||||
return false;
|
||||
}
|
||||
if (!(await tryRecordMessagePersistent(normalizedMessageId, namespace, log))) {
|
||||
releaseFeishuMessageProcessing(normalizedMessageId, namespace);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
export async function recordProcessedFeishuMessage(
|
||||
messageId: string | undefined | null,
|
||||
namespace = "global",
|
||||
log?: (...args: unknown[]) => void,
|
||||
): Promise<boolean> {
|
||||
const normalizedMessageId = normalizeMessageId(messageId);
|
||||
const memoryKey = resolveMemoryDedupeKey(namespace, messageId);
|
||||
if (!memoryKey || !normalizedMessageId) {
|
||||
return false;
|
||||
}
|
||||
tryRecordMessage(memoryKey);
|
||||
return await tryRecordMessagePersistent(normalizedMessageId, namespace, log);
|
||||
}
|
||||
|
||||
export async function hasProcessedFeishuMessage(
|
||||
messageId: string | undefined | null,
|
||||
namespace = "global",
|
||||
log?: (...args: unknown[]) => void,
|
||||
): Promise<boolean> {
|
||||
const normalizedMessageId = normalizeMessageId(messageId);
|
||||
const memoryKey = resolveMemoryDedupeKey(namespace, messageId);
|
||||
if (!memoryKey || !normalizedMessageId) {
|
||||
return false;
|
||||
}
|
||||
if (hasRecordedMessage(memoryKey)) {
|
||||
return true;
|
||||
}
|
||||
return hasRecordedMessagePersistent(normalizedMessageId, namespace, log);
|
||||
}
|
||||
|
||||
/**
|
||||
* Synchronous dedup — memory only.
|
||||
* Kept for backward compatibility; prefer {@link tryRecordMessagePersistent}.
|
||||
|
|
|
|||
|
|
@ -64,10 +64,7 @@ function expectMediaTimeoutClientConfigured(): void {
|
|||
);
|
||||
}
|
||||
|
||||
describe("sendMediaFeishu msg_type routing", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
function mockResolvedFeishuAccount() {
|
||||
resolveFeishuAccountMock.mockReturnValue({
|
||||
configured: true,
|
||||
accountId: "main",
|
||||
|
|
@ -76,6 +73,12 @@ describe("sendMediaFeishu msg_type routing", () => {
|
|||
appSecret: "app_secret",
|
||||
domain: "feishu",
|
||||
});
|
||||
}
|
||||
|
||||
describe("sendMediaFeishu msg_type routing", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
mockResolvedFeishuAccount();
|
||||
|
||||
normalizeFeishuTargetMock.mockReturnValue("ou_target");
|
||||
resolveReceiveIdTypeMock.mockReturnValue("open_id");
|
||||
|
|
@ -381,7 +384,7 @@ describe("sendMediaFeishu msg_type routing", () => {
|
|||
expect(messageResourceGetMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("encodes Chinese filenames for file uploads", async () => {
|
||||
it("preserves Chinese filenames for file uploads", async () => {
|
||||
await sendMediaFeishu({
|
||||
cfg: {} as any,
|
||||
to: "user:ou_target",
|
||||
|
|
@ -390,8 +393,7 @@ describe("sendMediaFeishu msg_type routing", () => {
|
|||
});
|
||||
|
||||
const createCall = fileCreateMock.mock.calls[0][0];
|
||||
expect(createCall.data.file_name).not.toBe("测试文档.pdf");
|
||||
expect(createCall.data.file_name).toBe(encodeURIComponent("测试文档") + ".pdf");
|
||||
expect(createCall.data.file_name).toBe("测试文档.pdf");
|
||||
});
|
||||
|
||||
it("preserves ASCII filenames unchanged for file uploads", async () => {
|
||||
|
|
@ -406,7 +408,7 @@ describe("sendMediaFeishu msg_type routing", () => {
|
|||
expect(createCall.data.file_name).toBe("report-2026.pdf");
|
||||
});
|
||||
|
||||
it("encodes special characters (em-dash, full-width brackets) in filenames", async () => {
|
||||
it("preserves special Unicode characters (em-dash, full-width brackets) in filenames", async () => {
|
||||
await sendMediaFeishu({
|
||||
cfg: {} as any,
|
||||
to: "user:ou_target",
|
||||
|
|
@ -415,9 +417,7 @@ describe("sendMediaFeishu msg_type routing", () => {
|
|||
});
|
||||
|
||||
const createCall = fileCreateMock.mock.calls[0][0];
|
||||
expect(createCall.data.file_name).toMatch(/\.md$/);
|
||||
expect(createCall.data.file_name).not.toContain("—");
|
||||
expect(createCall.data.file_name).not.toContain("(");
|
||||
expect(createCall.data.file_name).toBe("报告—详情(2026).md");
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -427,71 +427,48 @@ describe("sanitizeFileNameForUpload", () => {
|
|||
expect(sanitizeFileNameForUpload("my-file_v2.txt")).toBe("my-file_v2.txt");
|
||||
});
|
||||
|
||||
it("encodes Chinese characters in basename, preserves extension", () => {
|
||||
const result = sanitizeFileNameForUpload("测试文件.md");
|
||||
expect(result).toBe(encodeURIComponent("测试文件") + ".md");
|
||||
expect(result).toMatch(/\.md$/);
|
||||
it("preserves Chinese characters", () => {
|
||||
expect(sanitizeFileNameForUpload("测试文件.md")).toBe("测试文件.md");
|
||||
expect(sanitizeFileNameForUpload("武汉15座山登山信息汇总.csv")).toBe(
|
||||
"武汉15座山登山信息汇总.csv",
|
||||
);
|
||||
});
|
||||
|
||||
it("encodes em-dash and full-width brackets", () => {
|
||||
const result = sanitizeFileNameForUpload("文件—说明(v2).pdf");
|
||||
expect(result).toMatch(/\.pdf$/);
|
||||
expect(result).not.toContain("—");
|
||||
expect(result).not.toContain("(");
|
||||
expect(result).not.toContain(")");
|
||||
it("preserves em-dash and full-width brackets", () => {
|
||||
expect(sanitizeFileNameForUpload("文件—说明(v2).pdf")).toBe("文件—说明(v2).pdf");
|
||||
});
|
||||
|
||||
it("encodes single quotes and parentheses per RFC 5987", () => {
|
||||
const result = sanitizeFileNameForUpload("文件'(test).txt");
|
||||
expect(result).toContain("%27");
|
||||
expect(result).toContain("%28");
|
||||
expect(result).toContain("%29");
|
||||
expect(result).toMatch(/\.txt$/);
|
||||
it("preserves single quotes and parentheses", () => {
|
||||
expect(sanitizeFileNameForUpload("文件'(test).txt")).toBe("文件'(test).txt");
|
||||
});
|
||||
|
||||
it("handles filenames without extension", () => {
|
||||
const result = sanitizeFileNameForUpload("测试文件");
|
||||
expect(result).toBe(encodeURIComponent("测试文件"));
|
||||
it("preserves filenames without extension", () => {
|
||||
expect(sanitizeFileNameForUpload("测试文件")).toBe("测试文件");
|
||||
});
|
||||
|
||||
it("handles mixed ASCII and non-ASCII", () => {
|
||||
const result = sanitizeFileNameForUpload("Report_报告_2026.xlsx");
|
||||
expect(result).toMatch(/\.xlsx$/);
|
||||
expect(result).not.toContain("报告");
|
||||
it("preserves mixed ASCII and non-ASCII", () => {
|
||||
expect(sanitizeFileNameForUpload("Report_报告_2026.xlsx")).toBe("Report_报告_2026.xlsx");
|
||||
});
|
||||
|
||||
it("encodes non-ASCII extensions", () => {
|
||||
const result = sanitizeFileNameForUpload("报告.文档");
|
||||
expect(result).toContain("%E6%96%87%E6%A1%A3");
|
||||
expect(result).not.toContain("文档");
|
||||
it("preserves emoji filenames", () => {
|
||||
expect(sanitizeFileNameForUpload("report_😀.txt")).toBe("report_😀.txt");
|
||||
});
|
||||
|
||||
it("encodes emoji filenames", () => {
|
||||
const result = sanitizeFileNameForUpload("report_😀.txt");
|
||||
expect(result).toContain("%F0%9F%98%80");
|
||||
expect(result).toMatch(/\.txt$/);
|
||||
it("strips control characters", () => {
|
||||
expect(sanitizeFileNameForUpload("bad\x00file.txt")).toBe("bad_file.txt");
|
||||
expect(sanitizeFileNameForUpload("inject\r\nheader.txt")).toBe("inject__header.txt");
|
||||
});
|
||||
|
||||
it("encodes mixed ASCII and non-ASCII extensions", () => {
|
||||
const result = sanitizeFileNameForUpload("notes_总结.v测试");
|
||||
expect(result).toContain("notes_");
|
||||
expect(result).toContain("%E6%B5%8B%E8%AF%95");
|
||||
expect(result).not.toContain("测试");
|
||||
it("strips quotes and backslashes to prevent header injection", () => {
|
||||
expect(sanitizeFileNameForUpload('file"name.txt')).toBe("file_name.txt");
|
||||
expect(sanitizeFileNameForUpload("file\\name.txt")).toBe("file_name.txt");
|
||||
});
|
||||
});
|
||||
|
||||
describe("downloadMessageResourceFeishu", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
resolveFeishuAccountMock.mockReturnValue({
|
||||
configured: true,
|
||||
accountId: "main",
|
||||
config: {},
|
||||
appId: "app_id",
|
||||
appSecret: "app_secret",
|
||||
domain: "feishu",
|
||||
});
|
||||
mockResolvedFeishuAccount();
|
||||
|
||||
createFeishuClientMock.mockReturnValue({
|
||||
im: {
|
||||
|
|
|
|||
|
|
@ -226,21 +226,17 @@ export async function uploadImageFeishu(params: {
|
|||
}
|
||||
|
||||
/**
|
||||
* Encode a filename for safe use in Feishu multipart/form-data uploads.
|
||||
* Non-ASCII characters (Chinese, em-dash, full-width brackets, etc.) cause
|
||||
* the upload to silently fail when passed raw through the SDK's form-data
|
||||
* serialization. RFC 5987 percent-encoding keeps headers 7-bit clean while
|
||||
* Feishu's server decodes and preserves the original display name.
|
||||
* Sanitize a filename for safe use in Feishu multipart/form-data uploads.
|
||||
* Strips control characters and multipart-injection vectors (CWE-93) while
|
||||
* preserving the original UTF-8 display name (Chinese, emoji, etc.).
|
||||
*
|
||||
* Previous versions percent-encoded non-ASCII characters, but the Feishu
|
||||
* `im.file.create` API uses `file_name` as a literal display name — it does
|
||||
* NOT decode percent-encoding — so encoded filenames appeared as garbled text
|
||||
* in chat (regression in v2026.3.2).
|
||||
*/
|
||||
export function sanitizeFileNameForUpload(fileName: string): string {
|
||||
const ASCII_ONLY = /^[\x20-\x7E]+$/;
|
||||
if (ASCII_ONLY.test(fileName)) {
|
||||
return fileName;
|
||||
}
|
||||
return encodeURIComponent(fileName)
|
||||
.replace(/'/g, "%27")
|
||||
.replace(/\(/g, "%28")
|
||||
.replace(/\)/g, "%29");
|
||||
return fileName.replace(/[\x00-\x1F\x7F\r\n"\\]/g, "_");
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -12,10 +12,10 @@ import {
|
|||
import { handleFeishuCardAction, type FeishuCardActionEvent } from "./card-action.js";
|
||||
import { createEventDispatcher } from "./client.js";
|
||||
import {
|
||||
hasRecordedMessage,
|
||||
hasRecordedMessagePersistent,
|
||||
tryRecordMessage,
|
||||
tryRecordMessagePersistent,
|
||||
hasProcessedFeishuMessage,
|
||||
recordProcessedFeishuMessage,
|
||||
releaseFeishuMessageProcessing,
|
||||
tryBeginFeishuMessageProcessing,
|
||||
warmupDedupFromDisk,
|
||||
} from "./dedup.js";
|
||||
import { isMentionForwardRequest } from "./mention.js";
|
||||
|
|
@ -264,6 +264,7 @@ function registerEventHandlers(
|
|||
runtime,
|
||||
chatHistories,
|
||||
accountId,
|
||||
processingClaimHeld: true,
|
||||
});
|
||||
await enqueue(chatId, task);
|
||||
};
|
||||
|
|
@ -291,10 +292,8 @@ function registerEventHandlers(
|
|||
return;
|
||||
}
|
||||
for (const messageId of suppressedIds) {
|
||||
// Keep in-memory dedupe in sync with handleFeishuMessage's keying.
|
||||
tryRecordMessage(`${accountId}:${messageId}`);
|
||||
try {
|
||||
await tryRecordMessagePersistent(messageId, accountId, log);
|
||||
await recordProcessedFeishuMessage(messageId, accountId, log);
|
||||
} catch (err) {
|
||||
error(
|
||||
`feishu[${accountId}]: failed to record merged dedupe id ${messageId}: ${String(err)}`,
|
||||
|
|
@ -303,15 +302,7 @@ function registerEventHandlers(
|
|||
}
|
||||
};
|
||||
const isMessageAlreadyProcessed = async (entry: FeishuMessageEvent): Promise<boolean> => {
|
||||
const messageId = entry.message.message_id?.trim();
|
||||
if (!messageId) {
|
||||
return false;
|
||||
}
|
||||
const memoryKey = `${accountId}:${messageId}`;
|
||||
if (hasRecordedMessage(memoryKey)) {
|
||||
return true;
|
||||
}
|
||||
return hasRecordedMessagePersistent(messageId, accountId, log);
|
||||
return await hasProcessedFeishuMessage(entry.message.message_id, accountId, log);
|
||||
};
|
||||
const inboundDebouncer = core.channel.debounce.createInboundDebouncer<FeishuMessageEvent>({
|
||||
debounceMs: inboundDebounceMs,
|
||||
|
|
@ -384,19 +375,28 @@ function registerEventHandlers(
|
|||
},
|
||||
});
|
||||
},
|
||||
onError: (err) => {
|
||||
onError: (err, entries) => {
|
||||
for (const entry of entries) {
|
||||
releaseFeishuMessageProcessing(entry.message.message_id, accountId);
|
||||
}
|
||||
error(`feishu[${accountId}]: inbound debounce flush failed: ${String(err)}`);
|
||||
},
|
||||
});
|
||||
|
||||
eventDispatcher.register({
|
||||
"im.message.receive_v1": async (data) => {
|
||||
const processMessage = async () => {
|
||||
const event = data as unknown as FeishuMessageEvent;
|
||||
const messageId = event.message?.message_id?.trim();
|
||||
if (!tryBeginFeishuMessageProcessing(messageId, accountId)) {
|
||||
log(`feishu[${accountId}]: dropping duplicate event for message ${messageId}`);
|
||||
return;
|
||||
}
|
||||
const processMessage = async () => {
|
||||
await inboundDebouncer.enqueue(event);
|
||||
};
|
||||
if (fireAndForget) {
|
||||
void processMessage().catch((err) => {
|
||||
releaseFeishuMessageProcessing(messageId, accountId);
|
||||
error(`feishu[${accountId}]: error handling message: ${String(err)}`);
|
||||
});
|
||||
return;
|
||||
|
|
@ -404,6 +404,7 @@ function registerEventHandlers(
|
|||
try {
|
||||
await processMessage();
|
||||
} catch (err) {
|
||||
releaseFeishuMessageProcessing(messageId, accountId);
|
||||
error(`feishu[${accountId}]: error handling message: ${String(err)}`);
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -78,6 +78,25 @@ async function resolveReactionWithLookup(params: {
|
|||
});
|
||||
}
|
||||
|
||||
async function resolveNonBotReaction(params?: { cfg?: ClawdbotConfig; uuid?: () => string }) {
|
||||
return await resolveReactionSyntheticEvent({
|
||||
cfg: params?.cfg ?? cfg,
|
||||
accountId: "default",
|
||||
event: makeReactionEvent(),
|
||||
botOpenId: "ou_bot",
|
||||
fetchMessage: async () => ({
|
||||
messageId: "om_msg1",
|
||||
chatId: "oc_group",
|
||||
chatType: "group",
|
||||
senderOpenId: "ou_other",
|
||||
senderType: "user",
|
||||
content: "hello",
|
||||
contentType: "text",
|
||||
}),
|
||||
...(params?.uuid ? { uuid: params.uuid } : {}),
|
||||
});
|
||||
}
|
||||
|
||||
type FeishuMention = NonNullable<FeishuMessageEvent["message"]["mentions"]>[number];
|
||||
|
||||
function buildDebounceConfig(): ClawdbotConfig {
|
||||
|
|
@ -179,11 +198,23 @@ function getFirstDispatchedEvent(): FeishuMessageEvent {
|
|||
return firstParams.event;
|
||||
}
|
||||
|
||||
function expectSingleDispatchedEvent(): FeishuMessageEvent {
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
return getFirstDispatchedEvent();
|
||||
}
|
||||
|
||||
function expectParsedFirstDispatchedEvent(botOpenId = "ou_bot") {
|
||||
const dispatched = expectSingleDispatchedEvent();
|
||||
return {
|
||||
dispatched,
|
||||
parsed: parseFeishuMessageEvent(dispatched, botOpenId),
|
||||
};
|
||||
}
|
||||
|
||||
function setDedupPassThroughMocks(): void {
|
||||
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false);
|
||||
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false);
|
||||
vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false);
|
||||
}
|
||||
|
||||
function createMention(params: { openId: string; name: string; key?: string }): FeishuMention {
|
||||
|
|
@ -203,6 +234,12 @@ async function enqueueDebouncedMessage(
|
|||
await Promise.resolve();
|
||||
}
|
||||
|
||||
function setStaleRetryMocks(messageId = "om_old") {
|
||||
vi.spyOn(dedup, "hasProcessedFeishuMessage").mockImplementation(
|
||||
async (currentMessageId) => currentMessageId === messageId,
|
||||
);
|
||||
}
|
||||
|
||||
describe("resolveReactionSyntheticEvent", () => {
|
||||
it("filters app self-reactions", async () => {
|
||||
const event = makeReactionEvent({ operator_type: "app" });
|
||||
|
|
@ -262,28 +299,12 @@ describe("resolveReactionSyntheticEvent", () => {
|
|||
});
|
||||
|
||||
it("filters reactions on non-bot messages", async () => {
|
||||
const event = makeReactionEvent();
|
||||
const result = await resolveReactionSyntheticEvent({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
event,
|
||||
botOpenId: "ou_bot",
|
||||
fetchMessage: async () => ({
|
||||
messageId: "om_msg1",
|
||||
chatId: "oc_group",
|
||||
chatType: "group",
|
||||
senderOpenId: "ou_other",
|
||||
senderType: "user",
|
||||
content: "hello",
|
||||
contentType: "text",
|
||||
}),
|
||||
});
|
||||
const result = await resolveNonBotReaction();
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it("allows non-bot reactions when reactionNotifications is all", async () => {
|
||||
const event = makeReactionEvent();
|
||||
const result = await resolveReactionSyntheticEvent({
|
||||
const result = await resolveNonBotReaction({
|
||||
cfg: {
|
||||
channels: {
|
||||
feishu: {
|
||||
|
|
@ -291,18 +312,6 @@ describe("resolveReactionSyntheticEvent", () => {
|
|||
},
|
||||
},
|
||||
} as ClawdbotConfig,
|
||||
accountId: "default",
|
||||
event,
|
||||
botOpenId: "ou_bot",
|
||||
fetchMessage: async () => ({
|
||||
messageId: "om_msg1",
|
||||
chatId: "oc_group",
|
||||
chatType: "group",
|
||||
senderOpenId: "ou_other",
|
||||
senderType: "user",
|
||||
content: "hello",
|
||||
contentType: "text",
|
||||
}),
|
||||
uuid: () => "fixed-uuid",
|
||||
});
|
||||
expect(result?.message.message_id).toBe("om_msg1:reaction:THUMBSUP:fixed-uuid");
|
||||
|
|
@ -457,18 +466,16 @@ describe("Feishu inbound debounce regressions", () => {
|
|||
);
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
const dispatched = getFirstDispatchedEvent();
|
||||
const dispatched = expectSingleDispatchedEvent();
|
||||
const mergedMentions = dispatched.message.mentions ?? [];
|
||||
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_bot")).toBe(true);
|
||||
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false);
|
||||
});
|
||||
|
||||
it("passes prefetched botName through to handleFeishuMessage", async () => {
|
||||
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false);
|
||||
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false);
|
||||
vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false);
|
||||
const onMessage = await setupDebounceMonitor({ botName: "OpenClaw Bot" });
|
||||
|
||||
await onMessage(
|
||||
|
|
@ -517,9 +524,7 @@ describe("Feishu inbound debounce regressions", () => {
|
|||
);
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
const dispatched = getFirstDispatchedEvent();
|
||||
const parsed = parseFeishuMessageEvent(dispatched, "ou_bot");
|
||||
const { dispatched, parsed } = expectParsedFirstDispatchedEvent();
|
||||
expect(parsed.mentionedBot).toBe(true);
|
||||
expect(parsed.mentionTargets).toBeUndefined();
|
||||
const mergedMentions = dispatched.message.mentions ?? [];
|
||||
|
|
@ -547,19 +552,14 @@ describe("Feishu inbound debounce regressions", () => {
|
|||
);
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
const dispatched = getFirstDispatchedEvent();
|
||||
const parsed = parseFeishuMessageEvent(dispatched, "ou_bot");
|
||||
const { parsed } = expectParsedFirstDispatchedEvent();
|
||||
expect(parsed.mentionedBot).toBe(true);
|
||||
});
|
||||
|
||||
it("excludes previously processed retries from combined debounce text", async () => {
|
||||
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old"));
|
||||
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
|
||||
async (messageId) => messageId === "om_old",
|
||||
);
|
||||
vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
|
||||
setStaleRetryMocks();
|
||||
const onMessage = await setupDebounceMonitor();
|
||||
|
||||
await onMessage(createTextEvent({ messageId: "om_old", text: "stale" }));
|
||||
|
|
@ -576,20 +576,16 @@ describe("Feishu inbound debounce regressions", () => {
|
|||
await Promise.resolve();
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
const dispatched = getFirstDispatchedEvent();
|
||||
const dispatched = expectSingleDispatchedEvent();
|
||||
expect(dispatched.message.message_id).toBe("om_new_2");
|
||||
const combined = JSON.parse(dispatched.message.content) as { text?: string };
|
||||
expect(combined.text).toBe("first\nsecond");
|
||||
});
|
||||
|
||||
it("uses latest fresh message id when debounce batch ends with stale retry", async () => {
|
||||
const recordSpy = vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old"));
|
||||
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation(
|
||||
async (messageId) => messageId === "om_old",
|
||||
);
|
||||
vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true);
|
||||
const recordSpy = vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true);
|
||||
setStaleRetryMocks();
|
||||
const onMessage = await setupDebounceMonitor();
|
||||
|
||||
await onMessage(createTextEvent({ messageId: "om_new", text: "fresh" }));
|
||||
|
|
@ -600,12 +596,58 @@ describe("Feishu inbound debounce regressions", () => {
|
|||
await Promise.resolve();
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
const dispatched = getFirstDispatchedEvent();
|
||||
const dispatched = expectSingleDispatchedEvent();
|
||||
expect(dispatched.message.message_id).toBe("om_new");
|
||||
const combined = JSON.parse(dispatched.message.content) as { text?: string };
|
||||
expect(combined.text).toBe("fresh");
|
||||
expect(recordSpy).toHaveBeenCalledWith("default:om_old");
|
||||
expect(recordSpy).not.toHaveBeenCalledWith("default:om_new");
|
||||
expect(recordSpy).toHaveBeenCalledWith("om_old", "default", expect.any(Function));
|
||||
expect(recordSpy).not.toHaveBeenCalledWith("om_new", "default", expect.any(Function));
|
||||
});
|
||||
|
||||
it("releases early event dedupe when debounced dispatch fails", async () => {
|
||||
setDedupPassThroughMocks();
|
||||
const enqueueMock = vi.fn();
|
||||
setFeishuRuntime(
|
||||
createPluginRuntimeMock({
|
||||
channel: {
|
||||
debounce: {
|
||||
createInboundDebouncer: <T>(params: {
|
||||
onError?: (err: unknown, items: T[]) => void;
|
||||
}) => ({
|
||||
enqueue: async (item: T) => {
|
||||
enqueueMock(item);
|
||||
params.onError?.(new Error("dispatch failed"), [item]);
|
||||
},
|
||||
flushKey: async () => {},
|
||||
}),
|
||||
resolveInboundDebounceMs,
|
||||
},
|
||||
text: {
|
||||
hasControlCommand,
|
||||
},
|
||||
},
|
||||
}),
|
||||
);
|
||||
const onMessage = await setupDebounceMonitor();
|
||||
const event = createTextEvent({ messageId: "om_retryable", text: "hello" });
|
||||
|
||||
await enqueueDebouncedMessage(onMessage, event);
|
||||
expect(enqueueMock).toHaveBeenCalledTimes(1);
|
||||
|
||||
await enqueueDebouncedMessage(onMessage, event);
|
||||
expect(enqueueMock).toHaveBeenCalledTimes(2);
|
||||
expect(handleFeishuMessageMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("drops duplicate inbound events before they re-enter the debounce pipeline", async () => {
|
||||
const onMessage = await setupDebounceMonitor();
|
||||
const event = createTextEvent({ messageId: "om_duplicate", text: "hello" });
|
||||
|
||||
await enqueueDebouncedMessage(onMessage, event);
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
await enqueueDebouncedMessage(onMessage, event);
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -3,33 +3,19 @@ import { afterEach, describe, expect, it, vi } from "vitest";
|
|||
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
|
||||
|
||||
const probeFeishuMock = vi.hoisted(() => vi.fn());
|
||||
const feishuClientMockModule = vi.hoisted(() => ({
|
||||
createFeishuWSClient: vi.fn(() => ({ start: vi.fn() })),
|
||||
createEventDispatcher: vi.fn(() => ({ register: vi.fn() })),
|
||||
}));
|
||||
const feishuRuntimeMockModule = vi.hoisted(() => ({
|
||||
getFeishuRuntime: () => ({
|
||||
channel: {
|
||||
debounce: {
|
||||
resolveInboundDebounceMs: () => 0,
|
||||
createInboundDebouncer: () => ({
|
||||
enqueue: async () => {},
|
||||
flushKey: async () => {},
|
||||
}),
|
||||
},
|
||||
text: {
|
||||
hasControlCommand: () => false,
|
||||
},
|
||||
},
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("./probe.js", () => ({
|
||||
probeFeishu: probeFeishuMock,
|
||||
}));
|
||||
|
||||
vi.mock("./client.js", () => feishuClientMockModule);
|
||||
vi.mock("./runtime.js", () => feishuRuntimeMockModule);
|
||||
vi.mock("./client.js", async () => {
|
||||
const { createFeishuClientMockModule } = await import("./monitor.test-mocks.js");
|
||||
return createFeishuClientMockModule();
|
||||
});
|
||||
vi.mock("./runtime.js", async () => {
|
||||
const { createFeishuRuntimeMockModule } = await import("./monitor.test-mocks.js");
|
||||
return createFeishuRuntimeMockModule();
|
||||
});
|
||||
|
||||
function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig {
|
||||
return {
|
||||
|
|
@ -52,6 +38,12 @@ function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig
|
|||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
async function waitForStartedAccount(started: string[], accountId: string) {
|
||||
for (let i = 0; i < 10 && !started.includes(accountId); i += 1) {
|
||||
await Promise.resolve();
|
||||
}
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
stopFeishuMonitor();
|
||||
});
|
||||
|
|
@ -116,10 +108,7 @@ describe("Feishu monitor startup preflight", () => {
|
|||
});
|
||||
|
||||
try {
|
||||
for (let i = 0; i < 10 && !started.includes("beta"); i += 1) {
|
||||
await Promise.resolve();
|
||||
}
|
||||
|
||||
await waitForStartedAccount(started, "beta");
|
||||
expect(started).toEqual(["alpha", "beta"]);
|
||||
expect(started.filter((accountId) => accountId === "alpha")).toHaveLength(1);
|
||||
} finally {
|
||||
|
|
@ -153,10 +142,7 @@ describe("Feishu monitor startup preflight", () => {
|
|||
});
|
||||
|
||||
try {
|
||||
for (let i = 0; i < 10 && !started.includes("beta"); i += 1) {
|
||||
await Promise.resolve();
|
||||
}
|
||||
|
||||
await waitForStartedAccount(started, "beta");
|
||||
expect(started).toEqual(["alpha", "beta"]);
|
||||
expect(runtime.error).toHaveBeenCalledWith(
|
||||
expect.stringContaining("bot info probe timed out"),
|
||||
|
|
|
|||
|
|
@ -1,9 +1,7 @@
|
|||
import crypto from "node:crypto";
|
||||
import { createServer } from "node:http";
|
||||
import type { AddressInfo } from "node:net";
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js";
|
||||
import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js";
|
||||
|
||||
const probeFeishuMock = vi.hoisted(() => vi.fn());
|
||||
|
||||
|
|
@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule());
|
|||
|
||||
import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js";
|
||||
|
||||
async function getFreePort(): Promise<number> {
|
||||
const server = createServer();
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
|
||||
const address = server.address() as AddressInfo | null;
|
||||
if (!address) {
|
||||
throw new Error("missing server address");
|
||||
}
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
return address.port;
|
||||
}
|
||||
|
||||
async function waitUntilServerReady(url: string): Promise<void> {
|
||||
for (let i = 0; i < 50; i += 1) {
|
||||
try {
|
||||
const response = await fetch(url, { method: "GET" });
|
||||
if (response.status >= 200 && response.status < 500) {
|
||||
return;
|
||||
}
|
||||
} catch {
|
||||
// retry
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
throw new Error(`server did not start: ${url}`);
|
||||
}
|
||||
|
||||
function buildConfig(params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
port: number;
|
||||
verificationToken?: string;
|
||||
encryptKey?: string;
|
||||
}): ClawdbotConfig {
|
||||
return {
|
||||
channels: {
|
||||
feishu: {
|
||||
enabled: true,
|
||||
accounts: {
|
||||
[params.accountId]: {
|
||||
enabled: true,
|
||||
appId: "cli_test",
|
||||
appSecret: "secret_test", // pragma: allowlist secret
|
||||
connectionMode: "webhook",
|
||||
webhookHost: "127.0.0.1",
|
||||
webhookPort: params.port,
|
||||
webhookPath: params.path,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
function signFeishuPayload(params: {
|
||||
encryptKey: string;
|
||||
payload: Record<string, unknown>;
|
||||
|
|
@ -107,41 +50,12 @@ function encryptFeishuPayload(encryptKey: string, payload: Record<string, unknow
|
|||
return Buffer.concat([iv, encrypted]).toString("base64");
|
||||
}
|
||||
|
||||
async function withRunningWebhookMonitor(
|
||||
params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
verificationToken: string;
|
||||
encryptKey: string;
|
||||
},
|
||||
run: (url: string) => Promise<void>,
|
||||
) {
|
||||
const port = await getFreePort();
|
||||
const cfg = buildConfig({
|
||||
accountId: params.accountId,
|
||||
path: params.path,
|
||||
port,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
async function postSignedPayload(url: string, payload: Record<string, unknown>) {
|
||||
return await fetch(url, {
|
||||
method: "POST",
|
||||
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
|
||||
const monitorPromise = monitorFeishuProvider({
|
||||
config: cfg,
|
||||
runtime,
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
const url = `http://127.0.0.1:${port}${params.path}`;
|
||||
await waitUntilServerReady(url);
|
||||
|
||||
try {
|
||||
await run(url);
|
||||
} finally {
|
||||
abortController.abort();
|
||||
await monitorPromise;
|
||||
}
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
|
|
@ -159,6 +73,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = { type: "url_verification", challenge: "challenge-token" };
|
||||
const response = await fetch(url, {
|
||||
|
|
@ -185,6 +100,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
|
|
@ -208,6 +124,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
|
|
@ -231,13 +148,10 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = { type: "url_verification", challenge: "challenge-token" };
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
const response = await postSignedPayload(url, payload);
|
||||
|
||||
expect(response.status).toBe(200);
|
||||
await expect(response.json()).resolves.toEqual({ challenge: "challenge-token" });
|
||||
|
|
@ -255,17 +169,14 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = {
|
||||
schema: "2.0",
|
||||
header: { event_type: "unknown.event" },
|
||||
event: {},
|
||||
};
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
const response = await postSignedPayload(url, payload);
|
||||
|
||||
expect(response.status).toBe(200);
|
||||
expect(await response.text()).toContain("no unknown.event event handle");
|
||||
|
|
@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const payload = {
|
||||
encrypt: encryptFeishuPayload("encrypt_key", {
|
||||
|
|
@ -290,11 +202,7 @@ describe("Feishu webhook signed-request e2e", () => {
|
|||
challenge: "encrypted-challenge-token",
|
||||
}),
|
||||
};
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }),
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
const response = await postSignedPayload(url, payload);
|
||||
|
||||
expect(response.status).toBe(200);
|
||||
await expect(response.json()).resolves.toEqual({
|
||||
|
|
|
|||
|
|
@ -1,11 +1,13 @@
|
|||
import { createServer } from "node:http";
|
||||
import type { AddressInfo } from "node:net";
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
createFeishuClientMockModule,
|
||||
createFeishuRuntimeMockModule,
|
||||
} from "./monitor.test-mocks.js";
|
||||
import {
|
||||
buildWebhookConfig,
|
||||
getFreePort,
|
||||
withRunningWebhookMonitor,
|
||||
} from "./monitor.webhook.test-helpers.js";
|
||||
|
||||
const probeFeishuMock = vi.hoisted(() => vi.fn());
|
||||
|
||||
|
|
@ -33,98 +35,6 @@ import {
|
|||
stopFeishuMonitor,
|
||||
} from "./monitor.js";
|
||||
|
||||
async function getFreePort(): Promise<number> {
|
||||
const server = createServer();
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
|
||||
const address = server.address() as AddressInfo | null;
|
||||
if (!address) {
|
||||
throw new Error("missing server address");
|
||||
}
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
return address.port;
|
||||
}
|
||||
|
||||
async function waitUntilServerReady(url: string): Promise<void> {
|
||||
for (let i = 0; i < 50; i += 1) {
|
||||
try {
|
||||
const response = await fetch(url, { method: "GET" });
|
||||
if (response.status >= 200 && response.status < 500) {
|
||||
return;
|
||||
}
|
||||
} catch {
|
||||
// retry
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
throw new Error(`server did not start: ${url}`);
|
||||
}
|
||||
|
||||
function buildConfig(params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
port: number;
|
||||
verificationToken?: string;
|
||||
encryptKey?: string;
|
||||
}): ClawdbotConfig {
|
||||
return {
|
||||
channels: {
|
||||
feishu: {
|
||||
enabled: true,
|
||||
accounts: {
|
||||
[params.accountId]: {
|
||||
enabled: true,
|
||||
appId: "cli_test",
|
||||
appSecret: "secret_test", // pragma: allowlist secret
|
||||
connectionMode: "webhook",
|
||||
webhookHost: "127.0.0.1",
|
||||
webhookPort: params.port,
|
||||
webhookPath: params.path,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
async function withRunningWebhookMonitor(
|
||||
params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
verificationToken: string;
|
||||
encryptKey: string;
|
||||
},
|
||||
run: (url: string) => Promise<void>,
|
||||
) {
|
||||
const port = await getFreePort();
|
||||
const cfg = buildConfig({
|
||||
accountId: params.accountId,
|
||||
path: params.path,
|
||||
port,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
|
||||
const monitorPromise = monitorFeishuProvider({
|
||||
config: cfg,
|
||||
runtime,
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
const url = `http://127.0.0.1:${port}${params.path}`;
|
||||
await waitUntilServerReady(url);
|
||||
|
||||
try {
|
||||
await run(url);
|
||||
} finally {
|
||||
abortController.abort();
|
||||
await monitorPromise;
|
||||
}
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
clearFeishuWebhookRateLimitStateForTest();
|
||||
stopFeishuMonitor();
|
||||
|
|
@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => {
|
|||
it("rejects webhook mode without verificationToken", async () => {
|
||||
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
|
||||
|
||||
const cfg = buildConfig({
|
||||
const cfg = buildWebhookConfig({
|
||||
accountId: "missing-token",
|
||||
path: "/hook-missing-token",
|
||||
port: await getFreePort(),
|
||||
|
|
@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => {
|
|||
it("rejects webhook mode without encryptKey", async () => {
|
||||
probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" });
|
||||
|
||||
const cfg = buildConfig({
|
||||
const cfg = buildWebhookConfig({
|
||||
accountId: "missing-encrypt-key",
|
||||
path: "/hook-missing-encrypt",
|
||||
port: await getFreePort(),
|
||||
|
|
@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
const response = await fetch(url, {
|
||||
method: "POST",
|
||||
|
|
@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => {
|
|||
verificationToken: "verify_token",
|
||||
encryptKey: "encrypt_key",
|
||||
},
|
||||
monitorFeishuProvider,
|
||||
async (url) => {
|
||||
let saw429 = false;
|
||||
for (let i = 0; i < 130; i += 1) {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,98 @@
|
|||
import { createServer } from "node:http";
|
||||
import type { AddressInfo } from "node:net";
|
||||
import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu";
|
||||
import { vi } from "vitest";
|
||||
import type { monitorFeishuProvider } from "./monitor.js";
|
||||
|
||||
export async function getFreePort(): Promise<number> {
|
||||
const server = createServer();
|
||||
await new Promise<void>((resolve) => server.listen(0, "127.0.0.1", () => resolve()));
|
||||
const address = server.address() as AddressInfo | null;
|
||||
if (!address) {
|
||||
throw new Error("missing server address");
|
||||
}
|
||||
await new Promise<void>((resolve) => server.close(() => resolve()));
|
||||
return address.port;
|
||||
}
|
||||
|
||||
async function waitUntilServerReady(url: string): Promise<void> {
|
||||
for (let i = 0; i < 50; i += 1) {
|
||||
try {
|
||||
const response = await fetch(url, { method: "GET" });
|
||||
if (response.status >= 200 && response.status < 500) {
|
||||
return;
|
||||
}
|
||||
} catch {
|
||||
// retry
|
||||
}
|
||||
await new Promise((resolve) => setTimeout(resolve, 20));
|
||||
}
|
||||
throw new Error(`server did not start: ${url}`);
|
||||
}
|
||||
|
||||
export function buildWebhookConfig(params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
port: number;
|
||||
verificationToken?: string;
|
||||
encryptKey?: string;
|
||||
}): ClawdbotConfig {
|
||||
return {
|
||||
channels: {
|
||||
feishu: {
|
||||
enabled: true,
|
||||
accounts: {
|
||||
[params.accountId]: {
|
||||
enabled: true,
|
||||
appId: "cli_test",
|
||||
appSecret: "secret_test", // pragma: allowlist secret
|
||||
connectionMode: "webhook",
|
||||
webhookHost: "127.0.0.1",
|
||||
webhookPort: params.port,
|
||||
webhookPath: params.path,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
}
|
||||
|
||||
export async function withRunningWebhookMonitor(
|
||||
params: {
|
||||
accountId: string;
|
||||
path: string;
|
||||
verificationToken: string;
|
||||
encryptKey: string;
|
||||
},
|
||||
monitor: typeof monitorFeishuProvider,
|
||||
run: (url: string) => Promise<void>,
|
||||
) {
|
||||
const port = await getFreePort();
|
||||
const cfg = buildWebhookConfig({
|
||||
accountId: params.accountId,
|
||||
path: params.path,
|
||||
port,
|
||||
encryptKey: params.encryptKey,
|
||||
verificationToken: params.verificationToken,
|
||||
});
|
||||
|
||||
const abortController = new AbortController();
|
||||
const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() };
|
||||
const monitorPromise = monitor({
|
||||
config: cfg,
|
||||
runtime,
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
const url = `http://127.0.0.1:${port}${params.path}`;
|
||||
await waitUntilServerReady(url);
|
||||
|
||||
try {
|
||||
await run(url);
|
||||
} finally {
|
||||
abortController.abort();
|
||||
await monitorPromise;
|
||||
}
|
||||
}
|
||||
|
|
@ -29,12 +29,16 @@ vi.mock("./runtime.js", () => ({
|
|||
import { feishuOutbound } from "./outbound.js";
|
||||
const sendText = feishuOutbound.sendText!;
|
||||
|
||||
describe("feishuOutbound.sendText local-image auto-convert", () => {
|
||||
beforeEach(() => {
|
||||
function resetOutboundMocks() {
|
||||
vi.clearAllMocks();
|
||||
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
|
||||
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
|
||||
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
|
||||
}
|
||||
|
||||
describe("feishuOutbound.sendText local-image auto-convert", () => {
|
||||
beforeEach(() => {
|
||||
resetOutboundMocks();
|
||||
});
|
||||
|
||||
async function createTmpImage(ext = ".png"): Promise<{ dir: string; file: string }> {
|
||||
|
|
@ -181,10 +185,7 @@ describe("feishuOutbound.sendText local-image auto-convert", () => {
|
|||
|
||||
describe("feishuOutbound.sendText replyToId forwarding", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
|
||||
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
|
||||
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
|
||||
resetOutboundMocks();
|
||||
});
|
||||
|
||||
it("forwards replyToId as replyToMessageId to sendMessageFeishu", async () => {
|
||||
|
|
@ -249,10 +250,7 @@ describe("feishuOutbound.sendText replyToId forwarding", () => {
|
|||
|
||||
describe("feishuOutbound.sendMedia replyToId forwarding", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
|
||||
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
|
||||
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
|
||||
resetOutboundMocks();
|
||||
});
|
||||
|
||||
it("forwards replyToId to sendMediaFeishu", async () => {
|
||||
|
|
@ -292,10 +290,7 @@ describe("feishuOutbound.sendMedia replyToId forwarding", () => {
|
|||
|
||||
describe("feishuOutbound.sendMedia renderMode", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" });
|
||||
sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" });
|
||||
sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" });
|
||||
resetOutboundMocks();
|
||||
});
|
||||
|
||||
it("uses markdown cards for captions when renderMode=card", async () => {
|
||||
|
|
|
|||
|
|
@ -8,6 +8,22 @@ vi.mock("./client.js", () => ({
|
|||
|
||||
import { FEISHU_PROBE_REQUEST_TIMEOUT_MS, probeFeishu, clearProbeCache } from "./probe.js";
|
||||
|
||||
const DEFAULT_CREDS = { appId: "cli_123", appSecret: "secret" } as const; // pragma: allowlist secret
|
||||
const DEFAULT_SUCCESS_RESPONSE = {
|
||||
code: 0,
|
||||
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
|
||||
} as const;
|
||||
const DEFAULT_SUCCESS_RESULT = {
|
||||
ok: true,
|
||||
appId: "cli_123",
|
||||
botName: "TestBot",
|
||||
botOpenId: "ou_abc123",
|
||||
} as const;
|
||||
const BOT1_RESPONSE = {
|
||||
code: 0,
|
||||
bot: { bot_name: "Bot1", open_id: "ou_1" },
|
||||
} as const;
|
||||
|
||||
function makeRequestFn(response: Record<string, unknown>) {
|
||||
return vi.fn().mockResolvedValue(response);
|
||||
}
|
||||
|
|
@ -18,6 +34,64 @@ function setupClient(response: Record<string, unknown>) {
|
|||
return requestFn;
|
||||
}
|
||||
|
||||
function setupSuccessClient() {
|
||||
return setupClient(DEFAULT_SUCCESS_RESPONSE);
|
||||
}
|
||||
|
||||
async function expectDefaultSuccessResult(
|
||||
creds = DEFAULT_CREDS,
|
||||
expected: Awaited<ReturnType<typeof probeFeishu>> = DEFAULT_SUCCESS_RESULT,
|
||||
) {
|
||||
const result = await probeFeishu(creds);
|
||||
expect(result).toEqual(expected);
|
||||
}
|
||||
|
||||
async function withFakeTimers(run: () => Promise<void>) {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
await run();
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
}
|
||||
|
||||
async function expectErrorResultCached(params: {
|
||||
requestFn: ReturnType<typeof vi.fn>;
|
||||
expectedError: string;
|
||||
ttlMs: number;
|
||||
}) {
|
||||
createFeishuClientMock.mockReturnValue({ request: params.requestFn });
|
||||
|
||||
const first = await probeFeishu(DEFAULT_CREDS);
|
||||
const second = await probeFeishu(DEFAULT_CREDS);
|
||||
expect(first).toMatchObject({ ok: false, error: params.expectedError });
|
||||
expect(second).toMatchObject({ ok: false, error: params.expectedError });
|
||||
expect(params.requestFn).toHaveBeenCalledTimes(1);
|
||||
|
||||
vi.advanceTimersByTime(params.ttlMs + 1);
|
||||
|
||||
await probeFeishu(DEFAULT_CREDS);
|
||||
expect(params.requestFn).toHaveBeenCalledTimes(2);
|
||||
}
|
||||
|
||||
async function expectFreshDefaultProbeAfter(
|
||||
requestFn: ReturnType<typeof vi.fn>,
|
||||
invalidate: () => void,
|
||||
) {
|
||||
await probeFeishu(DEFAULT_CREDS);
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
|
||||
invalidate();
|
||||
|
||||
await probeFeishu(DEFAULT_CREDS);
|
||||
expect(requestFn).toHaveBeenCalledTimes(2);
|
||||
}
|
||||
|
||||
async function readSequentialDefaultProbePair() {
|
||||
const first = await probeFeishu(DEFAULT_CREDS);
|
||||
return { first, second: await probeFeishu(DEFAULT_CREDS) };
|
||||
}
|
||||
|
||||
describe("probeFeishu", () => {
|
||||
beforeEach(() => {
|
||||
clearProbeCache();
|
||||
|
|
@ -44,28 +118,16 @@ describe("probeFeishu", () => {
|
|||
});
|
||||
|
||||
it("returns bot info on successful probe", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
|
||||
});
|
||||
const requestFn = setupSuccessClient();
|
||||
|
||||
const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
|
||||
expect(result).toEqual({
|
||||
ok: true,
|
||||
appId: "cli_123",
|
||||
botName: "TestBot",
|
||||
botOpenId: "ou_abc123",
|
||||
});
|
||||
await expectDefaultSuccessResult();
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("passes the probe timeout to the Feishu request", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
|
||||
});
|
||||
const requestFn = setupSuccessClient();
|
||||
|
||||
await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
|
||||
await probeFeishu(DEFAULT_CREDS);
|
||||
|
||||
expect(requestFn).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
|
|
@ -77,19 +139,16 @@ describe("probeFeishu", () => {
|
|||
});
|
||||
|
||||
it("returns timeout error when request exceeds timeout", async () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
await withFakeTimers(async () => {
|
||||
const requestFn = vi.fn().mockImplementation(() => new Promise(() => {}));
|
||||
createFeishuClientMock.mockReturnValue({ request: requestFn });
|
||||
|
||||
const promise = probeFeishu({ appId: "cli_123", appSecret: "secret" }, { timeoutMs: 1_000 });
|
||||
const promise = probeFeishu(DEFAULT_CREDS, { timeoutMs: 1_000 });
|
||||
await vi.advanceTimersByTimeAsync(1_000);
|
||||
const result = await promise;
|
||||
|
||||
expect(result).toMatchObject({ ok: false, error: "probe timed out after 1000ms" });
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("returns aborted when abort signal is already aborted", async () => {
|
||||
|
|
@ -106,14 +165,9 @@ describe("probeFeishu", () => {
|
|||
expect(createFeishuClientMock).not.toHaveBeenCalled();
|
||||
});
|
||||
it("returns cached result on subsequent calls within TTL", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
|
||||
});
|
||||
const requestFn = setupSuccessClient();
|
||||
|
||||
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
|
||||
const first = await probeFeishu(creds);
|
||||
const second = await probeFeishu(creds);
|
||||
const { first, second } = await readSequentialDefaultProbePair();
|
||||
|
||||
expect(first).toEqual(second);
|
||||
// Only one API call should have been made
|
||||
|
|
@ -121,76 +175,37 @@ describe("probeFeishu", () => {
|
|||
});
|
||||
|
||||
it("makes a fresh API call after cache expires", async () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
|
||||
});
|
||||
await withFakeTimers(async () => {
|
||||
const requestFn = setupSuccessClient();
|
||||
|
||||
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
|
||||
await probeFeishu(creds);
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Advance time past the success TTL
|
||||
await expectFreshDefaultProbeAfter(requestFn, () => {
|
||||
vi.advanceTimersByTime(10 * 60 * 1000 + 1);
|
||||
|
||||
await probeFeishu(creds);
|
||||
expect(requestFn).toHaveBeenCalledTimes(2);
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it("caches failed probe results (API error) for the error TTL", async () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
const requestFn = makeRequestFn({ code: 99, msg: "token expired" });
|
||||
createFeishuClientMock.mockReturnValue({ request: requestFn });
|
||||
|
||||
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
|
||||
const first = await probeFeishu(creds);
|
||||
const second = await probeFeishu(creds);
|
||||
expect(first).toMatchObject({ ok: false, error: "API error: token expired" });
|
||||
expect(second).toMatchObject({ ok: false, error: "API error: token expired" });
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
|
||||
vi.advanceTimersByTime(60 * 1000 + 1);
|
||||
|
||||
await probeFeishu(creds);
|
||||
expect(requestFn).toHaveBeenCalledTimes(2);
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
await withFakeTimers(async () => {
|
||||
await expectErrorResultCached({
|
||||
requestFn: makeRequestFn({ code: 99, msg: "token expired" }),
|
||||
expectedError: "API error: token expired",
|
||||
ttlMs: 60 * 1000,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it("caches thrown request errors for the error TTL", async () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
const requestFn = vi.fn().mockRejectedValue(new Error("network error"));
|
||||
createFeishuClientMock.mockReturnValue({ request: requestFn });
|
||||
|
||||
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
|
||||
const first = await probeFeishu(creds);
|
||||
const second = await probeFeishu(creds);
|
||||
expect(first).toMatchObject({ ok: false, error: "network error" });
|
||||
expect(second).toMatchObject({ ok: false, error: "network error" });
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
|
||||
vi.advanceTimersByTime(60 * 1000 + 1);
|
||||
|
||||
await probeFeishu(creds);
|
||||
expect(requestFn).toHaveBeenCalledTimes(2);
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
await withFakeTimers(async () => {
|
||||
await expectErrorResultCached({
|
||||
requestFn: vi.fn().mockRejectedValue(new Error("network error")),
|
||||
expectedError: "network error",
|
||||
ttlMs: 60 * 1000,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it("caches per account independently", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "Bot1", open_id: "ou_1" },
|
||||
});
|
||||
const requestFn = setupClient(BOT1_RESPONSE);
|
||||
|
||||
await probeFeishu({ appId: "cli_aaa", appSecret: "s1" }); // pragma: allowlist secret
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
|
|
@ -205,10 +220,7 @@ describe("probeFeishu", () => {
|
|||
});
|
||||
|
||||
it("does not share cache between accounts with same appId but different appSecret", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "Bot1", open_id: "ou_1" },
|
||||
});
|
||||
const requestFn = setupClient(BOT1_RESPONSE);
|
||||
|
||||
// First account with appId + secret A
|
||||
await probeFeishu({ appId: "cli_shared", appSecret: "secret_aaa" }); // pragma: allowlist secret
|
||||
|
|
@ -221,10 +233,7 @@ describe("probeFeishu", () => {
|
|||
});
|
||||
|
||||
it("uses accountId for cache key when available", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "Bot1", open_id: "ou_1" },
|
||||
});
|
||||
const requestFn = setupClient(BOT1_RESPONSE);
|
||||
|
||||
// Two accounts with same appId+appSecret but different accountIds are cached separately
|
||||
await probeFeishu({ accountId: "acct-1", appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
|
||||
|
|
@ -239,19 +248,11 @@ describe("probeFeishu", () => {
|
|||
});
|
||||
|
||||
it("clearProbeCache forces fresh API call", async () => {
|
||||
const requestFn = setupClient({
|
||||
code: 0,
|
||||
bot: { bot_name: "TestBot", open_id: "ou_abc123" },
|
||||
});
|
||||
|
||||
const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret
|
||||
await probeFeishu(creds);
|
||||
expect(requestFn).toHaveBeenCalledTimes(1);
|
||||
const requestFn = setupSuccessClient();
|
||||
|
||||
await expectFreshDefaultProbeAfter(requestFn, () => {
|
||||
clearProbeCache();
|
||||
|
||||
await probeFeishu(creds);
|
||||
expect(requestFn).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
|
||||
it("handles response.data.bot fallback path", async () => {
|
||||
|
|
@ -260,10 +261,8 @@ describe("probeFeishu", () => {
|
|||
data: { bot: { bot_name: "DataBot", open_id: "ou_data" } },
|
||||
});
|
||||
|
||||
const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret
|
||||
expect(result).toEqual({
|
||||
ok: true,
|
||||
appId: "cli_123",
|
||||
await expectDefaultSuccessResult(DEFAULT_CREDS, {
|
||||
...DEFAULT_SUCCESS_RESULT,
|
||||
botName: "DataBot",
|
||||
botOpenId: "ou_data",
|
||||
});
|
||||
|
|
|
|||
|
|
@ -25,24 +25,10 @@ vi.mock("./typing.js", () => ({
|
|||
addTypingIndicator: addTypingIndicatorMock,
|
||||
removeTypingIndicator: removeTypingIndicatorMock,
|
||||
}));
|
||||
vi.mock("./streaming-card.js", () => ({
|
||||
mergeStreamingText: (previousText: string | undefined, nextText: string | undefined) => {
|
||||
const previous = typeof previousText === "string" ? previousText : "";
|
||||
const next = typeof nextText === "string" ? nextText : "";
|
||||
if (!next) {
|
||||
return previous;
|
||||
}
|
||||
if (!previous || next === previous) {
|
||||
return next;
|
||||
}
|
||||
if (next.startsWith(previous)) {
|
||||
return next;
|
||||
}
|
||||
if (previous.startsWith(next)) {
|
||||
return previous;
|
||||
}
|
||||
return `${previous}${next}`;
|
||||
},
|
||||
vi.mock("./streaming-card.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("./streaming-card.js")>("./streaming-card.js");
|
||||
return {
|
||||
mergeStreamingText: actual.mergeStreamingText,
|
||||
FeishuStreamingSession: class {
|
||||
active = false;
|
||||
start = vi.fn(async () => {
|
||||
|
|
@ -58,11 +44,14 @@ vi.mock("./streaming-card.js", () => ({
|
|||
streamingInstances.push(this);
|
||||
}
|
||||
},
|
||||
}));
|
||||
};
|
||||
});
|
||||
|
||||
import { createFeishuReplyDispatcher } from "./reply-dispatcher.js";
|
||||
|
||||
describe("createFeishuReplyDispatcher streaming behavior", () => {
|
||||
type ReplyDispatcherArgs = Parameters<typeof createFeishuReplyDispatcher>[0];
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
streamingInstances.length = 0;
|
||||
|
|
@ -128,6 +117,25 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
return createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
}
|
||||
|
||||
function createRuntimeLogger() {
|
||||
return { log: vi.fn(), error: vi.fn() } as never;
|
||||
}
|
||||
|
||||
function createDispatcherHarness(overrides: Partial<ReplyDispatcherArgs> = {}) {
|
||||
const result = createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
...overrides,
|
||||
});
|
||||
|
||||
return {
|
||||
result,
|
||||
options: createReplyDispatcherWithTypingMock.mock.calls.at(-1)?.[0],
|
||||
};
|
||||
}
|
||||
|
||||
it("skips typing indicator when account typingIndicator is disabled", async () => {
|
||||
resolveFeishuAccountMock.mockReturnValue({
|
||||
accountId: "main",
|
||||
|
|
@ -209,14 +217,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("keeps auto mode plain text on non-streaming send path", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
const { options } = createDispatcherHarness();
|
||||
await options.deliver({ text: "plain text" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(0);
|
||||
|
|
@ -225,14 +226,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("suppresses internal block payload delivery", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
const { options } = createDispatcherHarness();
|
||||
await options.deliver({ text: "internal reasoning chunk" }, { kind: "block" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(0);
|
||||
|
|
@ -253,15 +247,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("uses streaming session for auto mode markdown payloads", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
rootId: "om_root_topic",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
|
|
@ -277,14 +266,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("closes streaming with block text when final reply is missing", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "```md\npartial answer\n```" }, { kind: "block" });
|
||||
await options.onIdle?.();
|
||||
|
||||
|
|
@ -295,14 +279,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("delivers distinct final payloads after streaming close", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "```md\n完整回复第一段\n```" }, { kind: "final" });
|
||||
await options.deliver({ text: "```md\n完整回复第一段 + 第二段\n```" }, { kind: "final" });
|
||||
|
||||
|
|
@ -316,14 +295,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("skips exact duplicate final text after streaming close", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" });
|
||||
await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" });
|
||||
|
||||
|
|
@ -383,14 +357,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
},
|
||||
});
|
||||
|
||||
const result = createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { result, options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.onReplyStart?.();
|
||||
await result.replyOptions.onPartialReply?.({ text: "hello" });
|
||||
await options.deliver({ text: "lo world" }, { kind: "block" });
|
||||
|
|
@ -402,14 +371,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("sends media-only payloads as attachments", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
const { options } = createDispatcherHarness();
|
||||
await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" });
|
||||
|
||||
expect(sendMediaFeishuMock).toHaveBeenCalledTimes(1);
|
||||
|
|
@ -424,14 +386,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("falls back to legacy mediaUrl when mediaUrls is an empty array", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
const { options } = createDispatcherHarness();
|
||||
await options.deliver(
|
||||
{ text: "caption", mediaUrl: "https://example.com/a.png", mediaUrls: [] },
|
||||
{ kind: "final" },
|
||||
|
|
@ -447,14 +402,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("sends attachments after streaming final markdown replies", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver(
|
||||
{ text: "```ts\nconst x = 1\n```", mediaUrls: ["https://example.com/a.png"] },
|
||||
{ kind: "final" },
|
||||
|
|
@ -472,16 +422,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("passes replyInThread to sendMessageFeishu for plain text", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
replyToMessageId: "om_msg",
|
||||
replyInThread: true,
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "plain text" }, { kind: "final" });
|
||||
|
||||
expect(sendMessageFeishuMock).toHaveBeenCalledWith(
|
||||
|
|
@ -504,16 +448,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
},
|
||||
});
|
||||
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
replyToMessageId: "om_msg",
|
||||
replyInThread: true,
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "card text" }, { kind: "final" });
|
||||
|
||||
expect(sendMarkdownCardFeishuMock).toHaveBeenCalledWith(
|
||||
|
|
@ -525,16 +463,11 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("passes replyToMessageId and replyInThread to streaming.start()", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
replyToMessageId: "om_msg",
|
||||
replyInThread: true,
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(1);
|
||||
|
|
@ -545,18 +478,13 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("disables streaming for thread replies and keeps reply metadata", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: { log: vi.fn(), error: vi.fn() } as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
runtime: createRuntimeLogger(),
|
||||
replyToMessageId: "om_msg",
|
||||
replyInThread: false,
|
||||
threadReply: true,
|
||||
rootId: "om_root_topic",
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" });
|
||||
|
||||
expect(streamingInstances).toHaveLength(0);
|
||||
|
|
@ -569,16 +497,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
|
|||
});
|
||||
|
||||
it("passes replyInThread to media attachments", async () => {
|
||||
createFeishuReplyDispatcher({
|
||||
cfg: {} as never,
|
||||
agentId: "agent",
|
||||
runtime: {} as never,
|
||||
chatId: "oc_chat",
|
||||
const { options } = createDispatcherHarness({
|
||||
replyToMessageId: "om_msg",
|
||||
replyInThread: true,
|
||||
});
|
||||
|
||||
const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0];
|
||||
await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" });
|
||||
|
||||
expect(sendMediaFeishuMock).toHaveBeenCalledWith(
|
||||
|
|
|
|||
|
|
@ -224,6 +224,41 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
|||
lastPartial = "";
|
||||
};
|
||||
|
||||
const sendChunkedTextReply = async (params: {
|
||||
text: string;
|
||||
useCard: boolean;
|
||||
infoKind?: string;
|
||||
}) => {
|
||||
let first = true;
|
||||
const chunkSource = params.useCard
|
||||
? params.text
|
||||
: core.channel.text.convertMarkdownTables(params.text, tableMode);
|
||||
for (const chunk of core.channel.text.chunkTextWithMode(
|
||||
chunkSource,
|
||||
textChunkLimit,
|
||||
chunkMode,
|
||||
)) {
|
||||
const message = {
|
||||
cfg,
|
||||
to: chatId,
|
||||
text: chunk,
|
||||
replyToMessageId: sendReplyToMessageId,
|
||||
replyInThread: effectiveReplyInThread,
|
||||
mentions: first ? mentionTargets : undefined,
|
||||
accountId,
|
||||
};
|
||||
if (params.useCard) {
|
||||
await sendMarkdownCardFeishu(message);
|
||||
} else {
|
||||
await sendMessageFeishu(message);
|
||||
}
|
||||
first = false;
|
||||
}
|
||||
if (params.infoKind === "final") {
|
||||
deliveredFinalTexts.add(params.text);
|
||||
}
|
||||
};
|
||||
|
||||
const { dispatcher, replyOptions, markDispatchIdle } =
|
||||
core.channel.reply.createReplyDispatcherWithTyping({
|
||||
responsePrefix: prefixContext.responsePrefix,
|
||||
|
|
@ -303,48 +338,10 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
|
|||
return;
|
||||
}
|
||||
|
||||
let first = true;
|
||||
if (useCard) {
|
||||
for (const chunk of core.channel.text.chunkTextWithMode(
|
||||
text,
|
||||
textChunkLimit,
|
||||
chunkMode,
|
||||
)) {
|
||||
await sendMarkdownCardFeishu({
|
||||
cfg,
|
||||
to: chatId,
|
||||
text: chunk,
|
||||
replyToMessageId: sendReplyToMessageId,
|
||||
replyInThread: effectiveReplyInThread,
|
||||
mentions: first ? mentionTargets : undefined,
|
||||
accountId,
|
||||
});
|
||||
first = false;
|
||||
}
|
||||
if (info?.kind === "final") {
|
||||
deliveredFinalTexts.add(text);
|
||||
}
|
||||
await sendChunkedTextReply({ text, useCard: true, infoKind: info?.kind });
|
||||
} else {
|
||||
const converted = core.channel.text.convertMarkdownTables(text, tableMode);
|
||||
for (const chunk of core.channel.text.chunkTextWithMode(
|
||||
converted,
|
||||
textChunkLimit,
|
||||
chunkMode,
|
||||
)) {
|
||||
await sendMessageFeishu({
|
||||
cfg,
|
||||
to: chatId,
|
||||
text: chunk,
|
||||
replyToMessageId: sendReplyToMessageId,
|
||||
replyInThread: effectiveReplyInThread,
|
||||
mentions: first ? mentionTargets : undefined,
|
||||
accountId,
|
||||
});
|
||||
first = false;
|
||||
}
|
||||
if (info?.kind === "final") {
|
||||
deliveredFinalTexts.add(text);
|
||||
}
|
||||
await sendChunkedTextReply({ text, useCard: false, infoKind: info?.kind });
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -25,6 +25,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
|
|||
const replyMock = vi.fn();
|
||||
const createMock = vi.fn();
|
||||
|
||||
async function expectFallbackResult(
|
||||
send: () => Promise<{ messageId?: string }>,
|
||||
expectedMessageId: string,
|
||||
) {
|
||||
const result = await send();
|
||||
expect(replyMock).toHaveBeenCalledTimes(1);
|
||||
expect(createMock).toHaveBeenCalledTimes(1);
|
||||
expect(result.messageId).toBe(expectedMessageId);
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
resolveFeishuSendTargetMock.mockReturnValue({
|
||||
|
|
@ -51,16 +61,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
|
|||
data: { message_id: "om_new" },
|
||||
});
|
||||
|
||||
const result = await sendMessageFeishu({
|
||||
await expectFallbackResult(
|
||||
() =>
|
||||
sendMessageFeishu({
|
||||
cfg: {} as never,
|
||||
to: "user:ou_target",
|
||||
text: "hello",
|
||||
replyToMessageId: "om_parent",
|
||||
});
|
||||
|
||||
expect(replyMock).toHaveBeenCalledTimes(1);
|
||||
expect(createMock).toHaveBeenCalledTimes(1);
|
||||
expect(result.messageId).toBe("om_new");
|
||||
}),
|
||||
"om_new",
|
||||
);
|
||||
});
|
||||
|
||||
it("falls back to create for withdrawn card replies", async () => {
|
||||
|
|
@ -73,16 +83,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
|
|||
data: { message_id: "om_card_new" },
|
||||
});
|
||||
|
||||
const result = await sendCardFeishu({
|
||||
await expectFallbackResult(
|
||||
() =>
|
||||
sendCardFeishu({
|
||||
cfg: {} as never,
|
||||
to: "user:ou_target",
|
||||
card: { schema: "2.0" },
|
||||
replyToMessageId: "om_parent",
|
||||
});
|
||||
|
||||
expect(replyMock).toHaveBeenCalledTimes(1);
|
||||
expect(createMock).toHaveBeenCalledTimes(1);
|
||||
expect(result.messageId).toBe("om_card_new");
|
||||
}),
|
||||
"om_card_new",
|
||||
);
|
||||
});
|
||||
|
||||
it("still throws for non-withdrawn reply failures", async () => {
|
||||
|
|
@ -111,16 +121,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
|
|||
data: { message_id: "om_thrown_fallback" },
|
||||
});
|
||||
|
||||
const result = await sendMessageFeishu({
|
||||
await expectFallbackResult(
|
||||
() =>
|
||||
sendMessageFeishu({
|
||||
cfg: {} as never,
|
||||
to: "user:ou_target",
|
||||
text: "hello",
|
||||
replyToMessageId: "om_parent",
|
||||
});
|
||||
|
||||
expect(replyMock).toHaveBeenCalledTimes(1);
|
||||
expect(createMock).toHaveBeenCalledTimes(1);
|
||||
expect(result.messageId).toBe("om_thrown_fallback");
|
||||
}),
|
||||
"om_thrown_fallback",
|
||||
);
|
||||
});
|
||||
|
||||
it("falls back to create when card reply throws a not-found AxiosError", async () => {
|
||||
|
|
@ -133,16 +143,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => {
|
|||
data: { message_id: "om_axios_fallback" },
|
||||
});
|
||||
|
||||
const result = await sendCardFeishu({
|
||||
await expectFallbackResult(
|
||||
() =>
|
||||
sendCardFeishu({
|
||||
cfg: {} as never,
|
||||
to: "user:ou_target",
|
||||
card: { schema: "2.0" },
|
||||
replyToMessageId: "om_parent",
|
||||
});
|
||||
|
||||
expect(replyMock).toHaveBeenCalledTimes(1);
|
||||
expect(createMock).toHaveBeenCalledTimes(1);
|
||||
expect(result.messageId).toBe("om_axios_fallback");
|
||||
}),
|
||||
"om_axios_fallback",
|
||||
);
|
||||
});
|
||||
|
||||
it("re-throws non-withdrawn thrown errors for text messages", async () => {
|
||||
|
|
|
|||
|
|
@ -55,6 +55,30 @@ type FeishuCreateMessageClient = {
|
|||
};
|
||||
};
|
||||
|
||||
type FeishuMessageSender = {
|
||||
id?: string;
|
||||
id_type?: string;
|
||||
sender_type?: string;
|
||||
};
|
||||
|
||||
type FeishuMessageGetItem = {
|
||||
message_id?: string;
|
||||
chat_id?: string;
|
||||
chat_type?: FeishuChatType;
|
||||
msg_type?: string;
|
||||
body?: { content?: string };
|
||||
sender?: FeishuMessageSender;
|
||||
create_time?: string;
|
||||
};
|
||||
|
||||
type FeishuGetMessageResponse = {
|
||||
code?: number;
|
||||
msg?: string;
|
||||
data?: FeishuMessageGetItem & {
|
||||
items?: FeishuMessageGetItem[];
|
||||
};
|
||||
};
|
||||
|
||||
/** Send a direct message as a fallback when a reply target is unavailable. */
|
||||
async function sendFallbackDirect(
|
||||
client: FeishuCreateMessageClient,
|
||||
|
|
@ -214,36 +238,7 @@ export async function getMessageFeishu(params: {
|
|||
try {
|
||||
const response = (await client.im.message.get({
|
||||
path: { message_id: messageId },
|
||||
})) as {
|
||||
code?: number;
|
||||
msg?: string;
|
||||
data?: {
|
||||
items?: Array<{
|
||||
message_id?: string;
|
||||
chat_id?: string;
|
||||
chat_type?: FeishuChatType;
|
||||
msg_type?: string;
|
||||
body?: { content?: string };
|
||||
sender?: {
|
||||
id?: string;
|
||||
id_type?: string;
|
||||
sender_type?: string;
|
||||
};
|
||||
create_time?: string;
|
||||
}>;
|
||||
message_id?: string;
|
||||
chat_id?: string;
|
||||
chat_type?: FeishuChatType;
|
||||
msg_type?: string;
|
||||
body?: { content?: string };
|
||||
sender?: {
|
||||
id?: string;
|
||||
id_type?: string;
|
||||
sender_type?: string;
|
||||
};
|
||||
create_time?: string;
|
||||
};
|
||||
};
|
||||
})) as FeishuGetMessageResponse;
|
||||
|
||||
if (response.code !== 0) {
|
||||
return null;
|
||||
|
|
|
|||
|
|
@ -144,6 +144,13 @@ describe("extractGeminiCliCredentials", () => {
|
|||
}
|
||||
}
|
||||
|
||||
function expectFakeCliCredentials(result: unknown) {
|
||||
expect(result).toEqual({
|
||||
clientId: FAKE_CLIENT_ID,
|
||||
clientSecret: FAKE_CLIENT_SECRET,
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.clearAllMocks();
|
||||
originalPath = process.env.PATH;
|
||||
|
|
@ -169,10 +176,7 @@ describe("extractGeminiCliCredentials", () => {
|
|||
clearCredentialsCache();
|
||||
const result = extractGeminiCliCredentials();
|
||||
|
||||
expect(result).toEqual({
|
||||
clientId: FAKE_CLIENT_ID,
|
||||
clientSecret: FAKE_CLIENT_SECRET,
|
||||
});
|
||||
expectFakeCliCredentials(result);
|
||||
});
|
||||
|
||||
it("extracts credentials when PATH entry is an npm global shim", async () => {
|
||||
|
|
@ -182,10 +186,7 @@ describe("extractGeminiCliCredentials", () => {
|
|||
clearCredentialsCache();
|
||||
const result = extractGeminiCliCredentials();
|
||||
|
||||
expect(result).toEqual({
|
||||
clientId: FAKE_CLIENT_ID,
|
||||
clientSecret: FAKE_CLIENT_SECRET,
|
||||
});
|
||||
expectFakeCliCredentials(result);
|
||||
});
|
||||
|
||||
it("returns null when oauth2.js cannot be found", async () => {
|
||||
|
|
@ -274,16 +275,16 @@ describe("loginGeminiCliOAuth", () => {
|
|||
});
|
||||
}
|
||||
|
||||
async function runRemoteLoginWithCapturedAuthUrl(
|
||||
loginGeminiCliOAuth: (options: {
|
||||
type LoginGeminiCliOAuthFn = (options: {
|
||||
isRemote: boolean;
|
||||
openUrl: () => Promise<void>;
|
||||
log: (msg: string) => void;
|
||||
note: () => Promise<void>;
|
||||
prompt: () => Promise<string>;
|
||||
progress: { update: () => void; stop: () => void };
|
||||
}) => Promise<{ projectId: string }>,
|
||||
) {
|
||||
}) => Promise<{ projectId: string }>;
|
||||
|
||||
async function runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth: LoginGeminiCliOAuthFn) {
|
||||
let authUrl = "";
|
||||
const result = await loginGeminiCliOAuth({
|
||||
isRemote: true,
|
||||
|
|
@ -304,6 +305,14 @@ describe("loginGeminiCliOAuth", () => {
|
|||
return { result, authUrl };
|
||||
}
|
||||
|
||||
async function runRemoteLoginExpectingProjectId(
|
||||
loginGeminiCliOAuth: LoginGeminiCliOAuthFn,
|
||||
projectId: string,
|
||||
) {
|
||||
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
|
||||
expect(result.projectId).toBe(projectId);
|
||||
}
|
||||
|
||||
let envSnapshot: Partial<Record<(typeof ENV_KEYS)[number], string>>;
|
||||
beforeEach(() => {
|
||||
envSnapshot = Object.fromEntries(ENV_KEYS.map((key) => [key, process.env[key]]));
|
||||
|
|
@ -357,9 +366,7 @@ describe("loginGeminiCliOAuth", () => {
|
|||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const { loginGeminiCliOAuth } = await import("./oauth.js");
|
||||
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
|
||||
|
||||
expect(result.projectId).toBe("daily-project");
|
||||
await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "daily-project");
|
||||
const loadRequests = requests.filter((request) =>
|
||||
request.url.includes("v1internal:loadCodeAssist"),
|
||||
);
|
||||
|
|
@ -414,9 +421,7 @@ describe("loginGeminiCliOAuth", () => {
|
|||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const { loginGeminiCliOAuth } = await import("./oauth.js");
|
||||
const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth);
|
||||
|
||||
expect(result.projectId).toBe("env-project");
|
||||
await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "env-project");
|
||||
expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3);
|
||||
expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false);
|
||||
});
|
||||
|
|
|
|||
|
|
@ -7,6 +7,9 @@
|
|||
"dependencies": {
|
||||
"google-auth-library": "^10.6.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"openclaw": "workspace:*"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"openclaw": ">=2026.3.11"
|
||||
},
|
||||
|
|
|
|||
|
|
@ -13,6 +13,21 @@ const account = {
|
|||
config: {},
|
||||
} as ResolvedGoogleChatAccount;
|
||||
|
||||
function stubSuccessfulSend(name: string) {
|
||||
const fetchMock = vi
|
||||
.fn()
|
||||
.mockResolvedValue(new Response(JSON.stringify({ name }), { status: 200 }));
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
return fetchMock;
|
||||
}
|
||||
|
||||
async function expectDownloadToRejectForResponse(response: Response) {
|
||||
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
|
||||
await expect(
|
||||
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
|
||||
).rejects.toThrow(/max bytes/i);
|
||||
}
|
||||
|
||||
describe("downloadGoogleChatMedia", () => {
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
|
|
@ -29,11 +44,7 @@ describe("downloadGoogleChatMedia", () => {
|
|||
status: 200,
|
||||
headers: { "content-length": "50", "content-type": "application/octet-stream" },
|
||||
});
|
||||
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
|
||||
|
||||
await expect(
|
||||
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
|
||||
).rejects.toThrow(/max bytes/i);
|
||||
await expectDownloadToRejectForResponse(response);
|
||||
});
|
||||
|
||||
it("rejects when streamed payload exceeds max bytes", async () => {
|
||||
|
|
@ -52,11 +63,7 @@ describe("downloadGoogleChatMedia", () => {
|
|||
status: 200,
|
||||
headers: { "content-type": "application/octet-stream" },
|
||||
});
|
||||
vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response));
|
||||
|
||||
await expect(
|
||||
downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }),
|
||||
).rejects.toThrow(/max bytes/i);
|
||||
await expectDownloadToRejectForResponse(response);
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -66,12 +73,7 @@ describe("sendGoogleChatMessage", () => {
|
|||
});
|
||||
|
||||
it("adds messageReplyOption when sending to an existing thread", async () => {
|
||||
const fetchMock = vi
|
||||
.fn()
|
||||
.mockResolvedValue(
|
||||
new Response(JSON.stringify({ name: "spaces/AAA/messages/123" }), { status: 200 }),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
const fetchMock = stubSuccessfulSend("spaces/AAA/messages/123");
|
||||
|
||||
await sendGoogleChatMessage({
|
||||
account,
|
||||
|
|
@ -89,12 +91,7 @@ describe("sendGoogleChatMessage", () => {
|
|||
});
|
||||
|
||||
it("does not set messageReplyOption for non-thread sends", async () => {
|
||||
const fetchMock = vi
|
||||
.fn()
|
||||
.mockResolvedValue(
|
||||
new Response(JSON.stringify({ name: "spaces/AAA/messages/124" }), { status: 200 }),
|
||||
);
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
const fetchMock = stubSuccessfulSend("spaces/AAA/messages/124");
|
||||
|
||||
await sendGoogleChatMessage({
|
||||
account,
|
||||
|
|
|
|||
|
|
@ -14,70 +14,24 @@ const headersToObject = (headers?: HeadersInit): Record<string, string> =>
|
|||
? Object.fromEntries(headers)
|
||||
: headers || {};
|
||||
|
||||
async function fetchJson<T>(
|
||||
account: ResolvedGoogleChatAccount,
|
||||
url: string,
|
||||
init: RequestInit,
|
||||
): Promise<T> {
|
||||
const token = await getGoogleChatAccessToken(account);
|
||||
const { response: res, release } = await fetchWithSsrFGuard({
|
||||
async function withGoogleChatResponse<T>(params: {
|
||||
account: ResolvedGoogleChatAccount;
|
||||
url: string;
|
||||
init?: RequestInit;
|
||||
auditContext: string;
|
||||
errorPrefix?: string;
|
||||
handleResponse: (response: Response) => Promise<T>;
|
||||
}): Promise<T> {
|
||||
const {
|
||||
account,
|
||||
url,
|
||||
init: {
|
||||
...init,
|
||||
headers: {
|
||||
...headersToObject(init.headers),
|
||||
Authorization: `Bearer ${token}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
},
|
||||
auditContext: "googlechat.api.json",
|
||||
});
|
||||
try {
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => "");
|
||||
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
|
||||
}
|
||||
return (await res.json()) as T;
|
||||
} finally {
|
||||
await release();
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchOk(
|
||||
account: ResolvedGoogleChatAccount,
|
||||
url: string,
|
||||
init: RequestInit,
|
||||
): Promise<void> {
|
||||
init,
|
||||
auditContext,
|
||||
errorPrefix = "Google Chat API",
|
||||
handleResponse,
|
||||
} = params;
|
||||
const token = await getGoogleChatAccessToken(account);
|
||||
const { response: res, release } = await fetchWithSsrFGuard({
|
||||
url,
|
||||
init: {
|
||||
...init,
|
||||
headers: {
|
||||
...headersToObject(init.headers),
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
},
|
||||
auditContext: "googlechat.api.ok",
|
||||
});
|
||||
try {
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => "");
|
||||
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
|
||||
}
|
||||
} finally {
|
||||
await release();
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchBuffer(
|
||||
account: ResolvedGoogleChatAccount,
|
||||
url: string,
|
||||
init?: RequestInit,
|
||||
options?: { maxBytes?: number },
|
||||
): Promise<{ buffer: Buffer; contentType?: string }> {
|
||||
const token = await getGoogleChatAccessToken(account);
|
||||
const { response: res, release } = await fetchWithSsrFGuard({
|
||||
const { response, release } = await fetchWithSsrFGuard({
|
||||
url,
|
||||
init: {
|
||||
...init,
|
||||
|
|
@ -86,13 +40,65 @@ async function fetchBuffer(
|
|||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
},
|
||||
auditContext: "googlechat.api.buffer",
|
||||
auditContext,
|
||||
});
|
||||
try {
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => "");
|
||||
throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`);
|
||||
if (!response.ok) {
|
||||
const text = await response.text().catch(() => "");
|
||||
throw new Error(`${errorPrefix} ${response.status}: ${text || response.statusText}`);
|
||||
}
|
||||
return await handleResponse(response);
|
||||
} finally {
|
||||
await release();
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchJson<T>(
|
||||
account: ResolvedGoogleChatAccount,
|
||||
url: string,
|
||||
init: RequestInit,
|
||||
): Promise<T> {
|
||||
return await withGoogleChatResponse({
|
||||
account,
|
||||
url,
|
||||
init: {
|
||||
...init,
|
||||
headers: {
|
||||
...headersToObject(init.headers),
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
},
|
||||
auditContext: "googlechat.api.json",
|
||||
handleResponse: async (response) => (await response.json()) as T,
|
||||
});
|
||||
}
|
||||
|
||||
async function fetchOk(
|
||||
account: ResolvedGoogleChatAccount,
|
||||
url: string,
|
||||
init: RequestInit,
|
||||
): Promise<void> {
|
||||
await withGoogleChatResponse({
|
||||
account,
|
||||
url,
|
||||
init,
|
||||
auditContext: "googlechat.api.ok",
|
||||
handleResponse: async () => undefined,
|
||||
});
|
||||
}
|
||||
|
||||
async function fetchBuffer(
|
||||
account: ResolvedGoogleChatAccount,
|
||||
url: string,
|
||||
init?: RequestInit,
|
||||
options?: { maxBytes?: number },
|
||||
): Promise<{ buffer: Buffer; contentType?: string }> {
|
||||
return await withGoogleChatResponse({
|
||||
account,
|
||||
url,
|
||||
init,
|
||||
auditContext: "googlechat.api.buffer",
|
||||
handleResponse: async (res) => {
|
||||
const maxBytes = options?.maxBytes;
|
||||
const lengthHeader = res.headers.get("content-length");
|
||||
if (maxBytes && lengthHeader) {
|
||||
|
|
@ -127,9 +133,8 @@ async function fetchBuffer(
|
|||
const buffer = Buffer.concat(chunks, total);
|
||||
const contentType = res.headers.get("content-type") ?? undefined;
|
||||
return { buffer, contentType };
|
||||
} finally {
|
||||
await release();
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
export async function sendGoogleChatMessage(params: {
|
||||
|
|
@ -208,34 +213,29 @@ export async function uploadGoogleChatAttachment(params: {
|
|||
Buffer.from(footer, "utf8"),
|
||||
]);
|
||||
|
||||
const token = await getGoogleChatAccessToken(account);
|
||||
const url = `${CHAT_UPLOAD_BASE}/${space}/attachments:upload?uploadType=multipart`;
|
||||
const { response: res, release } = await fetchWithSsrFGuard({
|
||||
const payload = await withGoogleChatResponse<{
|
||||
attachmentDataRef?: { attachmentUploadToken?: string };
|
||||
}>({
|
||||
account,
|
||||
url,
|
||||
init: {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${token}`,
|
||||
"Content-Type": `multipart/related; boundary=${boundary}`,
|
||||
},
|
||||
body,
|
||||
},
|
||||
auditContext: "googlechat.upload",
|
||||
});
|
||||
try {
|
||||
if (!res.ok) {
|
||||
const text = await res.text().catch(() => "");
|
||||
throw new Error(`Google Chat upload ${res.status}: ${text || res.statusText}`);
|
||||
}
|
||||
const payload = (await res.json()) as {
|
||||
errorPrefix: "Google Chat upload",
|
||||
handleResponse: async (response) =>
|
||||
(await response.json()) as {
|
||||
attachmentDataRef?: { attachmentUploadToken?: string };
|
||||
};
|
||||
},
|
||||
});
|
||||
return {
|
||||
attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken,
|
||||
};
|
||||
} finally {
|
||||
await release();
|
||||
}
|
||||
}
|
||||
|
||||
export async function downloadGoogleChatMedia(params: {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,10 @@
|
|||
import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/googlechat";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { createStartAccountContext } from "../../test-utils/start-account-context.js";
|
||||
import {
|
||||
abortStartedAccount,
|
||||
expectPendingUntilAbort,
|
||||
startAccountAndTrackLifecycle,
|
||||
} from "../../test-utils/start-account-lifecycle.js";
|
||||
import type { ResolvedGoogleChatAccount } from "./accounts.js";
|
||||
|
||||
const hoisted = vi.hoisted(() => ({
|
||||
|
|
@ -39,29 +43,25 @@ describe("googlechatPlugin gateway.startAccount", () => {
|
|||
},
|
||||
};
|
||||
|
||||
const patches: ChannelAccountSnapshot[] = [];
|
||||
const abort = new AbortController();
|
||||
const task = googlechatPlugin.gateway!.startAccount!(
|
||||
createStartAccountContext({
|
||||
const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({
|
||||
startAccount: googlechatPlugin.gateway!.startAccount!,
|
||||
account,
|
||||
abortSignal: abort.signal,
|
||||
statusPatchSink: (next) => patches.push({ ...next }),
|
||||
}),
|
||||
);
|
||||
let settled = false;
|
||||
void task.then(() => {
|
||||
settled = true;
|
||||
});
|
||||
await vi.waitFor(() => {
|
||||
await expectPendingUntilAbort({
|
||||
waitForStarted: () =>
|
||||
vi.waitFor(() => {
|
||||
expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce();
|
||||
});
|
||||
expect(settled).toBe(false);
|
||||
}),
|
||||
isSettled,
|
||||
abort,
|
||||
task,
|
||||
assertBeforeAbort: () => {
|
||||
expect(unregister).not.toHaveBeenCalled();
|
||||
|
||||
abort.abort();
|
||||
await task;
|
||||
|
||||
},
|
||||
assertAfterAbort: () => {
|
||||
expect(unregister).toHaveBeenCalledOnce();
|
||||
},
|
||||
});
|
||||
expect(patches.some((entry) => entry.running === true)).toBe(true);
|
||||
expect(patches.some((entry) => entry.running === false)).toBe(true);
|
||||
});
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ import {
|
|||
type OpenClawConfig,
|
||||
} from "openclaw/plugin-sdk/googlechat";
|
||||
import { GoogleChatConfigSchema } from "openclaw/plugin-sdk/googlechat";
|
||||
import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js";
|
||||
import {
|
||||
listGoogleChatAccountIds,
|
||||
resolveDefaultGoogleChatAccountId,
|
||||
|
|
@ -473,19 +474,13 @@ export const googlechatPlugin: ChannelPlugin<ResolvedGoogleChatAccount> = {
|
|||
}
|
||||
return issues;
|
||||
}),
|
||||
buildChannelSummary: ({ snapshot }) => ({
|
||||
configured: snapshot.configured ?? false,
|
||||
buildChannelSummary: ({ snapshot }) =>
|
||||
buildPassiveProbedChannelStatusSummary(snapshot, {
|
||||
credentialSource: snapshot.credentialSource ?? "none",
|
||||
audienceType: snapshot.audienceType ?? null,
|
||||
audience: snapshot.audience ?? null,
|
||||
webhookPath: snapshot.webhookPath ?? null,
|
||||
webhookUrl: snapshot.webhookUrl ?? null,
|
||||
running: snapshot.running ?? false,
|
||||
lastStartAt: snapshot.lastStartAt ?? null,
|
||||
lastStopAt: snapshot.lastStopAt ?? null,
|
||||
lastError: snapshot.lastError ?? null,
|
||||
probe: snapshot.probe,
|
||||
lastProbeAt: snapshot.lastProbeAt ?? null,
|
||||
}),
|
||||
probeAccount: async ({ account }) => probeGoogleChat(account),
|
||||
buildAccountSnapshot: ({ account, runtime, probe }) => {
|
||||
|
|
|
|||
|
|
@ -117,6 +117,34 @@ function registerTwoTargets() {
|
|||
};
|
||||
}
|
||||
|
||||
async function dispatchWebhookRequest(req: IncomingMessage) {
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handleGoogleChatWebhookRequest(req, res);
|
||||
expect(handled).toBe(true);
|
||||
return res;
|
||||
}
|
||||
|
||||
async function expectVerifiedRoute(params: {
|
||||
request: IncomingMessage;
|
||||
expectedStatus: number;
|
||||
sinkA: ReturnType<typeof vi.fn>;
|
||||
sinkB: ReturnType<typeof vi.fn>;
|
||||
expectedSink: "none" | "A" | "B";
|
||||
}) {
|
||||
const res = await dispatchWebhookRequest(params.request);
|
||||
expect(res.statusCode).toBe(params.expectedStatus);
|
||||
const expectedCounts =
|
||||
params.expectedSink === "A" ? [1, 0] : params.expectedSink === "B" ? [0, 1] : [0, 0];
|
||||
expect(params.sinkA).toHaveBeenCalledTimes(expectedCounts[0]);
|
||||
expect(params.sinkB).toHaveBeenCalledTimes(expectedCounts[1]);
|
||||
}
|
||||
|
||||
function mockSecondVerifierSuccess() {
|
||||
vi.mocked(verifyGoogleChatRequest)
|
||||
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
|
||||
.mockResolvedValueOnce({ ok: true });
|
||||
}
|
||||
|
||||
describe("Google Chat webhook routing", () => {
|
||||
afterEach(() => {
|
||||
setActivePluginRegistry(createEmptyPluginRegistry());
|
||||
|
|
@ -165,45 +193,37 @@ describe("Google Chat webhook routing", () => {
|
|||
const { sinkA, sinkB, unregister } = registerTwoTargets();
|
||||
|
||||
try {
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handleGoogleChatWebhookRequest(
|
||||
createWebhookRequest({
|
||||
await expectVerifiedRoute({
|
||||
request: createWebhookRequest({
|
||||
authorization: "Bearer test-token",
|
||||
payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/AAA" } },
|
||||
}),
|
||||
res,
|
||||
);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(401);
|
||||
expect(sinkA).not.toHaveBeenCalled();
|
||||
expect(sinkB).not.toHaveBeenCalled();
|
||||
expectedStatus: 401,
|
||||
sinkA,
|
||||
sinkB,
|
||||
expectedSink: "none",
|
||||
});
|
||||
} finally {
|
||||
unregister();
|
||||
}
|
||||
});
|
||||
|
||||
it("routes to the single verified target when earlier targets fail verification", async () => {
|
||||
vi.mocked(verifyGoogleChatRequest)
|
||||
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
|
||||
.mockResolvedValueOnce({ ok: true });
|
||||
mockSecondVerifierSuccess();
|
||||
|
||||
const { sinkA, sinkB, unregister } = registerTwoTargets();
|
||||
|
||||
try {
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handleGoogleChatWebhookRequest(
|
||||
createWebhookRequest({
|
||||
await expectVerifiedRoute({
|
||||
request: createWebhookRequest({
|
||||
authorization: "Bearer test-token",
|
||||
payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/BBB" } },
|
||||
}),
|
||||
res,
|
||||
);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(sinkA).not.toHaveBeenCalled();
|
||||
expect(sinkB).toHaveBeenCalledTimes(1);
|
||||
expectedStatus: 200,
|
||||
sinkA,
|
||||
sinkB,
|
||||
expectedSink: "B",
|
||||
});
|
||||
} finally {
|
||||
unregister();
|
||||
}
|
||||
|
|
@ -218,10 +238,7 @@ describe("Google Chat webhook routing", () => {
|
|||
authorization: "Bearer invalid-token",
|
||||
});
|
||||
const onSpy = vi.spyOn(req, "on");
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handleGoogleChatWebhookRequest(req, res);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
const res = await dispatchWebhookRequest(req);
|
||||
expect(res.statusCode).toBe(401);
|
||||
expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function));
|
||||
} finally {
|
||||
|
|
@ -230,15 +247,12 @@ describe("Google Chat webhook routing", () => {
|
|||
});
|
||||
|
||||
it("supports add-on requests that provide systemIdToken in the body", async () => {
|
||||
vi.mocked(verifyGoogleChatRequest)
|
||||
.mockResolvedValueOnce({ ok: false, reason: "invalid" })
|
||||
.mockResolvedValueOnce({ ok: true });
|
||||
mockSecondVerifierSuccess();
|
||||
const { sinkA, sinkB, unregister } = registerTwoTargets();
|
||||
|
||||
try {
|
||||
const res = createMockServerResponse();
|
||||
const handled = await handleGoogleChatWebhookRequest(
|
||||
createWebhookRequest({
|
||||
await expectVerifiedRoute({
|
||||
request: createWebhookRequest({
|
||||
payload: {
|
||||
commonEventObject: { hostApp: "CHAT" },
|
||||
authorizationEventObject: { systemIdToken: "addon-token" },
|
||||
|
|
@ -252,13 +266,11 @@ describe("Google Chat webhook routing", () => {
|
|||
},
|
||||
},
|
||||
}),
|
||||
res,
|
||||
);
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(sinkA).not.toHaveBeenCalled();
|
||||
expect(sinkB).toHaveBeenCalledTimes(1);
|
||||
expectedStatus: 200,
|
||||
sinkA,
|
||||
sinkB,
|
||||
expectedSink: "B",
|
||||
});
|
||||
} finally {
|
||||
unregister();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ import {
|
|||
type ChannelPlugin,
|
||||
type ResolvedIMessageAccount,
|
||||
} from "openclaw/plugin-sdk/imessage";
|
||||
import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js";
|
||||
import { getIMessageRuntime } from "./runtime.js";
|
||||
|
||||
const meta = getChatChannelMeta("imessage");
|
||||
|
|
@ -264,16 +265,10 @@ export const imessagePlugin: ChannelPlugin<ResolvedIMessageAccount> = {
|
|||
dbPath: null,
|
||||
},
|
||||
collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("imessage", accounts),
|
||||
buildChannelSummary: ({ snapshot }) => ({
|
||||
configured: snapshot.configured ?? false,
|
||||
running: snapshot.running ?? false,
|
||||
lastStartAt: snapshot.lastStartAt ?? null,
|
||||
lastStopAt: snapshot.lastStopAt ?? null,
|
||||
lastError: snapshot.lastError ?? null,
|
||||
buildChannelSummary: ({ snapshot }) =>
|
||||
buildPassiveProbedChannelStatusSummary(snapshot, {
|
||||
cliPath: snapshot.cliPath ?? null,
|
||||
dbPath: snapshot.dbPath ?? null,
|
||||
probe: snapshot.probe,
|
||||
lastProbeAt: snapshot.lastProbeAt ?? null,
|
||||
}),
|
||||
probeAccount: async ({ timeoutMs }) =>
|
||||
getIMessageRuntime().channel.imessage.probeIMessage(timeoutMs),
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue