Merge branch 'main' into vincentkoc-code/telegram-fast-native-callback

This commit is contained in:
Vincent Koc 2026-03-13 16:49:35 -04:00 committed by GitHub
commit 5386d50840
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
37 changed files with 1790 additions and 1587 deletions

View File

@ -81,7 +81,7 @@ jobs:
# Build dist once for Node-relevant changes and share it with downstream jobs.
build-artifacts:
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout
@ -166,25 +166,25 @@ jobs:
task: test
command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts
steps:
- name: Skip bun lane on push
if: github.event_name == 'push' && matrix.runtime == 'bun'
run: echo "Skipping bun test lane on push events."
- name: Skip bun lane on pull requests
if: github.event_name == 'pull_request' && matrix.runtime == 'bun'
run: echo "Skipping Bun compatibility lane on pull requests."
- name: Checkout
if: github.event_name != 'push' || matrix.runtime != 'bun'
if: github.event_name != 'pull_request' || matrix.runtime != 'bun'
uses: actions/checkout@v6
with:
submodules: false
- name: Setup Node environment
if: matrix.runtime != 'bun' || github.event_name != 'push'
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
uses: ./.github/actions/setup-node-env
with:
install-bun: "${{ matrix.runtime == 'bun' }}"
use-sticky-disk: "false"
- name: Configure Node test resources
if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node'
env:
SHARD_COUNT: ${{ matrix.shard_count || '' }}
SHARD_INDEX: ${{ matrix.shard_index || '' }}
@ -199,7 +199,7 @@ jobs:
fi
- name: Run ${{ matrix.task }} (${{ matrix.runtime }})
if: matrix.runtime != 'bun' || github.event_name != 'push'
if: matrix.runtime != 'bun' || github.event_name != 'pull_request'
run: ${{ matrix.command }}
# Types, lint, and format check.
@ -252,7 +252,7 @@ jobs:
compat-node22:
name: "compat-node22"
needs: [docs-scope, changed-scope]
if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true'
runs-on: blacksmith-16vcpu-ubuntu-2404
steps:
- name: Checkout

View File

@ -32,6 +32,8 @@ Docs: https://docs.openclaw.ai
- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates.
- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants.
- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token.
- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization.
- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots.

View File

@ -37,8 +37,7 @@ struct ExecCommandResolution {
var resolutions: [ExecCommandResolution] = []
resolutions.reserveCapacity(segments.count)
for segment in segments {
guard let token = self.parseFirstToken(segment),
let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env)
guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env)
else {
return []
}
@ -88,6 +87,20 @@ struct ExecCommandResolution {
cwd: cwd)
}
/// Resolves the effective executable for one shell segment.
///
/// The segment is tokenized with shell-style quoting rules, `env`-style
/// dispatch wrappers (and their `KEY=value` assignments) are peeled off, and
/// the remaining head token is resolved against `cwd`/`env`.
/// Returns `nil` when no resolvable executable token remains.
private static func resolveShellSegmentExecutable(
    _ segment: String,
    cwd: String?,
    env: [String: String]?) -> ExecCommandResolution?
{
    let words = self.tokenizeShellWords(segment)
    if words.isEmpty { return nil }
    // Unwrap `env FOO=bar /path/to/bin ...` so the allowlist match runs
    // against the binary that actually executes, not the wrapper token.
    let unwrapped = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(words)
    let candidate = unwrapped.first?.trimmingCharacters(in: .whitespacesAndNewlines) ?? ""
    guard !candidate.isEmpty else { return nil }
    return self.resolveExecutable(rawExecutable: candidate, cwd: cwd, env: env)
}
private static func parseFirstToken(_ command: String) -> String? {
let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines)
guard !trimmed.isEmpty else { return nil }
@ -102,6 +115,59 @@ struct ExecCommandResolution {
return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init)
}
/// Splits a shell command string into words using POSIX-like quoting rules.
///
/// Backslash escapes the next character (outside single quotes); single and
/// double quotes group whitespace into one word and the quote characters
/// themselves are not emitted. Unterminated quotes are tolerated — the
/// partial word is still flushed. A trailing lone backslash is kept literal.
private static func tokenizeShellWords(_ command: String) -> [String] {
    let input = command.trimmingCharacters(in: .whitespacesAndNewlines)
    if input.isEmpty { return [] }
    var words: [String] = []
    var word = ""
    var singleQuoted = false
    var doubleQuoted = false
    var pendingEscape = false
    func flush() {
        if !word.isEmpty {
            words.append(word)
            word.removeAll(keepingCapacity: true)
        }
    }
    for character in input {
        if pendingEscape {
            // Previous backslash: take this character literally.
            word.append(character)
            pendingEscape = false
        } else if character == "\\", !singleQuoted {
            // Backslash has no escaping power inside single quotes.
            pendingEscape = true
        } else if character == "'", !doubleQuoted {
            singleQuoted.toggle()
        } else if character == "\"", !singleQuoted {
            doubleQuoted.toggle()
        } else if character.isWhitespace, !singleQuoted, !doubleQuoted {
            flush()
        } else {
            word.append(character)
        }
    }
    // Input ended right after a backslash: keep it as a literal character.
    if pendingEscape {
        word.append("\\")
    }
    flush()
    return words
}
private enum ShellTokenContext {
case unquoted
case doubleQuoted

View File

@ -208,6 +208,30 @@ struct ExecAllowlistTests {
#expect(resolutions[1].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() {
    // A bare `env` wrapper must be peeled off so the allowlist sees `touch`.
    let shellLine = "env /usr/bin/touch /tmp/openclaw-allowlist-test"
    let resolved = ExecCommandResolution.resolveForAllowlist(
        command: ["/bin/sh", "-lc", shellLine],
        rawCommand: shellLine,
        cwd: nil,
        env: ["PATH": "/usr/bin:/bin"])
    #expect(resolved.count == 1)
    #expect(resolved[0].resolvedPath == "/usr/bin/touch")
    #expect(resolved[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env assignments inside shell segments`() {
    // `env FOO=bar` assignments must be skipped along with the wrapper itself.
    let shellLine = "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"
    let resolved = ExecCommandResolution.resolveForAllowlist(
        command: ["/bin/sh", "-lc", shellLine],
        rawCommand: shellLine,
        cwd: nil,
        env: ["PATH": "/usr/bin:/bin"])
    #expect(resolved.count == 1)
    #expect(resolved[0].resolvedPath == "/usr/bin/touch")
    #expect(resolved[0].executableName == "touch")
}
@Test func `resolve for allowlist unwraps env to effective direct executable`() {
let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"]
let resolutions = ExecCommandResolution.resolveForAllowlist(

View File

@ -19,11 +19,11 @@ The CI runs on every push to `main` and every pull request. It uses smart scopin
| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes |
| `check` | TypeScript types, lint, format | Non-docs, node changes |
| `check-docs` | Markdown lint + broken link check | Docs changed |
| `code-analysis` | LOC threshold check (1000 lines) | PRs only |
| `secrets` | Detect leaked secrets | Always |
| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes |
| `release-check` | Validate npm pack contents | After build |
| `checks` | Node/Bun tests + protocol check | Non-docs, node changes |
| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes |
| `release-check` | Validate npm pack contents | Pushes to `main` after build |
| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes |
| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes |
| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes |
| `macos` | Swift lint/build/test + TS tests | PRs with macos changes |
| `android` | Gradle build + tests | Non-docs, android changes |
@ -33,8 +33,8 @@ The CI runs on every push to `main` and every pull request. It uses smart scopin
Jobs are ordered so cheap checks fail before expensive ones run:
1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first)
2. `build-artifacts` + `release-check`
3. `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android`
3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22`
Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`.

View File

@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost):
- No real keys required
- Should be fast and stable
- Pool note:
- OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards.
- On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`).
- OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards.
- On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there.
- Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`).
### E2E (gateway smoke)

View File

@ -11,7 +11,7 @@ title: "Tests"
- `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don't collide with a running instance. Use this when a prior gateway run left port 18789 occupied.
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic.
- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`.
- `pnpm test`: runs the fast core unit lane by default for quick local feedback.
- `pnpm test:channels`: runs channel-heavy suites.
- `pnpm test:extensions`: runs extension/plugin suites.

View File

@ -27,6 +27,28 @@ function createMockFetch(response?: { status?: number; body?: unknown; contentTy
return { mockFetch: mockFetch as unknown as typeof fetch, calls };
}
/** Builds a Mattermost client wired to a call-capturing fetch mock. */
function createTestClient(response?: { status?: number; body?: unknown; contentType?: string }) {
  const mocked = createMockFetch(response);
  return {
    client: createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mocked.mockFetch,
    }),
    calls: mocked.calls,
  };
}
/**
 * Runs `updateMattermostPost` for post "post1" against a mocked client and
 * returns both the raw captured calls and the parsed first request body.
 */
async function updatePostAndCapture(
  update: Parameters<typeof updateMattermostPost>[2],
  response?: { status?: number; body?: unknown; contentType?: string },
) {
  const effectiveResponse = response ?? { body: { id: "post1" } };
  const { client, calls } = createTestClient(effectiveResponse);
  await updateMattermostPost(client, "post1", update);
  const requestBody = JSON.parse(calls[0].init?.body as string) as Record<string, unknown>;
  return { calls, body: requestBody };
}
// ── normalizeMattermostBaseUrl ────────────────────────────────────────
describe("normalizeMattermostBaseUrl", () => {
@ -229,68 +251,38 @@ describe("createMattermostPost", () => {
describe("updateMattermostPost", () => {
it("sends PUT to /posts/{id}", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", { message: "Updated" });
const { calls } = await updatePostAndCapture({ message: "Updated" });
expect(calls[0].url).toContain("/posts/post1");
expect(calls[0].init?.method).toBe("PUT");
});
it("includes post id in the body", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", { message: "Updated" });
const body = JSON.parse(calls[0].init?.body as string);
const { body } = await updatePostAndCapture({ message: "Updated" });
expect(body.id).toBe("post1");
expect(body.message).toBe("Updated");
});
it("includes props for button completion updates", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", {
const { body } = await updatePostAndCapture({
message: "Original message",
props: {
attachments: [{ text: "✓ **do_now** selected by @tony" }],
},
});
const body = JSON.parse(calls[0].init?.body as string);
expect(body.message).toBe("Original message");
expect(body.props.attachments[0].text).toContain("✓");
expect(body.props.attachments[0].text).toContain("do_now");
expect(body.props).toMatchObject({
attachments: [{ text: expect.stringContaining("✓") }],
});
expect(body.props).toMatchObject({
attachments: [{ text: expect.stringContaining("do_now") }],
});
});
it("omits message when not provided", async () => {
const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
const client = createMattermostClient({
baseUrl: "http://localhost:8065",
botToken: "tok",
fetchImpl: mockFetch,
});
await updateMattermostPost(client, "post1", {
const { body } = await updatePostAndCapture({
props: { attachments: [] },
});
const body = JSON.parse(calls[0].init?.body as string);
expect(body.id).toBe("post1");
expect(body.message).toBeUndefined();
expect(body.props).toEqual({ attachments: [] });

View File

@ -16,6 +16,35 @@ const accountFixture: ResolvedMattermostAccount = {
config: {},
};
/**
 * Invokes group-command authorization with the shared allowlist fixture
 * (only "trusted-user" allowed) for the given sender in channel "chan-1".
 */
function authorizeGroupCommand(senderId: string) {
  const account = {
    ...accountFixture,
    config: { groupPolicy: "allowlist", allowFrom: ["trusted-user"] },
  };
  return authorizeMattermostCommandInvocation({
    account,
    cfg: { commands: { useAccessGroups: true } },
    senderId,
    senderName: senderId,
    channelId: "chan-1",
    channelInfo: { id: "chan-1", type: "O", name: "general", display_name: "General" },
    storeAllowFrom: [],
    allowTextCommands: true,
    hasControlCommand: true,
  });
}
describe("mattermost monitor authz", () => {
it("keeps DM allowlist merged with pairing-store entries", () => {
const resolved = resolveMattermostEffectiveAllowFromLists({
@ -72,32 +101,7 @@ describe("mattermost monitor authz", () => {
});
it("denies group control commands when the sender is outside the allowlist", () => {
const decision = authorizeMattermostCommandInvocation({
account: {
...accountFixture,
config: {
groupPolicy: "allowlist",
allowFrom: ["trusted-user"],
},
},
cfg: {
commands: {
useAccessGroups: true,
},
},
senderId: "attacker",
senderName: "attacker",
channelId: "chan-1",
channelInfo: {
id: "chan-1",
type: "O",
name: "general",
display_name: "General",
},
storeAllowFrom: [],
allowTextCommands: true,
hasControlCommand: true,
});
const decision = authorizeGroupCommand("attacker");
expect(decision).toMatchObject({
ok: false,
@ -107,32 +111,7 @@ describe("mattermost monitor authz", () => {
});
it("authorizes group control commands for allowlisted senders", () => {
const decision = authorizeMattermostCommandInvocation({
account: {
...accountFixture,
config: {
groupPolicy: "allowlist",
allowFrom: ["trusted-user"],
},
},
cfg: {
commands: {
useAccessGroups: true,
},
},
senderId: "trusted-user",
senderName: "trusted-user",
channelId: "chan-1",
channelInfo: {
id: "chan-1",
type: "O",
name: "general",
display_name: "General",
},
storeAllowFrom: [],
allowTextCommands: true,
hasControlCommand: true,
});
const decision = authorizeGroupCommand("trusted-user");
expect(decision).toMatchObject({
ok: true,

View File

@ -14,6 +14,28 @@ describe("mattermost reactions", () => {
resetMattermostReactionBotUserCacheForTests();
});
/** Adds the "thumbsup" reaction to POST1 through the supplied fetch mock. */
async function addReactionWithFetch(
  fetchMock: ReturnType<typeof createMattermostReactionFetchMock>,
) {
  const fetchImpl = fetchMock as unknown as typeof fetch;
  return addMattermostReaction({
    cfg: createMattermostTestConfig(),
    postId: "POST1",
    emojiName: "thumbsup",
    fetchImpl,
  });
}
/** Removes the "thumbsup" reaction from POST1 through the supplied fetch mock. */
async function removeReactionWithFetch(
  fetchMock: ReturnType<typeof createMattermostReactionFetchMock>,
) {
  const fetchImpl = fetchMock as unknown as typeof fetch;
  return removeMattermostReaction({
    cfg: createMattermostTestConfig(),
    postId: "POST1",
    emojiName: "thumbsup",
    fetchImpl,
  });
}
it("adds reactions by calling /users/me then POST /reactions", async () => {
const fetchMock = createMattermostReactionFetchMock({
mode: "add",
@ -21,12 +43,7 @@ describe("mattermost reactions", () => {
emojiName: "thumbsup",
});
const result = await addMattermostReaction({
cfg: createMattermostTestConfig(),
postId: "POST1",
emojiName: "thumbsup",
fetchImpl: fetchMock as unknown as typeof fetch,
});
const result = await addReactionWithFetch(fetchMock);
expect(result).toEqual({ ok: true });
expect(fetchMock).toHaveBeenCalled();
@ -41,12 +58,7 @@ describe("mattermost reactions", () => {
body: { id: "err", message: "boom" },
});
const result = await addMattermostReaction({
cfg: createMattermostTestConfig(),
postId: "POST1",
emojiName: "thumbsup",
fetchImpl: fetchMock as unknown as typeof fetch,
});
const result = await addReactionWithFetch(fetchMock);
expect(result.ok).toBe(false);
if (!result.ok) {
@ -61,12 +73,7 @@ describe("mattermost reactions", () => {
emojiName: "thumbsup",
});
const result = await removeMattermostReaction({
cfg: createMattermostTestConfig(),
postId: "POST1",
emojiName: "thumbsup",
fetchImpl: fetchMock as unknown as typeof fetch,
});
const result = await removeReactionWithFetch(fetchMock);
expect(result).toEqual({ ok: true });
expect(fetchMock).toHaveBeenCalled();

View File

@ -10,6 +10,25 @@ import {
} from "./slash-commands.js";
describe("slash-commands", () => {
/** Registers a single `/oc_status` slash command through the given request stub. */
async function registerSingleStatusCommand(
  request: (path: string, init?: { method?: string }) => Promise<unknown>,
) {
  const statusCommand = {
    trigger: "oc_status",
    description: "status",
    autoComplete: true,
  };
  return registerSlashCommands({
    client: { request } as unknown as MattermostClient,
    teamId: "team-1",
    creatorUserId: "bot-user",
    callbackUrl: "http://gateway/callback",
    commands: [statusCommand],
  });
}
it("parses application/x-www-form-urlencoded payloads", () => {
const payload = parseSlashCommandPayload(
"token=t1&team_id=team&channel_id=ch1&user_id=u1&command=%2Foc_status&text=now",
@ -101,21 +120,7 @@ describe("slash-commands", () => {
}
throw new Error(`unexpected request path: ${path}`);
});
const client = { request } as unknown as MattermostClient;
const result = await registerSlashCommands({
client,
teamId: "team-1",
creatorUserId: "bot-user",
callbackUrl: "http://gateway/callback",
commands: [
{
trigger: "oc_status",
description: "status",
autoComplete: true,
},
],
});
const result = await registerSingleStatusCommand(request);
expect(result).toHaveLength(1);
expect(result[0]?.managed).toBe(false);
@ -144,21 +149,7 @@ describe("slash-commands", () => {
}
throw new Error(`unexpected request path: ${path}`);
});
const client = { request } as unknown as MattermostClient;
const result = await registerSlashCommands({
client,
teamId: "team-1",
creatorUserId: "bot-user",
callbackUrl: "http://gateway/callback",
commands: [
{
trigger: "oc_status",
description: "status",
autoComplete: true,
},
],
});
const result = await registerSingleStatusCommand(request);
expect(result).toHaveLength(0);
expect(request).toHaveBeenCalledTimes(1);

View File

@ -58,6 +58,23 @@ const accountFixture: ResolvedMattermostAccount = {
config: {},
};
/**
 * Sends one slash-command HTTP request through a freshly built handler
 * (fixture account, empty config/runtime) and returns the captured response.
 */
async function runSlashRequest(params: {
  commandTokens: Set<string>;
  body: string;
  method?: string;
}) {
  const { commandTokens, body, method } = params;
  const handler = createSlashCommandHttpHandler({
    account: accountFixture,
    cfg: {} as OpenClawConfig,
    runtime: {} as RuntimeEnv,
    commandTokens,
  });
  const response = createResponse();
  await handler(createRequest({ method, body }), response.res);
  return response;
}
describe("slash-http", () => {
it("rejects non-POST methods", async () => {
const handler = createSlashCommandHttpHandler({
@ -93,36 +110,20 @@ describe("slash-http", () => {
});
it("fails closed when no command tokens are registered", async () => {
const handler = createSlashCommandHttpHandler({
account: accountFixture,
cfg: {} as OpenClawConfig,
runtime: {} as RuntimeEnv,
const response = await runSlashRequest({
commandTokens: new Set<string>(),
});
const req = createRequest({
body: "token=tok1&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=",
});
const response = createResponse();
await handler(req, response.res);
expect(response.res.statusCode).toBe(401);
expect(response.getBody()).toContain("Unauthorized: invalid command token.");
});
it("rejects unknown command tokens", async () => {
const handler = createSlashCommandHttpHandler({
account: accountFixture,
cfg: {} as OpenClawConfig,
runtime: {} as RuntimeEnv,
const response = await runSlashRequest({
commandTokens: new Set(["known-token"]),
});
const req = createRequest({
body: "token=unknown&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=",
});
const response = createResponse();
await handler(req, response.res);
expect(response.res.statusCode).toBe(401);
expect(response.getBody()).toContain("Unauthorized: invalid command token.");

View File

@ -45,6 +45,27 @@ describe("uploadImageFromUrl", () => {
});
}
/**
 * Arranges a successful image fetch (and, when requested, a successful
 * upload result) and hands back the mocks plus the function under test.
 */
async function setupSuccessfulUpload(params?: {
  sourceUrl?: string;
  contentType?: string;
  uploadedUrl?: string;
}) {
  const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
  const finalUrl = params?.sourceUrl ?? "https://example.com/image.png";
  const blobType = params?.contentType ?? "image/png";
  const mockBlob = new Blob(["fake-image"], { type: blobType });
  mockSuccessfulFetch({ mockFetch, blob: mockBlob, finalUrl, contentType: blobType });
  const uploadedUrl = params?.uploadedUrl;
  if (uploadedUrl) {
    mockUploadFile.mockResolvedValue({ url: uploadedUrl });
  }
  return { mockBlob, mockUploadFile, uploadImageFromUrl };
}
beforeEach(() => {
vi.clearAllMocks();
});
@ -54,16 +75,9 @@ describe("uploadImageFromUrl", () => {
});
it("fetches image and calls uploadFile, returns uploaded URL", async () => {
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
mockSuccessfulFetch({
mockFetch,
blob: mockBlob,
finalUrl: "https://example.com/image.png",
contentType: "image/png",
const { mockBlob, mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload({
uploadedUrl: "https://memex.tlon.network/uploaded.png",
});
mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" });
const result = await uploadImageFromUrl("https://example.com/image.png");
@ -95,15 +109,7 @@ describe("uploadImageFromUrl", () => {
});
it("returns original URL if upload fails", async () => {
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks();
const mockBlob = new Blob(["fake-image"], { type: "image/png" });
mockSuccessfulFetch({
mockFetch,
blob: mockBlob,
finalUrl: "https://example.com/image.png",
contentType: "image/png",
});
const { mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload();
mockUploadFile.mockRejectedValue(new Error("Upload failed"));
const result = await uploadImageFromUrl("https://example.com/image.png");

View File

@ -9,121 +9,87 @@ import {
} from "./manager.test-harness.js";
describe("CallManager verification on restore", () => {
it("skips stale calls reported terminal by provider", async () => {
/**
 * Builds a CallManager over a store seeded with one persisted call and
 * initializes it against a FakeProvider.
 *
 * Defect fixed: the span contained merge/diff residue — a duplicated
 * `const call = makePersistedCall();` declaration and a stray unconditional
 * `provider.getCallStatusResult = ...` assignment — which made the helper
 * invalid and overrode the caller-supplied provider result. Only the
 * parameterized forms are kept.
 *
 * @param params.callOverrides    Overrides forwarded to makePersistedCall.
 * @param params.providerResult   Optional status the fake provider reports.
 * @param params.configureProvider Hook to further mutate the provider before init.
 * @param params.configOverrides  Extra VoiceCallConfig fields (e.g. maxDurationSeconds).
 * @returns The seeded call and the initialized manager.
 */
async function initializeManager(params?: {
  callOverrides?: Parameters<typeof makePersistedCall>[0];
  providerResult?: FakeProvider["getCallStatusResult"];
  configureProvider?: (provider: FakeProvider) => void;
  configOverrides?: Partial<{ maxDurationSeconds: number }>;
}) {
  const storePath = createTestStorePath();
  const call = makePersistedCall(params?.callOverrides);
  writeCallsToStore(storePath, [call]);
  const provider = new FakeProvider();
  if (params?.providerResult) {
    provider.getCallStatusResult = params.providerResult;
  }
  // Run the free-form hook last so it can override providerResult if needed.
  params?.configureProvider?.(provider);
  const config = VoiceCallConfigSchema.parse({
    enabled: true,
    provider: "plivo",
    fromNumber: "+15550000000",
    ...params?.configOverrides,
  });
  const manager = new CallManager(config, storePath);
  await manager.initialize(provider, "https://example.com/voice/webhook");
  return { call, manager };
}
it("skips stale calls reported terminal by provider", async () => {
const { manager } = await initializeManager({
providerResult: { status: "completed", isTerminal: true },
});
expect(manager.getActiveCalls()).toHaveLength(0);
});
it("keeps calls reported active by provider", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall();
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatusResult = { status: "in-progress", isTerminal: false };
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { call, manager } = await initializeManager({
providerResult: { status: "in-progress", isTerminal: false },
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(1);
expect(manager.getActiveCalls()[0]?.callId).toBe(call.callId);
});
it("keeps calls when provider returns unknown (transient error)", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall();
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatusResult = { status: "error", isTerminal: false, isUnknown: true };
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { manager } = await initializeManager({
providerResult: { status: "error", isTerminal: false, isUnknown: true },
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(1);
});
it("skips calls older than maxDurationSeconds", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall({
startedAt: Date.now() - 600_000,
answeredAt: Date.now() - 590_000,
const { manager } = await initializeManager({
callOverrides: {
startedAt: Date.now() - 600_000,
answeredAt: Date.now() - 590_000,
},
configOverrides: { maxDurationSeconds: 300 },
});
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
maxDurationSeconds: 300,
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(0);
});
it("skips calls without providerCallId", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall({ providerCallId: undefined, state: "initiated" });
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { manager } = await initializeManager({
callOverrides: { providerCallId: undefined, state: "initiated" },
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(0);
});
it("keeps call when getCallStatus throws (verification failure)", async () => {
const storePath = createTestStorePath();
const call = makePersistedCall();
writeCallsToStore(storePath, [call]);
const provider = new FakeProvider();
provider.getCallStatus = async () => {
throw new Error("network failure");
};
const config = VoiceCallConfigSchema.parse({
enabled: true,
provider: "plivo",
fromNumber: "+15550000000",
const { manager } = await initializeManager({
configureProvider: (provider) => {
provider.getCallStatus = async () => {
throw new Error("network failure");
};
},
});
const manager = new CallManager(config, storePath);
await manager.initialize(provider, "https://example.com/voice/webhook");
expect(manager.getActiveCalls()).toHaveLength(1);
});

View File

@ -21,6 +21,12 @@ function createContext(rawBody: string, query?: WebhookContext["query"]): Webhoo
};
}
/** Asserts the TwiML body connects a media stream carrying a token parameter. */
function expectStreamingTwiml(body: string) {
  const requiredFragments = [STREAM_URL, '<Parameter name="token" value="', "<Connect>"];
  for (const fragment of requiredFragments) {
    expect(body).toContain(fragment);
  }
}
describe("TwilioProvider", () => {
it("returns streaming TwiML for outbound conversation calls before in-progress", () => {
const provider = createProvider();
@ -30,9 +36,7 @@ describe("TwilioProvider", () => {
const result = provider.parseWebhookEvent(ctx);
expect(result.providerResponseBody).toContain(STREAM_URL);
expect(result.providerResponseBody).toContain('<Parameter name="token" value="');
expect(result.providerResponseBody).toContain("<Connect>");
expectStreamingTwiml(result.providerResponseBody);
});
it("returns empty TwiML for status callbacks", () => {
@ -55,9 +59,7 @@ describe("TwilioProvider", () => {
const result = provider.parseWebhookEvent(ctx);
expect(result.providerResponseBody).toContain(STREAM_URL);
expect(result.providerResponseBody).toContain('<Parameter name="token" value="');
expect(result.providerResponseBody).toContain("<Connect>");
expectStreamingTwiml(result.providerResponseBody);
});
it("returns queue TwiML for second inbound call when first call is active", () => {

View File

@ -104,11 +104,11 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3);
const highMemLocalHost = !isCI && hostMemoryGiB >= 96;
const lowMemLocalHost = !isCI && hostMemoryGiB < 64;
const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10);
// vmForks is a big win for transform/import heavy suites, but Node 24+
// regressed with Vitest's vm runtime in this repo, and low-memory local hosts
// are more likely to hit per-worker V8 heap ceilings. Keep it opt-out via
// OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor < 24 : true;
// vmForks is a big win for transform/import heavy suites. Node 24 is stable again
// for the default unit-fast lane after moving the known flaky files to fork-only
// isolation, but Node 25+ still falls back to process forks until re-validated.
// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1.
const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true;
const useVmForks =
process.env.OPENCLAW_TEST_VM_FORKS === "1" ||
(process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost);

View File

@ -47,326 +47,274 @@ describe("CronService restart catch-up", () => {
};
}
it("executes an overdue recurring job immediately on start", async () => {
async function withRestartedCron(
jobs: unknown[],
run: (params: {
cron: CronService;
enqueueSystemEvent: ReturnType<typeof vi.fn>;
requestHeartbeatNow: ReturnType<typeof vi.fn>;
}) => Promise<void>,
) {
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
await writeStoreJobs(store.storePath, jobs);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
try {
await cron.start();
await run({ cron, enqueueSystemEvent, requestHeartbeatNow });
} finally {
cron.stop();
await store.cleanup();
}
}
it("executes an overdue recurring job immediately on start", async () => {
const dueAt = Date.parse("2025-12-13T15:00:00.000Z");
const lastRunAt = Date.parse("2025-12-12T15:00:00.000Z");
await writeStoreJobs(store.storePath, [
{
id: "restart-overdue-job",
name: "daily digest",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"),
schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "digest now" },
state: {
nextRunAtMs: dueAt,
lastRunAtMs: lastRunAt,
lastStatus: "ok",
await withRestartedCron(
[
{
id: "restart-overdue-job",
name: "daily digest",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"),
schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "digest now" },
state: {
nextRunAtMs: dueAt,
lastRunAtMs: lastRunAt,
lastStatus: "ok",
},
},
],
async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => {
expect(enqueueSystemEvent).toHaveBeenCalledWith(
"digest now",
expect.objectContaining({ agentId: undefined }),
);
expect(requestHeartbeatNow).toHaveBeenCalled();
const listedJobs = await cron.list({ includeDisabled: true });
const updated = listedJobs.find((job) => job.id === "restart-overdue-job");
expect(updated?.state.lastStatus).toBe("ok");
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z"));
expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z"));
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).toHaveBeenCalledWith(
"digest now",
expect.objectContaining({ agentId: undefined }),
);
expect(requestHeartbeatNow).toHaveBeenCalled();
const jobs = await cron.list({ includeDisabled: true });
const updated = jobs.find((job) => job.id === "restart-overdue-job");
expect(updated?.state.lastStatus).toBe("ok");
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z"));
expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z"));
cron.stop();
await store.cleanup();
});
it("clears stale running markers without replaying interrupted startup jobs", async () => {
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
const dueAt = Date.parse("2025-12-13T16:00:00.000Z");
const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z");
await writeStoreJobs(store.storePath, [
{
id: "restart-stale-running",
name: "daily stale marker",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "resume stale marker" },
state: {
nextRunAtMs: dueAt,
runningAtMs: staleRunningAt,
await withRestartedCron(
[
{
id: "restart-stale-running",
name: "daily stale marker",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "resume stale marker" },
state: {
nextRunAtMs: dueAt,
runningAtMs: staleRunningAt,
},
},
],
async ({ cron, enqueueSystemEvent }) => {
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(noopLogger.warn).toHaveBeenCalledWith(
expect.objectContaining({ jobId: "restart-stale-running" }),
"cron: clearing stale running marker on startup",
);
const listedJobs = await cron.list({ includeDisabled: true });
const updated = listedJobs.find((job) => job.id === "restart-stale-running");
expect(updated?.state.runningAtMs).toBeUndefined();
expect(updated?.state.lastStatus).toBeUndefined();
expect(updated?.state.lastRunAtMs).toBeUndefined();
expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(
true,
);
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(noopLogger.warn).toHaveBeenCalledWith(
expect.objectContaining({ jobId: "restart-stale-running" }),
"cron: clearing stale running marker on startup",
);
const jobs = await cron.list({ includeDisabled: true });
const updated = jobs.find((job) => job.id === "restart-stale-running");
expect(updated?.state.runningAtMs).toBeUndefined();
expect(updated?.state.lastStatus).toBeUndefined();
expect(updated?.state.lastRunAtMs).toBeUndefined();
expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(true);
cron.stop();
await store.cleanup();
});
it("replays the most recent missed cron slot after restart when nextRunAtMs already advanced", async () => {
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
await writeStoreJobs(store.storePath, [
{
id: "restart-missed-slot",
name: "every ten minutes +1",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "catch missed slot" },
state: {
// Persisted state may already be recomputed from restart time and
// point to the future slot, even though 04:01 was missed.
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
lastStatus: "ok",
await withRestartedCron(
[
{
id: "restart-missed-slot",
name: "every ten minutes +1",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "catch missed slot" },
state: {
// Persisted state may already be recomputed from restart time and
// point to the future slot, even though 04:01 was missed.
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
lastStatus: "ok",
},
},
],
async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => {
expect(enqueueSystemEvent).toHaveBeenCalledWith(
"catch missed slot",
expect.objectContaining({ agentId: undefined }),
);
expect(requestHeartbeatNow).toHaveBeenCalled();
const listedJobs = await cron.list({ includeDisabled: true });
const updated = listedJobs.find((job) => job.id === "restart-missed-slot");
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z"));
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).toHaveBeenCalledWith(
"catch missed slot",
expect.objectContaining({ agentId: undefined }),
);
expect(requestHeartbeatNow).toHaveBeenCalled();
const jobs = await cron.list({ includeDisabled: true });
const updated = jobs.find((job) => job.id === "restart-missed-slot");
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z"));
cron.stop();
await store.cleanup();
});
it("does not replay interrupted one-shot jobs on startup", async () => {
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
const dueAt = Date.parse("2025-12-13T16:00:00.000Z");
const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z");
await writeStoreJobs(store.storePath, [
{
id: "restart-stale-one-shot",
name: "one shot stale marker",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "one-shot stale marker" },
state: {
nextRunAtMs: dueAt,
runningAtMs: staleRunningAt,
await withRestartedCron(
[
{
id: "restart-stale-one-shot",
name: "one shot stale marker",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "one-shot stale marker" },
state: {
nextRunAtMs: dueAt,
runningAtMs: staleRunningAt,
},
},
],
async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => {
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(requestHeartbeatNow).not.toHaveBeenCalled();
const listedJobs = await cron.list({ includeDisabled: true });
const updated = listedJobs.find((job) => job.id === "restart-stale-one-shot");
expect(updated?.state.runningAtMs).toBeUndefined();
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(requestHeartbeatNow).not.toHaveBeenCalled();
const jobs = await cron.list({ includeDisabled: true });
const updated = jobs.find((job) => job.id === "restart-stale-one-shot");
expect(updated?.state.runningAtMs).toBeUndefined();
cron.stop();
await store.cleanup();
);
});
it("does not replay cron slot when the latest slot already ran before restart", async () => {
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
await writeStoreJobs(store.storePath, [
{
id: "restart-no-duplicate-slot",
name: "every ten minutes +1 no duplicate",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "already ran" },
state: {
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
lastStatus: "ok",
await withRestartedCron(
[
{
id: "restart-no-duplicate-slot",
name: "every ten minutes +1 no duplicate",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "already ran" },
state: {
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
lastStatus: "ok",
},
},
],
async ({ enqueueSystemEvent, requestHeartbeatNow }) => {
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(requestHeartbeatNow).not.toHaveBeenCalled();
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(requestHeartbeatNow).not.toHaveBeenCalled();
cron.stop();
await store.cleanup();
);
});
it("does not replay missed cron slots while error backoff is pending after restart", async () => {
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
await writeStoreJobs(store.storePath, [
{
id: "restart-backoff-pending",
name: "backoff pending",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "do not run during backoff" },
state: {
// Next retry is intentionally delayed by backoff despite a newer cron slot.
nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
lastStatus: "error",
consecutiveErrors: 4,
await withRestartedCron(
[
{
id: "restart-backoff-pending",
name: "backoff pending",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "do not run during backoff" },
state: {
// Next retry is intentionally delayed by backoff despite a newer cron slot.
nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
lastStatus: "error",
consecutiveErrors: 4,
},
},
],
async ({ enqueueSystemEvent, requestHeartbeatNow }) => {
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(requestHeartbeatNow).not.toHaveBeenCalled();
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).not.toHaveBeenCalled();
expect(requestHeartbeatNow).not.toHaveBeenCalled();
cron.stop();
await store.cleanup();
);
});
it("replays missed cron slot after restart when error backoff has already elapsed", async () => {
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
const store = await makeStorePath();
const enqueueSystemEvent = vi.fn();
const requestHeartbeatNow = vi.fn();
await writeStoreJobs(store.storePath, [
{
id: "restart-backoff-elapsed-replay",
name: "backoff elapsed replay",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "replay after backoff elapsed" },
state: {
// Startup maintenance may already point to a future slot (04:11) even
// though 04:01 was missed and the 30s error backoff has elapsed.
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
lastStatus: "error",
consecutiveErrors: 1,
await withRestartedCron(
[
{
id: "restart-backoff-elapsed-replay",
name: "backoff elapsed replay",
enabled: true,
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
sessionTarget: "main",
wakeMode: "next-heartbeat",
payload: { kind: "systemEvent", text: "replay after backoff elapsed" },
state: {
// Startup maintenance may already point to a future slot (04:11) even
// though 04:01 was missed and the 30s error backoff has elapsed.
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
lastStatus: "error",
consecutiveErrors: 1,
},
},
],
async ({ enqueueSystemEvent, requestHeartbeatNow }) => {
expect(enqueueSystemEvent).toHaveBeenCalledWith(
"replay after backoff elapsed",
expect.objectContaining({ agentId: undefined }),
);
expect(requestHeartbeatNow).toHaveBeenCalled();
},
]);
const cron = createRestartCronService({
storePath: store.storePath,
enqueueSystemEvent,
requestHeartbeatNow,
});
await cron.start();
expect(enqueueSystemEvent).toHaveBeenCalledWith(
"replay after backoff elapsed",
expect.objectContaining({ agentId: undefined }),
);
expect(requestHeartbeatNow).toHaveBeenCalled();
cron.stop();
await store.cleanup();
});
it("reschedules deferred missed jobs from the post-catchup clock so they stay in the future", async () => {

View File

@ -64,7 +64,7 @@ describe("formatBackupCreateSummary", () => {
displayPath: "~/.openclaw/config.json",
},
{
kind: "oauth",
kind: "credentials",
sourcePath: "/oauth",
archivePath: "archive/oauth",
displayPath: "~/.openclaw/oauth",
@ -77,7 +77,7 @@ describe("formatBackupCreateSummary", () => {
"Backup archive: /tmp/openclaw-backup.tar.gz",
"Included 2 paths:",
"- config: ~/.openclaw/config.json",
"- oauth: ~/.openclaw/oauth",
"- credentials: ~/.openclaw/oauth",
"Dry run only; archive was not written.",
]);
});

View File

@ -1,6 +1,3 @@
import fs from "node:fs";
import os from "node:os";
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { telegramOutbound } from "../channels/plugins/outbound/telegram.js";
import type { OpenClawConfig } from "../config/config.js";
@ -380,58 +377,6 @@ describe("exec approval forwarder", () => {
});
});
it("prefers turn-source routing over stale session last route", async () => {
vi.useFakeTimers();
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-exec-approval-forwarder-test-"));
try {
const storePath = path.join(tmpDir, "sessions.json");
fs.writeFileSync(
storePath,
JSON.stringify({
"agent:main:main": {
updatedAt: 1,
channel: "slack",
to: "U1",
lastChannel: "slack",
lastTo: "U1",
},
}),
"utf-8",
);
const cfg = {
session: { store: storePath },
approvals: { exec: { enabled: true, mode: "session" } },
} as OpenClawConfig;
const { deliver, forwarder } = createForwarder({ cfg });
await expect(
forwarder.handleRequested({
...baseRequest,
request: {
...baseRequest.request,
turnSourceChannel: "whatsapp",
turnSourceTo: "+15555550123",
turnSourceAccountId: "work",
turnSourceThreadId: "1739201675.123",
},
}),
).resolves.toBe(true);
expect(deliver).toHaveBeenCalledTimes(1);
expect(deliver).toHaveBeenCalledWith(
expect.objectContaining({
channel: "whatsapp",
to: "+15555550123",
accountId: "work",
threadId: 1739201675,
}),
);
} finally {
fs.rmSync(tmpDir, { recursive: true, force: true });
}
});
it("can forward resolved notices without pending cache when request payload is present", async () => {
vi.useFakeTimers();
const cfg = {

View File

@ -0,0 +1,142 @@
import { describe, expect, it } from "vitest";
import {
buildExecApprovalPendingReplyPayload,
buildExecApprovalUnavailableReplyPayload,
getExecApprovalApproverDmNoticeText,
getExecApprovalReplyMetadata,
} from "./exec-approval-reply.js";
describe("exec approval reply helpers", () => {
it("returns the approver DM notice text", () => {
expect(getExecApprovalApproverDmNoticeText()).toBe(
"Approval required. I sent the allowed approvers DMs.",
);
});
it("returns null for invalid reply metadata payloads", () => {
for (const payload of [
{},
{ channelData: null },
{ channelData: [] },
{ channelData: { execApproval: null } },
{ channelData: { execApproval: [] } },
{ channelData: { execApproval: { approvalId: "req-1", approvalSlug: " " } } },
{ channelData: { execApproval: { approvalId: " ", approvalSlug: "slug-1" } } },
]) {
expect(getExecApprovalReplyMetadata(payload)).toBeNull();
}
});
it("normalizes reply metadata and filters invalid decisions", () => {
expect(
getExecApprovalReplyMetadata({
channelData: {
execApproval: {
approvalId: " req-1 ",
approvalSlug: " slug-1 ",
allowedDecisions: ["allow-once", "bad", "deny", "allow-always", 3],
},
},
}),
).toEqual({
approvalId: "req-1",
approvalSlug: "slug-1",
allowedDecisions: ["allow-once", "deny", "allow-always"],
});
});
it("builds pending reply payloads with trimmed warning text and slug fallback", () => {
const payload = buildExecApprovalPendingReplyPayload({
warningText: " Heads up. ",
approvalId: "req-1",
approvalSlug: "slug-1",
command: "echo ok",
cwd: "/tmp/work",
host: "gateway",
nodeId: "node-1",
expiresAtMs: 2500,
nowMs: 1000,
});
expect(payload.channelData).toEqual({
execApproval: {
approvalId: "req-1",
approvalSlug: "slug-1",
allowedDecisions: ["allow-once", "allow-always", "deny"],
},
});
expect(payload.text).toContain("Heads up.");
expect(payload.text).toContain("```txt\n/approve slug-1 allow-once\n```");
expect(payload.text).toContain("```sh\necho ok\n```");
expect(payload.text).toContain("Host: gateway\nNode: node-1\nCWD: /tmp/work\nExpires in: 2s");
expect(payload.text).toContain("Full id: `req-1`");
});
it("uses a longer fence for commands containing triple backticks", () => {
const payload = buildExecApprovalPendingReplyPayload({
approvalId: "req-2",
approvalSlug: "slug-2",
approvalCommandId: " req-cmd-2 ",
command: "echo ```danger```",
host: "sandbox",
});
expect(payload.text).toContain("```txt\n/approve req-cmd-2 allow-once\n```");
expect(payload.text).toContain("````sh\necho ```danger```\n````");
expect(payload.text).not.toContain("Expires in:");
});
it("clamps pending reply expiration to zero seconds", () => {
const payload = buildExecApprovalPendingReplyPayload({
approvalId: "req-3",
approvalSlug: "slug-3",
command: "echo later",
host: "gateway",
expiresAtMs: 1000,
nowMs: 3000,
});
expect(payload.text).toContain("Expires in: 0s");
});
it("builds unavailable payloads for approver DMs and each fallback reason", () => {
expect(
buildExecApprovalUnavailableReplyPayload({
warningText: " Careful. ",
reason: "no-approval-route",
sentApproverDms: true,
}),
).toEqual({
text: "Careful.\n\nApproval required. I sent the allowed approvers DMs.",
});
const cases = [
{
reason: "initiating-platform-disabled" as const,
channelLabel: "Slack",
expected: "Exec approval is required, but chat exec approvals are not enabled on Slack.",
},
{
reason: "initiating-platform-unsupported" as const,
channelLabel: undefined,
expected:
"Exec approval is required, but this platform does not support chat exec approvals.",
},
{
reason: "no-approval-route" as const,
channelLabel: undefined,
expected:
"Exec approval is required, but no interactive approval client is currently available.",
},
];
for (const testCase of cases) {
expect(
buildExecApprovalUnavailableReplyPayload({
reason: testCase.reason,
channelLabel: testCase.channelLabel,
}).text,
).toContain(testCase.expected);
}
});
});

View File

@ -83,7 +83,7 @@ export function buildExecApprovalPendingReplyPayload(
const lines: string[] = [];
const warningText = params.warningText?.trim();
if (warningText) {
lines.push(warningText, "");
lines.push(warningText);
}
lines.push("Approval required.");
lines.push("Run:");
@ -133,7 +133,7 @@ export function buildExecApprovalUnavailableReplyPayload(
const lines: string[] = [];
const warningText = params.warningText?.trim();
if (warningText) {
lines.push(warningText, "");
lines.push(warningText);
}
if (params.sentApproverDms) {

View File

@ -0,0 +1,189 @@
import fs from "node:fs";
import os from "node:os";
import path from "node:path";
import { afterEach, describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import type { SessionEntry } from "../config/sessions.js";
import { resolveExecApprovalSessionTarget } from "./exec-approval-session-target.js";
import type { ExecApprovalRequest } from "./exec-approvals.js";
const tempDirs: string[] = [];
afterEach(() => {
for (const dir of tempDirs.splice(0)) {
fs.rmSync(dir, { recursive: true, force: true });
}
});
const baseRequest: ExecApprovalRequest = {
id: "req-1",
request: {
command: "echo hello",
sessionKey: "agent:main:main",
},
createdAtMs: 1000,
expiresAtMs: 6000,
};
function createTempDir(): string {
const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-exec-approval-session-target-"));
tempDirs.push(dir);
return dir;
}
function writeStoreFile(
storePath: string,
entries: Record<string, Partial<SessionEntry>>,
): OpenClawConfig {
fs.mkdirSync(path.dirname(storePath), { recursive: true });
fs.writeFileSync(storePath, JSON.stringify(entries), "utf-8");
return {
session: { store: storePath },
} as OpenClawConfig;
}
describe("exec approval session target", () => {
it("returns null for blank session keys, missing entries, and unresolved targets", () => {
const tmpDir = createTempDir();
const storePath = path.join(tmpDir, "sessions.json");
const cfg = writeStoreFile(storePath, {
"agent:main:main": {
sessionId: "main",
updatedAt: 1,
lastChannel: "slack",
},
});
const cases = [
{
request: {
...baseRequest,
request: {
...baseRequest.request,
sessionKey: " ",
},
},
},
{
request: {
...baseRequest,
request: {
...baseRequest.request,
sessionKey: "agent:main:missing",
},
},
},
{
request: baseRequest,
},
];
for (const testCase of cases) {
expect(
resolveExecApprovalSessionTarget({
cfg,
request: testCase.request,
}),
).toBeNull();
}
});
it("prefers turn-source routing over stale session delivery state", () => {
const tmpDir = createTempDir();
const storePath = path.join(tmpDir, "sessions.json");
const cfg = writeStoreFile(storePath, {
"agent:main:main": {
sessionId: "main",
updatedAt: 1,
channel: "slack",
to: "U1",
lastChannel: "slack",
lastTo: "U1",
},
});
expect(
resolveExecApprovalSessionTarget({
cfg,
request: baseRequest,
turnSourceChannel: " whatsapp ",
turnSourceTo: " +15555550123 ",
turnSourceAccountId: " work ",
turnSourceThreadId: "1739201675.123",
}),
).toEqual({
channel: "whatsapp",
to: "+15555550123",
accountId: "work",
threadId: 1739201675,
});
});
it("uses the parsed session-key agent id for store-path placeholders", () => {
const tmpDir = createTempDir();
const storePath = path.join(tmpDir, "{agentId}", "sessions.json");
const cfg = writeStoreFile(path.join(tmpDir, "helper", "sessions.json"), {
"agent:helper:main": {
sessionId: "main",
updatedAt: 1,
lastChannel: "discord",
lastTo: "channel:123",
lastAccountId: " Work ",
lastThreadId: "55",
},
});
cfg.session = { store: storePath };
expect(
resolveExecApprovalSessionTarget({
cfg,
request: {
...baseRequest,
request: {
...baseRequest.request,
sessionKey: "agent:helper:main",
},
},
}),
).toEqual({
channel: "discord",
to: "channel:123",
accountId: "work",
threadId: 55,
});
});
it("falls back to request agent id for legacy session keys", () => {
const tmpDir = createTempDir();
const storePath = path.join(tmpDir, "{agentId}", "sessions.json");
const cfg = writeStoreFile(path.join(tmpDir, "worker-1", "sessions.json"), {
"legacy-main": {
sessionId: "legacy-main",
updatedAt: 1,
lastChannel: "telegram",
lastTo: "-100123",
lastThreadId: 77,
},
});
cfg.session = { store: storePath };
expect(
resolveExecApprovalSessionTarget({
cfg,
request: {
...baseRequest,
request: {
...baseRequest.request,
agentId: "Worker 1",
sessionKey: "legacy-main",
},
},
}),
).toEqual({
channel: "telegram",
to: "-100123",
accountId: undefined,
threadId: 77,
});
});
});

View File

@ -0,0 +1,84 @@
import { describe, expect, it } from "vitest";
import {
maxAsk,
minSecurity,
normalizeExecAsk,
normalizeExecHost,
normalizeExecSecurity,
requiresExecApproval,
} from "./exec-approvals.js";
describe("exec approvals policy helpers", () => {
it("normalizes exec host values and rejects blanks or unknown values", () => {
expect(normalizeExecHost(" gateway ")).toBe("gateway");
expect(normalizeExecHost("NODE")).toBe("node");
expect(normalizeExecHost("")).toBeNull();
expect(normalizeExecHost("ssh")).toBeNull();
});
it("normalizes exec security and ask values", () => {
expect(normalizeExecSecurity(" allowlist ")).toBe("allowlist");
expect(normalizeExecSecurity("FULL")).toBe("full");
expect(normalizeExecSecurity("unknown")).toBeNull();
expect(normalizeExecAsk(" on-miss ")).toBe("on-miss");
expect(normalizeExecAsk("ALWAYS")).toBe("always");
expect(normalizeExecAsk("maybe")).toBeNull();
});
it("minSecurity returns the more restrictive value", () => {
expect(minSecurity("deny", "full")).toBe("deny");
expect(minSecurity("allowlist", "full")).toBe("allowlist");
expect(minSecurity("full", "allowlist")).toBe("allowlist");
});
it("maxAsk returns the more aggressive ask mode", () => {
expect(maxAsk("off", "always")).toBe("always");
expect(maxAsk("on-miss", "off")).toBe("on-miss");
expect(maxAsk("always", "on-miss")).toBe("always");
});
it("requiresExecApproval respects ask mode and allowlist satisfaction", () => {
const cases = [
{
ask: "always" as const,
security: "allowlist" as const,
analysisOk: true,
allowlistSatisfied: true,
expected: true,
},
{
ask: "off" as const,
security: "allowlist" as const,
analysisOk: true,
allowlistSatisfied: false,
expected: false,
},
{
ask: "on-miss" as const,
security: "allowlist" as const,
analysisOk: true,
allowlistSatisfied: true,
expected: false,
},
{
ask: "on-miss" as const,
security: "allowlist" as const,
analysisOk: false,
allowlistSatisfied: false,
expected: true,
},
{
ask: "on-miss" as const,
security: "full" as const,
analysisOk: false,
allowlistSatisfied: false,
expected: false,
},
];
for (const testCase of cases) {
expect(requiresExecApproval(testCase)).toBe(testCase.expected);
}
});
});

View File

@ -0,0 +1,235 @@
import fs from "node:fs";
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { makeTempDir } from "./exec-approvals-test-helpers.js";
const requestJsonlSocketMock = vi.hoisted(() => vi.fn());
vi.mock("./jsonl-socket.js", () => ({
requestJsonlSocket: (...args: unknown[]) => requestJsonlSocketMock(...args),
}));
import {
addAllowlistEntry,
ensureExecApprovals,
mergeExecApprovalsSocketDefaults,
normalizeExecApprovals,
readExecApprovalsSnapshot,
recordAllowlistUse,
requestExecApprovalViaSocket,
resolveExecApprovalsPath,
resolveExecApprovalsSocketPath,
type ExecApprovalsFile,
} from "./exec-approvals.js";
const tempDirs: string[] = [];
const originalOpenClawHome = process.env.OPENCLAW_HOME;
beforeEach(() => {
requestJsonlSocketMock.mockReset();
});
afterEach(() => {
vi.restoreAllMocks();
if (originalOpenClawHome === undefined) {
delete process.env.OPENCLAW_HOME;
} else {
process.env.OPENCLAW_HOME = originalOpenClawHome;
}
for (const dir of tempDirs.splice(0)) {
fs.rmSync(dir, { recursive: true, force: true });
}
});
function createHomeDir(): string {
const dir = makeTempDir();
tempDirs.push(dir);
process.env.OPENCLAW_HOME = dir;
return dir;
}
function approvalsFilePath(homeDir: string): string {
return path.join(homeDir, ".openclaw", "exec-approvals.json");
}
function readApprovalsFile(homeDir: string): ExecApprovalsFile {
return JSON.parse(fs.readFileSync(approvalsFilePath(homeDir), "utf8")) as ExecApprovalsFile;
}
describe("exec approvals store helpers", () => {
  // Default file/socket paths must expand under the OPENCLAW_HOME override.
  it("expands home-prefixed default file and socket paths", () => {
    const dir = createHomeDir();
    expect(path.normalize(resolveExecApprovalsPath())).toBe(
      path.normalize(path.join(dir, ".openclaw", "exec-approvals.json")),
    );
    expect(path.normalize(resolveExecApprovalsSocketPath())).toBe(
      path.normalize(path.join(dir, ".openclaw", "exec-approvals.sock")),
    );
  });
  // Socket precedence: normalized file, then current file, then built-in default.
  it("merges socket defaults from normalized, current, and built-in fallback", () => {
    const normalized = normalizeExecApprovals({
      version: 1,
      agents: {},
      socket: { path: "/tmp/a.sock", token: "a" },
    });
    const current = normalizeExecApprovals({
      version: 1,
      agents: {},
      socket: { path: "/tmp/b.sock", token: "b" },
    });
    // Normalized socket wins when present.
    expect(mergeExecApprovalsSocketDefaults({ normalized, current }).socket).toEqual({
      path: "/tmp/a.sock",
      token: "a",
    });
    const merged = mergeExecApprovalsSocketDefaults({
      normalized: normalizeExecApprovals({ version: 1, agents: {} }),
      current,
    });
    // Falls back to the current file's socket when normalized has none.
    expect(merged.socket).toEqual({
      path: "/tmp/b.sock",
      token: "b",
    });
    createHomeDir();
    // With neither source set: built-in default path and empty token.
    expect(
      mergeExecApprovalsSocketDefaults({
        normalized: normalizeExecApprovals({ version: 1, agents: {} }),
      }).socket,
    ).toEqual({
      path: resolveExecApprovalsSocketPath(),
      token: "",
    });
  });
  // Missing file and unparseable JSON both yield a normalized empty snapshot.
  it("returns normalized empty snapshots for missing and invalid approvals files", () => {
    const dir = createHomeDir();
    const missing = readExecApprovalsSnapshot();
    expect(missing.exists).toBe(false);
    expect(missing.raw).toBeNull();
    expect(missing.file).toEqual(normalizeExecApprovals({ version: 1, agents: {} }));
    expect(missing.path).toBe(approvalsFilePath(dir));
    fs.mkdirSync(path.dirname(approvalsFilePath(dir)), { recursive: true });
    // Deliberately broken JSON: snapshot keeps the raw text but normalizes the file.
    fs.writeFileSync(approvalsFilePath(dir), "{invalid", "utf8");
    const invalid = readExecApprovalsSnapshot();
    expect(invalid.exists).toBe(true);
    expect(invalid.raw).toBe("{invalid");
    expect(invalid.file).toEqual(normalizeExecApprovals({ version: 1, agents: {} }));
  });
  // ensureExecApprovals creates the file with default socket path and a fresh token.
  it("ensures approvals file with default socket path and generated token", () => {
    const dir = createHomeDir();
    const ensured = ensureExecApprovals();
    const raw = fs.readFileSync(approvalsFilePath(dir), "utf8");
    expect(ensured.socket?.path).toBe(resolveExecApprovalsSocketPath());
    // Token is expected to be 32 URL-safe base64 characters.
    expect(ensured.socket?.token).toMatch(/^[A-Za-z0-9_-]{32}$/);
    expect(raw.endsWith("\n")).toBe(true);
    expect(readApprovalsFile(dir).socket).toEqual(ensured.socket);
  });
  // Patterns are trimmed, deduplicated, and blanks are ignored; ids are generated.
  it("adds trimmed allowlist entries once and persists generated ids", () => {
    const dir = createHomeDir();
    vi.spyOn(Date, "now").mockReturnValue(123_456);
    const approvals = ensureExecApprovals();
    addAllowlistEntry(approvals, "worker", " /usr/bin/rg ");
    addAllowlistEntry(approvals, "worker", "/usr/bin/rg");
    addAllowlistEntry(approvals, "worker", " ");
    expect(readApprovalsFile(dir).agents?.worker?.allowlist).toEqual([
      expect.objectContaining({
        pattern: "/usr/bin/rg",
        lastUsedAt: 123_456,
      }),
    ]);
    // Entry id is a generated UUID.
    expect(readApprovalsFile(dir).agents?.worker?.allowlist?.[0]?.id).toMatch(/^[0-9a-f-]{36}$/i);
  });
  // Usage metadata lands on the matched entry; entries without ids get one.
  it("records allowlist usage on the matching entry and backfills missing ids", () => {
    const dir = createHomeDir();
    vi.spyOn(Date, "now").mockReturnValue(999_000);
    const approvals: ExecApprovalsFile = {
      version: 1,
      agents: {
        main: {
          allowlist: [{ pattern: "/usr/bin/rg" }, { pattern: "/usr/bin/jq", id: "keep-id" }],
        },
      },
    };
    fs.mkdirSync(path.dirname(approvalsFilePath(dir)), { recursive: true });
    fs.writeFileSync(approvalsFilePath(dir), JSON.stringify(approvals, null, 2), "utf8");
    recordAllowlistUse(
      approvals,
      undefined,
      { pattern: "/usr/bin/rg" },
      "rg needle",
      "/opt/homebrew/bin/rg",
    );
    expect(readApprovalsFile(dir).agents?.main?.allowlist).toEqual([
      expect.objectContaining({
        pattern: "/usr/bin/rg",
        lastUsedAt: 999_000,
        lastUsedCommand: "rg needle",
        lastResolvedPath: "/opt/homebrew/bin/rg",
      }),
      // Untouched entry keeps its existing id.
      { pattern: "/usr/bin/jq", id: "keep-id" },
    ]);
    expect(readApprovalsFile(dir).agents?.main?.allowlist?.[0]?.id).toMatch(/^[0-9a-f-]{36}$/i);
  });
  // Missing socket path or token short-circuits to null without touching the socket.
  it("returns null when approval socket credentials are missing", async () => {
    await expect(
      requestExecApprovalViaSocket({
        socketPath: "",
        token: "secret",
        request: { command: "echo hi" },
      }),
    ).resolves.toBeNull();
    await expect(
      requestExecApprovalViaSocket({
        socketPath: "/tmp/socket",
        token: "",
        request: { command: "echo hi" },
      }),
    ).resolves.toBeNull();
    expect(requestJsonlSocketMock).not.toHaveBeenCalled();
  });
  // Payload shape, 15s timeout, and accept() filtering of non-decision messages.
  it("builds approval socket payloads and accepts decision responses only", async () => {
    requestJsonlSocketMock.mockImplementationOnce(async ({ payload, accept, timeoutMs }) => {
      expect(timeoutMs).toBe(15_000);
      const parsed = JSON.parse(payload) as {
        type: string;
        token: string;
        id: string;
        request: { command: string };
      };
      expect(parsed.type).toBe("request");
      expect(parsed.token).toBe("secret");
      expect(parsed.request).toEqual({ command: "echo hi" });
      expect(parsed.id).toMatch(/^[0-9a-f-]{36}$/i);
      // accept() only yields a value for messages of type "decision".
      expect(accept({ type: "noop", decision: "allow-once" })).toBeUndefined();
      expect(accept({ type: "decision", decision: "allow-always" })).toBe("allow-always");
      return "deny";
    });
    await expect(
      requestExecApprovalViaSocket({
        socketPath: "/tmp/socket",
        token: "secret",
        request: { command: "echo hi" },
      }),
    ).resolves.toBe("deny");
  });
});

View File

@ -9,68 +9,9 @@ import {
buildSafeBinsShellCommand,
evaluateExecAllowlist,
evaluateShellAllowlist,
maxAsk,
mergeExecApprovalsSocketDefaults,
minSecurity,
normalizeExecApprovals,
normalizeSafeBins,
requiresExecApproval,
resolveExecApprovalsPath,
resolveExecApprovalsSocketPath,
} from "./exec-approvals.js";
describe("mergeExecApprovalsSocketDefaults", () => {
it("prefers normalized socket, then current, then default path", () => {
const normalized = normalizeExecApprovals({
version: 1,
agents: {},
socket: { path: "/tmp/a.sock", token: "a" },
});
const current = normalizeExecApprovals({
version: 1,
agents: {},
socket: { path: "/tmp/b.sock", token: "b" },
});
const merged = mergeExecApprovalsSocketDefaults({ normalized, current });
expect(merged.socket?.path).toBe("/tmp/a.sock");
expect(merged.socket?.token).toBe("a");
});
it("falls back to current token when missing in normalized", () => {
const normalized = normalizeExecApprovals({ version: 1, agents: {} });
const current = normalizeExecApprovals({
version: 1,
agents: {},
socket: { path: "/tmp/b.sock", token: "b" },
});
const merged = mergeExecApprovalsSocketDefaults({ normalized, current });
expect(merged.socket?.path).toBeTruthy();
expect(merged.socket?.token).toBe("b");
});
});
describe("resolve exec approvals defaults", () => {
it("expands home-prefixed default file and socket paths", () => {
const dir = makeTempDir();
const prevOpenClawHome = process.env.OPENCLAW_HOME;
try {
process.env.OPENCLAW_HOME = dir;
expect(path.normalize(resolveExecApprovalsPath())).toBe(
path.normalize(path.join(dir, ".openclaw", "exec-approvals.json")),
);
expect(path.normalize(resolveExecApprovalsSocketPath())).toBe(
path.normalize(path.join(dir, ".openclaw", "exec-approvals.sock")),
);
} finally {
if (prevOpenClawHome === undefined) {
delete process.env.OPENCLAW_HOME;
} else {
process.env.OPENCLAW_HOME = prevOpenClawHome;
}
}
});
});
describe("exec approvals safe shell command builder", () => {
it("quotes only safeBins segments (leaves other segments untouched)", () => {
if (process.platform === "win32") {
@ -581,58 +522,3 @@ describe("exec approvals allowlist evaluation", () => {
expect(result.segmentSatisfiedBy).toEqual(["allowlist", "safeBins"]);
});
});
describe("exec approvals policy helpers", () => {
it("minSecurity returns the more restrictive value", () => {
expect(minSecurity("deny", "full")).toBe("deny");
expect(minSecurity("allowlist", "full")).toBe("allowlist");
});
it("maxAsk returns the more aggressive ask mode", () => {
expect(maxAsk("off", "always")).toBe("always");
expect(maxAsk("on-miss", "off")).toBe("on-miss");
});
it("requiresExecApproval respects ask mode and allowlist satisfaction", () => {
expect(
requiresExecApproval({
ask: "always",
security: "allowlist",
analysisOk: true,
allowlistSatisfied: true,
}),
).toBe(true);
expect(
requiresExecApproval({
ask: "off",
security: "allowlist",
analysisOk: true,
allowlistSatisfied: false,
}),
).toBe(false);
expect(
requiresExecApproval({
ask: "on-miss",
security: "allowlist",
analysisOk: true,
allowlistSatisfied: true,
}),
).toBe(false);
expect(
requiresExecApproval({
ask: "on-miss",
security: "allowlist",
analysisOk: false,
allowlistSatisfied: false,
}),
).toBe(true);
expect(
requiresExecApproval({
ask: "on-miss",
security: "full",
analysisOk: false,
allowlistSatisfied: false,
}),
).toBe(false);
});
});

View File

@ -0,0 +1,65 @@
import { describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../config/config.js";
import { DirectoryCache, buildDirectoryCacheKey } from "./directory-cache.js";
describe("buildDirectoryCacheKey", () => {
  it("includes account and signature fallbacks", () => {
    // Omitted accountId/signature collapse to "default" path segments.
    const minimalKey = buildDirectoryCacheKey({
      channel: "slack",
      kind: "channel",
      source: "cache",
    });
    expect(minimalKey).toBe("slack:default:channel:cache:default");
    // Explicit fields appear verbatim, in fixed order.
    const fullKey = buildDirectoryCacheKey({
      channel: "discord",
      accountId: "work",
      kind: "user",
      source: "live",
      signature: "v2",
    });
    expect(fullKey).toBe("discord:work:user:live:v2");
  });
});
describe("DirectoryCache", () => {
it("expires entries after ttl and resets when config ref changes", () => {
vi.useFakeTimers();
const cache = new DirectoryCache<string>(1_000);
const cfgA = {} as OpenClawConfig;
const cfgB = {} as OpenClawConfig;
cache.set("a", "first", cfgA);
expect(cache.get("a", cfgA)).toBe("first");
vi.advanceTimersByTime(1_001);
expect(cache.get("a", cfgA)).toBeUndefined();
cache.set("b", "second", cfgA);
expect(cache.get("b", cfgB)).toBeUndefined();
vi.useRealTimers();
});
it("evicts least-recent entries, refreshes insertion order, and clears matches", () => {
const cache = new DirectoryCache<string>(60_000, 2);
const cfg = {} as OpenClawConfig;
cache.set("a", "A", cfg);
cache.set("b", "B", cfg);
cache.set("a", "A2", cfg);
cache.set("c", "C", cfg);
expect(cache.get("a", cfg)).toBe("A2");
expect(cache.get("b", cfg)).toBeUndefined();
expect(cache.get("c", cfg)).toBe("C");
cache.clearMatching((key) => key.startsWith("c"));
expect(cache.get("c", cfg)).toBeUndefined();
cache.clear(cfg);
expect(cache.get("a", cfg)).toBeUndefined();
});
});

View File

@ -0,0 +1,179 @@
import { describe, expect, it } from "vitest";
import {
buildOutboundDeliveryJson,
formatGatewaySummary,
formatOutboundDeliverySummary,
} from "./format.js";
describe("formatOutboundDeliverySummary", () => {
  it("formats fallback and provider-specific detail variants", () => {
    // Missing delivery results fall back to the "unknown" message id.
    expect(formatOutboundDeliverySummary("telegram", undefined)).toBe(
      "✅ Sent via Telegram. Message ID: unknown",
    );
    expect(formatOutboundDeliverySummary("imessage", undefined)).toBe(
      "✅ Sent via iMessage. Message ID: unknown",
    );
    // Each provider appends its own location detail after the message id.
    expect(
      formatOutboundDeliverySummary("telegram", {
        channel: "telegram",
        messageId: "m1",
        chatId: "c1",
      }),
    ).toBe("✅ Sent via Telegram. Message ID: m1 (chat c1)");
    expect(
      formatOutboundDeliverySummary("discord", {
        channel: "discord",
        messageId: "d1",
        channelId: "chan",
      }),
    ).toBe("✅ Sent via Discord. Message ID: d1 (channel chan)");
    expect(
      formatOutboundDeliverySummary("slack", {
        channel: "slack",
        messageId: "s1",
        roomId: "room-1",
      }),
    ).toBe("✅ Sent via Slack. Message ID: s1 (room room-1)");
    expect(
      formatOutboundDeliverySummary("msteams", {
        channel: "msteams",
        messageId: "t1",
        conversationId: "conv-1",
      }),
    ).toBe("✅ Sent via msteams. Message ID: t1 (conversation conv-1)");
  });
});
describe("buildOutboundDeliveryJson", () => {
  it("builds delivery payloads across provider-specific fields", () => {
    // Telegram: direct delivery carries mediaUrl and chatId through.
    expect(
      buildOutboundDeliveryJson({
        channel: "telegram",
        to: "123",
        result: { channel: "telegram", messageId: "m1", chatId: "c1" },
        mediaUrl: "https://example.com/a.png",
      }),
    ).toEqual({
      channel: "telegram",
      via: "direct",
      to: "123",
      messageId: "m1",
      mediaUrl: "https://example.com/a.png",
      chatId: "c1",
    });
    // WhatsApp: toJid metadata survives; absent mediaUrl becomes null.
    expect(
      buildOutboundDeliveryJson({
        channel: "whatsapp",
        to: "+1",
        result: { channel: "whatsapp", messageId: "w1", toJid: "jid" },
      }),
    ).toEqual({
      channel: "whatsapp",
      via: "direct",
      to: "+1",
      messageId: "w1",
      mediaUrl: null,
      toJid: "jid",
    });
    // Signal: the numeric timestamp is preserved in the payload.
    expect(
      buildOutboundDeliveryJson({
        channel: "signal",
        to: "+1",
        result: { channel: "signal", messageId: "s1", timestamp: 123 },
      }),
    ).toEqual({
      channel: "signal",
      via: "direct",
      to: "+1",
      messageId: "s1",
      mediaUrl: null,
      timestamp: 123,
    });
    // Gateway deliveries keep their explicit via and pass meta through.
    expect(
      buildOutboundDeliveryJson({
        channel: "discord",
        to: "channel:1",
        via: "gateway",
        result: { messageId: "g1", channelId: "1", meta: { thread: "2" } },
      }),
    ).toEqual({
      channel: "discord",
      via: "gateway",
      to: "channel:1",
      messageId: "g1",
      mediaUrl: null,
      channelId: "1",
      meta: { thread: "2" },
    });
  });
});
describe("formatGatewaySummary", () => {
  it("formats default and custom gateway action summaries", () => {
    // Default action wording with channel and id present.
    expect(formatGatewaySummary({ channel: "whatsapp", messageId: "m1" })).toBe(
      "✅ Sent via gateway (whatsapp). Message ID: m1",
    );
    // A custom action replaces the default "Sent" verb.
    expect(formatGatewaySummary({ action: "Poll sent", channel: "discord", messageId: "p1" })).toBe(
      "✅ Poll sent via gateway (discord). Message ID: p1",
    );
    // Missing fields degrade to generic wording and the "unknown" id.
    expect(formatGatewaySummary({})).toBe("✅ Sent via gateway. Message ID: unknown");
  });
});

View File

@ -1,7 +1,7 @@
import fs from "node:fs";
import os from "node:os";
import path from "node:path";
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import type { ReplyPayload } from "../../auto-reply/types.js";
import type { OpenClawConfig } from "../../config/config.js";
import { typedCases } from "../../test-utils/typed-cases.js";
@ -18,12 +18,6 @@ import {
moveToFailed,
recoverPendingDeliveries,
} from "./delivery-queue.js";
import { DirectoryCache } from "./directory-cache.js";
import {
buildOutboundDeliveryJson,
formatGatewaySummary,
formatOutboundDeliverySummary,
} from "./format.js";
import {
applyCrossContextDecoration,
buildCrossContextDecoration,
@ -616,184 +610,6 @@ describe("delivery-queue", () => {
});
});
describe("DirectoryCache", () => {
const cfg = {} as OpenClawConfig;
afterEach(() => {
vi.useRealTimers();
});
it("expires entries after ttl", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-01-01T00:00:00.000Z"));
const cache = new DirectoryCache<string>(1000, 10);
cache.set("a", "value-a", cfg);
expect(cache.get("a", cfg)).toBe("value-a");
vi.setSystemTime(new Date("2026-01-01T00:00:02.000Z"));
expect(cache.get("a", cfg)).toBeUndefined();
});
it("evicts least-recent entries when capacity is exceeded", () => {
const cases = [
{
actions: [
["set", "a", "value-a"],
["set", "b", "value-b"],
["set", "c", "value-c"],
] as const,
expected: { a: undefined, b: "value-b", c: "value-c" },
},
{
actions: [
["set", "a", "value-a"],
["set", "b", "value-b"],
["set", "a", "value-a2"],
["set", "c", "value-c"],
] as const,
expected: { a: "value-a2", b: undefined, c: "value-c" },
},
];
for (const testCase of cases) {
const cache = new DirectoryCache<string>(60_000, 2);
for (const action of testCase.actions) {
cache.set(action[1], action[2], cfg);
}
expect(cache.get("a", cfg)).toBe(testCase.expected.a);
expect(cache.get("b", cfg)).toBe(testCase.expected.b);
expect(cache.get("c", cfg)).toBe(testCase.expected.c);
}
});
});
describe("formatOutboundDeliverySummary", () => {
it("formats fallback and channel-specific detail variants", () => {
const cases = [
{
name: "fallback telegram",
channel: "telegram" as const,
result: undefined,
expected: "✅ Sent via Telegram. Message ID: unknown",
},
{
name: "fallback imessage",
channel: "imessage" as const,
result: undefined,
expected: "✅ Sent via iMessage. Message ID: unknown",
},
{
name: "telegram with chat detail",
channel: "telegram" as const,
result: {
channel: "telegram" as const,
messageId: "m1",
chatId: "c1",
},
expected: "✅ Sent via Telegram. Message ID: m1 (chat c1)",
},
{
name: "discord with channel detail",
channel: "discord" as const,
result: {
channel: "discord" as const,
messageId: "d1",
channelId: "chan",
},
expected: "✅ Sent via Discord. Message ID: d1 (channel chan)",
},
];
for (const testCase of cases) {
expect(formatOutboundDeliverySummary(testCase.channel, testCase.result), testCase.name).toBe(
testCase.expected,
);
}
});
});
describe("buildOutboundDeliveryJson", () => {
it("builds direct delivery payloads across provider-specific fields", () => {
const cases = [
{
name: "telegram direct payload",
input: {
channel: "telegram" as const,
to: "123",
result: { channel: "telegram" as const, messageId: "m1", chatId: "c1" },
mediaUrl: "https://example.com/a.png",
},
expected: {
channel: "telegram",
via: "direct",
to: "123",
messageId: "m1",
mediaUrl: "https://example.com/a.png",
chatId: "c1",
},
},
{
name: "whatsapp metadata",
input: {
channel: "whatsapp" as const,
to: "+1",
result: { channel: "whatsapp" as const, messageId: "w1", toJid: "jid" },
},
expected: {
channel: "whatsapp",
via: "direct",
to: "+1",
messageId: "w1",
mediaUrl: null,
toJid: "jid",
},
},
{
name: "signal timestamp",
input: {
channel: "signal" as const,
to: "+1",
result: { channel: "signal" as const, messageId: "s1", timestamp: 123 },
},
expected: {
channel: "signal",
via: "direct",
to: "+1",
messageId: "s1",
mediaUrl: null,
timestamp: 123,
},
},
];
for (const testCase of cases) {
expect(buildOutboundDeliveryJson(testCase.input), testCase.name).toEqual(testCase.expected);
}
});
});
describe("formatGatewaySummary", () => {
it("formats default and custom gateway action summaries", () => {
const cases = [
{
name: "default send action",
input: { channel: "whatsapp", messageId: "m1" },
expected: "✅ Sent via gateway (whatsapp). Message ID: m1",
},
{
name: "custom action",
input: { action: "Poll sent", channel: "discord", messageId: "p1" },
expected: "✅ Poll sent via gateway (discord). Message ID: p1",
},
];
for (const testCase of cases) {
expect(formatGatewaySummary(testCase.input), testCase.name).toBe(testCase.expected);
}
});
});
const slackConfig = {
channels: {
slack: {

View File

@ -19,7 +19,7 @@ describe("rejectPendingPairingRequest", () => {
});
it("removes the request, persists, and returns the dynamic id key", async () => {
const state = {
const state: { pendingById: Record<string, { accountId: string }> } = {
pendingById: {
keep: { accountId: "keep-me" },
reject: { accountId: "acct-42" },
@ -33,7 +33,7 @@ describe("rejectPendingPairingRequest", () => {
idKey: "accountId",
loadState: async () => state,
persistState,
getId: (pending) => pending.accountId,
getId: (pending: { accountId: string }) => pending.accountId,
}),
).resolves.toEqual({
requestId: "reject",

View File

@ -42,6 +42,51 @@ function lsofOutput(entries: Array<{ pid: number; cmd: string }>): string {
return entries.map(({ pid, cmd }) => `p${pid}\nc${cmd}`).join("\n") + "\n";
}
// Shape of a mocked spawnSync(lsof) result.
type MockLsofResult = {
  error: Error | null;
  status: number | null;
  stdout: string;
  stderr: string;
};
/** Successful-but-empty lsof result; caller overrides win over the defaults. */
function createLsofResult(overrides: Partial<MockLsofResult> = {}): MockLsofResult {
  const base: MockLsofResult = { error: null, status: 0, stdout: "", stderr: "" };
  return { ...base, ...overrides };
}
/** lsof result reporting an openclaw-gateway process holding the port. */
function createOpenClawBusyResult(pid: number, overrides: Partial<MockLsofResult> = {}) {
  const busyStdout = lsofOutput([{ pid, cmd: "openclaw-gateway" }]);
  return createLsofResult({ stdout: busyStdout, ...overrides });
}
/** Failed spawn result carrying a NodeJS errno code (e.g. ENOENT, EACCES). */
function createErrnoResult(code: string, message: string) {
  const errnoError: NodeJS.ErrnoException = new Error(message);
  errnoError.code = code;
  return createLsofResult({ error: errnoError, status: null });
}
/**
 * Mocks spawnSync so the first call reports a busy openclaw port and every
 * later poll is answered by resolvePoll(callNumber). Returns a call counter
 * so tests can assert exactly how many lsof invocations occurred.
 */
function installInitialBusyPoll(
  stalePid: number,
  resolvePoll: (call: number) => MockLsofResult,
): () => number {
  let callCount = 0;
  mockSpawnSync.mockImplementation(() => {
    callCount += 1;
    return callCount === 1 ? createOpenClawBusyResult(stalePid) : resolvePoll(callCount);
  });
  return () => callCount;
}
describe.skipIf(isWindows)("restart-stale-pids", () => {
beforeEach(() => {
mockSpawnSync.mockReset();
@ -201,20 +246,7 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// lsof exits with status 1 when no matching processes are found — this is
// the canonical "port is free" signal, not an error.
const stalePid = process.pid + 500;
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
// Poll returns status 1 — no listeners
return { error: null, status: 1, stdout: "", stderr: "" };
});
installInitialBusyPoll(stalePid, () => createLsofResult({ status: 1 }));
vi.spyOn(process, "kill").mockReturnValue(true);
// Should complete cleanly (port reported free on status 1)
expect(() => cleanStaleGatewayProcessesSync()).not.toThrow();
@ -225,27 +257,17 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// bad flag, runtime error) must not be mapped to free:true. They are
// inconclusive and should keep the polling loop running until budget expires.
const stalePid = process.pid + 501;
let call = 0;
const events: string[] = [];
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
events.push("initial-find");
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
events.push("initial-find");
installInitialBusyPoll(stalePid, (call) => {
if (call === 2) {
// Permission/runtime error — status 2, should NOT be treated as free
events.push("error-poll");
return { error: null, status: 2, stdout: "", stderr: "lsof: permission denied" };
return createLsofResult({ status: 2, stderr: "lsof: permission denied" });
}
// Eventually port is free
events.push("free-poll");
return { error: null, status: 1, stdout: "", stderr: "" };
return createLsofResult({ status: 1 });
});
vi.spyOn(process, "kill").mockReturnValue(true);
cleanStaleGatewayProcessesSync();
@ -263,29 +285,13 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// The fix: pollPortOnce now parses res.stdout directly from the first
// spawnSync call. Exactly ONE lsof invocation per poll cycle.
const stalePid = process.pid + 400;
let spawnCount = 0;
mockSpawnSync.mockImplementation(() => {
spawnCount++;
if (spawnCount === 1) {
// Initial findGatewayPidsOnPortSync — returns stale pid
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
if (spawnCount === 2) {
const getCallCount = installInitialBusyPoll(stalePid, (call) => {
if (call === 2) {
// First waitForPortFreeSync poll — status 0, port busy (should parse inline, not spawn again)
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
return createOpenClawBusyResult(stalePid);
}
// Port free on third call
return { error: null, status: 0, stdout: "", stderr: "" };
return createLsofResult();
});
vi.spyOn(process, "kill").mockReturnValue(true);
@ -294,7 +300,7 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// If pollPortOnce made a second lsof call internally, spawnCount would
// be at least 4 (initial + 2 polls each doubled). With the fix, each poll
// is exactly one spawn: initial(1) + busy-poll(1) + free-poll(1) = 3.
expect(spawnCount).toBe(3);
expect(getCallCount()).toBe(3);
});
it("lsof status 1 with non-empty openclaw stdout is treated as busy, not free (Linux container edge case)", () => {
@ -302,34 +308,21 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// lsof can exit 1 AND still emit output for processes it could read.
// status 1 + non-empty openclaw stdout must not be treated as port-free.
const stalePid = process.pid + 601;
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
// Initial scan: finds stale pid
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
const getCallCount = installInitialBusyPoll(stalePid, (call) => {
if (call === 2) {
// status 1 + openclaw pid in stdout — container-restricted lsof reports partial results
return {
error: null,
return createOpenClawBusyResult(stalePid, {
status: 1,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "lsof: WARNING: can't stat() fuse",
};
});
}
// Third poll: port is genuinely free
return { error: null, status: 1, stdout: "", stderr: "" };
return createLsofResult({ status: 1 });
});
vi.spyOn(process, "kill").mockReturnValue(true);
cleanStaleGatewayProcessesSync();
// Poll 2 returned busy (not free), so we must have polled at least 3 times
expect(call).toBeGreaterThanOrEqual(3);
expect(getCallCount()).toBeGreaterThanOrEqual(3);
});
it("pollPortOnce outer catch returns { free: null, permanent: false } when resolveLsofCommandSync throws", () => {
@ -382,20 +375,7 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
it("sends SIGTERM to stale pids and returns them", () => {
const stalePid = process.pid + 100;
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
// waitForPortFreeSync polls: port free immediately
return { error: null, status: 0, stdout: "", stderr: "" };
});
installInitialBusyPoll(stalePid, () => createLsofResult());
const killSpy = vi.spyOn(process, "kill").mockReturnValue(true);
const result = cleanStaleGatewayProcessesSync();
@ -474,24 +454,11 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// immediately on ENOENT rather than spinning the full 2-second budget.
const stalePid = process.pid + 300;
const events: string[] = [];
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
events.push("initial-find");
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
events.push("initial-find");
installInitialBusyPoll(stalePid, (call) => {
// Permanent ENOENT — lsof is not installed
events.push(`enoent-poll-${call}`);
const err = new Error("lsof not found") as NodeJS.ErrnoException;
err.code = "ENOENT";
return { error: err, status: null, stdout: "", stderr: "" };
return createErrnoResult("ENOENT", "lsof not found");
});
vi.spyOn(process, "kill").mockReturnValue(true);
@ -506,50 +473,26 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// EPERM occurs when lsof exists but a MAC policy (SELinux/AppArmor) blocks
// execution. Like ENOENT/EACCES, this is permanent — retrying is pointless.
const stalePid = process.pid + 305;
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
const err = new Error("lsof eperm") as NodeJS.ErrnoException;
err.code = "EPERM";
return { error: err, status: null, stdout: "", stderr: "" };
});
const getCallCount = installInitialBusyPoll(stalePid, () =>
createErrnoResult("EPERM", "lsof eperm"),
);
vi.spyOn(process, "kill").mockReturnValue(true);
expect(() => cleanStaleGatewayProcessesSync()).not.toThrow();
// Must bail after exactly 1 EPERM poll — same as ENOENT/EACCES
expect(call).toBe(2); // 1 initial find + 1 EPERM poll
expect(getCallCount()).toBe(2); // 1 initial find + 1 EPERM poll
});
it("bails immediately when lsof is permanently unavailable (EACCES) — same as ENOENT", () => {
// EACCES and EPERM are also permanent conditions — lsof exists but the
// process has no permission to run it. No point retrying.
const stalePid = process.pid + 302;
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
const err = new Error("lsof permission denied") as NodeJS.ErrnoException;
err.code = "EACCES";
return { error: err, status: null, stdout: "", stderr: "" };
});
const getCallCount = installInitialBusyPoll(stalePid, () =>
createErrnoResult("EACCES", "lsof permission denied"),
);
vi.spyOn(process, "kill").mockReturnValue(true);
expect(() => cleanStaleGatewayProcessesSync()).not.toThrow();
// Should have bailed after exactly 1 poll call (the EACCES one)
expect(call).toBe(2); // 1 initial find + 1 EACCES poll
expect(getCallCount()).toBe(2); // 1 initial find + 1 EACCES poll
});
it("proceeds with warning when polling budget is exhausted — fake clock, no real 2s wait", () => {
@ -561,15 +504,10 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
let fakeNow = 0;
__testing.setDateNowOverride(() => fakeNow);
mockSpawnSync.mockImplementation(() => {
installInitialBusyPoll(stalePid, () => {
// Advance clock by PORT_FREE_TIMEOUT_MS + 1ms on first poll to trip the deadline.
fakeNow += 2001;
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
return createOpenClawBusyResult(stalePid);
});
vi.spyOn(process, "kill").mockReturnValue(true);
@ -585,24 +523,13 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// leaving its socket in TIME_WAIT / FIN_WAIT. Skipping the poll would
// silently recreate the EADDRINUSE race we are fixing.
const stalePid = process.pid + 304;
let call = 0;
const events: string[] = [];
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
// Initial scan: finds stale pid
events.push("initial-find");
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
events.push("initial-find");
installInitialBusyPoll(stalePid, () => {
// Port is already free on first poll — pid was dead before SIGTERM
events.push("poll-free");
return { error: null, status: 1, stdout: "", stderr: "" };
return createLsofResult({ status: 1 });
});
// All SIGTERMs throw ESRCH — pid already gone
@ -623,27 +550,16 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// would recreate the EADDRINUSE race this PR is designed to prevent.
const stalePid = process.pid + 301;
const events: string[] = [];
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
events.push("initial-find");
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
events.push("initial-find");
installInitialBusyPoll(stalePid, (call) => {
if (call === 2) {
// Transient: spawnSync timeout (no ENOENT code)
events.push("transient-error");
return { error: new Error("timeout"), status: null, stdout: "", stderr: "" };
return createLsofResult({ error: new Error("timeout"), status: null });
}
// Port free on the next poll
events.push("port-free");
return { error: null, status: 1, stdout: "", stderr: "" };
return createLsofResult({ status: 1 });
});
vi.spyOn(process, "kill").mockReturnValue(true);
@ -739,30 +655,18 @@ describe.skipIf(isWindows)("restart-stale-pids", () => {
// the port may be held by an unrelated process. From our perspective
// (we only kill openclaw pids) it is effectively free.
const stalePid = process.pid + 800;
let call = 0;
mockSpawnSync.mockImplementation(() => {
call++;
if (call === 1) {
return {
error: null,
status: 0,
stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]),
stderr: "",
};
}
const getCallCount = installInitialBusyPoll(stalePid, () => {
// status 1 + non-openclaw output — should be treated as free:true for our purposes
return {
error: null,
return createLsofResult({
status: 1,
stdout: lsofOutput([{ pid: process.pid + 801, cmd: "caddy" }]),
stderr: "",
};
});
});
vi.spyOn(process, "kill").mockReturnValue(true);
// Should complete cleanly — no openclaw pids in status-1 output → free
expect(() => cleanStaleGatewayProcessesSync()).not.toThrow();
// Completed in exactly 2 calls (initial find + 1 free poll)
expect(call).toBe(2);
expect(getCallCount()).toBe(2);
});
});

View File

@ -22,6 +22,21 @@ describe("resolveInlineCommandMatch", () => {
command: "Get-ChildItem",
valueTokenIndex: 2,
});
expect(
resolveInlineCommandMatch(["pwsh", "-File", "script.ps1"], POWERSHELL_INLINE_COMMAND_FLAGS),
).toEqual({
command: "script.ps1",
valueTokenIndex: 2,
});
expect(
resolveInlineCommandMatch(
["powershell", "-f", "script.ps1"],
POWERSHELL_INLINE_COMMAND_FLAGS,
),
).toEqual({
command: "script.ps1",
valueTokenIndex: 2,
});
});
it("supports combined -c forms only when enabled", () => {

View File

@ -3,6 +3,8 @@ export const POWERSHELL_INLINE_COMMAND_FLAGS = new Set([
"-c",
"-command",
"--command",
"-f",
"-file",
"-encodedcommand",
"-enc",
"-e",

View File

@ -55,6 +55,8 @@ describe("system run command helpers", () => {
test("extractShellCommandFromArgv supports fish and pwsh wrappers", () => {
expect(extractShellCommandFromArgv(["fish", "-c", "echo hi"])).toBe("echo hi");
expect(extractShellCommandFromArgv(["pwsh", "-Command", "Get-Date"])).toBe("Get-Date");
expect(extractShellCommandFromArgv(["pwsh", "-File", "script.ps1"])).toBe("script.ps1");
expect(extractShellCommandFromArgv(["powershell", "-f", "script.ps1"])).toBe("script.ps1");
expect(extractShellCommandFromArgv(["pwsh", "-EncodedCommand", "ZQBjAGgAbwA="])).toBe(
"ZQBjAGgAbwA=",
);

View File

@ -4,7 +4,9 @@ import { rawDataToString } from "./ws.js";
describe("rawDataToString", () => {
it("returns string input unchanged", () => {
expect(rawDataToString("hello")).toBe("hello");
expect(rawDataToString("hello" as unknown as Parameters<typeof rawDataToString>[0])).toBe(
"hello",
);
});
it("decodes Buffer, Buffer[] and ArrayBuffer inputs", () => {

View File

@ -58,6 +58,31 @@ function mockResolvedProviderKey(apiKey = "test-key") {
});
}
type GeminiFetchMock =
| ReturnType<typeof createGeminiFetchMock>
| ReturnType<typeof createGeminiBatchFetchMock>;
async function createProviderWithFetch(
fetchMock: GeminiFetchMock,
options: Partial<Parameters<typeof createGeminiEmbeddingProvider>[0]> & { model: string },
) {
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
fallback: "none",
...options,
});
return provider;
}
function expectNormalizedThreeFourVector(embedding: number[]) {
expect(embedding[0]).toBeCloseTo(0.6, 5);
expect(embedding[1]).toBeCloseTo(0.8, 5);
expect(magnitude(embedding)).toBeCloseTo(1, 5);
}
describe("buildGeminiTextEmbeddingRequest", () => {
it("builds a text embedding request with optional model and dimensions", () => {
expect(
@ -160,14 +185,8 @@ describe("resolveGeminiOutputDimensionality", () => {
describe("gemini-embedding-001 provider (backward compat)", () => {
it("does NOT include outputDimensionality in embedQuery", async () => {
const fetchMock = createGeminiFetchMock();
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-001",
fallback: "none",
});
await provider.embedQuery("test query");
@ -180,14 +199,8 @@ describe("gemini-embedding-001 provider (backward compat)", () => {
it("does NOT include outputDimensionality in embedBatch", async () => {
const fetchMock = createGeminiBatchFetchMock(2);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-001",
fallback: "none",
});
await provider.embedBatch(["text1", "text2"]);
@ -202,14 +215,8 @@ describe("gemini-embedding-001 provider (backward compat)", () => {
describe("gemini-embedding-2-preview provider", () => {
it("includes outputDimensionality in embedQuery request", async () => {
const fetchMock = createGeminiFetchMock();
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
await provider.embedQuery("test query");
@ -222,33 +229,19 @@ describe("gemini-embedding-2-preview provider", () => {
it("normalizes embedQuery response vectors", async () => {
const fetchMock = createGeminiFetchMock([3, 4]);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
const embedding = await provider.embedQuery("test query");
expect(embedding[0]).toBeCloseTo(0.6, 5);
expect(embedding[1]).toBeCloseTo(0.8, 5);
expect(magnitude(embedding)).toBeCloseTo(1, 5);
expectNormalizedThreeFourVector(embedding);
});
it("includes outputDimensionality in embedBatch request", async () => {
const fetchMock = createGeminiBatchFetchMock(2);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
await provider.embedBatch(["text1", "text2"]);
@ -272,36 +265,22 @@ describe("gemini-embedding-2-preview provider", () => {
it("normalizes embedBatch response vectors", async () => {
const fetchMock = createGeminiBatchFetchMock(2, [3, 4]);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
const embeddings = await provider.embedBatch(["text1", "text2"]);
expect(embeddings).toHaveLength(2);
for (const embedding of embeddings) {
expect(embedding[0]).toBeCloseTo(0.6, 5);
expect(embedding[1]).toBeCloseTo(0.8, 5);
expect(magnitude(embedding)).toBeCloseTo(1, 5);
expectNormalizedThreeFourVector(embedding);
}
});
it("respects custom outputDimensionality", async () => {
const fetchMock = createGeminiFetchMock();
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
outputDimensionality: 768,
});
@ -313,14 +292,8 @@ describe("gemini-embedding-2-preview provider", () => {
it("sanitizes and normalizes embedQuery responses", async () => {
const fetchMock = createGeminiFetchMock([3, 4, Number.NaN]);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
await expect(provider.embedQuery("test")).resolves.toEqual([0.6, 0.8, 0]);
@ -328,14 +301,8 @@ describe("gemini-embedding-2-preview provider", () => {
it("uses custom outputDimensionality for each embedBatch request", async () => {
const fetchMock = createGeminiBatchFetchMock(2);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
outputDimensionality: 768,
});
@ -350,14 +317,8 @@ describe("gemini-embedding-2-preview provider", () => {
it("sanitizes and normalizes structured batch responses", async () => {
const fetchMock = createGeminiBatchFetchMock(1, [0, Number.POSITIVE_INFINITY, 5]);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
await expect(
@ -375,14 +336,8 @@ describe("gemini-embedding-2-preview provider", () => {
it("supports multimodal embedBatchInputs requests", async () => {
const fetchMock = createGeminiBatchFetchMock(2);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
expect(provider.embedBatchInputs).toBeDefined();
@ -451,14 +406,8 @@ describe("gemini-embedding-2-preview provider", () => {
Number.POSITIVE_INFINITY,
Number.NEGATIVE_INFINITY,
]);
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
const embedding = await provider.embedQuery("test");
@ -468,14 +417,8 @@ describe("gemini-embedding-2-preview provider", () => {
it("uses correct endpoint URL", async () => {
const fetchMock = createGeminiFetchMock();
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
});
await provider.embedQuery("test");
@ -488,14 +431,8 @@ describe("gemini-embedding-2-preview provider", () => {
it("allows taskType override via options", async () => {
const fetchMock = createGeminiFetchMock();
vi.stubGlobal("fetch", fetchMock);
mockResolvedProviderKey();
const { provider } = await createGeminiEmbeddingProvider({
config: {} as never,
provider: "gemini",
const provider = await createProviderWithFetch(fetchMock, {
model: "gemini-embedding-2-preview",
fallback: "none",
taskType: "SEMANTIC_SIMILARITY",
});

View File

@ -43,6 +43,14 @@ type RuntimeFixture = {
binNames?: string[];
};
type UnsafeRuntimeInvocationCase = {
name: string;
binName: string;
tmpPrefix: string;
command: string[];
setup?: (tmp: string) => void;
};
function createScriptOperandFixture(tmp: string, fixture?: RuntimeFixture): ScriptOperandFixture {
if (fixture) {
return {
@ -154,6 +162,101 @@ function withScriptOperandPlanFixture<T>(
}
}
const DENIED_RUNTIME_APPROVAL = {
ok: false,
message: "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
} as const;
function expectRuntimeApprovalDenied(command: string[], cwd: string) {
const prepared = buildSystemRunApprovalPlan({ command, cwd });
expect(prepared).toEqual(DENIED_RUNTIME_APPROVAL);
}
const unsafeRuntimeInvocationCases: UnsafeRuntimeInvocationCase[] = [
{
name: "rejects bun package script names that do not bind a concrete file",
binName: "bun",
tmpPrefix: "openclaw-bun-package-script-",
command: ["bun", "run", "dev"],
},
{
name: "rejects deno eval invocations that do not bind a concrete file",
binName: "deno",
tmpPrefix: "openclaw-deno-eval-",
command: ["deno", "eval", "console.log('SAFE')"],
},
{
name: "rejects tsx eval invocations that do not bind a concrete file",
binName: "tsx",
tmpPrefix: "openclaw-tsx-eval-",
command: ["tsx", "--eval", "console.log('SAFE')"],
},
{
name: "rejects node inline import operands that cannot be bound to one stable file",
binName: "node",
tmpPrefix: "openclaw-node-import-inline-",
command: ["node", "--import=./preload.mjs", "./main.mjs"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "main.mjs"), 'console.log("SAFE")\n');
fs.writeFileSync(path.join(tmp, "preload.mjs"), 'console.log("SAFE")\n');
},
},
{
name: "rejects ruby require preloads that approval cannot bind completely",
binName: "ruby",
tmpPrefix: "openclaw-ruby-require-",
command: ["ruby", "-r", "attacker", "./safe.rb"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n');
},
},
{
name: "rejects ruby load-path flags that can redirect module resolution after approval",
binName: "ruby",
tmpPrefix: "openclaw-ruby-load-path-",
command: ["ruby", "-I.", "./safe.rb"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n');
},
},
{
name: "rejects perl module preloads that approval cannot bind completely",
binName: "perl",
tmpPrefix: "openclaw-perl-module-preload-",
command: ["perl", "-MPreload", "./safe.pl"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n');
},
},
{
name: "rejects perl load-path flags that can redirect module resolution after approval",
binName: "perl",
tmpPrefix: "openclaw-perl-load-path-",
command: ["perl", "-Ilib", "./safe.pl"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n');
},
},
{
name: "rejects perl combined preload and load-path flags",
binName: "perl",
tmpPrefix: "openclaw-perl-preload-load-path-",
command: ["perl", "-Ilib", "-MPreload", "./safe.pl"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n');
},
},
{
name: "rejects shell payloads that hide mutable interpreter scripts",
binName: "node",
tmpPrefix: "openclaw-inline-shell-node-",
command: ["sh", "-lc", "node ./run.js"],
setup: (tmp) => {
fs.writeFileSync(path.join(tmp, "run.js"), 'console.log("SAFE")\n');
},
},
];
describe("hardenApprovedExecutionPaths", () => {
const cases: HardeningCase[] = [
{
@ -493,233 +596,22 @@ describe("hardenApprovedExecutionPaths", () => {
);
});
it("rejects bun package script names that do not bind a concrete file", () => {
withFakeRuntimeBin({
binName: "bun",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-bun-package-script-"));
try {
const prepared = buildSystemRunApprovalPlan({
command: ["bun", "run", "dev"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
for (const testCase of unsafeRuntimeInvocationCases) {
it(testCase.name, () => {
withFakeRuntimeBin({
binName: testCase.binName,
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), testCase.tmpPrefix));
try {
testCase.setup?.(tmp);
expectRuntimeApprovalDenied(testCase.command, tmp);
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
});
it("rejects deno eval invocations that do not bind a concrete file", () => {
withFakeRuntimeBin({
binName: "deno",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-deno-eval-"));
try {
const prepared = buildSystemRunApprovalPlan({
command: ["deno", "eval", "console.log('SAFE')"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects tsx eval invocations that do not bind a concrete file", () => {
withFakeRuntimeBin({
binName: "tsx",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-tsx-eval-"));
try {
const prepared = buildSystemRunApprovalPlan({
command: ["tsx", "--eval", "console.log('SAFE')"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects node inline import operands that cannot be bound to one stable file", () => {
withFakeRuntimeBin({
binName: "node",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-node-import-inline-"));
try {
fs.writeFileSync(path.join(tmp, "main.mjs"), 'console.log("SAFE")\n');
fs.writeFileSync(path.join(tmp, "preload.mjs"), 'console.log("SAFE")\n');
const prepared = buildSystemRunApprovalPlan({
command: ["node", "--import=./preload.mjs", "./main.mjs"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects ruby require preloads that approval cannot bind completely", () => {
withFakeRuntimeBin({
binName: "ruby",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-ruby-require-"));
try {
fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n');
const prepared = buildSystemRunApprovalPlan({
command: ["ruby", "-r", "attacker", "./safe.rb"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects ruby load-path flags that can redirect module resolution after approval", () => {
withFakeRuntimeBin({
binName: "ruby",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-ruby-load-path-"));
try {
fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n');
const prepared = buildSystemRunApprovalPlan({
command: ["ruby", "-I.", "./safe.rb"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects perl module preloads that approval cannot bind completely", () => {
withFakeRuntimeBin({
binName: "perl",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-perl-module-preload-"));
try {
fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n');
const prepared = buildSystemRunApprovalPlan({
command: ["perl", "-MPreload", "./safe.pl"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects perl load-path flags that can redirect module resolution after approval", () => {
withFakeRuntimeBin({
binName: "perl",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-perl-load-path-"));
try {
fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n');
const prepared = buildSystemRunApprovalPlan({
command: ["perl", "-Ilib", "./safe.pl"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects perl combined preload and load-path flags", () => {
withFakeRuntimeBin({
binName: "perl",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-perl-preload-load-path-"));
try {
fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n');
const prepared = buildSystemRunApprovalPlan({
command: ["perl", "-Ilib", "-MPreload", "./safe.pl"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
it("rejects shell payloads that hide mutable interpreter scripts", () => {
withFakeRuntimeBin({
binName: "node",
run: () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-inline-shell-node-"));
try {
fs.writeFileSync(path.join(tmp, "run.js"), 'console.log("SAFE")\n');
const prepared = buildSystemRunApprovalPlan({
command: ["sh", "-lc", "node ./run.js"],
cwd: tmp,
});
expect(prepared).toEqual({
ok: false,
message:
"SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command",
});
} finally {
fs.rmSync(tmp, { recursive: true, force: true });
}
},
});
});
}
it("captures the real shell script operand after value-taking shell flags", () => {
const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-shell-option-value-"));

View File

@ -113,6 +113,40 @@ async function seedAuditFixture(fixture: AuditFixture): Promise<void> {
describe("secrets audit", () => {
let fixture: AuditFixture;
async function writeModelsProvider(
overrides: Partial<{
apiKey: unknown;
headers: Record<string, unknown>;
}> = {},
) {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: OPENAI_API_KEY_MARKER,
models: [{ id: "gpt-5", name: "gpt-5" }],
...overrides,
},
},
});
}
function expectModelsFinding(
report: Awaited<ReturnType<typeof runSecretsAudit>>,
params: { code: string; jsonPath?: string; present?: boolean },
) {
expect(
hasFinding(
report,
(entry) =>
entry.code === params.code &&
entry.file === fixture.modelsPath &&
(params.jsonPath === undefined || entry.jsonPath === params.jsonPath),
),
).toBe(params.present ?? true);
}
beforeEach(async () => {
fixture = await createAuditFixture();
await seedAuditFixture(fixture);
@ -278,221 +312,116 @@ describe("secrets audit", () => {
});
it("scans agent models.json files for plaintext provider apiKey values", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: "sk-models-plaintext", // pragma: allowlist secret
models: [{ id: "gpt-5", name: "gpt-5" }],
},
},
});
await writeModelsProvider({ apiKey: "sk-models-plaintext" }); // pragma: allowlist secret
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.apiKey",
),
).toBe(true);
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.apiKey",
});
expect(report.filesScanned).toContain(fixture.modelsPath);
});
it("scans agent models.json files for plaintext provider header values", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: OPENAI_API_KEY_MARKER,
headers: {
Authorization: "Bearer sk-header-plaintext", // pragma: allowlist secret
},
models: [{ id: "gpt-5", name: "gpt-5" }],
},
await writeModelsProvider({
headers: {
Authorization: "Bearer sk-header-plaintext", // pragma: allowlist secret
},
});
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.headers.Authorization",
),
).toBe(true);
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.headers.Authorization",
});
});
it("does not flag non-sensitive routing headers in models.json", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: OPENAI_API_KEY_MARKER,
headers: {
"X-Proxy-Region": "us-west",
},
models: [{ id: "gpt-5", name: "gpt-5" }],
},
await writeModelsProvider({
headers: {
"X-Proxy-Region": "us-west",
},
});
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.headers.X-Proxy-Region",
),
).toBe(false);
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.headers.X-Proxy-Region",
present: false,
});
});
it("does not flag models.json marker values as plaintext", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: OPENAI_API_KEY_MARKER,
models: [{ id: "gpt-5", name: "gpt-5" }],
},
},
});
await writeModelsProvider();
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.apiKey",
),
).toBe(false);
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.apiKey",
present: false,
});
});
it("flags arbitrary all-caps models.json apiKey values as plaintext", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret
models: [{ id: "gpt-5", name: "gpt-5" }],
},
},
});
await writeModelsProvider({ apiKey: "ALLCAPS_SAMPLE" }); // pragma: allowlist secret
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.apiKey",
),
).toBe(true);
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.apiKey",
});
});
it("does not flag models.json header marker values as plaintext", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: OPENAI_API_KEY_MARKER,
headers: {
Authorization: "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
"x-managed-token": "secretref-managed", // pragma: allowlist secret
},
models: [{ id: "gpt-5", name: "gpt-5" }],
},
await writeModelsProvider({
headers: {
Authorization: "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
"x-managed-token": "secretref-managed", // pragma: allowlist secret
},
});
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.headers.Authorization",
),
).toBe(false);
expect(
hasFinding(
report,
(entry) =>
entry.code === "PLAINTEXT_FOUND" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.headers.x-managed-token",
),
).toBe(false);
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.headers.Authorization",
present: false,
});
expectModelsFinding(report, {
code: "PLAINTEXT_FOUND",
jsonPath: "providers.openai.headers.x-managed-token",
present: false,
});
});
it("reports unresolved models.json SecretRef objects in provider headers", async () => {
await writeJsonFile(fixture.modelsPath, {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1",
api: "openai-completions",
apiKey: OPENAI_API_KEY_MARKER,
headers: {
Authorization: {
source: "env",
provider: "default",
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
},
},
models: [{ id: "gpt-5", name: "gpt-5" }],
await writeModelsProvider({
headers: {
Authorization: {
source: "env",
provider: "default",
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
},
},
});
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) =>
entry.code === "REF_UNRESOLVED" &&
entry.file === fixture.modelsPath &&
entry.jsonPath === "providers.openai.headers.Authorization",
),
).toBe(true);
expectModelsFinding(report, {
code: "REF_UNRESOLVED",
jsonPath: "providers.openai.headers.Authorization",
});
});
it("reports malformed models.json as unresolved findings", async () => {
await fs.writeFile(fixture.modelsPath, "{bad-json", "utf8");
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) => entry.code === "REF_UNRESOLVED" && entry.file === fixture.modelsPath,
),
).toBe(true);
expectModelsFinding(report, { code: "REF_UNRESOLVED" });
});
it("reports non-regular models.json files as unresolved findings", async () => {
await fs.rm(fixture.modelsPath, { force: true });
await fs.mkdir(fixture.modelsPath, { recursive: true });
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) => entry.code === "REF_UNRESOLVED" && entry.file === fixture.modelsPath,
),
).toBe(true);
expectModelsFinding(report, { code: "REF_UNRESOLVED" });
});
it("reports oversized models.json as unresolved findings", async () => {
@ -509,12 +438,7 @@ describe("secrets audit", () => {
});
const report = await runSecretsAudit({ env: fixture.env });
expect(
hasFinding(
report,
(entry) => entry.code === "REF_UNRESOLVED" && entry.file === fixture.modelsPath,
),
).toBe(true);
expectModelsFinding(report, { code: "REF_UNRESOLVED" });
});
it("scans active agent-dir override models.json even when outside state dir", async () => {