From f8a3840a42770c1cdc8a442ec8e6631d5b0f0288 Mon Sep 17 00:00:00 2001
From: Peter Steinberger
Date: Sat, 4 Apr 2026 02:00:19 +0100
Subject: [PATCH] fix(ci): restore contextTokens runtime typing

---
 extensions/openai/openai-codex-provider.test.ts | 14 +++++---------
 src/agents/context.test.ts                      | 14 +++++++++++++-
 src/agents/pi-embedded-runner/compact.ts        | 17 ++++++++++-------
 src/agents/pi-embedded-runner/run/setup.ts      |  8 +++++---
 4 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/extensions/openai/openai-codex-provider.test.ts b/extensions/openai/openai-codex-provider.test.ts
index 0f13673d618..a652df1ee73 100644
--- a/extensions/openai/openai-codex-provider.test.ts
+++ b/extensions/openai/openai-codex-provider.test.ts
@@ -94,7 +94,7 @@ describe("openai codex provider", () => {
       provider: "openai-codex",
       modelId: "gpt-5.4",
       modelRegistry: {
-        find: vi.fn((providerId: string, modelId: string) => {
+        find: (providerId: string, modelId: string) => {
           if (providerId === "openai-codex" && modelId === "gpt-5.3-codex") {
             return {
               id: "gpt-5.3-codex",
@@ -110,9 +110,9 @@
             };
           }
           return null;
-        }),
-      },
-    });
+        },
+      } as never,
+    } as never);
 
     expect(model).toMatchObject({
       id: "gpt-5.4",
@@ -126,19 +126,15 @@
 
     const provider = buildOpenAICodexProviderPlugin();
     const entries = provider.augmentModelCatalog?.({
-      provider: "openai-codex",
+      env: process.env,
       entries: [
         {
           id: "gpt-5.3-codex",
           name: "gpt-5.3-codex",
           provider: "openai-codex",
-          api: "openai-codex-responses",
-          baseUrl: "https://chatgpt.com/backend-api",
           reasoning: true,
           input: ["text", "image"],
-          cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
           contextWindow: 272_000,
-          maxTokens: 128_000,
         },
       ],
     });
diff --git a/src/agents/context.test.ts b/src/agents/context.test.ts
index 65d692dd9ea..3ab7bb33d2e 100644
--- a/src/agents/context.test.ts
+++ b/src/agents/context.test.ts
@@ -225,7 +225,19 @@ describe("resolveContextTokensForModel", () => {
       models: {
         providers: {
           "openai-codex": {
-            models: [{ id: "gpt-5.4", contextWindow: 1_050_000, contextTokens: 160_000 }],
+            baseUrl: "https://chatgpt.com/backend-api",
+            models: [
+              {
+                id: "gpt-5.4",
+                name: "gpt-5.4",
+                reasoning: true,
+                input: ["text", "image"],
+                cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+                contextWindow: 1_050_000,
+                contextTokens: 160_000,
+                maxTokens: 128_000,
+              },
+            ],
           },
         },
       },
diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts
index 278c7f8ad92..2a7c839455e 100644
--- a/src/agents/pi-embedded-runner/compact.ts
+++ b/src/agents/pi-embedded-runner/compact.ts
@@ -19,6 +19,7 @@ import { getMachineDisplayName } from "../../infra/machine-name.js";
 import { generateSecureToken } from "../../infra/secure-random.js";
 import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js";
 import { prepareProviderRuntimeAuth } from "../../plugins/provider-runtime.js";
+import type { ProviderRuntimeModel } from "../../plugins/types.js";
 import { type enqueueCommand, enqueueCommandInLane } from "../../process/command-queue.js";
 import { isCronSessionKey, isSubagentSessionKey } from "../../routing/session-key.js";
 import { buildTtsSystemPromptHint } from "../../tts/tts.js";
@@ -437,19 +438,20 @@ export async function compactEmbeddedPiSessionDirect(
   });
   // Apply contextTokens cap to model so pi-coding-agent's auto-compaction
   // threshold uses the effective limit, not the native context window.
+  const runtimeModelWithContext = runtimeModel as ProviderRuntimeModel;
   const ctxInfo = resolveContextWindowInfo({
     cfg: params.config,
     provider,
     modelId,
-    modelContextTokens: runtimeModel.contextTokens,
-    modelContextWindow: runtimeModel.contextWindow,
+    modelContextTokens: runtimeModelWithContext.contextTokens,
+    modelContextWindow: runtimeModelWithContext.contextWindow,
     defaultTokens: DEFAULT_CONTEXT_TOKENS,
   });
   const effectiveModel = applyAuthHeaderOverride(
     applyLocalNoAuthHeaderOverride(
-      ctxInfo.tokens < (runtimeModel.contextWindow ?? Infinity)
-        ? { ...runtimeModel, contextWindow: ctxInfo.tokens }
-        : runtimeModel,
+      ctxInfo.tokens < (runtimeModelWithContext.contextWindow ?? Infinity)
+        ? { ...runtimeModelWithContext, contextWindow: ctxInfo.tokens }
+        : runtimeModelWithContext,
       apiKeyInfo,
     ),
     // Skip header injection when runtime auth exchange produced a
@@ -1028,12 +1030,13 @@ export async function compactEmbeddedPiSession(
     agentDir,
     params.config,
   );
+  const ceRuntimeModel = ceModel as ProviderRuntimeModel | undefined;
   const ceCtxInfo = resolveContextWindowInfo({
     cfg: params.config,
     provider: ceProvider,
     modelId: ceModelId,
-    modelContextTokens: ceModel?.contextTokens,
-    modelContextWindow: ceModel?.contextWindow,
+    modelContextTokens: ceRuntimeModel?.contextTokens,
+    modelContextWindow: ceRuntimeModel?.contextWindow,
     defaultTokens: DEFAULT_CONTEXT_TOKENS,
   });
   // When the context engine owns compaction, its compact() implementation
diff --git a/src/agents/pi-embedded-runner/run/setup.ts b/src/agents/pi-embedded-runner/run/setup.ts
index a4e6fb4965d..adf493409fc 100644
--- a/src/agents/pi-embedded-runner/run/setup.ts
+++ b/src/agents/pi-embedded-runner/run/setup.ts
@@ -1,6 +1,8 @@
-import type { Api, Model } from "@mariozechner/pi-ai";
 import type { OpenClawConfig } from "../../../config/config.js";
-import type { PluginHookBeforeAgentStartResult } from "../../../plugins/types.js";
+import type {
+  PluginHookBeforeAgentStartResult,
+  ProviderRuntimeModel,
+} from "../../../plugins/types.js";
 import {
   CONTEXT_WINDOW_HARD_MIN_TOKENS,
   CONTEXT_WINDOW_WARN_BELOW_TOKENS,
@@ -99,7 +101,7 @@ export function resolveEffectiveRuntimeModel(params: {
   cfg: OpenClawConfig | undefined;
   provider: string;
   modelId: string;
-  runtimeModel: Model;
+  runtimeModel: ProviderRuntimeModel;
 }) {
   const ctxInfo = resolveContextWindowInfo({
     cfg: params.cfg,