diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68cbdc6b19b..393f84cfc5e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,9 +27,10 @@ Docs: https://docs.openclaw.ai
 
 ### Fixes
 
+- Providers/OpenRouter: allow pass-through OpenRouter and Opencode model IDs in live model filtering so custom routed model IDs are treated as modern refs. (#14312) Thanks @Joly0.
 - Providers/OpenRouter: default reasoning to enabled when the selected model advertises `reasoning: true` and no session/directive override is set. (#22513) Thanks @zwffff.
 - Providers/OpenRouter: map `/think` levels to `reasoning.effort` in embedded runs while preserving explicit `reasoning.max_tokens` payloads. (#17236) Thanks @robbyczgw-cla.
-- Gateway/OpenRouter: preserve stored session provider when model IDs are vendor-prefixed (for example, `anthropic/...`) so follow-up turns do not incorrectly route to direct provider APIs. (#22753) Thanks @dndodson.
+- Providers/OpenRouter: preserve stored session provider when model IDs are vendor-prefixed (for example, `anthropic/...`) so follow-up turns do not incorrectly route to direct provider APIs. (#22753) Thanks @dndodson.
 - Providers/OpenRouter: preserve the required `openrouter/` prefix for OpenRouter-native model IDs during model-ref normalization. (#12942) Thanks @omair445.
 - Providers/OpenRouter: pass through provider routing parameters from model params.provider to OpenRouter request payloads for provider selection controls. (#17148) Thanks @carrotRakko.
 - Telegram/Webhook: keep webhook monitors alive until gateway abort signals fire, preventing false channel exits and immediate webhook auto-restart loops.
diff --git a/src/agents/live-model-filter.ts b/src/agents/live-model-filter.ts
index 48bbc3424c8..c4ad0957d81 100644
--- a/src/agents/live-model-filter.ts
+++ b/src/agents/live-model-filter.ts
@@ -33,10 +33,6 @@ function matchesExactOrPrefix(id: string, values: string[]): boolean {
   return values.some((value) => id === value || id.startsWith(value));
 }
 
-function matchesAny(id: string, values: string[]): boolean {
-  return values.some((value) => id.includes(value));
-}
-
 export function isModernModelRef(ref: ModelRef): boolean {
   const provider = ref.provider?.trim().toLowerCase() ?? "";
   const id = ref.id?.trim().toLowerCase() ?? "";
@@ -89,15 +85,9 @@ export function isModernModelRef(ref: ModelRef): boolean {
   }
 
   if (provider === "openrouter" || provider === "opencode") {
-    return matchesAny(id, [
-      ...ANTHROPIC_PREFIXES,
-      ...OPENAI_MODELS,
-      ...CODEX_MODELS,
-      ...GOOGLE_PREFIXES,
-      ...ZAI_PREFIXES,
-      ...MINIMAX_PREFIXES,
-      ...XAI_PREFIXES,
-    ]);
+    // OpenRouter/opencode are pass-through proxies; accept any model ID
+    // rather than restricting to a static prefix list.
+    return true;
   }
 
   return false;
diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts
index fc1cca65c2e..b1c55b8c353 100644
--- a/src/agents/models-config.providers.ts
+++ b/src/agents/models-config.providers.ts
@@ -144,6 +144,17 @@ const OLLAMA_DEFAULT_COST = {
   cacheWrite: 0,
 };
 
+const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
+const OPENROUTER_DEFAULT_MODEL_ID = "auto";
+const OPENROUTER_DEFAULT_CONTEXT_WINDOW = 200000;
+const OPENROUTER_DEFAULT_MAX_TOKENS = 8192;
+const OPENROUTER_DEFAULT_COST = {
+  input: 0,
+  output: 0,
+  cacheRead: 0,
+  cacheWrite: 0,
+};
+
 const VLLM_BASE_URL = "http://127.0.0.1:8000/v1";
 const VLLM_DEFAULT_CONTEXT_WINDOW = 128000;
 const VLLM_DEFAULT_MAX_TOKENS = 8192;
@@ -659,6 +670,24 @@ function buildTogetherProvider(): ProviderConfig {
   };
 }
 
+function buildOpenrouterProvider(): ProviderConfig {
+  return {
+    baseUrl: OPENROUTER_BASE_URL,
+    api: "openai-completions",
+    models: [
+      {
+        id: OPENROUTER_DEFAULT_MODEL_ID,
+        name: "OpenRouter Auto",
+        reasoning: false,
+        input: ["text", "image"],
+        cost: OPENROUTER_DEFAULT_COST,
+        contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW,
+        maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS,
+      },
+    ],
+  };
+}
+
 async function buildVllmProvider(params?: {
   baseUrl?: string;
   apiKey?: string;
@@ -671,6 +700,7 @@ async function buildVllmProvider(params?: {
     models,
   };
 }
+
 export function buildQianfanProvider(): ProviderConfig {
   return {
     baseUrl: QIANFAN_BASE_URL,
@@ -907,6 +937,13 @@ export async function resolveImplicitProviders(params: {
     providers.qianfan = { ...buildQianfanProvider(), apiKey: qianfanKey };
   }
 
+  const openrouterKey =
+    resolveEnvApiKeyVarName("openrouter") ??
+    resolveApiKeyFromProfiles({ provider: "openrouter", store: authStore });
+  if (openrouterKey) {
+    providers.openrouter = { ...buildOpenrouterProvider(), apiKey: openrouterKey };
+  }
+
   const nvidiaKey =
     resolveEnvApiKeyVarName("nvidia") ??
     resolveApiKeyFromProfiles({ provider: "nvidia", store: authStore });
diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts
index a9eff8fbdaf..f9e95023d5e 100644
--- a/src/agents/pi-embedded-runner/model.ts
+++ b/src/agents/pi-embedded-runner/model.ts
@@ -80,6 +80,24 @@ export function resolveModel(
   if (forwardCompat) {
     return { model: forwardCompat, authStorage, modelRegistry };
   }
+  // OpenRouter is a pass-through proxy — any model ID available on OpenRouter
+  // should work without being pre-registered in the local catalog.
+  if (normalizedProvider === "openrouter") {
+    const fallbackModel: Model = normalizeModelCompat({
+      id: modelId,
+      name: modelId,
+      api: "openai-completions",
+      provider,
+      baseUrl: "https://openrouter.ai/api/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: DEFAULT_CONTEXT_TOKENS,
+      // Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
+      maxTokens: 8192,
+    } as Model);
+    return { model: fallbackModel, authStorage, modelRegistry };
+  }
   const providerCfg = providers[provider];
   if (providerCfg || modelId.startsWith("mock-")) {
     const fallbackModel: Model = normalizeModelCompat({