refactor: remove provider-specific sdk shims from core

This commit is contained in:
Peter Steinberger 2026-04-05 16:54:15 +01:00
parent a6d0ab1482
commit 2ade009901
No known key found for this signature in database
9 changed files with 23 additions and 278 deletions

View File

@ -93,7 +93,7 @@ describe("voice-call config compatibility", () => {
doctorFixCommand: "openclaw doctor --fix",
}),
).toEqual([
`[voice-call] legacy config keys detected under plugins.entries.voice-call.config; runtime fallback remains for now but will be removed in ${VOICE_CALL_LEGACY_CONFIG_REMOVAL_VERSION}. Run "openclaw doctor --fix".`,
`[voice-call] legacy config keys detected under plugins.entries.voice-call.config; runtime loading will not rewrite them, and support for the legacy shape will be removed in ${VOICE_CALL_LEGACY_CONFIG_REMOVAL_VERSION}. Run "openclaw doctor --fix".`,
'[voice-call] plugins.entries.voice-call.config.provider: Replace provider "log" with "mock".',
"[voice-call] plugins.entries.voice-call.config.twilio.from: Move twilio.from to fromNumber.",
"[voice-call] plugins.entries.voice-call.config.streaming.sttProvider: Move streaming.sttProvider to streaming.provider.",

View File

@ -115,7 +115,7 @@ export function formatVoiceCallLegacyConfigWarnings(params: {
}
return [
`[voice-call] legacy config keys detected under ${params.configPathPrefix}; runtime fallback remains for now but will be removed in ${VOICE_CALL_LEGACY_CONFIG_REMOVAL_VERSION}. Run "${params.doctorFixCommand}".`,
`[voice-call] legacy config keys detected under ${params.configPathPrefix}; runtime loading will not rewrite them, and support for the legacy shape will be removed in ${VOICE_CALL_LEGACY_CONFIG_REMOVAL_VERSION}. Run "${params.doctorFixCommand}".`,
...issues.map(
(issue) => `[voice-call] ${params.configPathPrefix}.${issue.path}: ${issue.message}`,
),

View File

@ -1,11 +1,8 @@
import { resolveBedrockConfigApiKey } from "../../extensions/amazon-bedrock/discovery.js";
import { resolveAnthropicVertexConfigApiKey } from "../../extensions/anthropic-vertex/region.js";
import { normalizeGoogleProviderConfig } from "../../extensions/google/api.js";
import { MODEL_APIS } from "../config/types.models.js";
import { resolveMantleBearerToken } from "../plugin-sdk/amazon-bedrock-mantle.js";
import {
applyProviderNativeStreamingUsageCompatWithPlugin,
normalizeProviderConfigWithPlugin,
resolveProviderConfigApiKeyWithPlugin,
} from "../plugins/provider-runtime.js";
import type { ProviderConfig } from "./models-config.providers.secrets.js";
@ -15,9 +12,6 @@ const GENERIC_PROVIDER_APIS = new Set<string>([
"anthropic-messages",
"google-generative-ai",
]);
const PROVIDERS_WITH_RUNTIME_NORMALIZE_CONFIG = new Set<string>(["anthropic"]);
const GOOGLE_PROVIDER_KEYS = new Set<string>(["google", "google-antigravity", "google-vertex"]);
function resolveProviderPluginLookupKey(providerKey: string, provider?: ProviderConfig): string {
const api = typeof provider?.api === "string" ? provider.api.trim() : "";
if (
@ -58,15 +52,6 @@ export function normalizeProviderSpecificConfig(
provider: ProviderConfig,
): ProviderConfig {
const runtimeProviderKey = resolveProviderPluginLookupKey(providerKey, provider);
if (GOOGLE_PROVIDER_KEYS.has(runtimeProviderKey)) {
const normalized = normalizeGoogleProviderConfig(providerKey, provider);
if (normalized !== provider) {
return normalized;
}
}
if (!PROVIDERS_WITH_RUNTIME_NORMALIZE_CONFIG.has(runtimeProviderKey)) {
return provider;
}
const normalized =
normalizeProviderConfigWithPlugin({
provider: runtimeProviderKey,
@ -86,15 +71,12 @@ export function resolveProviderConfigApiKeyResolver(
provider?: ProviderConfig,
): ((env: NodeJS.ProcessEnv) => string | undefined) | undefined {
const runtimeProviderKey = resolveProviderPluginLookupKey(providerKey, provider).trim();
if (runtimeProviderKey === "amazon-bedrock") {
return (env) => resolveBedrockConfigApiKey(env)?.trim() || undefined;
}
if (runtimeProviderKey === "anthropic-vertex") {
return (env) => resolveAnthropicVertexConfigApiKey(env)?.trim() || undefined;
}
if (runtimeProviderKey === "amazon-bedrock-mantle") {
return (env) =>
resolveMantleBearerToken(env)?.trim() ? "AWS_BEARER_TOKEN_BEDROCK" : undefined;
}
return undefined;
return (env) =>
resolveProviderConfigApiKeyWithPlugin({
provider: runtimeProviderKey,
context: {
provider: providerKey,
env,
},
});
}

View File

@ -7,7 +7,6 @@ import { resolveAgentDir } from "../agents/agent-scope.js";
import type { OpenClawConfig } from "../config/config.js";
import { resolveAgentModelPrimaryValue } from "../config/model-input.js";
import type { ModelProviderConfig } from "../config/types.models.js";
import { ZAI_CODING_CN_BASE_URL, ZAI_CODING_GLOBAL_BASE_URL } from "../plugin-sdk/zai.js";
import { createProviderApiKeyAuthMethod } from "../plugins/provider-api-key-auth.js";
import { providerApiKeyAuthRuntime } from "../plugins/provider-api-key-auth.runtime.js";
import type { ProviderAuthMethod, ProviderAuthResult, ProviderPlugin } from "../plugins/types.js";
@ -26,6 +25,9 @@ import {
type DetectZaiEndpoint = typeof import("./zai-endpoint-detect.js").detectZaiEndpoint;
const ZAI_CODING_GLOBAL_BASE_URL = "https://api.z.ai/api/coding/paas/v4";
const ZAI_CODING_CN_BASE_URL = "https://open.bigmodel.cn/api/coding/paas/v4";
const loginOpenAICodexOAuth = vi.hoisted(() =>
vi.fn<() => Promise<OAuthCredentials | null>>(async () => null),
);

View File

@ -4,11 +4,6 @@ import { setTimeout as delay } from "node:timers/promises";
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { MINIMAX_API_BASE_URL, MINIMAX_CN_API_BASE_URL } from "../../extensions/minimax/api.js";
import { OPENAI_DEFAULT_MODEL } from "../../extensions/openai/api.js";
import {
ZAI_CODING_CN_BASE_URL,
ZAI_CODING_GLOBAL_BASE_URL,
ZAI_GLOBAL_BASE_URL,
} from "../plugin-sdk/zai.js";
import { makeTempWorkspace } from "../test-helpers/workspace.js";
import { withEnvAsync } from "../test-utils/env.js";
import {
@ -23,6 +18,10 @@ type OnboardEnv = {
};
type FetchLike = (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>;
const ZAI_CODING_GLOBAL_BASE_URL = "https://api.z.ai/api/coding/paas/v4";
const ZAI_CODING_CN_BASE_URL = "https://open.bigmodel.cn/api/coding/paas/v4";
const ZAI_GLOBAL_BASE_URL = "https://api.z.ai/api/paas/v4";
const ensureWorkspaceAndSessionsMock = vi.hoisted(() => vi.fn(async (..._args: unknown[]) => {}));
vi.mock("./onboard-non-interactive/local/auth-choice.plugin-providers.js", async () => {

View File

@ -1,8 +0,0 @@
/**
 * Resolve the bearer token Mantle's OpenAI-compatible surface requires.
 *
 * Mantle currently accepts only an explicit bearer token; plain IAM
 * credentials are not sufficient until token generation is wired in.
 *
 * @param env - Environment to read from (defaults to `process.env`).
 * @returns The trimmed `AWS_BEARER_TOKEN_BEDROCK` value, or `undefined`
 *          when the variable is unset, empty, or whitespace-only.
 */
export function resolveMantleBearerToken(env: NodeJS.ProcessEnv = process.env): string | undefined {
  const raw = env.AWS_BEARER_TOKEN_BEDROCK;
  if (typeof raw !== "string") {
    return undefined;
  }
  const trimmed = raw.trim();
  return trimmed.length > 0 ? trimmed : undefined;
}

View File

@ -1,228 +0,0 @@
import type { ModelDefinitionConfig } from "./provider-model-shared.js";
import type { OpenClawConfig } from "./provider-onboard.js";
import { applyProviderConfigWithModelCatalogPreset } from "./provider-onboard.js";
// General-purpose Z.AI API endpoints (international and mainland-China hosts).
export const ZAI_GLOBAL_BASE_URL = "https://api.z.ai/api/paas/v4";
export const ZAI_CN_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
// Coding-plan endpoints on the same two hosts.
export const ZAI_CODING_GLOBAL_BASE_URL = "https://api.z.ai/api/coding/paas/v4";
export const ZAI_CODING_CN_BASE_URL = "https://open.bigmodel.cn/api/coding/paas/v4";
// Default model used when onboarding does not specify one explicitly.
export const ZAI_DEFAULT_MODEL_ID = "glm-5";
export const ZAI_DEFAULT_MODEL_REF = `zai/${ZAI_DEFAULT_MODEL_ID}`;
/**
 * Shape of one entry in the static Z.AI model catalog below.
 * Field types are borrowed from `ModelDefinitionConfig` so catalog entries
 * stay compatible with `buildZaiModelDefinition`'s return type.
 */
type ZaiCatalogEntry = {
  name: string;
  // Whether the model exposes reasoning output.
  reasoning: boolean;
  input: ModelDefinitionConfig["input"];
  contextWindow: number;
  maxTokens: number;
  cost: ModelDefinitionConfig["cost"];
};
/**
 * Cost used for models with no catalog entry (matches the "glm-5" entry).
 * NOTE(review): units are presumably USD per million tokens — confirm
 * against Z.AI's published pricing before relying on these figures.
 */
export const ZAI_DEFAULT_COST = {
  input: 1,
  output: 3.2,
  cacheRead: 0.2,
  cacheWrite: 0,
} satisfies ModelDefinitionConfig["cost"];
/**
 * Static catalog of known Z.AI GLM models: display name, modalities,
 * context/output limits, and cost. `buildZaiModelDefinition` falls back to
 * these values when a caller supplies only a model id.
 * NOTE(review): limits and costs are hard-coded snapshots — verify against
 * Z.AI's current model listing when updating.
 */
const ZAI_MODEL_CATALOG = {
  // GLM-5 family.
  "glm-5.1": {
    name: "GLM-5.1",
    reasoning: true,
    input: ["text"],
    contextWindow: 202800,
    maxTokens: 131100,
    cost: { input: 1.2, output: 4, cacheRead: 0.24, cacheWrite: 0 },
  },
  "glm-5": {
    name: "GLM-5",
    reasoning: true,
    input: ["text"],
    contextWindow: 202800,
    maxTokens: 131100,
    cost: ZAI_DEFAULT_COST,
  },
  "glm-5-turbo": {
    name: "GLM-5 Turbo",
    reasoning: true,
    input: ["text"],
    contextWindow: 202800,
    maxTokens: 131100,
    cost: { input: 1.2, output: 4, cacheRead: 0.24, cacheWrite: 0 },
  },
  // "v" variants accept image input as well as text.
  "glm-5v-turbo": {
    name: "GLM-5V Turbo",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 202800,
    maxTokens: 131100,
    cost: { input: 1.2, output: 4, cacheRead: 0.24, cacheWrite: 0 },
  },
  // GLM-4.7 family.
  "glm-4.7": {
    name: "GLM-4.7",
    reasoning: true,
    input: ["text"],
    contextWindow: 204800,
    maxTokens: 131072,
    cost: { input: 0.6, output: 2.2, cacheRead: 0.11, cacheWrite: 0 },
  },
  "glm-4.7-flash": {
    name: "GLM-4.7 Flash",
    reasoning: true,
    input: ["text"],
    contextWindow: 200000,
    maxTokens: 131072,
    cost: { input: 0.07, output: 0.4, cacheRead: 0, cacheWrite: 0 },
  },
  "glm-4.7-flashx": {
    name: "GLM-4.7 FlashX",
    reasoning: true,
    input: ["text"],
    contextWindow: 200000,
    maxTokens: 128000,
    cost: { input: 0.06, output: 0.4, cacheRead: 0.01, cacheWrite: 0 },
  },
  // GLM-4.6 family.
  "glm-4.6": {
    name: "GLM-4.6",
    reasoning: true,
    input: ["text"],
    contextWindow: 204800,
    maxTokens: 131072,
    cost: { input: 0.6, output: 2.2, cacheRead: 0.11, cacheWrite: 0 },
  },
  "glm-4.6v": {
    name: "GLM-4.6V",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 128000,
    maxTokens: 32768,
    cost: { input: 0.3, output: 0.9, cacheRead: 0, cacheWrite: 0 },
  },
  // GLM-4.5 family.
  "glm-4.5": {
    name: "GLM-4.5",
    reasoning: true,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 98304,
    cost: { input: 0.6, output: 2.2, cacheRead: 0.11, cacheWrite: 0 },
  },
  "glm-4.5-air": {
    name: "GLM-4.5 Air",
    reasoning: true,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 98304,
    cost: { input: 0.2, output: 1.1, cacheRead: 0.03, cacheWrite: 0 },
  },
  "glm-4.5-flash": {
    name: "GLM-4.5 Flash",
    reasoning: true,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 98304,
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  },
  "glm-4.5v": {
    name: "GLM-4.5V",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 64000,
    maxTokens: 16384,
    cost: { input: 0.6, output: 1.8, cacheRead: 0, cacheWrite: 0 },
  },
} as const satisfies Record<string, ZaiCatalogEntry>;
// Union of catalog keys; used to index the catalog from a plain string id.
type ZaiCatalogId = keyof typeof ZAI_MODEL_CATALOG;
/**
 * Map a Z.AI endpoint id to its API base URL.
 *
 * @param endpoint - One of "global", "cn", "coding-global", "coding-cn".
 *                   Any other value (or none) falls back to the global URL.
 * @returns The base URL for the requested endpoint.
 */
export function resolveZaiBaseUrl(endpoint?: string): string {
  if (endpoint === "cn") {
    return ZAI_CN_BASE_URL;
  }
  if (endpoint === "coding-cn") {
    return ZAI_CODING_CN_BASE_URL;
  }
  if (endpoint === "coding-global") {
    return ZAI_CODING_GLOBAL_BASE_URL;
  }
  // "global", undefined, and unknown ids all resolve to the global endpoint.
  return ZAI_GLOBAL_BASE_URL;
}
/**
 * Build a model definition for a Z.AI model id.
 *
 * Resolution order per field: explicit param → catalog entry (when the id is
 * a known `ZAI_MODEL_CATALOG` key) → hard-coded default.
 *
 * @param params - Model id plus optional overrides for each definition field.
 * @returns A complete `ModelDefinitionConfig` for the model.
 */
export function buildZaiModelDefinition(params: {
  id: string;
  name?: string;
  reasoning?: boolean;
  input?: ModelDefinitionConfig["input"];
  cost?: ModelDefinitionConfig["cost"];
  contextWindow?: number;
  maxTokens?: number;
}): ModelDefinitionConfig {
  // Known catalog entry (if any) supplies per-model fallbacks.
  const known = ZAI_MODEL_CATALOG[params.id as ZaiCatalogId];
  // Copy the catalog's readonly tuple into a mutable array.
  const fallbackInput: ("text" | "image")[] = known?.input ? [...known.input] : ["text"];
  return {
    id: params.id,
    name: params.name ?? known?.name ?? `GLM ${params.id}`,
    reasoning: params.reasoning ?? known?.reasoning ?? true,
    input: params.input ?? fallbackInput,
    cost: params.cost ?? known?.cost ?? ZAI_DEFAULT_COST,
    contextWindow: params.contextWindow ?? known?.contextWindow ?? 202800,
    maxTokens: params.maxTokens ?? known?.maxTokens ?? 131100,
  };
}
// Catalog-backed definitions for every known model, in display order.
const ZAI_DEFAULT_MODELS = [
  "glm-5.1",
  "glm-5",
  "glm-5-turbo",
  "glm-5v-turbo",
  "glm-4.7",
  "glm-4.7-flash",
  "glm-4.7-flashx",
  "glm-4.6",
  "glm-4.6v",
  "glm-4.5",
  "glm-4.5-air",
  "glm-4.5-flash",
  "glm-4.5v",
].map((modelId) => buildZaiModelDefinition({ id: modelId }));
/**
 * Pick the base URL for the Z.AI preset.
 *
 * An explicit endpoint id wins; otherwise a non-blank baseUrl already present
 * in the config is kept; otherwise the global default applies.
 */
function resolveZaiPresetBaseUrl(cfg: OpenClawConfig, endpoint?: string): string {
  if (endpoint) {
    return resolveZaiBaseUrl(endpoint);
  }
  const configured = cfg.models?.providers?.zai?.baseUrl;
  const configuredTrimmed = typeof configured === "string" ? configured.trim() : "";
  return configuredTrimmed || resolveZaiBaseUrl();
}
/**
 * Apply the Z.AI provider preset to a config.
 *
 * @param cfg - Config to update.
 * @param params - Optional endpoint id and model id overrides.
 * @param primaryModelRef - Optional model ref to set as the primary model.
 * @returns The updated config.
 */
function applyZaiPreset(
  cfg: OpenClawConfig,
  params?: { endpoint?: string; modelId?: string },
  primaryModelRef?: string,
): OpenClawConfig {
  // Blank/whitespace model ids fall back to the default model.
  const resolvedModelId = params?.modelId?.trim() || ZAI_DEFAULT_MODEL_ID;
  return applyProviderConfigWithModelCatalogPreset(cfg, {
    providerId: "zai",
    api: "openai-completions",
    baseUrl: resolveZaiPresetBaseUrl(cfg, params?.endpoint),
    catalogModels: ZAI_DEFAULT_MODELS,
    aliases: [{ modelRef: `zai/${resolvedModelId}`, alias: "GLM" }],
    primaryModelRef,
  });
}
/**
 * Apply the Z.AI provider preset without selecting a primary model.
 *
 * @param cfg - Config to update.
 * @param params - Optional endpoint id and model id overrides.
 * @returns The updated config.
 */
export function applyZaiProviderConfig(
  cfg: OpenClawConfig,
  params?: { endpoint?: string; modelId?: string },
): OpenClawConfig {
  // Delegate with no primaryModelRef: the provider/model entries are written,
  // but the primary model selection is left untouched.
  return applyZaiPreset(cfg, params);
}
/**
 * Apply the Z.AI provider preset and make the chosen model primary.
 *
 * @param cfg - Config to update.
 * @param params - Optional endpoint id and model id overrides.
 * @returns The updated config with the model set as primary.
 */
export function applyZaiConfig(
  cfg: OpenClawConfig,
  params?: { endpoint?: string; modelId?: string },
): OpenClawConfig {
  const requested = params?.modelId?.trim();
  const resolvedModelId = requested || ZAI_DEFAULT_MODEL_ID;
  // Reuse the canonical ref constant for the default model.
  const primaryRef =
    resolvedModelId === ZAI_DEFAULT_MODEL_ID ? ZAI_DEFAULT_MODEL_REF : `zai/${resolvedModelId}`;
  return applyZaiPreset(cfg, params, primaryRef);
}

View File

@ -1,11 +1,10 @@
import {
ZAI_CN_BASE_URL,
ZAI_CODING_CN_BASE_URL,
ZAI_CODING_GLOBAL_BASE_URL,
ZAI_GLOBAL_BASE_URL,
} from "../plugin-sdk/zai.js";
import { fetchWithTimeout } from "../utils/fetch-timeout.js";
const ZAI_CODING_GLOBAL_BASE_URL = "https://api.z.ai/api/coding/paas/v4";
const ZAI_CODING_CN_BASE_URL = "https://open.bigmodel.cn/api/coding/paas/v4";
const ZAI_GLOBAL_BASE_URL = "https://api.z.ai/api/paas/v4";
const ZAI_CN_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
export type ZaiEndpointId = "global" | "cn" | "coding-global" | "coding-cn";
export type ZaiDetectedEndpoint = {

View File

@ -147,7 +147,6 @@ describe("non-extension test boundaries", () => {
"../plugin-sdk/synthetic.js",
"../plugin-sdk/xai.js",
"../plugin-sdk/xiaomi.js",
"../plugin-sdk/zai.js",
]);
const file = "src/commands/onboard-auth.test.ts";
const source = fs.readFileSync(path.join(repoRoot, file), "utf8");