fix(zai): align explicit coding endpoint setup with detected model defaults (#45969)

* fix: align Z.AI coding onboarding with endpoint docs

* fix: align Z.AI coding onboarding with endpoint docs (#45969)
This commit is contained in:
Ayaan Zaidi 2026-03-14 16:20:37 +05:30 committed by GitHub
parent 439c21e078
commit c79c4ffbfb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 212 additions and 60 deletions

View File

@@ -9,6 +9,10 @@ Docs: https://docs.openclaw.ai
- Placeholder: replace with the first 2026.3.14 user-facing change.
- Refactor/channels: remove the legacy channel shim directories and point channel-specific imports directly at the extension-owned implementations. (#45967) thanks @scoootscooob.
### Fixes
- Z.AI/onboarding: detect a working default model even for explicit `zai-coding-*` endpoint choices, so Coding Plan setup can keep the selected endpoint while defaulting to `glm-5` when available or `glm-4.7` as fallback. (#45969)
## 2026.3.13
### Changes

View File

@@ -14,7 +14,17 @@ models are accessed via the `zai` provider and model IDs like `zai/glm-5`.
## CLI setup
```bash
openclaw onboard --auth-choice zai-api-key
# Coding Plan Global, recommended for Coding Plan users
openclaw onboard --auth-choice zai-coding-global
# Coding Plan CN (China region), recommended for Coding Plan users
openclaw onboard --auth-choice zai-coding-cn
# General API
openclaw onboard --auth-choice zai-global
# General API CN (China region)
openclaw onboard --auth-choice zai-cn
```
## Config snippet

View File

@@ -15,9 +15,17 @@ with a Z.AI API key.
## CLI setup
```bash
openclaw onboard --auth-choice zai-api-key
# or non-interactive
openclaw onboard --zai-api-key "$ZAI_API_KEY"
# Coding Plan Global, recommended for Coding Plan users
openclaw onboard --auth-choice zai-coding-global
# Coding Plan CN (China region), recommended for Coding Plan users
openclaw onboard --auth-choice zai-coding-cn
# General API
openclaw onboard --auth-choice zai-global
# General API CN (China region)
openclaw onboard --auth-choice zai-cn
```
## Config snippet

View File

@@ -245,9 +245,15 @@ export async function applyAuthChoiceApiProviders(
setZaiApiKey(apiKey, params.agentDir, { secretInputMode: mode }),
});
// zai-api-key: auto-detect endpoint + choose a working default model.
let modelIdOverride: string | undefined;
if (!endpoint) {
if (endpoint) {
const detected = await detectZaiEndpoint({ apiKey, endpoint });
if (detected) {
modelIdOverride = detected.modelId;
await params.prompter.note(detected.note, "Z.AI endpoint");
}
} else {
// zai-api-key: auto-detect endpoint + choose a working default model.
const detected = await detectZaiEndpoint({ apiKey });
if (detected) {
endpoint = detected.endpoint;

View File

@@ -285,7 +285,7 @@ describe("applyAuthChoice", () => {
expectedBaseUrl: string;
expectedModel?: string;
shouldPromptForEndpoint: boolean;
shouldAssertDetectCall?: boolean;
expectedDetectCall?: { apiKey: string; endpoint?: "coding-global" | "coding-cn" };
}> = [
{
authChoice: "zai-api-key",
@@ -298,8 +298,16 @@
{
authChoice: "zai-coding-global",
token: "zai-test-key",
detectResult: {
endpoint: "coding-global",
modelId: "glm-4.7",
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
note: "Detected coding-global endpoint with GLM-4.7 fallback",
},
expectedBaseUrl: ZAI_CODING_GLOBAL_BASE_URL,
expectedModel: "zai/glm-4.7",
shouldPromptForEndpoint: false,
expectedDetectCall: { apiKey: "zai-test-key", endpoint: "coding-global" },
},
{
authChoice: "zai-api-key",
@@ -313,7 +321,7 @@
expectedBaseUrl: ZAI_CODING_GLOBAL_BASE_URL,
expectedModel: "zai/glm-4.5",
shouldPromptForEndpoint: false,
shouldAssertDetectCall: true,
expectedDetectCall: { apiKey: "zai-detected-key" },
},
];
for (const scenario of scenarios) {
@@ -344,8 +352,8 @@
setDefaultModel: true,
});
if (scenario.shouldAssertDetectCall) {
expect(detectZaiEndpoint).toHaveBeenCalledWith({ apiKey: scenario.token });
if (scenario.expectedDetectCall) {
expect(detectZaiEndpoint).toHaveBeenCalledWith(scenario.expectedDetectCall);
}
if (scenario.shouldPromptForEndpoint) {
expect(select).toHaveBeenCalledWith(

View File

@@ -1,7 +1,7 @@
import fs from "node:fs/promises";
import path from "node:path";
import { setTimeout as delay } from "node:timers/promises";
import { beforeAll, describe, expect, it, vi } from "vitest";
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { makeTempWorkspace } from "../test-helpers/workspace.js";
import { withEnvAsync } from "../test-utils/env.js";
import { MINIMAX_API_BASE_URL, MINIMAX_CN_API_BASE_URL } from "./onboard-auth.js";
@@ -18,6 +18,8 @@ type OnboardEnv = {
};
const ensureWorkspaceAndSessionsMock = vi.hoisted(() => vi.fn(async (..._args: unknown[]) => {}));
type DetectZaiEndpoint = typeof import("./zai-endpoint-detect.js").detectZaiEndpoint;
const detectZaiEndpoint = vi.hoisted(() => vi.fn<DetectZaiEndpoint>(async () => null));
vi.mock("./onboard-helpers.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("./onboard-helpers.js")>();
@@ -27,6 +29,10 @@ };
};
});
vi.mock("./zai-endpoint-detect.js", () => ({
detectZaiEndpoint,
}));
const { runNonInteractiveOnboarding } = await import("./onboard-non-interactive.js");
const NON_INTERACTIVE_DEFAULT_OPTIONS = {
@@ -180,6 +186,11 @@ describe("onboard (non-interactive): provider auth", () => {
({ ensureAuthProfileStore, upsertAuthProfile } = await import("../agents/auth-profiles.js"));
});
beforeEach(() => {
detectZaiEndpoint.mockReset();
detectZaiEndpoint.mockResolvedValue(null);
});
it("stores MiniMax API key and uses global baseUrl by default", async () => {
await withOnboardEnv("openclaw-onboard-minimax-", async (env) => {
const cfg = await runOnboardingAndReadConfig(env, {
@@ -220,6 +231,12 @@
it("stores Z.AI API key and uses global baseUrl by default", async () => {
await withOnboardEnv("openclaw-onboard-zai-", async (env) => {
detectZaiEndpoint.mockResolvedValueOnce({
endpoint: "global",
baseUrl: "https://api.z.ai/api/paas/v4",
modelId: "glm-5",
note: "Verified GLM-5 on global endpoint.",
});
const cfg = await runOnboardingAndReadConfig(env, {
authChoice: "zai-api-key",
zaiApiKey: "zai-test-key", // pragma: allowlist secret
@@ -235,6 +252,12 @@
it("supports Z.AI CN coding endpoint auth choice", async () => {
await withOnboardEnv("openclaw-onboard-zai-cn-", async (env) => {
detectZaiEndpoint.mockResolvedValueOnce({
endpoint: "coding-cn",
baseUrl: "https://open.bigmodel.cn/api/coding/paas/v4",
modelId: "glm-4.7",
note: "Coding Plan CN endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
});
const cfg = await runOnboardingAndReadConfig(env, {
authChoice: "zai-coding-cn",
zaiApiKey: "zai-test-key", // pragma: allowlist secret
@@ -243,6 +266,25 @@
expect(cfg.models?.providers?.zai?.baseUrl).toBe(
"https://open.bigmodel.cn/api/coding/paas/v4",
);
expect(cfg.agents?.defaults?.model?.primary).toBe("zai/glm-4.7");
});
});
it("supports Z.AI Coding Plan global endpoint with GLM-5 when available", async () => {
await withOnboardEnv("openclaw-onboard-zai-coding-global-", async (env) => {
detectZaiEndpoint.mockResolvedValueOnce({
endpoint: "coding-global",
baseUrl: "https://api.z.ai/api/coding/paas/v4",
modelId: "glm-5",
note: "Verified GLM-5 on coding-global endpoint.",
});
const cfg = await runOnboardingAndReadConfig(env, {
authChoice: "zai-coding-global",
zaiApiKey: "zai-test-key", // pragma: allowlist secret
});
expect(cfg.models?.providers?.zai?.baseUrl).toBe("https://api.z.ai/api/coding/paas/v4");
expect(cfg.agents?.defaults?.model?.primary).toBe("zai/glm-5");
});
});

View File

@@ -291,6 +291,13 @@ export async function applyNonInteractiveAuthChoice(params: {
endpoint = "global";
} else if (authChoice === "zai-cn") {
endpoint = "cn";
}
if (endpoint) {
const detected = await detectZaiEndpoint({ apiKey: resolved.key, endpoint });
if (detected) {
modelIdOverride = detected.modelId;
}
} else {
const detected = await detectZaiEndpoint({ apiKey: resolved.key });
if (detected) {

View File

@@ -1,11 +1,14 @@
import { describe, expect, it } from "vitest";
import { detectZaiEndpoint } from "./zai-endpoint-detect.js";
function makeFetch(map: Record<string, { status: number; body?: unknown }>) {
return (async (url: string) => {
const entry = map[url];
type FetchResponse = { status: number; body?: unknown };
function makeFetch(map: Record<string, FetchResponse>) {
return (async (url: string, init?: RequestInit) => {
const rawBody = typeof init?.body === "string" ? JSON.parse(init.body) : null;
const entry = map[`${url}::${rawBody?.model ?? ""}`] ?? map[url];
if (!entry) {
throw new Error(`unexpected url: ${url}`);
throw new Error(`unexpected url: ${url} model=${String(rawBody?.model ?? "")}`);
}
const json = entry.body ?? {};
return new Response(JSON.stringify(json), {
@@ -18,39 +21,71 @@ function makeFetch(map: Record<string, { status: number; body?: unknown }>) {
describe("detectZaiEndpoint", () => {
it("resolves preferred/fallback endpoints and null when probes fail", async () => {
const scenarios: Array<{
endpoint?: "global" | "cn" | "coding-global" | "coding-cn";
responses: Record<string, { status: number; body?: unknown }>;
expected: { endpoint: string; modelId: string } | null;
}> = [
{
responses: {
"https://api.z.ai/api/paas/v4/chat/completions": { status: 200 },
"https://api.z.ai/api/paas/v4/chat/completions::glm-5": { status: 200 },
},
expected: { endpoint: "global", modelId: "glm-5" },
},
{
responses: {
"https://api.z.ai/api/paas/v4/chat/completions": {
"https://api.z.ai/api/paas/v4/chat/completions::glm-5": {
status: 404,
body: { error: { message: "not found" } },
},
"https://open.bigmodel.cn/api/paas/v4/chat/completions": { status: 200 },
"https://open.bigmodel.cn/api/paas/v4/chat/completions::glm-5": { status: 200 },
},
expected: { endpoint: "cn", modelId: "glm-5" },
},
{
responses: {
"https://api.z.ai/api/paas/v4/chat/completions": { status: 404 },
"https://open.bigmodel.cn/api/paas/v4/chat/completions": { status: 404 },
"https://api.z.ai/api/coding/paas/v4/chat/completions": { status: 200 },
"https://api.z.ai/api/paas/v4/chat/completions::glm-5": { status: 404 },
"https://open.bigmodel.cn/api/paas/v4/chat/completions::glm-5": { status: 404 },
"https://api.z.ai/api/coding/paas/v4/chat/completions::glm-5": { status: 200 },
},
expected: { endpoint: "coding-global", modelId: "glm-5" },
},
{
endpoint: "coding-global",
responses: {
"https://api.z.ai/api/coding/paas/v4/chat/completions::glm-5": {
status: 404,
body: { error: { message: "glm-5 unavailable" } },
},
"https://api.z.ai/api/coding/paas/v4/chat/completions::glm-4.7": { status: 200 },
},
expected: { endpoint: "coding-global", modelId: "glm-4.7" },
},
{
endpoint: "coding-cn",
responses: {
"https://api.z.ai/api/paas/v4/chat/completions": { status: 401 },
"https://open.bigmodel.cn/api/paas/v4/chat/completions": { status: 401 },
"https://api.z.ai/api/coding/paas/v4/chat/completions": { status: 401 },
"https://open.bigmodel.cn/api/coding/paas/v4/chat/completions": { status: 401 },
"https://open.bigmodel.cn/api/coding/paas/v4/chat/completions::glm-5": { status: 200 },
},
expected: { endpoint: "coding-cn", modelId: "glm-5" },
},
{
endpoint: "coding-cn",
responses: {
"https://open.bigmodel.cn/api/coding/paas/v4/chat/completions::glm-5": {
status: 404,
body: { error: { message: "glm-5 unavailable" } },
},
"https://open.bigmodel.cn/api/coding/paas/v4/chat/completions::glm-4.7": { status: 200 },
},
expected: { endpoint: "coding-cn", modelId: "glm-4.7" },
},
{
responses: {
"https://api.z.ai/api/paas/v4/chat/completions::glm-5": { status: 401 },
"https://open.bigmodel.cn/api/paas/v4/chat/completions::glm-5": { status: 401 },
"https://api.z.ai/api/coding/paas/v4/chat/completions::glm-5": { status: 401 },
"https://api.z.ai/api/coding/paas/v4/chat/completions::glm-4.7": { status: 401 },
"https://open.bigmodel.cn/api/coding/paas/v4/chat/completions::glm-5": { status: 401 },
"https://open.bigmodel.cn/api/coding/paas/v4/chat/completions::glm-4.7": { status: 401 },
},
expected: null,
},
@@ -59,6 +94,7 @@ describe("detectZaiEndpoint", () => {
for (const scenario of scenarios) {
const detected = await detectZaiEndpoint({
apiKey: "sk-test", // pragma: allowlist secret
...(scenario.endpoint ? { endpoint: scenario.endpoint } : {}),
fetchFn: makeFetch(scenario.responses),
});

View File

@@ -88,6 +88,7 @@ async function probeZaiChatCompletions(params: {
export async function detectZaiEndpoint(params: {
apiKey: string;
endpoint?: ZaiEndpointId;
timeoutMs?: number;
fetchFn?: typeof fetch;
}): Promise<ZaiDetectedEndpoint | null> {
@@ -97,50 +98,80 @@ export async function detectZaiEndpoint(params: {
}
const timeoutMs = params.timeoutMs ?? 5_000;
// Prefer GLM-5 on the general API endpoints.
const glm5: Array<{ endpoint: ZaiEndpointId; baseUrl: string }> = [
{ endpoint: "global", baseUrl: ZAI_GLOBAL_BASE_URL },
{ endpoint: "cn", baseUrl: ZAI_CN_BASE_URL },
];
for (const candidate of glm5) {
const result = await probeZaiChatCompletions({
baseUrl: candidate.baseUrl,
apiKey: params.apiKey,
modelId: "glm-5",
timeoutMs,
fetchFn: params.fetchFn,
});
if (result.ok) {
return {
endpoint: candidate.endpoint,
baseUrl: candidate.baseUrl,
const probeCandidates = (() => {
const general = [
{
endpoint: "global" as const,
baseUrl: ZAI_GLOBAL_BASE_URL,
modelId: "glm-5",
note: `Verified GLM-5 on ${candidate.endpoint} endpoint.`,
};
}
}
note: "Verified GLM-5 on global endpoint.",
},
{
endpoint: "cn" as const,
baseUrl: ZAI_CN_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on cn endpoint.",
},
];
const codingGlm5 = [
{
endpoint: "coding-global" as const,
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on coding-global endpoint.",
},
{
endpoint: "coding-cn" as const,
baseUrl: ZAI_CODING_CN_BASE_URL,
modelId: "glm-5",
note: "Verified GLM-5 on coding-cn endpoint.",
},
];
const codingFallback = [
{
endpoint: "coding-global" as const,
baseUrl: ZAI_CODING_GLOBAL_BASE_URL,
modelId: "glm-4.7",
note: "Coding Plan endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
},
{
endpoint: "coding-cn" as const,
baseUrl: ZAI_CODING_CN_BASE_URL,
modelId: "glm-4.7",
note: "Coding Plan CN endpoint verified, but this key/plan does not expose GLM-5 there. Defaulting to GLM-4.7.",
},
];
// Fallback: Coding Plan endpoint (GLM-5 not available there).
const coding: Array<{ endpoint: ZaiEndpointId; baseUrl: string }> = [
{ endpoint: "coding-global", baseUrl: ZAI_CODING_GLOBAL_BASE_URL },
{ endpoint: "coding-cn", baseUrl: ZAI_CODING_CN_BASE_URL },
];
for (const candidate of coding) {
switch (params.endpoint) {
case "global":
return general.filter((candidate) => candidate.endpoint === "global");
case "cn":
return general.filter((candidate) => candidate.endpoint === "cn");
case "coding-global":
return [
...codingGlm5.filter((candidate) => candidate.endpoint === "coding-global"),
...codingFallback.filter((candidate) => candidate.endpoint === "coding-global"),
];
case "coding-cn":
return [
...codingGlm5.filter((candidate) => candidate.endpoint === "coding-cn"),
...codingFallback.filter((candidate) => candidate.endpoint === "coding-cn"),
];
default:
return [...general, ...codingGlm5, ...codingFallback];
}
})();
for (const candidate of probeCandidates) {
const result = await probeZaiChatCompletions({
baseUrl: candidate.baseUrl,
apiKey: params.apiKey,
modelId: "glm-4.7",
modelId: candidate.modelId,
timeoutMs,
fetchFn: params.fetchFn,
});
if (result.ok) {
return {
endpoint: candidate.endpoint,
baseUrl: candidate.baseUrl,
modelId: "glm-4.7",
note: "Coding Plan endpoint detected; GLM-5 is not available there. Defaulting to GLM-4.7.",
};
return candidate;
}
}