mirror of https://github.com/openclaw/openclaw.git
fix(models): apply Gemini model-id normalization to google-vertex provider (#42435)
* fix(models): apply Gemini model-id normalization to google-vertex provider

  The existing normalizeGoogleModelId() (which maps e.g. gemini-3.1-flash-lite to gemini-3.1-flash-lite-preview) was only applied when the provider was "google". Users configuring google-vertex/gemini-3.1-flash-lite would get a "missing" model because the -preview suffix was never appended.

  Extend the normalization to google-vertex in both model-selection (the parseModelRef path) and normalizeProviders (the config normalization path).

  Ref: https://github.com/openclaw/openclaw/issues/36838
  Ref: https://github.com/openclaw/openclaw/pull/36918#issuecomment-4032732959

* fix(models): normalize google-vertex flash-lite

* fix(models): place unreleased changelog entry last

* fix(models): place unreleased changelog entry before releases
This commit is contained in:
parent
f4a2bbe0c9
commit
b857a8d8bc
|
|
@ -62,6 +62,7 @@ Docs: https://docs.openclaw.ai
|
||||||
- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks.
|
- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks.
|
||||||
- macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo.
|
- macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo.
|
||||||
- Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu.
|
- Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu.
|
||||||
|
- Models/google-vertex Gemini flash-lite normalization: apply existing bare-ID preview normalization to `google-vertex` model refs and provider configs so `google-vertex/gemini-3.1-flash-lite` resolves as `gemini-3.1-flash-lite-preview`. (#42435) Thanks @scoootscooob.
|
||||||
|
|
||||||
## 2026.3.12
|
## 2026.3.12
|
||||||
|
|
||||||
|
|
@ -373,6 +374,7 @@ Docs: https://docs.openclaw.ai
|
||||||
- Agents/compaction transcript updates: emit a transcript-update event immediately after successful embedded compaction so downstream listeners observe the post-compact transcript without waiting for a later write. (#25558) Thanks @rodrigouroz.
|
- Agents/compaction transcript updates: emit a transcript-update event immediately after successful embedded compaction so downstream listeners observe the post-compact transcript without waiting for a later write. (#25558) Thanks @rodrigouroz.
|
||||||
- Agents/sessions_spawn: use the target agent workspace for cross-agent spawned runs instead of inheriting the caller workspace, so child sessions load the correct workspace-scoped instructions and persona files. (#40176) Thanks @moshehbenavraham.
|
- Agents/sessions_spawn: use the target agent workspace for cross-agent spawned runs instead of inheriting the caller workspace, so child sessions load the correct workspace-scoped instructions and persona files. (#40176) Thanks @moshehbenavraham.
|
||||||
|
|
||||||
|
|
||||||
## 2026.3.7
|
## 2026.3.7
|
||||||
|
|
||||||
### Changes
|
### Changes
|
||||||
|
|
|
||||||
|
|
@ -241,6 +241,12 @@ describe("model-selection", () => {
|
||||||
defaultProvider: "anthropic",
|
defaultProvider: "anthropic",
|
||||||
expected: { provider: "openai", model: "gpt-5.3-codex-codex" },
|
expected: { provider: "openai", model: "gpt-5.3-codex-codex" },
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "normalizes gemini 3.1 flash-lite ids for google-vertex",
|
||||||
|
variants: ["google-vertex/gemini-3.1-flash-lite", "gemini-3.1-flash-lite"],
|
||||||
|
defaultProvider: "google-vertex",
|
||||||
|
expected: { provider: "google-vertex", model: "gemini-3.1-flash-lite-preview" },
|
||||||
|
},
|
||||||
])("$name", ({ variants, defaultProvider, expected }) => {
|
])("$name", ({ variants, defaultProvider, expected }) => {
|
||||||
expectParsedModelVariants(variants, defaultProvider, expected);
|
expectParsedModelVariants(variants, defaultProvider, expected);
|
||||||
});
|
});
|
||||||
|
|
@ -252,7 +258,6 @@ describe("model-selection", () => {
|
||||||
"anthropic/claude-opus-4-6",
|
"anthropic/claude-opus-4-6",
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => {
|
it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => {
|
||||||
expect(parseModelRef(raw, "anthropic")).toBeNull();
|
expect(parseModelRef(raw, "anthropic")).toBeNull();
|
||||||
});
|
});
|
||||||
|
|
|
||||||
|
|
@ -171,7 +171,7 @@ function normalizeProviderModelId(provider: string, model: string): string {
|
||||||
return `anthropic/${normalizedAnthropicModel}`;
|
return `anthropic/${normalizedAnthropicModel}`;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (provider === "google") {
|
if (provider === "google" || provider === "google-vertex") {
|
||||||
return normalizeGoogleModelId(model);
|
return normalizeGoogleModelId(model);
|
||||||
}
|
}
|
||||||
// OpenRouter-native models (e.g. "openrouter/aurora-alpha") need the full
|
// OpenRouter-native models (e.g. "openrouter/aurora-alpha") need the full
|
||||||
|
|
|
||||||
|
|
@ -97,3 +97,33 @@ describe("google-antigravity provider normalization", () => {
|
||||||
expect(normalized).toBe(providers);
|
expect(normalized).toBe(providers);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe("google-vertex provider normalization", () => {
|
||||||
|
it("normalizes gemini flash-lite IDs for google-vertex providers", () => {
|
||||||
|
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||||
|
const providers = {
|
||||||
|
"google-vertex": buildProvider(["gemini-3.1-flash-lite", "gemini-3-flash-preview"]),
|
||||||
|
openai: buildProvider(["gpt-5"]),
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalized = normalizeProviders({ providers, agentDir });
|
||||||
|
|
||||||
|
expect(normalized).not.toBe(providers);
|
||||||
|
expect(normalized?.["google-vertex"]?.models.map((model) => model.id)).toEqual([
|
||||||
|
"gemini-3.1-flash-lite-preview",
|
||||||
|
"gemini-3-flash-preview",
|
||||||
|
]);
|
||||||
|
expect(normalized?.openai).toBe(providers.openai);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("returns original providers object when no google-vertex IDs need normalization", () => {
|
||||||
|
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
|
||||||
|
const providers = {
|
||||||
|
"google-vertex": buildProvider(["gemini-3.1-flash-lite-preview", "gemini-3-flash-preview"]),
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalized = normalizeProviders({ providers, agentDir });
|
||||||
|
|
||||||
|
expect(normalized).toBe(providers);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
|
||||||
|
|
@ -545,7 +545,7 @@ export function normalizeProviders(params: {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (normalizedKey === "google") {
|
if (normalizedKey === "google" || normalizedKey === "google-vertex") {
|
||||||
const googleNormalized = normalizeGoogleProvider(normalizedProvider);
|
const googleNormalized = normalizeGoogleProvider(normalizedProvider);
|
||||||
if (googleNormalized !== normalizedProvider) {
|
if (googleNormalized !== normalizedProvider) {
|
||||||
mutated = true;
|
mutated = true;
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue