fix: add Azure AI Foundry URL support for custom providers

Detects Azure AI Foundry URLs (services.ai.azure.com and
openai.azure.com) and transforms them to include the proper
deployment path (/openai/deployments/<model-id>) required by
Azure's API. This fixes the 400 error when configuring OpenAI
models from Azure AI Foundry.

Fixes openclaw/openclaw#17992
This commit is contained in:
OpenClaw Bot 2026-02-16 11:18:09 +00:00 committed by Peter Steinberger
parent 4e5a9d83b7
commit 960cc11513
1 changed file with 51 additions and 7 deletions

View File

@ -13,6 +13,41 @@ const DEFAULT_CONTEXT_WINDOW = 4096;
// Default token cap applied when a custom model does not specify one.
const DEFAULT_MAX_TOKENS = 4096;
// Timeout in milliseconds for provider verification requests —
// presumably passed to fetchWithTimeout below; confirm at call sites.
const VERIFY_TIMEOUT_MS = 10000;
/**
 * Returns true when a base URL points at Azure AI Foundry or Azure OpenAI.
 *
 * Recognized hosts (case-insensitive):
 * - `*.services.ai.azure.com` (Azure AI Foundry)
 * - `*.openai.azure.com` (classic Azure OpenAI)
 *
 * Unparseable URLs are treated as non-Azure.
 */
function isAzureUrl(baseUrl: string): boolean {
  let parsed: URL;
  try {
    parsed = new URL(baseUrl);
  } catch {
    // Not a valid absolute URL — cannot be an Azure endpoint.
    return false;
  }
  const host = parsed.hostname.toLowerCase();
  const azureSuffixes = [".services.ai.azure.com", ".openai.azure.com"];
  return azureSuffixes.some((suffix) => host.endsWith(suffix));
}
/**
 * Transforms an Azure AI Foundry/OpenAI base URL to include the deployment path.
 * Azure expects requests at:
 *   https://host/openai/deployments/<model-id>/chat/completions?api-version=...
 * Only the path prefix is added here; the api-version query parameter is
 * handled by the Azure OpenAI client or appended elsewhere as a query param.
 *
 * The model id is percent-encoded so ids containing reserved URL characters
 * (e.g. `/`, `?`, `#`, spaces) cannot corrupt the resulting endpoint path.
 * Typical ids such as `gpt-5-nano` are unaffected by the encoding.
 *
 * Example:
 *   https://my-resource.services.ai.azure.com + gpt-5-nano
 *   => https://my-resource.services.ai.azure.com/openai/deployments/gpt-5-nano
 *
 * @param baseUrl - Azure base URL, with or without a trailing slash.
 * @param modelId - Deployment/model identifier to append.
 * @returns The base URL with the `/openai/deployments/<model-id>` suffix,
 *          or the (slash-normalized) input if it already contains one.
 */
function transformAzureUrl(baseUrl: string, modelId: string): string {
  const normalizedUrl = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
  // Idempotence: don't stack deployment paths if the caller already supplied one.
  if (normalizedUrl.includes("/openai/deployments/")) {
    return normalizedUrl;
  }
  return `${normalizedUrl}/openai/deployments/${encodeURIComponent(modelId)}`;
}
export type CustomApiCompatibility = "openai" | "anthropic";
type CustomApiCompatibilityChoice = CustomApiCompatibility | "unknown";
export type CustomApiResult = {
@ -215,9 +250,13 @@ async function requestOpenAiVerification(params: {
apiKey: string;
modelId: string;
}): Promise<VerificationResult> {
// Transform Azure URLs to include the deployment path
const resolvedUrl = isAzureUrl(params.baseUrl)
? transformAzureUrl(params.baseUrl, params.modelId)
: params.baseUrl;
const endpoint = new URL(
"chat/completions",
params.baseUrl.endsWith("/") ? params.baseUrl : `${params.baseUrl}/`,
resolvedUrl.endsWith("/") ? resolvedUrl : `${resolvedUrl}/`,
).href;
try {
const res = await fetchWithTimeout(
@ -247,10 +286,12 @@ async function requestAnthropicVerification(params: {
apiKey: string;
modelId: string;
}): Promise<VerificationResult> {
const endpoint = new URL(
"messages",
params.baseUrl.endsWith("/") ? params.baseUrl : `${params.baseUrl}/`,
).href;
// Transform Azure URLs to include the deployment path
const resolvedUrl = isAzureUrl(params.baseUrl)
? transformAzureUrl(params.baseUrl, params.modelId)
: params.baseUrl;
const endpoint = new URL("messages", resolvedUrl.endsWith("/") ? resolvedUrl : `${resolvedUrl}/`)
.href;
try {
const res = await fetchWithTimeout(
endpoint,
@ -423,9 +464,12 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
throw new CustomApiError("invalid_model_id", "Custom provider model ID is required.");
}
// Transform Azure URLs to include the deployment path for API calls
const resolvedBaseUrl = isAzureUrl(baseUrl) ? transformAzureUrl(baseUrl, modelId) : baseUrl;
const providerIdResult = resolveCustomProviderId({
config: params.config,
baseUrl,
baseUrl: resolvedBaseUrl,
providerId: params.providerId,
});
const providerId = providerIdResult.providerId;
@ -468,7 +512,7 @@ export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): Custom
...providers,
[providerId]: {
...existingProviderRest,
baseUrl,
baseUrl: resolvedBaseUrl,
api: resolveProviderApi(params.compatibility),
...(normalizedApiKey ? { apiKey: normalizedApiKey } : {}),
models: mergedModels.length > 0 ? mergedModels : [nextModel],