feat(ollama): switch implicit provider to native api type (#11828)

This commit is contained in:
BrokenFinger98 2026-02-08 21:21:34 +09:00 committed by Peter Steinberger
parent e9900993a2
commit da27360722
1 changed file with 4 additions and 9 deletions

View File

@ -79,8 +79,8 @@ const QWEN_PORTAL_DEFAULT_COST = {
cacheWrite: 0,
};
const OLLAMA_BASE_URL = "http://127.0.0.1:11434/v1";
const OLLAMA_API_BASE_URL = "http://127.0.0.1:11434";
const OLLAMA_BASE_URL = "http://127.0.0.1:11434";
const OLLAMA_API_BASE_URL = OLLAMA_BASE_URL;
const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000;
const OLLAMA_DEFAULT_MAX_TOKENS = 8192;
const OLLAMA_DEFAULT_COST = {
@ -180,11 +180,6 @@ async function discoverOllamaModels(baseUrl?: string): Promise<ModelDefinitionCo
cost: OLLAMA_DEFAULT_COST,
contextWindow: OLLAMA_DEFAULT_CONTEXT_WINDOW,
maxTokens: OLLAMA_DEFAULT_MAX_TOKENS,
// Disable streaming by default for Ollama to avoid SDK issue #1205
// See: https://github.com/badlogic/pi-mono/issues/1205
params: {
streaming: false,
},
};
});
} catch (error) {
@ -541,8 +536,8 @@ async function buildVeniceProvider(): Promise<ProviderConfig> {
async function buildOllamaProvider(configuredBaseUrl?: string): Promise<ProviderConfig> {
const models = await discoverOllamaModels(configuredBaseUrl);
return {
baseUrl: configuredBaseUrl ?? OLLAMA_BASE_URL,
api: "openai-completions",
baseUrl: resolveOllamaApiBase(configuredBaseUrl),
api: "ollama",
models,
};
}