Memory: extract embedding runtime registry

This commit is contained in:
Gustavo Madeira Santana 2026-03-15 19:21:52 +00:00
parent c0949e4eb8
commit df0cb8193c
2 changed files with 318 additions and 262 deletions

View File

@@ -0,0 +1,307 @@
import fsSync from "node:fs";
import type { Llama, LlamaEmbeddingContext, LlamaModel } from "node-llama-cpp";
import { formatErrorMessage } from "../infra/errors.js";
import { sanitizeAndNormalizeEmbedding } from "../memory/embedding-vectors.js";
import {
createGeminiEmbeddingProvider,
type GeminiEmbeddingClient,
type GeminiTaskType,
} from "../memory/embeddings-gemini.js";
import {
createMistralEmbeddingProvider,
type MistralEmbeddingClient,
} from "../memory/embeddings-mistral.js";
import {
createOllamaEmbeddingProvider,
type OllamaEmbeddingClient,
} from "../memory/embeddings-ollama.js";
import {
createOpenAiEmbeddingProvider,
type OpenAiEmbeddingClient,
} from "../memory/embeddings-openai.js";
import {
createVoyageEmbeddingProvider,
type VoyageEmbeddingClient,
} from "../memory/embeddings-voyage.js";
import type {
EmbeddingProvider,
EmbeddingProviderId,
EmbeddingProviderOptions,
EmbeddingProviderResult,
} from "../memory/embeddings.js";
import { importNodeLlamaCpp } from "../memory/node-llama.js";
import { resolveUserPath } from "../utils.js";
export type {
GeminiEmbeddingClient,
GeminiTaskType,
MistralEmbeddingClient,
OllamaEmbeddingClient,
OpenAiEmbeddingClient,
VoyageEmbeddingClient,
};
// Default GGUF model specifier (Hugging Face) used when no explicit local
// model path is configured for the in-process embedding provider.
export const DEFAULT_EXTENSION_HOST_LOCAL_EMBEDDING_MODEL =
  "hf:ggml-org/embeddinggemma-300m-qat-q8_0-GGUF/embeddinggemma-300m-qat-Q8_0.gguf";
// Remote providers tried (in this order) when provider === "auto".
// Ollama is deliberately absent so "auto" never assumes a local Ollama
// instance is running.
export const EXTENSION_HOST_REMOTE_EMBEDDING_PROVIDER_IDS = [
  "openai",
  "gemini",
  "voyage",
  "mistral",
] as const satisfies readonly EmbeddingProviderId[];
/**
 * Returns true when the configured local model path points at an existing
 * file on disk, which makes the "local" provider safe to pick in "auto" mode.
 *
 * Remote specifiers (hf:, http:, https:) are rejected here because selecting
 * them automatically could trigger a large model download.
 */
export function canAutoSelectExtensionHostLocalEmbedding(
  options: EmbeddingProviderOptions,
): boolean {
  const configuredPath = options.local?.modelPath?.trim();
  if (!configuredPath || /^(hf:|https?:)/i.test(configuredPath)) {
    return false;
  }
  try {
    const stats = fsSync.statSync(resolveUserPath(configuredPath));
    return stats.isFile();
  } catch {
    // Missing file or unreadable path: not auto-selectable.
    return false;
  }
}
/**
 * Detects the provider factories' shared "missing API key" failure mode so
 * callers can degrade to FTS-only search instead of treating it as fatal.
 */
export function isMissingExtensionHostEmbeddingApiKeyError(err: unknown): boolean {
  return formatErrorMessage(err).includes("No API key found for provider");
}
/**
 * Builds the in-process (node-llama-cpp) embedding provider.
 *
 * The llama runtime, model, and embedding context are created lazily on the
 * first embed call and cached; a shared init promise de-duplicates concurrent
 * first calls.
 */
async function createExtensionHostLocalEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<EmbeddingProvider> {
  const modelPath =
    options.local?.modelPath?.trim() || DEFAULT_EXTENSION_HOST_LOCAL_EMBEDDING_MODEL;
  const modelCacheDir = options.local?.modelCacheDir?.trim();
  // Lazy-load node-llama-cpp to keep startup light unless local is enabled.
  const { getLlama, resolveModelFile, LlamaLogLevel } = await importNodeLlamaCpp();
  let llama: Llama | null = null;
  let embeddingModel: LlamaModel | null = null;
  let embeddingContext: LlamaEmbeddingContext | null = null;
  // In-flight initialization; concurrent callers await the same promise.
  let initPromise: Promise<LlamaEmbeddingContext> | null = null;
  const ensureContext = async (): Promise<LlamaEmbeddingContext> => {
    if (embeddingContext) {
      return embeddingContext;
    }
    if (initPromise) {
      return initPromise;
    }
    initPromise = (async () => {
      try {
        if (!llama) {
          llama = await getLlama({ logLevel: LlamaLogLevel.error });
        }
        if (!embeddingModel) {
          // resolveModelFile resolves (and may fetch) the model spec into the
          // cache dir — presumably for hf:/http(s): specifiers; see
          // node-llama-cpp docs.
          const resolved = await resolveModelFile(modelPath, modelCacheDir || undefined);
          embeddingModel = await llama.loadModel({ modelPath: resolved });
        }
        if (!embeddingContext) {
          embeddingContext = await embeddingModel.createEmbeddingContext();
        }
        return embeddingContext;
      } catch (err) {
        // Clear the in-flight marker so a later call can retry; stages that
        // already completed (llama / model) are kept and reused on retry.
        initPromise = null;
        throw err;
      }
    })();
    return initPromise;
  };
  return {
    id: "local",
    model: modelPath,
    embedQuery: async (text) => {
      const ctx = await ensureContext();
      const embedding = await ctx.getEmbeddingFor(text);
      return sanitizeAndNormalizeEmbedding(Array.from(embedding.vector));
    },
    embedBatch: async (texts) => {
      const ctx = await ensureContext();
      // NOTE(review): embeds concurrently against one context — assumes
      // getEmbeddingFor is safe to call concurrently; confirm upstream.
      return Promise.all(
        texts.map(async (text) => {
          const embedding = await ctx.getEmbeddingFor(text);
          return sanitizeAndNormalizeEmbedding(Array.from(embedding.vector));
        }),
      );
    },
  };
}
/**
 * Instantiates one concrete provider plus its provider-specific client
 * handle. The remaining id ("openai") is handled by the default arm.
 */
async function createExtensionHostEmbeddingProviderById(
  id: EmbeddingProviderId,
  options: EmbeddingProviderOptions,
): Promise<
  Omit<
    EmbeddingProviderResult,
    "requestedProvider" | "fallbackFrom" | "fallbackReason" | "providerUnavailableReason"
  >
> {
  switch (id) {
    case "local":
      return { provider: await createExtensionHostLocalEmbeddingProvider(options) };
    case "ollama": {
      const { provider, client } = await createOllamaEmbeddingProvider(options);
      return { provider, ollama: client };
    }
    case "gemini": {
      const { provider, client } = await createGeminiEmbeddingProvider(options);
      return { provider, gemini: client };
    }
    case "voyage": {
      const { provider, client } = await createVoyageEmbeddingProvider(options);
      return { provider, voyage: client };
    }
    case "mistral": {
      const { provider, client } = await createMistralEmbeddingProvider(options);
      return { provider, mistral: client };
    }
    default: {
      const { provider, client } = await createOpenAiEmbeddingProvider(options);
      return { provider, openAi: client };
    }
  }
}
/**
 * Formats a primary-provider failure: local errors get the detailed
 * node-llama-cpp setup guidance, remote errors keep the plain message.
 */
function formatExtensionHostPrimaryEmbeddingError(
  err: unknown,
  provider: EmbeddingProviderId,
): string {
  if (provider === "local") {
    return formatExtensionHostLocalEmbeddingSetupError(err);
  }
  return formatErrorMessage(err);
}
/**
 * Resolves the embedding provider requested in options.
 *
 * "auto": a locally present model is tried first, then each remote provider
 * in EXTENSION_HOST_REMOTE_EMBEDDING_PROVIDER_IDS order. Missing-API-key
 * failures are collected and reported via providerUnavailableReason with a
 * null provider (FTS-only mode); any other failure is fatal.
 *
 * A concrete id: tried directly, then the configured fallback (if distinct
 * and not "none"). Only missing-API-key failures on both sides degrade to a
 * null provider; everything else is rethrown with the original error as
 * `cause`.
 */
export async function createExtensionHostEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<EmbeddingProviderResult> {
  const requestedProvider = options.provider;
  const fallback = options.fallback;
  // Rethrow helper that keeps the underlying error attached as `cause`.
  const wrapError = (message: string, cause: unknown): Error => {
    const wrapped = new Error(message) as Error & { cause?: unknown };
    wrapped.cause = cause;
    return wrapped;
  };
  if (requestedProvider === "auto") {
    const missingKeyErrors: string[] = [];
    let localError: string | null = null;
    if (canAutoSelectExtensionHostLocalEmbedding(options)) {
      try {
        const local = await createExtensionHostEmbeddingProviderById("local", options);
        return { ...local, requestedProvider };
      } catch (err) {
        localError = formatExtensionHostLocalEmbeddingSetupError(err);
      }
    }
    for (const provider of EXTENSION_HOST_REMOTE_EMBEDDING_PROVIDER_IDS) {
      try {
        const result = await createExtensionHostEmbeddingProviderById(provider, options);
        return { ...result, requestedProvider };
      } catch (err) {
        const message = formatExtensionHostPrimaryEmbeddingError(err, provider);
        if (!isMissingExtensionHostEmbeddingApiKeyError(err)) {
          // Non-auth failures (e.g. network) are fatal in auto mode.
          throw wrapError(message, err);
        }
        missingKeyErrors.push(message);
      }
    }
    // Every candidate lacked credentials — degrade to FTS-only mode.
    const details = [...missingKeyErrors, localError].filter(Boolean) as string[];
    const reason =
      details.length > 0 ? details.join("\n\n") : "No embeddings provider available.";
    return {
      provider: null,
      requestedProvider,
      providerUnavailableReason: reason,
    };
  }
  try {
    const primary = await createExtensionHostEmbeddingProviderById(requestedProvider, options);
    return { ...primary, requestedProvider };
  } catch (primaryErr) {
    const reason = formatExtensionHostPrimaryEmbeddingError(primaryErr, requestedProvider);
    if (fallback && fallback !== "none" && fallback !== requestedProvider) {
      try {
        const viaFallback = await createExtensionHostEmbeddingProviderById(fallback, options);
        return {
          ...viaFallback,
          requestedProvider,
          fallbackFrom: requestedProvider,
          fallbackReason: reason,
        };
      } catch (fallbackErr) {
        const fallbackReason = formatErrorMessage(fallbackErr);
        const combinedReason = `${reason}\n\nFallback to ${fallback} failed: ${fallbackReason}`;
        const bothMissingKeys =
          isMissingExtensionHostEmbeddingApiKeyError(primaryErr) &&
          isMissingExtensionHostEmbeddingApiKeyError(fallbackErr);
        if (bothMissingKeys) {
          // Both sides are auth failures: degrade instead of crashing.
          return {
            provider: null,
            requestedProvider,
            fallbackFrom: requestedProvider,
            fallbackReason: reason,
            providerUnavailableReason: combinedReason,
          };
        }
        throw wrapError(combinedReason, fallbackErr);
      }
    }
    if (isMissingExtensionHostEmbeddingApiKeyError(primaryErr)) {
      // No usable fallback; an auth failure still degrades gracefully.
      return {
        provider: null,
        requestedProvider,
        providerUnavailableReason: reason,
      };
    }
    throw wrapError(reason, primaryErr);
  }
}
/**
 * True when `err` is Node's module-resolution failure
 * (code ERR_MODULE_NOT_FOUND) whose message names node-llama-cpp —
 * i.e. the optional dependency is not installed.
 */
function isNodeLlamaCppMissing(err: unknown): boolean {
  if (!(err instanceof Error)) {
    return false;
  }
  const { code } = err as Error & { code?: unknown };
  return code === "ERR_MODULE_NOT_FOUND" && err.message.includes("node-llama-cpp");
}
/**
 * Builds the multi-line "local embeddings unavailable" guidance: the failure
 * reason (special-cased when node-llama-cpp itself is missing), setup steps,
 * and the remote-provider alternatives.
 */
export function formatExtensionHostLocalEmbeddingSetupError(err: unknown): string {
  const detail = formatErrorMessage(err);
  const missing = isNodeLlamaCppMissing(err);
  const lines: string[] = ["Local embeddings unavailable."];
  if (missing) {
    lines.push("Reason: optional dependency node-llama-cpp is missing (or failed to install).");
    if (detail) {
      lines.push(`Detail: ${detail}`);
    }
  } else if (detail) {
    lines.push(`Reason: ${detail}`);
  }
  lines.push(
    "To enable local embeddings:",
    "1) Use Node 24 (recommended for installs/updates; Node 22 LTS, currently 22.16+, remains supported)",
  );
  if (missing) {
    lines.push(
      "2) Reinstall OpenClaw (this should install node-llama-cpp): npm i -g openclaw@latest",
    );
  }
  lines.push(
    "3) If you use pnpm: pnpm approve-builds (select node-llama-cpp), then pnpm rebuild node-llama-cpp",
  );
  for (const provider of EXTENSION_HOST_REMOTE_EMBEDDING_PROVIDER_IDS) {
    lines.push(`Or set agents.defaults.memorySearch.provider = "${provider}" (remote).`);
  }
  return lines.join("\n");
}

View File

@@ -1,24 +1,15 @@
import fsSync from "node:fs";
import type { Llama, LlamaEmbeddingContext, LlamaModel } from "node-llama-cpp";
import type { OpenClawConfig } from "../config/config.js";
import type { SecretInput } from "../config/types.secrets.js";
import { formatErrorMessage } from "../infra/errors.js";
import { resolveUserPath } from "../utils.js";
import {
DEFAULT_EXTENSION_HOST_LOCAL_EMBEDDING_MODEL,
createExtensionHostEmbeddingProvider,
} from "../extension-host/embedding-runtime-registry.js";
import type { EmbeddingInput } from "./embedding-inputs.js";
import { sanitizeAndNormalizeEmbedding } from "./embedding-vectors.js";
import {
createGeminiEmbeddingProvider,
type GeminiEmbeddingClient,
type GeminiTaskType,
} from "./embeddings-gemini.js";
import {
createMistralEmbeddingProvider,
type MistralEmbeddingClient,
} from "./embeddings-mistral.js";
import { createOllamaEmbeddingProvider, type OllamaEmbeddingClient } from "./embeddings-ollama.js";
import { createOpenAiEmbeddingProvider, type OpenAiEmbeddingClient } from "./embeddings-openai.js";
import { createVoyageEmbeddingProvider, type VoyageEmbeddingClient } from "./embeddings-voyage.js";
import { importNodeLlamaCpp } from "./node-llama.js";
import { type GeminiEmbeddingClient, type GeminiTaskType } from "./embeddings-gemini.js";
import { type MistralEmbeddingClient } from "./embeddings-mistral.js";
import type { OllamaEmbeddingClient } from "./embeddings-ollama.js";
import type { OpenAiEmbeddingClient } from "./embeddings-openai.js";
import type { VoyageEmbeddingClient } from "./embeddings-voyage.js";
export type { GeminiEmbeddingClient } from "./embeddings-gemini.js";
export type { MistralEmbeddingClient } from "./embeddings-mistral.js";
@@ -39,11 +30,6 @@ export type EmbeddingProviderId = "openai" | "local" | "gemini" | "voyage" | "mi
export type EmbeddingProviderRequest = EmbeddingProviderId | "auto";
export type EmbeddingProviderFallback = EmbeddingProviderId | "none";
// Remote providers considered for auto-selection when provider === "auto".
// Ollama is intentionally excluded here so that "auto" mode does not
// implicitly assume a local Ollama instance is available.
const REMOTE_EMBEDDING_PROVIDER_IDS = ["openai", "gemini", "voyage", "mistral"] as const;
export type EmbeddingProviderResult = {
provider: EmbeddingProvider | null;
requestedProvider: EmbeddingProviderRequest;
@@ -78,247 +64,10 @@ export type EmbeddingProviderOptions = {
taskType?: GeminiTaskType;
};
// Default GGUF embedding model (Hugging Face spec) used when no explicit
// local model path is configured.
export const DEFAULT_LOCAL_MODEL =
  "hf:ggml-org/embeddinggemma-300m-qat-q8_0-GGUF/embeddinggemma-300m-qat-Q8_0.gguf";
/**
 * Whether "auto" mode may choose the local provider: requires a configured
 * model path that is a real file on disk. Remote specifiers (hf:, http(s):)
 * are excluded so auto-selection never starts a download.
 */
function canAutoSelectLocal(options: EmbeddingProviderOptions): boolean {
  const raw = options.local?.modelPath?.trim();
  if (!raw) {
    return false;
  }
  const isRemoteSpec = /^(hf:|https?:)/i.test(raw);
  if (isRemoteSpec) {
    return false;
  }
  try {
    return fsSync.statSync(resolveUserPath(raw)).isFile();
  } catch {
    // Path missing or unreadable: not eligible for auto-selection.
    return false;
  }
}
/**
 * Matches the shared "No API key found for provider" message emitted by the
 * remote provider factories, so callers can degrade rather than crash.
 */
function isMissingApiKeyError(err: unknown): boolean {
  return formatErrorMessage(err).includes("No API key found for provider");
}
/**
 * Builds the in-process (node-llama-cpp) embedding provider.
 *
 * The llama runtime, model, and embedding context are created lazily on the
 * first embed call and cached; a shared init promise de-duplicates concurrent
 * first calls.
 */
async function createLocalEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<EmbeddingProvider> {
  const modelPath = options.local?.modelPath?.trim() || DEFAULT_LOCAL_MODEL;
  const modelCacheDir = options.local?.modelCacheDir?.trim();
  // Lazy-load node-llama-cpp to keep startup light unless local is enabled.
  const { getLlama, resolveModelFile, LlamaLogLevel } = await importNodeLlamaCpp();
  let llama: Llama | null = null;
  let embeddingModel: LlamaModel | null = null;
  let embeddingContext: LlamaEmbeddingContext | null = null;
  // In-flight initialization; concurrent callers await the same promise.
  let initPromise: Promise<LlamaEmbeddingContext> | null = null;
  const ensureContext = async (): Promise<LlamaEmbeddingContext> => {
    if (embeddingContext) {
      return embeddingContext;
    }
    if (initPromise) {
      return initPromise;
    }
    initPromise = (async () => {
      try {
        if (!llama) {
          llama = await getLlama({ logLevel: LlamaLogLevel.error });
        }
        if (!embeddingModel) {
          // resolveModelFile resolves (and may fetch) the model spec into the
          // cache dir — presumably for hf:/http(s): specifiers; see
          // node-llama-cpp docs.
          const resolved = await resolveModelFile(modelPath, modelCacheDir || undefined);
          embeddingModel = await llama.loadModel({ modelPath: resolved });
        }
        if (!embeddingContext) {
          embeddingContext = await embeddingModel.createEmbeddingContext();
        }
        return embeddingContext;
      } catch (err) {
        // Clear the in-flight marker so a later call can retry; stages that
        // already completed (llama / model) are kept and reused on retry.
        initPromise = null;
        throw err;
      }
    })();
    return initPromise;
  };
  return {
    id: "local",
    model: modelPath,
    embedQuery: async (text) => {
      const ctx = await ensureContext();
      const embedding = await ctx.getEmbeddingFor(text);
      return sanitizeAndNormalizeEmbedding(Array.from(embedding.vector));
    },
    embedBatch: async (texts) => {
      const ctx = await ensureContext();
      // NOTE(review): embeds concurrently against one context — assumes
      // getEmbeddingFor is safe to call concurrently; confirm upstream.
      const embeddings = await Promise.all(
        texts.map(async (text) => {
          const embedding = await ctx.getEmbeddingFor(text);
          return sanitizeAndNormalizeEmbedding(Array.from(embedding.vector));
        }),
      );
      return embeddings;
    },
  };
}
// Alias kept so existing importers of DEFAULT_LOCAL_MODEL keep working after
// the constant moved to the extension-host registry.
export const DEFAULT_LOCAL_MODEL = DEFAULT_EXTENSION_HOST_LOCAL_EMBEDDING_MODEL;
/**
 * Resolves the embedding provider requested in options.
 *
 * "auto": tries a locally present model first, then each remote provider in
 * REMOTE_EMBEDDING_PROVIDER_IDS order; missing-API-key failures are collected
 * into providerUnavailableReason with a null provider (FTS-only mode), any
 * other failure is fatal. A concrete id is tried directly, then the
 * configured fallback; only missing-API-key failures degrade gracefully.
 */
export async function createEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<EmbeddingProviderResult> {
  const requestedProvider = options.provider;
  const fallback = options.fallback;
  // Dispatches one provider id to its factory, pairing the provider with its
  // provider-specific client handle; the final branch covers "openai".
  const createProvider = async (id: EmbeddingProviderId) => {
    if (id === "local") {
      const provider = await createLocalEmbeddingProvider(options);
      return { provider };
    }
    if (id === "ollama") {
      const { provider, client } = await createOllamaEmbeddingProvider(options);
      return { provider, ollama: client };
    }
    if (id === "gemini") {
      const { provider, client } = await createGeminiEmbeddingProvider(options);
      return { provider, gemini: client };
    }
    if (id === "voyage") {
      const { provider, client } = await createVoyageEmbeddingProvider(options);
      return { provider, voyage: client };
    }
    if (id === "mistral") {
      const { provider, client } = await createMistralEmbeddingProvider(options);
      return { provider, mistral: client };
    }
    const { provider, client } = await createOpenAiEmbeddingProvider(options);
    return { provider, openAi: client };
  };
  // Local failures get the detailed setup guidance; remote ones stay plain.
  const formatPrimaryError = (err: unknown, provider: EmbeddingProviderId) =>
    provider === "local" ? formatLocalSetupError(err) : formatErrorMessage(err);
  if (requestedProvider === "auto") {
    const missingKeyErrors: string[] = [];
    let localError: string | null = null;
    if (canAutoSelectLocal(options)) {
      try {
        const local = await createProvider("local");
        return { ...local, requestedProvider };
      } catch (err) {
        // Remember why local failed; remote candidates are still tried.
        localError = formatLocalSetupError(err);
      }
    }
    for (const provider of REMOTE_EMBEDDING_PROVIDER_IDS) {
      try {
        const result = await createProvider(provider);
        return { ...result, requestedProvider };
      } catch (err) {
        const message = formatPrimaryError(err, provider);
        if (isMissingApiKeyError(err)) {
          missingKeyErrors.push(message);
          continue;
        }
        // Non-auth errors (e.g., network) are still fatal
        const wrapped = new Error(message) as Error & { cause?: unknown };
        wrapped.cause = err;
        throw wrapped;
      }
    }
    // All providers failed due to missing API keys - return null provider for FTS-only mode
    const details = [...missingKeyErrors, localError].filter(Boolean) as string[];
    const reason = details.length > 0 ? details.join("\n\n") : "No embeddings provider available.";
    return {
      provider: null,
      requestedProvider,
      providerUnavailableReason: reason,
    };
  }
  try {
    const primary = await createProvider(requestedProvider);
    return { ...primary, requestedProvider };
  } catch (primaryErr) {
    const reason = formatPrimaryError(primaryErr, requestedProvider);
    if (fallback && fallback !== "none" && fallback !== requestedProvider) {
      try {
        const fallbackResult = await createProvider(fallback);
        return {
          ...fallbackResult,
          requestedProvider,
          fallbackFrom: requestedProvider,
          fallbackReason: reason,
        };
      } catch (fallbackErr) {
        // Both primary and fallback failed - check if it's auth-related
        const fallbackReason = formatErrorMessage(fallbackErr);
        const combinedReason = `${reason}\n\nFallback to ${fallback} failed: ${fallbackReason}`;
        if (isMissingApiKeyError(primaryErr) && isMissingApiKeyError(fallbackErr)) {
          // Both failed due to missing API keys - return null for FTS-only mode
          return {
            provider: null,
            requestedProvider,
            fallbackFrom: requestedProvider,
            fallbackReason: reason,
            providerUnavailableReason: combinedReason,
          };
        }
        // Non-auth errors are still fatal
        const wrapped = new Error(combinedReason) as Error & { cause?: unknown };
        wrapped.cause = fallbackErr;
        throw wrapped;
      }
    }
    // No fallback configured - check if we should degrade to FTS-only
    if (isMissingApiKeyError(primaryErr)) {
      return {
        provider: null,
        requestedProvider,
        providerUnavailableReason: reason,
      };
    }
    const wrapped = new Error(reason) as Error & { cause?: unknown };
    wrapped.cause = primaryErr;
    throw wrapped;
  }
}
/**
 * Identifies the module-resolution error Node throws when the optional
 * node-llama-cpp package is absent: code ERR_MODULE_NOT_FOUND with the
 * package name in the message.
 */
function isNodeLlamaCppMissing(err: unknown): boolean {
  if (!(err instanceof Error)) {
    return false;
  }
  const maybeCode = (err as Error & { code?: unknown }).code;
  if (maybeCode !== "ERR_MODULE_NOT_FOUND") {
    return false;
  }
  return err.message.includes("node-llama-cpp");
}
/**
 * Builds the multi-line "local embeddings unavailable" guidance: the failure
 * reason (special-cased when node-llama-cpp itself is missing), setup steps,
 * and the remote-provider alternatives.
 */
function formatLocalSetupError(err: unknown): string {
  const detail = formatErrorMessage(err);
  const missing = isNodeLlamaCppMissing(err);
  return [
    "Local embeddings unavailable.",
    missing
      ? "Reason: optional dependency node-llama-cpp is missing (or failed to install)."
      : detail
        ? `Reason: ${detail}`
        : undefined,
    missing && detail ? `Detail: ${detail}` : null,
    "To enable local embeddings:",
    "1) Use Node 24 (recommended for installs/updates; Node 22 LTS, currently 22.16+, remains supported)",
    missing
      ? "2) Reinstall OpenClaw (this should install node-llama-cpp): npm i -g openclaw@latest"
      : null,
    "3) If you use pnpm: pnpm approve-builds (select node-llama-cpp), then pnpm rebuild node-llama-cpp",
    // Offer each remote provider as a configuration alternative.
    ...REMOTE_EMBEDDING_PROVIDER_IDS.map(
      (provider) => `Or set agents.defaults.memorySearch.provider = "${provider}" (remote).`,
    ),
  ]
    .filter(Boolean)
    .join("\n");
  // NOTE(review): the line below is unreachable and references `options`,
  // which is not a parameter of this function — it appears to be diff-render
  // residue (the new one-line body of createEmbeddingProvider fused into this
  // deleted function). Confirm against the actual file contents.
  return createExtensionHostEmbeddingProvider(options);
}