fix(openai): allow qa image generation mock routing

This commit is contained in:
Peter Steinberger 2026-04-06 04:27:57 +01:00
parent e29ebc0417
commit 746b112dac
No known key found for this signature in database
4 changed files with 114 additions and 1 deletions

View File

@ -0,0 +1,83 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { buildOpenAIImageGenerationProvider } from "./image-generation-provider.js";
// Mock functions are created inside vi.hoisted() so they exist before the
// vi.mock() factories below run: Vitest hoists vi.mock() calls to the very
// top of the module, ahead of ordinary const initialization, so referencing
// a plain module-level const from a factory would throw a TDZ error.
const {
  resolveApiKeyForProviderMock,
  postJsonRequestMock,
  postTranscriptionRequestMock,
  assertOkOrThrowHttpErrorMock,
  resolveProviderHttpRequestConfigMock,
} = vi.hoisted(() => ({
  // Auth always succeeds with a fixed key so tests exercise routing, not auth.
  resolveApiKeyForProviderMock: vi.fn(async () => ({ apiKey: "openai-key" })),
  postJsonRequestMock: vi.fn(),
  postTranscriptionRequestMock: vi.fn(),
  assertOkOrThrowHttpErrorMock: vi.fn(async () => {}),
  // Minimal stand-in for the real resolver: prefer an explicit baseUrl over the
  // default, and treat any explicit baseUrl as opting in to private networking.
  resolveProviderHttpRequestConfigMock: vi.fn((params) => ({
    baseUrl: params.baseUrl ?? params.defaultBaseUrl,
    allowPrivateNetwork: Boolean(params.allowPrivateNetwork ?? params.baseUrl?.trim()),
    headers: new Headers(params.defaultHeaders),
    dispatcherPolicy: undefined,
  })),
}));
// Replace the SDK auth + HTTP modules with the hoisted mocks above.
vi.mock("openclaw/plugin-sdk/provider-auth-runtime", () => ({
  resolveApiKeyForProvider: resolveApiKeyForProviderMock,
}));
vi.mock("openclaw/plugin-sdk/provider-http", () => ({
  assertOkOrThrowHttpError: assertOkOrThrowHttpErrorMock,
  postJsonRequest: postJsonRequestMock,
  postTranscriptionRequest: postTranscriptionRequestMock,
  resolveProviderHttpRequestConfig: resolveProviderHttpRequestConfigMock,
}));
describe("openai image generation provider", () => {
afterEach(() => {
resolveApiKeyForProviderMock.mockClear();
postJsonRequestMock.mockReset();
postTranscriptionRequestMock.mockReset();
assertOkOrThrowHttpErrorMock.mockClear();
resolveProviderHttpRequestConfigMock.mockClear();
});
it("allows explicit local baseUrl overrides for image requests", async () => {
postJsonRequestMock.mockResolvedValue({
response: {
json: async () => ({
data: [{ b64_json: Buffer.from("png-bytes").toString("base64") }],
}),
},
release: vi.fn(async () => {}),
});
const provider = buildOpenAIImageGenerationProvider();
const result = await provider.generateImage({
provider: "openai",
model: "gpt-image-1",
prompt: "Draw a QA lighthouse",
cfg: {
models: {
providers: {
openai: {
baseUrl: "http://127.0.0.1:44080/v1",
models: [],
},
},
},
},
});
expect(resolveProviderHttpRequestConfigMock).toHaveBeenCalledWith(
expect.objectContaining({
baseUrl: "http://127.0.0.1:44080/v1",
}),
);
expect(postJsonRequestMock).toHaveBeenCalledWith(
expect.objectContaining({
url: "http://127.0.0.1:44080/v1/images/generations",
allowPrivateNetwork: true,
}),
);
expect(result.images).toHaveLength(1);
});
});

View File

@ -90,7 +90,6 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
resolveProviderHttpRequestConfig({
baseUrl: resolveOpenAIBaseUrl(req.cfg),
defaultBaseUrl: DEFAULT_OPENAI_IMAGE_BASE_URL,
allowPrivateNetwork: false,
defaultHeaders: {
Authorization: `Bearer ${auth.apiKey}`,
},

View File

@ -251,6 +251,17 @@ describe("qa mock openai server", () => {
expect(await image.json()).toMatchObject({
data: [{ b64_json: expect.any(String) }],
});
const imageRequests = await fetch(`${server.baseUrl}/debug/image-generations`);
expect(imageRequests.status).toBe(200);
expect(await imageRequests.json()).toMatchObject([
{
model: "gpt-image-1",
prompt: "Draw a QA lighthouse",
n: 1,
size: "1024x1024",
},
]);
});
it("returns exact markers for visible and hot-installed skills", async () => {

View File

@ -279,6 +279,11 @@ function extractOrbitCode(text: string) {
return /\b(?:ORBIT-9|orbit-9)\b/.exec(text)?.[0]?.toUpperCase() ?? null;
}
// Extracts the literal reply text following a "reply with exactly:" directive
// (case-insensitive, rest of the line). Returns null when the directive is
// absent or its payload trims down to an empty string.
function extractExactReplyDirective(text: string) {
  const payload = text.match(/reply with exactly:\s*([^\n]+)/i)?.[1]?.trim();
  return payload ? payload : null;
}
function buildAssistantText(input: ResponsesInputItem[], body: Record<string, unknown>) {
const prompt = extractLastUserText(input);
const toolOutput = extractToolOutput(input);
@ -295,6 +300,7 @@ function buildAssistantText(input: ResponsesInputItem[], body: Record<string, un
: toolOutput;
const orbitCode = extractOrbitCode(memorySnippet);
const mediaPath = /MEDIA:([^\n]+)/.exec(toolOutput)?.[1]?.trim();
const exactReplyDirective = extractExactReplyDirective(allInputText);
if (/what was the qa canary code/i.test(prompt) && rememberedFact) {
return `Protocol note: the QA canary code was ${rememberedFact}.`;
@ -305,6 +311,9 @@ function buildAssistantText(input: ResponsesInputItem[], body: Record<string, un
if (/memory unavailable check/i.test(prompt)) {
return "Protocol note: I checked the available runtime context but could not confirm the hidden memory-only fact, so I will not guess.";
}
if (/\bmarker\b/i.test(prompt) && exactReplyDirective) {
return exactReplyDirective;
}
if (/visible skill marker/i.test(prompt)) {
return "VISIBLE-SKILL-OK";
}
@ -491,6 +500,7 @@ export async function startQaMockOpenAiServer(params?: { host?: string; port?: n
const host = params?.host ?? "127.0.0.1";
let lastRequest: MockOpenAiRequestSnapshot | null = null;
const requests: MockOpenAiRequestSnapshot[] = [];
const imageGenerationRequests: Array<Record<string, unknown>> = [];
const server = createServer(async (req, res) => {
const url = new URL(req.url ?? "/", "http://127.0.0.1");
if (req.method === "GET" && (url.pathname === "/healthz" || url.pathname === "/readyz")) {
@ -515,7 +525,17 @@ export async function startQaMockOpenAiServer(params?: { host?: string; port?: n
writeJson(res, 200, requests);
return;
}
if (req.method === "GET" && url.pathname === "/debug/image-generations") {
writeJson(res, 200, imageGenerationRequests);
return;
}
if (req.method === "POST" && url.pathname === "/v1/images/generations") {
const raw = await readBody(req);
const body = raw ? (JSON.parse(raw) as Record<string, unknown>) : {};
imageGenerationRequests.push(body);
if (imageGenerationRequests.length > 20) {
imageGenerationRequests.splice(0, imageGenerationRequests.length - 20);
}
writeJson(res, 200, {
data: [
{