From d166f2648ef356cdc576d69d8e2c5a1ee0359515 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=A4=8D=E8=AF=95=E8=B5=84=E6=96=99?=
Date: Fri, 27 Mar 2026 20:32:27 +0800
Subject: [PATCH] Agents: normalize WS usage aliases

---
 src/agents/openai-ws-message-conversion.ts |  8 +++++---
 src/agents/openai-ws-stream.test.ts        | 14 ++++++++++++++
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/src/agents/openai-ws-message-conversion.ts b/src/agents/openai-ws-message-conversion.ts
index 72dcf702404..62f22aff26b 100644
--- a/src/agents/openai-ws-message-conversion.ts
+++ b/src/agents/openai-ws-message-conversion.ts
@@ -10,6 +10,7 @@ import type {
 } from "./openai-ws-connection.js";
 import { normalizeToolParameterSchema } from "./pi-tools.schema.js";
 import { buildAssistantMessage, buildUsageWithNoCost } from "./stream-message-shared.js";
+import { normalizeUsage } from "./usage.js";
 
 type AnyMessage = Message & { role: string; content: unknown };
 type AssistantMessageWithPhase = AssistantMessage & { phase?: OpenAIResponsesAssistantPhase };
@@ -538,14 +539,15 @@ export function buildAssistantMessageFromResponse(
   const hasToolCalls = content.some((part) => part.type === "toolCall");
   const stopReason: StopReason = hasToolCalls ? "toolUse" : "stop";
 
+  const normalizedUsage = normalizeUsage(response.usage);
   const message = buildAssistantMessage({
     model: modelInfo,
     content,
     stopReason,
     usage: buildUsageWithNoCost({
-      input: response.usage?.input_tokens ?? 0,
-      output: response.usage?.output_tokens ?? 0,
-      totalTokens: response.usage?.total_tokens ?? 0,
+      input: normalizedUsage?.input ?? 0,
+      output: normalizedUsage?.output ?? 0,
+      totalTokens: normalizedUsage?.total ?? response.usage?.total_tokens ?? 0,
     }),
   });
 
diff --git a/src/agents/openai-ws-stream.test.ts b/src/agents/openai-ws-stream.test.ts
index 2bf997ab496..eee7688bcff 100644
--- a/src/agents/openai-ws-stream.test.ts
+++ b/src/agents/openai-ws-stream.test.ts
@@ -860,6 +860,20 @@ describe("buildAssistantMessageFromResponse", () => {
     expect(msg.usage.totalTokens).toBe(150);
   });
 
+  it("maps prompt_tokens and completion_tokens usage aliases", () => {
+    const response = makeResponseObject("resp_5b", "Hello");
+    (response as unknown as { usage?: Record<string, unknown> }).usage = {
+      prompt_tokens: 44,
+      completion_tokens: 11,
+      total_tokens: 55,
+    };
+
+    const msg = buildAssistantMessageFromResponse(response, modelInfo);
+    expect(msg.usage.input).toBe(44);
+    expect(msg.usage.output).toBe(11);
+    expect(msg.usage.totalTokens).toBe(55);
+  });
+
   it("sets model/provider/api from modelInfo", () => {
     const response = makeResponseObject("resp_6", "Hi");
     const msg = buildAssistantMessageFromResponse(response, modelInfo);