test: share sanitize session usage helpers

This commit is contained in:
Peter Steinberger 2026-03-13 23:59:46 +00:00
parent 91b9c47dad
commit 903cb0679d
1 changed file with 62 additions and 74 deletions

View File

@ -177,6 +177,14 @@ describe("sanitizeSessionHistory", () => {
AgentMessage & { usage?: unknown; content?: unknown }
>;
// Runs the OpenAI history sanitizer (with the Google-model check mocked to
// false) and returns the single assistant entry, if any, typed so tests can
// inspect its `usage` field.
const getSingleAssistantUsage = async (messages: AgentMessage[]) => {
  vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false);
  const sanitized = await sanitizeOpenAIHistory(messages);
  const assistant = sanitized.find((entry) => entry.role === "assistant");
  return assistant as (AgentMessage & { usage?: unknown }) | undefined;
};
beforeEach(async () => {
testTimestamp = 1;
const harness = await loadSanitizeSessionHistoryWithCleanMocks();
@ -358,28 +366,22 @@ describe("sanitizeSessionHistory", () => {
});
it("adds a zeroed assistant usage snapshot when usage is missing", async () => {
vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false);
const messages = castAgentMessages([
const assistant = await getSingleAssistantUsage(
castAgentMessages([
{ role: "user", content: "question" },
{
role: "assistant",
content: [{ type: "text", text: "answer without usage" }],
},
]);
const result = await sanitizeOpenAIHistory(messages);
const assistant = result.find((message) => message.role === "assistant") as
| (AgentMessage & { usage?: unknown })
| undefined;
]),
);
expect(assistant?.usage).toEqual(makeZeroUsageSnapshot());
});
it("normalizes mixed partial assistant usage fields to numeric totals", async () => {
vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false);
const messages = castAgentMessages([
const assistant = await getSingleAssistantUsage(
castAgentMessages([
{ role: "user", content: "question" },
{
role: "assistant",
@ -389,12 +391,8 @@ describe("sanitizeSessionHistory", () => {
cache_read_input_tokens: 9,
},
},
]);
const result = await sanitizeOpenAIHistory(messages);
const assistant = result.find((message) => message.role === "assistant") as
| (AgentMessage & { usage?: unknown })
| undefined;
]),
);
expect(assistant?.usage).toEqual({
input: 0,
@ -406,9 +404,8 @@ describe("sanitizeSessionHistory", () => {
});
it("preserves existing usage cost while normalizing token fields", async () => {
vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false);
const messages = castAgentMessages([
const assistant = await getSingleAssistantUsage(
castAgentMessages([
{ role: "user", content: "question" },
{
role: "assistant",
@ -425,12 +422,8 @@ describe("sanitizeSessionHistory", () => {
},
},
},
]);
const result = await sanitizeOpenAIHistory(messages);
const assistant = result.find((message) => message.role === "assistant") as
| (AgentMessage & { usage?: unknown })
| undefined;
]),
);
expect(assistant?.usage).toEqual({
...makeZeroUsageSnapshot(),
@ -450,9 +443,8 @@ describe("sanitizeSessionHistory", () => {
});
it("preserves unknown cost when token fields already match", async () => {
vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false);
const messages = castAgentMessages([
const assistant = await getSingleAssistantUsage(
castAgentMessages([
{ role: "user", content: "question" },
{
role: "assistant",
@ -465,12 +457,8 @@ describe("sanitizeSessionHistory", () => {
totalTokens: 10,
},
},
]);
const result = await sanitizeOpenAIHistory(messages);
const assistant = result.find((message) => message.role === "assistant") as
| (AgentMessage & { usage?: unknown })
| undefined;
]),
);
expect(assistant?.usage).toEqual({
input: 1,