mirror of https://github.com/openclaw/openclaw.git
fix: carry live overflow token counts
This commit is contained in:
parent
6f7e5d9c38
commit
f91271594d
|
|
@ -236,6 +236,7 @@ Docs: https://docs.openclaw.ai
|
|||
- Memory/Gemini: normalize returned Gemini embeddings across direct query, direct batch, and async batch paths so memory search uses consistent vector handling for Gemini too. (#43409) Thanks @gumadeiras.
|
||||
- Agents/failover: recognize additional serialized network errno strings plus `EHOSTDOWN` and `EPIPE` structured codes so transient transport failures trigger timeout failover more reliably. (#42830) Thanks @jnMetaCode.
|
||||
- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb.
|
||||
- Agents/embedded runner: carry provider-observed overflow token counts into compaction so overflow retries and diagnostics use the rejected live prompt size instead of only transcript estimates. (#40357) Thanks @rabsef-bicrym.
|
||||
|
||||
## 2026.3.7
|
||||
|
||||
|
|
|
|||
|
|
@ -469,9 +469,9 @@ describe("extractObservedOverflowTokenCount", () => {
|
|||
'400 {"type":"error","error":{"message":"prompt is too long: 277403 tokens > 200000 maximum"}}',
|
||||
),
|
||||
).toBe(277403);
|
||||
expect(extractObservedOverflowTokenCount("Context window exceeded: requested 12000 tokens")).toBe(
|
||||
12000,
|
||||
);
|
||||
expect(
|
||||
extractObservedOverflowTokenCount("Context window exceeded: requested 12000 tokens"),
|
||||
).toBe(12000);
|
||||
expect(
|
||||
extractObservedOverflowTokenCount(
|
||||
"This model's maximum context length is 128000 tokens. However, your messages resulted in 145000 tokens.",
|
||||
|
|
|
|||
|
|
@ -110,9 +110,7 @@ vi.mock("../pi-embedded-helpers.js", () => ({
|
|||
formatBillingErrorMessage: vi.fn(() => ""),
|
||||
classifyFailoverReason: vi.fn(() => null),
|
||||
extractObservedOverflowTokenCount: vi.fn((msg?: string) => {
|
||||
const match = msg?.match(
|
||||
/prompt is too long:\s*([\d,]+)\s+tokens\s*>\s*[\d,]+\s+maximum/i,
|
||||
);
|
||||
const match = msg?.match(/prompt is too long:\s*([\d,]+)\s+tokens\s*>\s*[\d,]+\s+maximum/i);
|
||||
return match?.[1] ? Number(match[1].replaceAll(",", "")) : undefined;
|
||||
}),
|
||||
formatAssistantErrorText: vi.fn(() => ""),
|
||||
|
|
|
|||
Loading…
Reference in New Issue