diff --git a/CHANGELOG.md b/CHANGELOG.md index b4639870fa8..e601de4cf4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -181,6 +181,13 @@ Docs: https://docs.openclaw.ai - LINE/markdown: preserve underscores inside Latin, Cyrillic, and CJK words when stripping markdown, while still removing standalone `_italic_` markers on the shared text-runtime path used by LINE and TTS. (#47465) Thanks @jackjin1997. - Agents/failover: make overloaded same-provider retry count and retry delay configurable via `auth.cooldowns`, default to one retry with no delay, and document the model-fallback behavior. - Ollama/model picker: include configured Ollama models in the opted-in non-PI-native model catalog path so Ollama onboarding shows available models directly after provider selection. (#55290) Thanks @Luckymingxuan. +- Telegram/audio: transcode Telegram voice-note `.ogg` attachments before the local `whisper-cli` auto fallback runs, and keep mention-preflight transcription enabled in auto mode when `tools.media.audio` is unset. +- Matrix/direct rooms: recover fresh auto-joined 1:1 DMs without eagerly persisting invite-only `m.direct` mappings, while keeping named, aliased, and explicitly configured rooms on the room path. (#58024) Thanks @gumadeiras. +- TTS: Restore 3.28 schema compatibility and fallback observability. (#57953) Thanks @joshavant. +- Telegram/forum topics: restore reply routing to the active topic and keep ACP `sessions_spawn(..., thread=true, mode="session")` bound to that same topic instead of falling back to root chat or losing follow-up routing. (#56060) Thanks @one27001. +- Config/SecretRef + Control UI: harden SecretRef redaction round-trip restore, block unsafe raw fallback (force Form mode when raw is unavailable), and preflight submitted-config SecretRefs before config write RPC persistence. (#58044) Thanks @joshavant. 
+- Config/Telegram: migrate removed `channels.telegram.groupMentionsOnly` into `channels.telegram.groups["*"].requireMention` on load so legacy configs no longer crash at startup. (#55336) Thanks @jameslcowan. +- `/context detail` now compares the tracked prompt estimate with cached context usage and surfaces untracked provider/runtime overhead when present. (#28391) Thanks @ImLukeF. ## 2026.3.31-beta.1 @@ -2325,7 +2332,6 @@ Docs: https://docs.openclaw.ai - Diagnostics/Stuck session signal: add configurable stuck-session warning threshold via `diagnostics.stuckSessionWarnMs` (default 120000ms) to reduce false-positive warnings on long multi-tool turns. (#31032) - Agents/error classification: check billing errors before context overflow heuristics in the agent runner catch block so spend-limit and quota errors show the billing-specific message instead of being misclassified as "Context overflow: prompt too large". (#40409) Thanks @ademczuk. - Memory/MMR CJK tokenization: add Han, kana, and hangul tokens plus adjacent bigrams so memory-search reranking can detect overlap for CJK text instead of treating unrelated snippets as identical. (#29396) Thanks @buyitsydney. -- `/context detail` now compares the tracked prompt estimate with cached context usage and surfaces untracked provider/runtime overhead when present. (#28391) thanks @ImLukeF. 
## 2026.2.26 diff --git a/src/auto-reply/reply/commands-context-report.test.ts b/src/auto-reply/reply/commands-context-report.test.ts index 15deeaac36a..be51c0d03d4 100644 --- a/src/auto-reply/reply/commands-context-report.test.ts +++ b/src/auto-reply/reply/commands-context-report.test.ts @@ -8,6 +8,8 @@ function makeParams( options?: { omitBootstrapLimits?: boolean; contextTokens?: number | null; + totalTokens?: number | null; + totalTokensFresh?: boolean; }, ): HandleCommandsParams { return { @@ -25,7 +27,8 @@ function makeParams( resolvedThinkLevel: "off", resolvedReasoningLevel: "off", sessionEntry: { - totalTokens: 123, + totalTokens: options?.totalTokens ?? 123, + totalTokensFresh: options?.totalTokensFresh ?? true, inputTokens: 100, outputTokens: 23, systemPromptReport: { @@ -96,18 +99,27 @@ describe("buildContextReply", () => { it("shows tracked estimate and cached context delta in detail output", async () => { const result = await buildContextReply( makeParams("/context detail", false, { - contextTokens: 900, + contextTokens: 8_192, + totalTokens: 900, }), ); expect(result.text).toContain("Tracked prompt estimate: 1,020 chars (~255 tok)"); expect(result.text).toContain("Actual context usage (cached): 900 tok"); expect(result.text).toContain("Untracked provider/runtime overhead: ~645 tok"); + expect(result.text).toContain("Session tokens (cached): 900 total / ctx=8,192"); }); it("shows estimate-only detail output when cached context usage is unavailable", async () => { - const result = await buildContextReply(makeParams("/context detail", false)); + const result = await buildContextReply( + makeParams("/context detail", false, { + contextTokens: 8_192, + totalTokens: 900, + totalTokensFresh: false, + }), + ); expect(result.text).toContain("Tracked prompt estimate: 1,020 chars (~255 tok)"); expect(result.text).toContain("Actual context usage (cached): unavailable"); + expect(result.text).toContain("Session tokens (cached): unknown / ctx=8,192"); 
expect(result.text).not.toContain("~645 tok"); }); }); diff --git a/src/auto-reply/reply/commands-context-report.ts b/src/auto-reply/reply/commands-context-report.ts index e3feb1910de..040357be320 100644 --- a/src/auto-reply/reply/commands-context-report.ts +++ b/src/auto-reply/reply/commands-context-report.ts @@ -4,7 +4,10 @@ import { resolveBootstrapTotalMaxChars, } from "../../agents/pi-embedded-helpers.js"; import { buildSystemPromptReport } from "../../agents/system-prompt-report.js"; -import type { SessionSystemPromptReport } from "../../config/sessions/types.js"; +import { + resolveFreshSessionTotalTokens, + type SessionSystemPromptReport, +} from "../../config/sessions/types.js"; import { estimateTokensFromChars } from "../../utils/cjk-chars.js"; import type { ReplyPayload } from "../types.js"; import { resolveCommandsSystemPromptBundle } from "./commands-system-prompt.js"; @@ -93,8 +96,10 @@ export async function buildContextReply(params: HandleCommandsParams): Promise