This commit is contained in:
Issac the Kaylon 2026-03-15 17:49:47 -05:00 committed by GitHub
commit 8d55dfc00d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 75 additions and 0 deletions

View File

@ -113,6 +113,37 @@ describe("formatAssistantErrorText", () => {
expect(formatAssistantErrorText(msg)).toContain("rate limit reached");
});
it("surfaces OAuth refresh-token reuse as re-auth required", () => {
  const msg = makeAssistantError(
    'OAuth token refresh failed for openai-codex: 401 {"error":{"message":"Your refresh token has already been used to generate a new access token. Please try signing in again.","type":"invalid_request_error","code":"refresh_token_reused"}}',
  );
  // Format once and run both assertions against the same output.
  const formatted = formatAssistantErrorText(msg, { provider: "openai-codex" });
  expect(formatted).toContain("Please re-authenticate");
  expect(formatted).not.toContain("rate limit");
});
it("surfaces OAuth refresh-token reuse when invalid_request_error fields are type-first", () => {
  const msg = makeAssistantError(
    'OAuth token refresh failed for openai-codex: 401 {"error":{"type":"invalid_request_error","message":"Your refresh token has already been used to generate a new access token. Please try signing in again.","code":"refresh_token_reused"}}',
  );
  // Field order in the JSON payload must not affect classification.
  const formatted = formatAssistantErrorText(msg, { provider: "openai-codex" });
  expect(formatted).toContain("Please re-authenticate");
  expect(formatted).not.toContain("LLM request rejected");
});
it("does not rewrite generic OAuth refresh failures without token-reuse signal", () => {
  // A refresh failure with no reuse marker should fall through to the
  // ordinary timeout copy rather than the re-auth copy.
  const msg = makeAssistantError(
    "OAuth token refresh failed for openai-codex: request timed out while contacting auth endpoint",
  );
  const formatted = formatAssistantErrorText(msg, { provider: "openai-codex" });
  expect(formatted).toBe("LLM request timed out.");
});
it("returns a friendly message for empty stream chunk errors", () => {
const msg = makeAssistantError("request ended without sending any chunks");
expect(formatAssistantErrorText(msg)).toBe("LLM request timed out.");

View File

@ -759,6 +759,16 @@ describe("classifyFailoverReason", () => {
"auth",
);
expect(classifyFailoverReason("Missing scopes: model.request")).toBe("auth");
expect(
classifyFailoverReason(
'OAuth token refresh failed for openai-codex: 401 {"error":{"message":"Your refresh token has already been used to generate a new access token. Please try signing in again.","type":"invalid_request_error","code":"refresh_token_reused"}}',
),
).toBe("auth");
expect(
classifyFailoverReason(
"OAuth token refresh failed for openai-codex: request timed out while contacting auth endpoint",
),
).toBe("timeout");
expect(
classifyFailoverReason("model_cooldown: All credentials for model gpt-5 are cooling down"),
).toBe("rate_limit");

View File

@ -43,6 +43,29 @@ const RATE_LIMIT_ERROR_USER_MESSAGE = "⚠️ API rate limit reached. Please try
const OVERLOADED_ERROR_USER_MESSAGE =
"The AI service is temporarily overloaded. Please try again in a moment.";
/**
 * Detects OAuth refresh failures caused by refresh-token reuse, which require
 * the user to re-authenticate rather than retry the request.
 *
 * A message qualifies only when it both mentions a token-refresh context and
 * carries an explicit reuse signal (error code or provider message text).
 */
function isOauthRefreshReauthRequiredMessage(raw: string): boolean {
  if (!raw) {
    return false;
  }
  const text = raw.toLowerCase();
  const contextMarkers = ["oauth token refresh failed", "token refresh failed", "refresh token"];
  const reuseMarkers = ["refresh_token_reused", "refresh token has already been used"];
  const inRefreshContext = contextMarkers.some((marker) => text.includes(marker));
  const showsReuse = reuseMarkers.some((marker) => text.includes(marker));
  return inRefreshContext && showsReuse;
}
/**
 * Builds the user-facing re-authentication message for an expired OAuth
 * session, naming the provider when one is supplied.
 *
 * @param provider - Optional provider label; blank/whitespace values fall
 *   back to the generic "Provider" wording.
 */
function formatOauthRefreshReauthCopy(provider?: string): string {
  // `|| "Provider"` covers undefined and the all-whitespace case alike,
  // since trim() yields an empty (falsy) string for the latter.
  const label = provider?.trim() || "Provider";
  return `🔐 ${label} authentication expired. Please re-authenticate and try again.`;
}
function formatRateLimitOrOverloadedErrorCopy(raw: string): string | undefined {
if (isRateLimitErrorMessage(raw)) {
return RATE_LIMIT_ERROR_USER_MESSAGE;
@ -732,6 +755,10 @@ export function formatAssistantErrorText(
);
}
if (isOauthRefreshReauthRequiredMessage(raw)) {
return formatOauthRefreshReauthCopy(opts?.provider);
}
const invalidRequest = raw.match(/"type":"invalid_request_error".*?"message":"([^"]+)"/);
if (invalidRequest?.[1]) {
return `LLM request rejected: ${invalidRequest[1]}`;
@ -793,6 +820,10 @@ export function sanitizeUserFacingText(text: string, opts?: { errorContext?: boo
return BILLING_ERROR_USER_MESSAGE;
}
if (isOauthRefreshReauthRequiredMessage(trimmed)) {
return formatOauthRefreshReauthCopy();
}
if (isRawApiErrorPayload(trimmed) || isLikelyHttpErrorText(trimmed)) {
return formatRawAssistantErrorForUi(trimmed);
}
@ -988,6 +1019,9 @@ export function classifyFailoverReason(raw: string): FailoverReason | null {
if (reasonFrom402Text) {
return reasonFrom402Text;
}
if (isOauthRefreshReauthRequiredMessage(raw)) {
return "auth";
}
if (isPeriodicUsageLimitErrorMessage(raw)) {
return isBillingErrorMessage(raw) ? "billing" : "rate_limit";
}