feat(bedrock): add Bedrock Mantle (OpenAI-compatible) provider (#61296)

* feat(bedrock): add Bedrock Mantle (OpenAI-compatible) provider

New amazon-bedrock-mantle extension that provides auto-discovery and
authentication for Amazon Bedrock Mantle endpoints.

Mantle (bedrock-mantle.<region>.api.aws) is Amazon Bedrock's OpenAI-
compatible API surface, separate from the existing bedrock-runtime
(ConverseStream) endpoint. It has its own model catalog including
models not available via ConverseStream (e.g. openai.gpt-oss-120b,
mistral.devstral-2-123b).

Extension structure:
- discovery.ts: Model discovery via GET /v1/models (OpenAI format),
  bearer token resolution, implicit provider configuration
- register.sync.runtime.ts: Provider registration with catalog,
  error classification (rate limits, context overflow)
- openclaw.plugin.json: Plugin manifest, enabledByDefault

Auth support:
- Long-lived Bedrock API key (AWS_BEARER_TOKEN_BEDROCK env var)
  created from the AWS Console → used directly as Bearer token
- Pre-generated SigV4-derived tokens (via aws-bedrock-token-generator)
  set in AWS_BEARER_TOKEN_BEDROCK → works transparently

Provider config (auto-resolved when AWS_BEARER_TOKEN_BEDROCK is set):
  api: "openai-completions"
  baseUrl: "https://bedrock-mantle.<region>.api.aws/v1"
  auth: "api-key" (bearer token)

Available in 12 regions: us-east-1, us-east-2, us-west-2,
ap-northeast-1, ap-south-1, ap-southeast-3, eu-central-1,
eu-west-1, eu-west-2, eu-south-1, eu-north-1, sa-east-1

Tests: 15 passing (13 discovery + 2 plugin registration)

* chore(bedrock): clarify mantle bearer auth scope

---------

Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
wirjo 2026-04-05 21:53:54 +10:00 committed by GitHub
parent deb212d3b0
commit dbac5fa258
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 772 additions and 0 deletions

View File

@ -31,6 +31,7 @@ Docs: https://docs.openclaw.ai
- Providers/StepFun: add the bundled StepFun provider plugin with standard and Step Plan endpoints, China/global onboarding choices, `step-3.5-flash` on both catalogs, and `step-3.5-flash-2603` currently exposed on Step Plan. (#60032) Thanks @hengm3467.
- Tools/web_search: add a bundled MiniMax Search provider backed by the Coding Plan search API, with region reuse from `MINIMAX_API_HOST` and plugin-owned credential config. (#54648) Thanks @fengmk2.
- Providers/Amazon Bedrock: discover regional and global inference profiles, inherit their backing model capabilities, and inject the Bedrock request region automatically so cross-region Claude profiles work without manual provider overrides. (#61299) Thanks @wirjo.
- Providers/Amazon Bedrock Mantle: add a bundled OpenAI-compatible Mantle provider with bearer-token discovery, automatic OSS model catalog loading, and Bedrock Mantle region detection for hosted GPT-OSS, Qwen, Kimi, GLM, and similar routes. (#61296) Thanks @wirjo.
### Fixes

View File

@ -0,0 +1,7 @@
// Public surface of the Bedrock Mantle module.
// Consumers import from this barrel rather than reaching into `./discovery.js`.
export {
  discoverMantleModels,
  mergeImplicitMantleProvider,
  resetMantleDiscoveryCacheForTest,
  resolveImplicitMantleProvider,
  resolveMantleBearerToken,
} from "./discovery.js";

View File

@ -0,0 +1,400 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import {
discoverMantleModels,
mergeImplicitMantleProvider,
resetMantleDiscoveryCacheForTest,
resolveMantleBearerToken,
resolveImplicitMantleProvider,
} from "./api.js";
// Unit tests for Bedrock Mantle discovery: bearer-token resolution, model
// discovery via /v1/models, per-region caching, implicit provider resolution,
// and provider merging. All network access goes through an injected fetchFn.
describe("bedrock mantle discovery", () => {
  // Snapshot process.env so each test mutates a private copy and afterEach
  // restores the real object.
  const originalEnv = process.env;
  beforeEach(() => {
    process.env = { ...originalEnv };
    vi.restoreAllMocks();
    resetMantleDiscoveryCacheForTest();
  });
  afterEach(() => {
    process.env = originalEnv;
  });
  // ---------------------------------------------------------------------------
  // Bearer token resolution
  // ---------------------------------------------------------------------------
  it("resolves bearer token from AWS_BEARER_TOKEN_BEDROCK", () => {
    expect(
      resolveMantleBearerToken({
        AWS_BEARER_TOKEN_BEDROCK: "bedrock-api-key-abc123", // pragma: allowlist secret
      } as NodeJS.ProcessEnv),
    ).toBe("bedrock-api-key-abc123");
  });
  it("returns undefined when no bearer token env var is set", () => {
    expect(resolveMantleBearerToken({} as NodeJS.ProcessEnv)).toBeUndefined();
  });
  it("trims whitespace from bearer token", () => {
    expect(
      resolveMantleBearerToken({
        AWS_BEARER_TOKEN_BEDROCK: " my-token ", // pragma: allowlist secret
      } as NodeJS.ProcessEnv),
    ).toBe("my-token");
  });
  // ---------------------------------------------------------------------------
  // Model discovery
  // ---------------------------------------------------------------------------
  it("discovers models from Mantle /v1/models endpoint sorted by id", async () => {
    const mockFetch = vi.fn().mockResolvedValue({
      ok: true,
      json: async () => ({
        data: [
          { id: "openai.gpt-oss-120b", object: "model", owned_by: "openai" },
          { id: "anthropic.claude-sonnet-4-6", object: "model", owned_by: "anthropic" },
          { id: "mistral.devstral-2-123b", object: "model", owned_by: "mistral" },
        ],
      }),
    });
    const models = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    expect(models).toHaveLength(3);
    // Models should be sorted alphabetically by id
    expect(models[0]).toMatchObject({
      id: "anthropic.claude-sonnet-4-6",
      name: "anthropic.claude-sonnet-4-6",
      reasoning: false,
      input: ["text"],
    });
    expect(models[1]).toMatchObject({
      id: "mistral.devstral-2-123b",
      reasoning: false,
    });
    expect(models[2]).toMatchObject({
      id: "openai.gpt-oss-120b",
      reasoning: true, // GPT-OSS 120B supports reasoning
    });
    // Verify correct endpoint and auth header
    expect(mockFetch).toHaveBeenCalledWith(
      "https://bedrock-mantle.us-east-1.api.aws/v1/models",
      expect.objectContaining({
        headers: expect.objectContaining({
          Authorization: "Bearer test-token",
        }),
      }),
    );
  });
  it("infers reasoning support from model IDs", async () => {
    const mockFetch = vi.fn().mockResolvedValue({
      ok: true,
      json: async () => ({
        data: [
          { id: "moonshotai.kimi-k2-thinking", object: "model" },
          { id: "openai.gpt-oss-120b", object: "model" },
          { id: "openai.gpt-oss-safeguard-120b", object: "model" },
          { id: "deepseek.v3.2", object: "model" },
          { id: "mistral.mistral-large-3-675b-instruct", object: "model" },
        ],
      }),
    });
    const models = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    const byId = Object.fromEntries(models.map((m) => [m.id, m]));
    expect(byId["moonshotai.kimi-k2-thinking"]?.reasoning).toBe(true);
    expect(byId["openai.gpt-oss-120b"]?.reasoning).toBe(true);
    expect(byId["openai.gpt-oss-safeguard-120b"]?.reasoning).toBe(true);
    expect(byId["deepseek.v3.2"]?.reasoning).toBe(false);
    expect(byId["mistral.mistral-large-3-675b-instruct"]?.reasoning).toBe(false);
  });
  it("returns empty array on permission error", async () => {
    const mockFetch = vi.fn().mockResolvedValue({
      ok: false,
      status: 403,
      statusText: "Forbidden",
    });
    const models = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    expect(models).toEqual([]);
  });
  it("returns empty array on network error", async () => {
    const mockFetch = vi.fn().mockRejectedValue(new Error("ECONNREFUSED"));
    const models = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    expect(models).toEqual([]);
  });
  it("filters out models with empty IDs", async () => {
    const mockFetch = vi.fn().mockResolvedValue({
      ok: true,
      json: async () => ({
        data: [
          { id: "anthropic.claude-sonnet-4-6", object: "model" },
          { id: "", object: "model" },
          { id: " ", object: "model" },
        ],
      }),
    });
    const models = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    expect(models).toHaveLength(1);
    expect(models[0]?.id).toBe("anthropic.claude-sonnet-4-6");
  });
  // ---------------------------------------------------------------------------
  // Discovery caching
  // ---------------------------------------------------------------------------
  it("returns cached models on subsequent calls within refresh interval", async () => {
    // Injected clock lets the test step time forward deterministically.
    let now = 1000000;
    const mockFetch = vi.fn().mockResolvedValue({
      ok: true,
      json: async () => ({
        data: [{ id: "anthropic.claude-sonnet-4-6", object: "model" }],
      }),
    });
    // First call — hits the network
    const first = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
      now: () => now,
    });
    expect(first).toHaveLength(1);
    expect(mockFetch).toHaveBeenCalledTimes(1);
    // Second call within refresh interval — uses cache
    now += 60_000; // 1 minute later
    const second = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
      now: () => now,
    });
    expect(second).toHaveLength(1);
    expect(mockFetch).toHaveBeenCalledTimes(1); // No additional fetch
    // Third call after refresh interval — re-fetches
    now += 3600_000; // 1 hour later
    const third = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
      now: () => now,
    });
    expect(third).toHaveLength(1);
    expect(mockFetch).toHaveBeenCalledTimes(2); // Re-fetched
  });
  it("returns stale cache on fetch failure", async () => {
    let now = 1000000;
    const mockFetch = vi
      .fn()
      .mockResolvedValueOnce({
        ok: true,
        json: async () => ({
          data: [{ id: "anthropic.claude-sonnet-4-6", object: "model" }],
        }),
      })
      .mockRejectedValueOnce(new Error("ECONNREFUSED"));
    // First call — succeeds
    await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
      now: () => now,
    });
    // Second call after expiry — fails but returns stale cache
    now += 7200_000;
    const stale = await discoverMantleModels({
      region: "us-east-1",
      bearerToken: "test-token",
      fetchFn: mockFetch as unknown as typeof fetch,
      now: () => now,
    });
    expect(stale).toHaveLength(1);
    expect(stale[0]?.id).toBe("anthropic.claude-sonnet-4-6");
  });
  // ---------------------------------------------------------------------------
  // Implicit provider resolution
  // ---------------------------------------------------------------------------
  it("resolves implicit provider when bearer token is set", async () => {
    const mockFetch = vi.fn().mockResolvedValue({
      ok: true,
      json: async () => ({
        data: [{ id: "anthropic.claude-sonnet-4-6", object: "model" }],
      }),
    });
    const provider = await resolveImplicitMantleProvider({
      env: {
        AWS_BEARER_TOKEN_BEDROCK: "my-token", // pragma: allowlist secret
        AWS_REGION: "us-east-1",
      } as NodeJS.ProcessEnv,
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    expect(provider).not.toBeNull();
    expect(provider?.baseUrl).toBe("https://bedrock-mantle.us-east-1.api.aws/v1");
    expect(provider?.api).toBe("openai-completions");
    expect(provider?.auth).toBe("api-key");
    // apiKey is an env-var reference, never the raw secret.
    expect(provider?.apiKey).toBe("env:AWS_BEARER_TOKEN_BEDROCK");
    expect(provider?.models).toHaveLength(1);
  });
  it("returns null when no bearer token is available", async () => {
    const provider = await resolveImplicitMantleProvider({
      env: {} as NodeJS.ProcessEnv,
    });
    expect(provider).toBeNull();
  });
  it("does not infer Mantle auth from plain IAM env vars alone", async () => {
    const provider = await resolveImplicitMantleProvider({
      env: {
        AWS_PROFILE: "default",
        AWS_REGION: "us-east-1",
      } as NodeJS.ProcessEnv,
    });
    expect(provider).toBeNull();
  });
  it("returns null for unsupported regions", async () => {
    const provider = await resolveImplicitMantleProvider({
      env: {
        AWS_BEARER_TOKEN_BEDROCK: "my-token", // pragma: allowlist secret
        AWS_REGION: "af-south-1",
      } as NodeJS.ProcessEnv,
    });
    expect(provider).toBeNull();
  });
  it("defaults to us-east-1 when no region is set", async () => {
    const mockFetch = vi.fn().mockResolvedValue({
      ok: true,
      json: async () => ({ data: [{ id: "openai.gpt-oss-120b", object: "model" }] }),
    });
    const provider = await resolveImplicitMantleProvider({
      env: {
        AWS_BEARER_TOKEN_BEDROCK: "my-token", // pragma: allowlist secret
      } as NodeJS.ProcessEnv,
      fetchFn: mockFetch as unknown as typeof fetch,
    });
    expect(provider?.baseUrl).toBe("https://bedrock-mantle.us-east-1.api.aws/v1");
    expect(mockFetch).toHaveBeenCalledWith(
      "https://bedrock-mantle.us-east-1.api.aws/v1/models",
      expect.anything(),
    );
  });
  // ---------------------------------------------------------------------------
  // Provider merging
  // ---------------------------------------------------------------------------
  it("merges implicit models when existing provider has empty models", () => {
    const result = mergeImplicitMantleProvider({
      existing: {
        baseUrl: "https://custom.example.com/v1",
        models: [],
      },
      implicit: {
        baseUrl: "https://bedrock-mantle.us-east-1.api.aws/v1",
        api: "openai-completions",
        auth: "api-key",
        apiKey: "env:AWS_BEARER_TOKEN_BEDROCK",
        models: [
          {
            id: "openai.gpt-oss-120b",
            name: "GPT-OSS 120B",
            reasoning: true,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 32000,
            maxTokens: 4096,
          },
        ],
      },
    });
    // Explicit user config (baseUrl) wins; implicit models fill the gap.
    expect(result.baseUrl).toBe("https://custom.example.com/v1");
    expect(result.models?.map((m) => m.id)).toEqual(["openai.gpt-oss-120b"]);
  });
  it("preserves existing models over implicit ones", () => {
    const result = mergeImplicitMantleProvider({
      existing: {
        baseUrl: "https://bedrock-mantle.us-east-1.api.aws/v1",
        models: [
          {
            id: "custom-model",
            name: "My Custom Model",
            reasoning: false,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 64000,
            maxTokens: 8192,
          },
        ],
      },
      implicit: {
        baseUrl: "https://bedrock-mantle.us-east-1.api.aws/v1",
        api: "openai-completions",
        auth: "api-key",
        models: [
          {
            id: "openai.gpt-oss-120b",
            name: "GPT-OSS 120B",
            reasoning: true,
            input: ["text"],
            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
            contextWindow: 32000,
            maxTokens: 4096,
          },
        ],
      },
    });
    expect(result.models?.map((m) => m.id)).toEqual(["custom-model"]);
  });
});

View File

@ -0,0 +1,261 @@
import { createSubsystemLogger } from "openclaw/plugin-sdk/core";
import type {
ModelDefinitionConfig,
ModelProviderConfig,
} from "openclaw/plugin-sdk/provider-model-shared";
// Subsystem logger; discovery failures are reported at debug level only.
const log = createSubsystemLogger("bedrock-mantle-discovery");
// Mantle's /v1/models response carries no pricing or limit metadata, so
// discovered models fall back to these defaults (see discoverMantleModels).
const DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
const DEFAULT_CONTEXT_WINDOW = 32000;
const DEFAULT_MAX_TOKENS = 4096;
const DEFAULT_REFRESH_INTERVAL_SECONDS = 3600; // 1 hour
// ---------------------------------------------------------------------------
// Mantle region & endpoint helpers
// ---------------------------------------------------------------------------
// Regions where the bedrock-mantle.<region>.api.aws endpoint is available.
// NOTE(review): assumed to mirror AWS regional availability — verify when
// AWS expands or retires Mantle regions.
const MANTLE_SUPPORTED_REGIONS = [
  "us-east-1",
  "us-east-2",
  "us-west-2",
  "ap-northeast-1",
  "ap-south-1",
  "ap-southeast-3",
  "eu-central-1",
  "eu-west-1",
  "eu-west-2",
  "eu-south-1",
  "eu-north-1",
  "sa-east-1",
] as const;
/** Build the regional Mantle base URL (no path, no trailing slash). */
function mantleEndpoint(region: string): string {
  const host = `bedrock-mantle.${region}.api.aws`;
  return `https://${host}`;
}
/** True when Mantle is deployed in the given AWS region. */
function isSupportedRegion(region: string): boolean {
  // `.some` keeps the readonly tuple type intact, avoiding the
  // `as readonly string[]` widening that `.includes` would need.
  return MANTLE_SUPPORTED_REGIONS.some((supported) => supported === region);
}
// ---------------------------------------------------------------------------
// Bearer token resolution
// ---------------------------------------------------------------------------
export type MantleBearerTokenProvider = () => Promise<string>;
/**
 * Resolve the bearer token used to authenticate against Mantle.
 *
 * Only `AWS_BEARER_TOKEN_BEDROCK` is consulted; its trimmed value is returned,
 * or `undefined` when the variable is unset or blank.
 *
 * Mantle's OpenAI-compatible surface expects a bearer token today in OpenClaw.
 * Plain IAM credentials (instance roles, SSO, access keys) are not enough
 * until we wire in SigV4-derived token generation via `@aws/bedrock-token-generator`.
 */
export function resolveMantleBearerToken(env: NodeJS.ProcessEnv = process.env): string | undefined {
  const token = env.AWS_BEARER_TOKEN_BEDROCK?.trim();
  return token ? token : undefined;
}
// ---------------------------------------------------------------------------
// OpenAI-format model list response
// ---------------------------------------------------------------------------
/** Single entry in the OpenAI-style `GET /v1/models` response. */
interface OpenAIModelEntry {
  id: string;
  object?: string;
  owned_by?: string;
  created?: number;
}
/** Envelope of the OpenAI-style model list response. */
interface OpenAIModelsResponse {
  data?: OpenAIModelEntry[];
  object?: string;
}
// ---------------------------------------------------------------------------
// Reasoning heuristic
// ---------------------------------------------------------------------------
/** Model ID substrings that indicate reasoning/thinking support. */
const REASONING_PATTERNS = [
  "thinking",
  "reasoner",
  "reasoning",
  "deepseek.r",
  "gpt-oss-120b", // GPT-OSS 120B supports reasoning
  "gpt-oss-safeguard-120b",
];
/** Heuristic: does this model ID suggest reasoning/thinking support? */
function inferReasoningSupport(modelId: string): boolean {
  // Case-insensitive substring match against the known markers above.
  const normalized = modelId.toLowerCase();
  for (const pattern of REASONING_PATTERNS) {
    if (normalized.includes(pattern)) {
      return true;
    }
  }
  return false;
}
// ---------------------------------------------------------------------------
// Discovery cache
// ---------------------------------------------------------------------------
/** Cached discovery result for a single region. */
interface MantleCacheEntry {
  models: ModelDefinitionConfig[];
  fetchedAt: number; // ms timestamp from the caller-supplied clock (Date.now by default)
}
// Keyed by region; module-level so entries persist across discovery calls.
const discoveryCache = new Map<string, MantleCacheEntry>();
/** Clear the discovery cache (for testing). */
export function resetMantleDiscoveryCacheForTest(): void {
  discoveryCache.clear();
}
// ---------------------------------------------------------------------------
// Model discovery
// ---------------------------------------------------------------------------
/**
 * Discover available models from the Mantle `/v1/models` endpoint.
 *
 * The response is in standard OpenAI format:
 * ```json
 * { "data": [{ "id": "anthropic.claude-sonnet-4-6", "object": "model", "owned_by": "anthropic" }] }
 * ```
 *
 * Results are cached per region for `DEFAULT_REFRESH_INTERVAL_SECONDS`.
 * On failure (HTTP error, network error, no permission) the most recent
 * cached result for the region is returned when one exists, otherwise an
 * empty array.
 */
export async function discoverMantleModels(params: {
  region: string;
  bearerToken: string;
  fetchFn?: typeof fetch;
  now?: () => number;
}): Promise<ModelDefinitionConfig[]> {
  const { region, bearerToken, fetchFn = fetch, now = Date.now } = params;
  // Serve from cache while the entry is still fresh.
  const cached = discoveryCache.get(region);
  const ttlMs = DEFAULT_REFRESH_INTERVAL_SECONDS * 1000;
  if (cached !== undefined && now() - cached.fetchedAt < ttlMs) {
    return cached.models;
  }
  const url = `${mantleEndpoint(region)}/v1/models`;
  try {
    const res = await fetchFn(url, {
      method: "GET",
      headers: {
        Authorization: `Bearer ${bearerToken}`,
        Accept: "application/json",
      },
    });
    if (!res.ok) {
      log.debug?.("Mantle model discovery failed", {
        status: res.status,
        statusText: res.statusText,
      });
      // Fall back to stale data rather than dropping known models.
      return cached?.models ?? [];
    }
    const payload = (await res.json()) as OpenAIModelsResponse;
    const models: ModelDefinitionConfig[] = [];
    for (const entry of payload.data ?? []) {
      // Skip entries with missing/blank IDs.
      if (!entry.id?.trim()) {
        continue;
      }
      models.push({
        id: entry.id,
        name: entry.id, // Mantle doesn't return display names
        reasoning: inferReasoningSupport(entry.id),
        input: ["text" as const],
        cost: DEFAULT_COST,
        contextWindow: DEFAULT_CONTEXT_WINDOW,
        maxTokens: DEFAULT_MAX_TOKENS,
      });
    }
    models.sort((a, b) => a.id.localeCompare(b.id));
    discoveryCache.set(region, { models, fetchedAt: now() });
    return models;
  } catch (error) {
    log.debug?.("Mantle model discovery error", {
      error: error instanceof Error ? error.message : String(error),
    });
    return cached?.models ?? [];
  }
}
// ---------------------------------------------------------------------------
// Implicit provider resolution
// ---------------------------------------------------------------------------
/**
 * Resolve an implicit Bedrock Mantle provider if bearer-token auth is available.
 *
 * Detection:
 * - AWS_BEARER_TOKEN_BEDROCK is set → Mantle is available
 * - Region from AWS_REGION / AWS_DEFAULT_REGION / default us-east-1
 * - Models discovered from `/v1/models`
 *
 * Returns null when no token is set, the region is unsupported, or
 * discovery yields no models.
 */
export async function resolveImplicitMantleProvider(params: {
  env?: NodeJS.ProcessEnv;
  fetchFn?: typeof fetch;
}): Promise<ModelProviderConfig | null> {
  const env = params.env ?? process.env;
  const bearerToken = resolveMantleBearerToken(env);
  if (bearerToken === undefined) {
    return null;
  }
  const region = env.AWS_REGION ?? env.AWS_DEFAULT_REGION ?? "us-east-1";
  if (!isSupportedRegion(region)) {
    log.debug?.("Mantle not available in region", { region });
    return null;
  }
  const models = await discoverMantleModels({
    region,
    bearerToken,
    fetchFn: params.fetchFn,
  });
  if (!models.length) {
    return null;
  }
  return {
    baseUrl: `${mantleEndpoint(region)}/v1`,
    api: "openai-completions",
    auth: "api-key",
    // Reference the env var by name; never embed the raw secret in config.
    apiKey: "env:AWS_BEARER_TOKEN_BEDROCK",
    models,
  };
}
/**
 * Merge an auto-resolved (implicit) Mantle provider with any user-supplied
 * config. Explicit config fields win; implicit models are used only when the
 * user did not declare a non-empty model list.
 */
export function mergeImplicitMantleProvider(params: {
  existing: ModelProviderConfig | undefined;
  implicit: ModelProviderConfig;
}): ModelProviderConfig {
  const { existing, implicit } = params;
  if (!existing) {
    return implicit;
  }
  const hasExplicitModels = Array.isArray(existing.models) && existing.models.length > 0;
  return {
    ...implicit,
    ...existing,
    models: hasExplicitModels ? existing.models : implicit.models,
  };
}

View File

@ -0,0 +1,24 @@
import { describe, expect, it } from "vitest";
import { registerSingleProviderPlugin } from "../../test/helpers/plugins/plugin-registration.js";
import bedrockMantlePlugin from "./index.js";
// Registration tests: verify the plugin exposes the expected provider ID,
// label, and failover classification behavior.
describe("amazon-bedrock-mantle provider plugin", () => {
  it("registers with correct provider ID and label", async () => {
    const provider = await registerSingleProviderPlugin(bedrockMantlePlugin);
    expect(provider.id).toBe("amazon-bedrock-mantle");
    expect(provider.label).toBe("Amazon Bedrock Mantle (OpenAI-compatible)");
  });
  it("classifies rate limit errors for failover", async () => {
    const provider = await registerSingleProviderPlugin(bedrockMantlePlugin);
    // Both the OpenAI-style error code and the HTTP 429 status text map to rate_limit.
    expect(
      provider.classifyFailoverReason?.({ errorMessage: "rate_limit exceeded" } as never),
    ).toBe("rate_limit");
    expect(
      provider.classifyFailoverReason?.({ errorMessage: "429 Too Many Requests" } as never),
    ).toBe("rate_limit");
    // Unrecognized errors are left unclassified.
    expect(
      provider.classifyFailoverReason?.({ errorMessage: "some other error" } as never),
    ).toBeUndefined();
  });
});

View File

@ -0,0 +1,11 @@
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
import { registerBedrockMantlePlugin } from "./register.sync.runtime.js";
// Plugin entry point: declares the extension's identity and wires the Mantle
// provider registration into the OpenClaw plugin API.
export default definePluginEntry({
  id: "amazon-bedrock-mantle",
  name: "Amazon Bedrock Mantle Provider",
  description: "Bundled Amazon Bedrock Mantle (OpenAI-compatible) provider plugin",
  register(api) {
    registerBedrockMantlePlugin(api);
  },
});

View File

@ -0,0 +1,5 @@
{
"id": "amazon-bedrock-mantle",
"enabledByDefault": true,
"providers": ["amazon-bedrock-mantle"]
}

View File

@ -0,0 +1,15 @@
{
"name": "@openclaw/amazon-bedrock-mantle-provider",
"version": "2026.4.4",
"private": true,
"description": "OpenClaw Amazon Bedrock Mantle (OpenAI-compatible) provider plugin",
"type": "module",
"openclaw": {
"bundle": {
"stageRuntimeDependencies": true
},
"extensions": [
"./index.ts"
]
}
}

View File

@ -0,0 +1,47 @@
import type { OpenClawPluginApi } from "openclaw/plugin-sdk/plugin-entry";
import {
mergeImplicitMantleProvider,
resolveImplicitMantleProvider,
resolveMantleBearerToken,
} from "./discovery.js";
/**
 * Register the Amazon Bedrock Mantle (OpenAI-compatible) provider.
 *
 * The catalog resolver only yields a provider when bearer-token auth is
 * detected (AWS_BEARER_TOKEN_BEDROCK); any explicit user config for the
 * provider ID is merged on top of the auto-resolved one.
 */
export function registerBedrockMantlePlugin(api: OpenClawPluginApi): void {
  const providerId = "amazon-bedrock-mantle";
  // Error-classification patterns, hoisted so the intent is named once.
  const contextOverflowPattern = /context_length_exceeded|max.*tokens.*exceeded/i;
  const rateLimitPattern = /rate_limit|too many requests|429/i;
  const overloadedPattern = /overloaded|503/i;
  api.registerProvider({
    id: providerId,
    label: "Amazon Bedrock Mantle (OpenAI-compatible)",
    docsPath: "/providers/models",
    auth: [],
    catalog: {
      order: "simple",
      run: async (ctx) => {
        const implicit = await resolveImplicitMantleProvider({ env: ctx.env });
        if (implicit === null) {
          // No bearer token / unsupported region / no models: stay silent.
          return null;
        }
        const existing = ctx.config.models?.providers?.[providerId];
        return {
          provider: mergeImplicitMantleProvider({ existing, implicit }),
        };
      },
    },
    // Surface the env var *name* (never the secret) when a token is present.
    resolveConfigApiKey: ({ env }) =>
      resolveMantleBearerToken(env) ? "AWS_BEARER_TOKEN_BEDROCK" : undefined,
    matchesContextOverflowError: ({ errorMessage }) => contextOverflowPattern.test(errorMessage),
    classifyFailoverReason: ({ errorMessage }) => {
      if (rateLimitPattern.test(errorMessage)) {
        return "rate_limit";
      }
      if (overloadedPattern.test(errorMessage)) {
        return "overloaded";
      }
      return undefined;
    },
  });
}

View File

@ -2,6 +2,7 @@ import { bundledPluginRoot } from "./scripts/lib/bundled-plugin-paths.mjs";
export const providerExtensionIds = [
"amazon-bedrock",
"amazon-bedrock-mantle",
"anthropic",
"anthropic-vertex",
"byteplus",