diff --git a/apps/web/client/.env.example b/apps/web/client/.env.example
index e193b284a2..62870dc7cf 100644
--- a/apps/web/client/.env.example
+++ b/apps/web/client/.env.example
@@ -65,6 +65,9 @@ GITHUB_APP_PRIVATE_KEY=""
 
 # ------------- Optional: Alternative LLM providers -------------
 
+# MiniMax
+MINIMAX_API_KEY=""
+
 # Anthropic
 ANTHROPIC_API_KEY=""
 
diff --git a/apps/web/client/src/env.ts b/apps/web/client/src/env.ts
index 449a3d7ea7..5d05e83d82 100644
--- a/apps/web/client/src/env.ts
+++ b/apps/web/client/src/env.ts
@@ -37,6 +37,7 @@ export const env = createEnv({
         ANTHROPIC_API_KEY: z.string().optional(),
         GOOGLE_AI_STUDIO_API_KEY: z.string().optional(),
         OPENAI_API_KEY: z.string().optional(),
+        MINIMAX_API_KEY: z.string().optional(),
 
         // n8n
         N8N_WEBHOOK_URL: z.string().optional(),
@@ -130,6 +131,7 @@ export const env = createEnv({
         GOOGLE_AI_STUDIO_API_KEY: process.env.GOOGLE_AI_STUDIO_API_KEY,
         OPENAI_API_KEY: process.env.OPENAI_API_KEY,
         OPENROUTER_API_KEY: process.env.OPENROUTER_API_KEY,
+        MINIMAX_API_KEY: process.env.MINIMAX_API_KEY,
 
         // n8n
         N8N_WEBHOOK_URL: process.env.N8N_WEBHOOK_URL,
diff --git a/docs/content/docs/self-hosting/external-services.mdx b/docs/content/docs/self-hosting/external-services.mdx
index 89866c6eb7..be7ef6cf6f 100644
--- a/docs/content/docs/self-hosting/external-services.mdx
+++ b/docs/content/docs/self-hosting/external-services.mdx
@@ -21,8 +21,12 @@ For assistance with self-hosting these services, please contact us at [founders@
 ### 3. AI Providers
 
 To configure custom AI providers:
-1. Update the providers in [`packages/ai/src/chat/providers.ts`](https://github.com/onlook-dev/onlook/blob/main/packages/ai/src/chat/providers.ts). We already support Anthropic and OpenRouter as examples. You can follow the same format to add a new provider.
+1. Update the providers in [`packages/ai/src/chat/providers.ts`](https://github.com/onlook-dev/onlook/blob/main/packages/ai/src/chat/providers.ts). We already support OpenRouter, MiniMax, and Anthropic as examples. You can follow the same format to add a new provider.
 2. Update the usages by searching for [`initModel`](https://github.com/search?q=repo%3Aonlook-dev%2Fonlook+%22await+initModel%22&type=code)
 3. Update your API keys in the `apps/web/client/.env` file to the provider's expected API keys.
 
+Built-in providers:
+- **OpenRouter** (default) — access to multiple models via a single API key
+- **[MiniMax](https://platform.minimaxi.com)** — MiniMax-M2.7 models with 204K context window
+
 Note: We support any provider from the [AI SDK providers](https://ai-sdk.dev/providers/ai-sdk-providers). You can add a custom provider by following these AI SDK provider guides: [OpenAI compatible](https://ai-sdk.dev/providers/openai-compatible-providers/custom-providers) and [Community](https://ai-sdk.dev/providers/community-providers/custom-providers).
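The `env.ts` hunks above declare the key twice, once in the zod `server` schema and once in `runtimeEnv`. A minimal standalone sketch of that pattern follows; it assumes `createEnv` in this file comes from `@t3-oss/env-nextjs` (the import is outside the hunk), and it is illustrative rather than a copy of the repo's file:

```ts
import { createEnv } from '@t3-oss/env-nextjs';
import { z } from 'zod';

// Sketch of the validation pattern in apps/web/client/src/env.ts.
// The key is optional, so a missing MINIMAX_API_KEY does not fail startup;
// getMinimaxProvider in packages/ai checks for it at call time instead.
export const env = createEnv({
    server: {
        MINIMAX_API_KEY: z.string().optional(),
    },
    runtimeEnv: {
        MINIMAX_API_KEY: process.env.MINIMAX_API_KEY,
    },
});

// Server code can now read the typed value: env.MINIMAX_API_KEY is string | undefined.
```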
diff --git a/packages/ai/package.json b/packages/ai/package.json
index c81655cff8..3d4627270e 100644
--- a/packages/ai/package.json
+++ b/packages/ai/package.json
@@ -33,6 +33,7 @@
         "typescript": "^5.5.4"
     },
     "dependencies": {
+        "@ai-sdk/openai-compatible": "^1.0.34",
         "@mendable/firecrawl-js": "^1.29.1",
         "@openrouter/ai-sdk-provider": "^1.2.0",
         "ai": "5.0.60",
diff --git a/packages/ai/src/chat/providers.ts b/packages/ai/src/chat/providers.ts
index 3dd4eb6582..473a535f82 100644
--- a/packages/ai/src/chat/providers.ts
+++ b/packages/ai/src/chat/providers.ts
@@ -1,11 +1,13 @@
 import {
     LLMProvider,
+    MINIMAX_MODELS,
     MODEL_MAX_TOKENS,
     OPENROUTER_MODELS,
     type InitialModelPayload,
     type ModelConfig
 } from '@onlook/models';
 import { assertNever } from '@onlook/utility';
+import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
 import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import type { LanguageModel } from 'ai';
 
@@ -33,6 +35,9 @@ export function initModel({
                 ? { ...providerOptions, anthropic: { cacheControl: { type: 'ephemeral' } } }
                 : providerOptions;
             break;
+        case LLMProvider.MINIMAX:
+            model = getMinimaxProvider(requestedModel);
+            break;
         default:
             assertNever(requestedProvider);
     }
@@ -52,3 +57,15 @@ function getOpenRouterProvider(model: OPENROUTER_MODELS): LanguageModel {
     const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });
     return openrouter(model);
 }
+
+function getMinimaxProvider(model: MINIMAX_MODELS): LanguageModel {
+    if (!process.env.MINIMAX_API_KEY) {
+        throw new Error('MINIMAX_API_KEY must be set');
+    }
+    const minimax = createOpenAICompatible({
+        name: 'minimax',
+        baseURL: 'https://api.minimax.io/v1',
+        apiKey: process.env.MINIMAX_API_KEY,
+    });
+    return minimax(model);
+}
diff --git a/packages/models/src/llm/index.ts b/packages/models/src/llm/index.ts
index 6eb7acf015..066aa034d8 100644
--- a/packages/models/src/llm/index.ts
+++ b/packages/models/src/llm/index.ts
@@ -2,6 +2,7 @@ import type { LanguageModel } from 'ai';
 
 export enum LLMProvider {
     OPENROUTER = 'openrouter',
+    MINIMAX = 'minimax',
 }
 
 export enum OPENROUTER_MODELS {
@@ -13,8 +14,16 @@
     OPEN_AI_GPT_5_NANO = 'openai/gpt-5-nano',
 }
 
+export enum MINIMAX_MODELS {
+    MINIMAX_M2_7 = 'MiniMax-M2.7',
+    MINIMAX_M2_7_HIGHSPEED = 'MiniMax-M2.7-highspeed',
+    MINIMAX_M2_5 = 'MiniMax-M2.5',
+    MINIMAX_M2_5_HIGHSPEED = 'MiniMax-M2.5-highspeed',
+}
+
 interface ModelMapping {
     [LLMProvider.OPENROUTER]: OPENROUTER_MODELS;
+    [LLMProvider.MINIMAX]: MINIMAX_MODELS;
 }
 
 export type InitialModelPayload = {
@@ -37,4 +46,8 @@ export const MODEL_MAX_TOKENS = {
     [OPENROUTER_MODELS.OPEN_AI_GPT_5_NANO]: 400000,
     [OPENROUTER_MODELS.OPEN_AI_GPT_5_MINI]: 400000,
     [OPENROUTER_MODELS.OPEN_AI_GPT_5]: 400000,
+    [MINIMAX_MODELS.MINIMAX_M2_7]: 204000,
+    [MINIMAX_MODELS.MINIMAX_M2_7_HIGHSPEED]: 204000,
+    [MINIMAX_MODELS.MINIMAX_M2_5]: 204000,
+    [MINIMAX_MODELS.MINIMAX_M2_5_HIGHSPEED]: 204000,
 } as const;
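For reference, a hypothetical call sketch showing how the new `LLMProvider.MINIMAX` branch is reached. It assumes `initModel` is re-exported from the `@onlook/ai` package, that `InitialModelPayload`'s fields are named `requestedProvider` and `requestedModel` (as the destructuring in `initModel` suggests), and that the returned `ModelConfig` carries the resolved model; none of this is shown verbatim in the diff:

```ts
import { initModel } from '@onlook/ai';
import { LLMProvider, MINIMAX_MODELS } from '@onlook/models';

// Requires MINIMAX_API_KEY in the environment; getMinimaxProvider throws otherwise.
const config = initModel({
    requestedProvider: LLMProvider.MINIMAX,
    requestedModel: MINIMAX_MODELS.MINIMAX_M2_7,
});

// config is a ModelConfig; its LanguageModel can then be passed to AI SDK
// calls such as streamText, the same way the OpenRouter models are used.
```

Because the MiniMax branch goes through `createOpenAICompatible`, no dedicated provider SDK is needed: any endpoint speaking the OpenAI chat-completions protocol can be wired up the same way by swapping `name`, `baseURL`, and the API key.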