Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 53 additions & 0 deletions examples/configs/llm/minimax/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# MiniMax LLM Configuration

This example shows how to configure [MiniMax](https://www.minimax.io/) models with NeMo Guardrails.

MiniMax provides an OpenAI-compatible API, so the integration uses `ChatOpenAI` from `langchain-openai` under the hood with MiniMax's API endpoint.

## Prerequisites

1. Install the `langchain-openai` package:

   ```bash
   pip install langchain-openai
   ```

2. Set your MiniMax API key:

   ```bash
   export MINIMAX_API_KEY="your-api-key-here"
   ```

## Available Models

| Model | Context Window | Description |
|-------|---------------|-------------|
| `MiniMax-M2.7` | 1M tokens | Latest flagship model |
| `MiniMax-M2.5` | 1M tokens | Balanced performance |
| `MiniMax-M2.5-highspeed` | 204K tokens | High throughput variant |

## Configuration

```yaml
models:
- type: main
engine: minimax
model: MiniMax-M2.5
parameters:
temperature: 0.7
```

You can also pass the API key directly via the configuration:

```yaml
models:
- type: main
engine: minimax
model: MiniMax-M2.7
parameters:
api_key: ${MINIMAX_API_KEY}
temperature: 0.5
```

## Notes

- Temperature must be between 0 and 1 for MiniMax models (values are automatically clamped).
- The default API base URL is `https://api.minimax.io/v1`. You can override it via the `base_url` parameter.
7 changes: 7 additions & 0 deletions examples/configs/llm/minimax/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Example NeMo Guardrails configuration using MiniMax as the main LLM.
# The `minimax` engine talks to MiniMax's OpenAI-compatible endpoint and
# reads the API key from the MINIMAX_API_KEY environment variable unless
# an `api_key` parameter is provided.
models:
  - type: main
    engine: minimax
    model: MiniMax-M2.5
    parameters:
      # Temperature must be between 0 and 1 for MiniMax models.
      temperature: 0.7
45 changes: 45 additions & 0 deletions nemoguardrails/llm/models/langchain_initializer.py
Original file line number Diff line number Diff line change
Expand Up @@ -338,13 +338,58 @@ def _init_nvidia_model(model_name: str, provider_name: str, kwargs: Dict[str, An
)


def _init_minimax_model(model_name: str, provider_name: str, kwargs: Dict[str, Any]) -> BaseChatModel:
    """Initialize MiniMax model via their OpenAI-compatible API.

    MiniMax provides an OpenAI-compatible API endpoint, so we use ChatOpenAI
    with a custom base_url pointing to MiniMax's API.

    Args:
        model_name: Name of the model to initialize (e.g., MiniMax-M2.5, MiniMax-M2.7)
        provider_name: Name of the provider to use
        kwargs: Additional arguments to pass to the model initialization.
            Supports ``api_key`` (or the ``MINIMAX_API_KEY`` env var) and
            ``base_url`` (defaults to ``https://api.minimax.io/v1``).

    Returns:
        An initialized chat model

    Raises:
        ImportError: If langchain-openai is not installed
    """
    import os

    try:
        from langchain_openai import ChatOpenAI
    except ImportError as exc:
        # Chain the original error so the real import failure stays visible.
        raise ImportError(
            "Could not import langchain_openai, please install it with "
            "`pip install langchain-openai`."
        ) from exc

    # Explicit api_key takes precedence over the environment variable.
    api_key = kwargs.pop("api_key", None) or os.environ.get("MINIMAX_API_KEY", "")
    base_url = kwargs.pop("base_url", "https://api.minimax.io/v1")

    # MiniMax rejects temperatures outside [0, 1]; clamp instead of failing.
    if "temperature" in kwargs:
        kwargs["temperature"] = max(0.0, min(1.0, float(kwargs["temperature"])))

    return ChatOpenAI(
        model=model_name,
        api_key=api_key,
        base_url=base_url,
        **kwargs,
    )


# Models that need special-case construction regardless of provider,
# keyed by exact model name.
_SPECIAL_MODEL_INITIALIZERS = {
    "gpt-3.5-turbo-instruct": _init_gpt35_turbo_instruct,
}

# Providers whose models are built by a dedicated initializer instead of
# the generic LangChain path, keyed by provider name.
_PROVIDER_INITIALIZERS: Dict[str, Any] = {
    "nvidia_ai_endpoints": _init_nvidia_model,
    "nim": _init_nvidia_model,  # alias for nvidia_ai_endpoints
    "minimax": _init_minimax_model,  # OpenAI-compatible MiniMax endpoint
}


Expand Down
247 changes: 247 additions & 0 deletions tests/llm/models/test_minimax_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,247 @@
# SPDX-FileCopyrightText: Copyright (c) 2023-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Tests for the MiniMax provider initializer.

This module contains unit tests for the MiniMax provider support in NeMo Guardrails,
and integration tests that verify end-to-end model initialization via the provider
initializer registry.
"""

import os
from unittest.mock import MagicMock, patch

import pytest
from langchain_core.language_models import BaseChatModel

from nemoguardrails.llm.models.langchain_initializer import (
_PROVIDER_INITIALIZERS,
_handle_model_special_cases,
_init_minimax_model,
)


def has_langchain_openai():
    """Return True when the optional ``langchain-openai`` package is importable."""
    try:
        import langchain_openai  # noqa: F401
    except ImportError:
        return False
    return True


# ---------------------------------------------------------------------------
# Unit tests
# ---------------------------------------------------------------------------


class TestMiniMaxProviderRegistry:
    """Verify that the MiniMax provider is wired into the initializer registry."""

    def test_minimax_in_provider_initializers(self):
        """'minimax' must be registered and map to _init_minimax_model."""
        assert "minimax" in _PROVIDER_INITIALIZERS
        registered = _PROVIDER_INITIALIZERS.get("minimax")
        assert registered == _init_minimax_model

    def test_handle_special_cases_dispatches_minimax(self):
        """_handle_model_special_cases must delegate to the registered initializer."""
        fake_model = MagicMock(spec=BaseChatModel)
        fake_init = MagicMock(return_value=fake_model)

        with patch.dict(_PROVIDER_INITIALIZERS, {"minimax": fake_init}):
            returned = _handle_model_special_cases("MiniMax-M2.5", "minimax", {})

        fake_init.assert_called_once_with("MiniMax-M2.5", "minimax", {})
        assert returned == fake_model

    def test_handle_special_cases_no_minimax_for_other_providers(self):
        """Providers other than minimax must not trigger the MiniMax path."""
        assert _handle_model_special_cases("some-model", "some-provider", {}) is None


@pytest.mark.skipif(
    not has_langchain_openai(), reason="langchain-openai package not installed"
)
class TestMiniMaxModelInitialization:
    """Unit tests for the _init_minimax_model function."""

    def test_init_minimax_model_creates_chat_openai(self):
        """The initializer should return a usable chat-model instance."""
        llm = _init_minimax_model("MiniMax-M2.5", "minimax", {"api_key": "test-key"})
        assert llm is not None
        assert isinstance(llm, BaseChatModel)
        for method_name in ("invoke", "generate"):
            assert hasattr(llm, method_name)

    def test_init_minimax_model_default_base_url(self):
        """Without an explicit base_url, MiniMax's public endpoint is used."""
        llm = _init_minimax_model("MiniMax-M2.5", "minimax", {"api_key": "test-key"})
        assert "api.minimax.io" in str(llm.openai_api_base)

    def test_init_minimax_model_custom_base_url(self):
        """An explicit base_url must override the default endpoint."""
        params = {"api_key": "test-key", "base_url": "https://custom.endpoint.com/v1"}
        llm = _init_minimax_model("MiniMax-M2.5", "minimax", params)
        assert "custom.endpoint.com" in str(llm.openai_api_base)

    def test_init_minimax_model_temperature_clamping_high(self):
        """A temperature above 1.0 must be clamped down to 1.0."""
        llm = _init_minimax_model(
            "MiniMax-M2.5", "minimax", {"api_key": "test-key", "temperature": 2.0}
        )
        assert llm.temperature <= 1.0

    def test_init_minimax_model_temperature_clamping_low(self):
        """A negative temperature must be clamped up to 0.0."""
        llm = _init_minimax_model(
            "MiniMax-M2.5", "minimax", {"api_key": "test-key", "temperature": -0.5}
        )
        assert llm.temperature >= 0.0

    def test_init_minimax_model_valid_temperature(self):
        """A temperature already inside [0, 1] must pass through unchanged."""
        llm = _init_minimax_model(
            "MiniMax-M2.5", "minimax", {"api_key": "test-key", "temperature": 0.7}
        )
        assert llm.temperature == pytest.approx(0.7)

    def test_init_minimax_model_api_key_from_env(self):
        """With no api_key kwarg, the MINIMAX_API_KEY env var is used."""
        with patch.dict(os.environ, {"MINIMAX_API_KEY": "env-test-key"}):
            llm = _init_minimax_model("MiniMax-M2.5", "minimax", {})
        assert llm is not None
        assert isinstance(llm, BaseChatModel)

    def test_init_minimax_model_api_key_kwarg_takes_precedence(self):
        """An explicit api_key kwarg must win over the env var."""
        with patch.dict(os.environ, {"MINIMAX_API_KEY": "env-key"}):
            llm = _init_minimax_model("MiniMax-M2.5", "minimax", {"api_key": "kwarg-key"})
        assert llm is not None

    def test_init_minimax_model_m27(self):
        """The MiniMax-M2.7 model name must initialize successfully."""
        llm = _init_minimax_model("MiniMax-M2.7", "minimax", {"api_key": "test-key"})
        assert llm is not None
        assert isinstance(llm, BaseChatModel)

    def test_init_minimax_model_m25_highspeed(self):
        """The MiniMax-M2.5-highspeed model name must initialize successfully."""
        llm = _init_minimax_model(
            "MiniMax-M2.5-highspeed", "minimax", {"api_key": "test-key"}
        )
        assert llm is not None
        assert isinstance(llm, BaseChatModel)


class TestMiniMaxImportError:
    """Behavior when the optional langchain-openai dependency is missing."""

    def test_init_minimax_model_import_error(self):
        """A descriptive ImportError must surface without langchain-openai."""
        installed = has_langchain_openai()
        if installed:
            pytest.skip("langchain-openai package is installed")
        with pytest.raises(ImportError, match="langchain_openai"):
            _init_minimax_model("MiniMax-M2.5", "minimax", {})


# ---------------------------------------------------------------------------
# Integration tests
# ---------------------------------------------------------------------------


@pytest.mark.skipif(
    not has_langchain_openai(), reason="langchain-openai package not installed"
)
class TestMiniMaxIntegration:
    """End-to-end checks of MiniMax model initialization through the full
    provider initializer chain."""

    def test_init_via_handle_special_cases(self):
        """Initialization must succeed through _handle_model_special_cases."""
        params = {"api_key": "integration-test-key", "temperature": 0.5}
        model = _handle_model_special_cases("MiniMax-M2.5", "minimax", params)
        assert model is not None
        assert isinstance(model, BaseChatModel)

    def test_init_via_handle_special_cases_with_custom_params(self):
        """Custom parameters must flow through the chain unchanged."""
        params = {
            "api_key": "integration-test-key",
            "temperature": 0.3,
            "max_tokens": 2048,
        }
        model = _handle_model_special_cases("MiniMax-M2.7", "minimax", params)
        assert model is not None
        assert isinstance(model, BaseChatModel)

    @pytest.mark.skipif(
        not os.environ.get("MINIMAX_API_KEY"),
        reason="MINIMAX_API_KEY not set",
    )
    def test_live_minimax_model_init(self):
        """Live smoke test against the real MiniMax API.

        This test requires a valid MINIMAX_API_KEY environment variable.
        """
        model = _init_minimax_model("MiniMax-M2.5", "minimax", {"temperature": 0.7})
        assert model is not None
        assert isinstance(model, BaseChatModel)