README.md: 2 changes (1 addition & 1 deletion)
@@ -97,7 +97,7 @@ pipenv sync
Set your OpenAI API key by updating the configuration file `config/llm_env.yml`
- If you need help locating your API key, visit this [link](https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key).

- We recommend using [OpenAI's GPT-4](https://platform.openai.com/docs/guides/gpt) for the LLM. Our framework also supports other providers and open-source models, as discussed [here](docs/installation.md#configure-your-llm).
- We recommend using [OpenAI's GPT-4](https://platform.openai.com/docs/guides/gpt) for the LLM. Our framework also supports other providers including [MiniMax](https://www.minimax.io/) (M2.7 / M2.5 models), Anthropic, Google, Azure, and open-source models, as discussed [here](docs/installation.md#configure-your-llm).

<br />

config/config_default.yml: 2 changes (1 addition & 1 deletion)
@@ -49,7 +49,7 @@ eval:

llm:
name: 'gpt-4-1106-preview' # This is the meta-prompt LLM, it should be a strong model. For example, using GPT-3.5 will cause an error in many cases.
type: 'OpenAI' # Can be OpenAI, Anthropic, Google, Azure
type: 'OpenAI' # Can be OpenAI, MiniMax, Anthropic, Google, Azure
temperature: 0.8

stop_criteria:
config/llm_env.yml: 6 changes (5 additions & 1 deletion)
@@ -11,4 +11,8 @@ azure:
OPENAI_API_VERSION: ''

google:
GOOGLE_API_KEY: ''
GOOGLE_API_KEY: ''

minimax:
MINIMAX_API_KEY: ''
MINIMAX_API_BASE: 'https://api.minimax.io/v1'
docs/installation.md: 16 changes (15 additions & 1 deletion)
@@ -29,7 +29,21 @@ pipenv sync

Set your OpenAI API key in the configuration file `config/llm_env.yml`. For assistance locating your API key, visit this [link](https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key).

- For LLM, we recommend using [OpenAI's GPT-4](https://platform.openai.com/docs/guides/gpt). Alternatively, configure Azure by setting llm type in `config/config_default.yml` to `"Azure"` and specifying the key in `config/llm_env.yml`. Our system also supports various LLMs, including open source models, through [Langchain Pipeline](https://python.langchain.com/docs/integrations/llms/huggingface_pipelines). Change the llm `type` to `"HuggingFacePipeline"` and specify the model ID in the llm `name` field.
- For LLM, we recommend using [OpenAI's GPT-4](https://platform.openai.com/docs/guides/gpt). Alternatively, configure Azure by setting llm type in `config/config_default.yml` to `"Azure"` and specifying the key in `config/llm_env.yml`. Our system also supports various LLMs, including open source models, through [Langchain Pipeline](https://python.langchain.com/docs/integrations/llms/huggingface_pipelines). Change the llm `type` to `"HuggingFacePipeline"` and specify the model ID in the llm `name` field.

- **Configure MiniMax.** To use [MiniMax](https://www.minimax.io/) models (e.g., MiniMax-M2.7), set the llm `type` to `"MiniMax"` and `name` to the model identifier in `config/config_default.yml`. Add your MiniMax API key in `config/llm_env.yml` under the `minimax` section:
```yaml
# config/config_default.yml
llm:
name: 'MiniMax-M2.7'
type: 'MiniMax'
temperature: 0.8

# config/llm_env.yml
minimax:
MINIMAX_API_KEY: 'your-api-key'
MINIMAX_API_BASE: 'https://api.minimax.io/v1'
```
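
  You can sanity-check this configuration by loading the model through the project's `get_llm` helper. Below is a minimal smoke-test sketch, not part of the pipeline; it assumes a valid key in `config/llm_env.yml`, network access, and an illustrative prompt string:

```python
# Minimal smoke test for the MiniMax configuration (a sketch):
# get_llm routes type 'MiniMax' through ChatOpenAI with the MiniMax base URL.
from utils.config import get_llm

llm = get_llm({'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.8})
reply = llm.invoke('Say hello in one word.')  # returns an AIMessage
print(reply.content)
```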

- **Configure your Predictor**. We employ a predictor to estimate prompt performance. The default predictor LLM is GPT-3.5. Configuration is located in the `predictor` section of `config/config_default.yml`.

tests/__init__.py: empty file added
tests/test_minimax_integration.py: 45 changes (45 additions & 0 deletions)
@@ -0,0 +1,45 @@
"""Integration tests for MiniMax provider (require MINIMAX_API_KEY)."""
import os
import sys
import unittest

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

MINIMAX_API_KEY = os.environ.get('MINIMAX_API_KEY', '')


@unittest.skipUnless(MINIMAX_API_KEY, 'MINIMAX_API_KEY not set')
class TestMiniMaxIntegration(unittest.TestCase):
"""End-to-end tests that call the real MiniMax API."""

def _get_llm(self, model='MiniMax-M2.7', temperature=0.5):
from unittest.mock import patch
env = {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
'minimax': {'MINIMAX_API_KEY': MINIMAX_API_KEY, 'MINIMAX_API_BASE': 'https://api.minimax.io/v1'},
}
with patch('utils.config.LLM_ENV', env):
from utils.config import get_llm
return get_llm({'type': 'MiniMax', 'name': model, 'temperature': temperature})

def test_simple_completion(self):
"""MiniMax M2.7 can complete a simple prompt."""
llm = self._get_llm()
result = llm.invoke('Say hello in one word.')
self.assertTrue(len(result.content) > 0)

def test_highspeed_model(self):
"""MiniMax M2.7-highspeed responds correctly."""
llm = self._get_llm(model='MiniMax-M2.7-highspeed')
result = llm.invoke('What is 2+2? Reply with just the number.')
self.assertIn('4', result.content)

def test_temperature_clamping_zero(self):
"""Temperature 0 is clamped and model still works."""
llm = self._get_llm(temperature=0)
result = llm.invoke('Reply with OK.')
self.assertTrue(len(result.content) > 0)


if __name__ == '__main__':
unittest.main()
tests/test_minimax_provider.py: 217 changes (217 additions & 0 deletions)
@@ -0,0 +1,217 @@
"""Unit tests for MiniMax LLM provider integration."""
import os
import sys
import unittest
from unittest.mock import patch, MagicMock

# Ensure the project root is importable
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))


class TestGetLlmMiniMax(unittest.TestCase):
"""Tests for the MiniMax branch in get_llm()."""

def setUp(self):
"""Patch LLM_ENV so config.py doesn't need a real llm_env.yml."""
self.env_patcher = patch('utils.config.LLM_ENV', {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
'azure': {'AZURE_OPENAI_API_KEY': '', 'AZURE_OPENAI_ENDPOINT': '', 'OPENAI_API_VERSION': ''},
'google': {'GOOGLE_API_KEY': ''},
'anthropic': {'ANTHROPIC_API_KEY': ''},
'minimax': {'MINIMAX_API_KEY': 'test-mm-key', 'MINIMAX_API_BASE': 'https://api.minimax.io/v1'},
})
self.env_patcher.start()

def tearDown(self):
self.env_patcher.stop()

@patch('utils.config.ChatOpenAI')
def test_minimax_basic(self, mock_chat):
"""MiniMax type routes to ChatOpenAI with correct base URL and model."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.8}
get_llm(config)
mock_chat.assert_called_once()
        _, kwargs = mock_chat.call_args
        self.assertEqual(kwargs['model_name'], 'MiniMax-M2.7')
        self.assertEqual(kwargs['openai_api_key'], 'test-mm-key')
        self.assertEqual(kwargs['openai_api_base'], 'https://api.minimax.io/v1')
        self.assertAlmostEqual(kwargs['temperature'], 0.8)

@patch('utils.config.ChatOpenAI')
def test_minimax_case_insensitive(self, mock_chat):
"""Provider type matching is case-insensitive."""
from utils.config import get_llm
config = {'type': 'minimax', 'name': 'MiniMax-M2.5', 'temperature': 0.5}
get_llm(config)
mock_chat.assert_called_once()
self.assertEqual(mock_chat.call_args[1]['model_name'], 'MiniMax-M2.5')

@patch('utils.config.ChatOpenAI')
def test_minimax_temperature_clamping_zero(self, mock_chat):
"""Temperature 0 is clamped to 0.01 for MiniMax."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0}
get_llm(config)
self.assertAlmostEqual(mock_chat.call_args[1]['temperature'], 0.01)

@patch('utils.config.ChatOpenAI')
def test_minimax_temperature_default_zero(self, mock_chat):
"""When temperature not set, defaults to 0 which is clamped to 0.01."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7'}
get_llm(config)
self.assertAlmostEqual(mock_chat.call_args[1]['temperature'], 0.01)

@patch('utils.config.ChatOpenAI')
def test_minimax_temperature_valid(self, mock_chat):
"""Temperature within (0, 1] is passed through without clamping."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.7}
get_llm(config)
self.assertAlmostEqual(mock_chat.call_args[1]['temperature'], 0.7)

@patch('utils.config.ChatOpenAI')
def test_minimax_custom_api_key(self, mock_chat):
"""API key from config overrides LLM_ENV."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'minimax_api_key': 'custom-key'}
get_llm(config)
self.assertEqual(mock_chat.call_args[1]['openai_api_key'], 'custom-key')

@patch('utils.config.ChatOpenAI')
def test_minimax_custom_api_base(self, mock_chat):
"""Custom API base URL from config overrides LLM_ENV default."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'minimax_api_base': 'https://custom.minimax.io/v1'}
get_llm(config)
self.assertEqual(mock_chat.call_args[1]['openai_api_base'], 'https://custom.minimax.io/v1')

@patch('utils.config.ChatOpenAI')
def test_minimax_model_kwargs(self, mock_chat):
"""model_kwargs are forwarded to ChatOpenAI."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'model_kwargs': {'seed': 42}}
get_llm(config)
self.assertEqual(mock_chat.call_args[1]['model_kwargs'], {'seed': 42})

@patch('utils.config.ChatOpenAI')
def test_minimax_no_env_section(self, mock_chat):
"""Works gracefully when minimax section missing from LLM_ENV."""
from utils.config import get_llm
with patch('utils.config.LLM_ENV', {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
}):
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'minimax_api_key': 'k'}
get_llm(config)
mock_chat.assert_called_once()
self.assertEqual(mock_chat.call_args[1]['openai_api_base'], 'https://api.minimax.io/v1')

@patch('utils.config.ChatOpenAI')
def test_minimax_m27_highspeed(self, mock_chat):
"""M2.7-highspeed model name is correctly passed."""
from utils.config import get_llm
config = {'type': 'MiniMax', 'name': 'MiniMax-M2.7-highspeed', 'temperature': 0.5}
get_llm(config)
self.assertEqual(mock_chat.call_args[1]['model_name'], 'MiniMax-M2.7-highspeed')


class TestChainWrapperMiniMax(unittest.TestCase):
"""Tests for MiniMax support in ChainWrapper."""

@patch('utils.config.LLM_ENV', {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
'minimax': {'MINIMAX_API_KEY': 'test-key', 'MINIMAX_API_BASE': 'https://api.minimax.io/v1'},
})
@patch('utils.config.ChatOpenAI')
@patch('utils.llm_chain.load_prompt')
@patch('utils.llm_chain.LLMChain')
def test_minimax_uses_openai_callback(self, mock_llm_chain, mock_load_prompt, mock_chat):
"""MiniMax provider uses get_openai_callback for cost tracking."""
from easydict import EasyDict
mock_load_prompt.return_value = MagicMock()
mock_llm_instance = MagicMock()
mock_chat.return_value = mock_llm_instance

from utils.llm_chain import ChainWrapper, get_openai_callback
llm_config = EasyDict({'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.5})
wrapper = ChainWrapper(llm_config, 'dummy.prompt')
self.assertEqual(wrapper.callback, get_openai_callback)

@patch('utils.config.LLM_ENV', {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
'minimax': {'MINIMAX_API_KEY': 'test-key', 'MINIMAX_API_BASE': 'https://api.minimax.io/v1'},
})
@patch('utils.config.ChatOpenAI')
@patch('utils.llm_chain.load_prompt')
def test_minimax_structured_output(self, mock_load_prompt, mock_chat):
"""MiniMax provider supports structured output via with_structured_output."""
from easydict import EasyDict
mock_load_prompt.return_value = MagicMock()
mock_llm_instance = MagicMock()
mock_chat.return_value = mock_llm_instance

schema = {'type': 'object', 'properties': {'label': {'type': 'string'}}}
from utils.llm_chain import ChainWrapper
llm_config = EasyDict({'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.5})
wrapper = ChainWrapper(llm_config, 'dummy.prompt', json_schema=schema)
# Structured output should have called with_structured_output
mock_llm_instance.with_structured_output.assert_called_once_with(schema)

@patch('utils.config.LLM_ENV', {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
'minimax': {'MINIMAX_API_KEY': 'test-key', 'MINIMAX_API_BASE': 'https://api.minimax.io/v1'},
})
@patch('utils.config.ChatOpenAI')
@patch('utils.llm_chain.load_prompt')
@patch('utils.llm_chain.LLMChain')
def test_minimax_no_schema_uses_llmchain(self, mock_llm_chain, mock_load_prompt, mock_chat):
"""Without json_schema, MiniMax falls back to LLMChain."""
from easydict import EasyDict
mock_load_prompt.return_value = MagicMock()
mock_llm_instance = MagicMock()
mock_chat.return_value = mock_llm_instance

from utils.llm_chain import ChainWrapper
llm_config = EasyDict({'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.5})
wrapper = ChainWrapper(llm_config, 'dummy.prompt')
mock_llm_instance.with_structured_output.assert_not_called()
mock_llm_chain.assert_called_once()


class TestLlmEnvConfig(unittest.TestCase):
"""Tests for MiniMax section in llm_env.yml."""

def test_minimax_section_exists(self):
"""llm_env.yml has a minimax section with required keys."""
import yaml
env_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'llm_env.yml')
with open(env_path) as f:
env = yaml.safe_load(f)
self.assertIn('minimax', env)
self.assertIn('MINIMAX_API_KEY', env['minimax'])
self.assertIn('MINIMAX_API_BASE', env['minimax'])
self.assertEqual(env['minimax']['MINIMAX_API_BASE'], 'https://api.minimax.io/v1')

def test_config_default_mentions_minimax(self):
"""config_default.yml comment mentions MiniMax as a supported provider."""
config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config_default.yml')
with open(config_path) as f:
content = f.read()
self.assertIn('MiniMax', content)


class TestNotImplementedPreserved(unittest.TestCase):
"""Ensure unknown providers still raise NotImplementedError."""

@patch('utils.config.LLM_ENV', {
'openai': {'OPENAI_API_KEY': '', 'OPENAI_API_BASE': '', 'OPENAI_ORGANIZATION': ''},
})
def test_unknown_type_raises(self):
from utils.config import get_llm
with self.assertRaises(NotImplementedError):
get_llm({'type': 'UnknownProvider', 'name': 'test'})


if __name__ == '__main__':
unittest.main()
utils/config.py: 10 changes (10 additions & 0 deletions)
@@ -66,6 +66,16 @@ def get_llm(config: dict):
model_kwargs=model_kwargs)


elif config['type'].lower() == 'minimax':
api_key = config.get('minimax_api_key', LLM_ENV.get('minimax', {}).get('MINIMAX_API_KEY', ''))
api_base = config.get('minimax_api_base', LLM_ENV.get('minimax', {}).get('MINIMAX_API_BASE', 'https://api.minimax.io/v1'))
# MiniMax requires temperature in (0.0, 1.0]
minimax_temperature = max(temperature, 0.01)
return ChatOpenAI(temperature=minimax_temperature, model_name=config['name'],
openai_api_key=api_key,
openai_api_base=api_base,
model_kwargs=model_kwargs)

elif config['type'].lower() == 'huggingfacepipeline':
device = config.get('gpu_device', -1)
device_map = config.get('device_map', None)
utils/llm_chain.py: 4 changes (2 additions & 2 deletions)
@@ -49,7 +49,7 @@ def __init__(self, llm_config, prompt_path: str, json_schema: dict = None, parse
self.prompt = load_prompt(prompt_path)
self.build_chain()
self.accumulate_usage = 0
if self.llm_config.type.lower() == 'openai':
if self.llm_config.type.lower() in ['openai', 'minimax']:
self.callback = get_openai_callback
else:
self.callback = get_dummy_callback
@@ -159,7 +159,7 @@ def build_chain(self):
"""
Build the chain according to the LLM type
"""
if (self.llm_config.type.lower() in ['openai', 'azure', 'anthropic', 'google']) and self.json_schema is not None:
if (self.llm_config.type.lower() in ['openai', 'azure', 'anthropic', 'google', 'minimax']) and self.json_schema is not None:
self.chain = self.prompt | self.llm.with_structured_output(self.json_schema)
else:
self.chain = LLMChain(llm=self.llm, prompt=self.prompt)
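
For reference, here is a minimal sketch of how this routing plays out for a MiniMax-backed `ChainWrapper`, mirroring the unit tests above. It assumes `config/llm_env.yml` is filled in; the prompt path and schema are placeholders:

```python
# Sketch: with a json_schema, build_chain() composes prompt | llm.with_structured_output(schema);
# without one, it falls back to LLMChain. 'dummy.prompt' is a placeholder path.
from easydict import EasyDict
from utils.llm_chain import ChainWrapper

schema = {'type': 'object', 'properties': {'label': {'type': 'string'}}}
llm_config = EasyDict({'type': 'MiniMax', 'name': 'MiniMax-M2.7', 'temperature': 0.5})

structured = ChainWrapper(llm_config, 'dummy.prompt', json_schema=schema)  # structured output path
free_form = ChainWrapper(llm_config, 'dummy.prompt')                       # plain LLMChain path
```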