diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 6885f36cb..8ffe870ae 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -3106,6 +3106,31 @@ gptoss-fp4-b200-vllm:
       - { tp: 4, conc-start: 4, conc-end: 64 }
       - { tp: 8, conc-start: 4, conc-end: 4 }
 
+minimaxm2.5-fp8-b200-vllm:
+  image: vllm/vllm-openai:v0.16.0-cu130
+  model: MiniMaxAI/MiniMax-M2.5
+  model-prefix: minimaxm2.5
+  runner: b200
+  precision: fp8
+  framework: vllm
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
+
 gptoss-fp4-h100-vllm:
   image: vllm/vllm-openai:v0.15.1
   model: openai/gpt-oss-120b
diff --git a/benchmarks/single_node/minimaxm2.5_fp8_b200.sh b/benchmarks/single_node/minimaxm2.5_fp8_b200.sh
new file mode 100755
index 000000000..7cb87d2a7
--- /dev/null
+++ b/benchmarks/single_node/minimaxm2.5_fp8_b200.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    EP_SIZE \
+    CONC \
+    ISL \
+    OSL \
+    MAX_MODEL_LEN \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+nvidia-smi
+
+hf download "$MODEL"
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+export VLLM_USE_FLASHINFER_MOE_FP8=0
+export VLLM_MOE_USE_DEEP_GEMM=0
+
+if [ "$EP_SIZE" -gt 1 ]; then
+    EP="--enable-expert-parallel"
+else
+    EP=""
+fi
+
+set -x
+vllm serve $MODEL --port $PORT \
+--tensor-parallel-size=$TP \
+$EP \
+--gpu-memory-utilization 0.95 \
+--max-model-len $MAX_MODEL_LEN \
+--block-size=32 \
+--disable-log-requests \
+--trust-remote-code > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+    append_lm_eval_summary
+fi
+set +x
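For reference, a hypothetical manual invocation of the new script. In CI the harness derives these variables from the minimaxm2.5-fp8-b200-vllm entry above; the variable names come from the script's check_env_vars list, while the TP/CONC/ISL/OSL values are one illustrative point in the sweep, and the MAX_MODEL_LEN and RANDOM_RANGE_RATIO values are assumptions, not taken from the config:

```bash
# Hypothetical manual run of benchmarks/single_node/minimaxm2.5_fp8_b200.sh
# (one point in the configured sweep; values marked "assumption" are not
# specified anywhere in this diff).
export MODEL="MiniMaxAI/MiniMax-M2.5"
export TP=4                          # search-space allows tp 2 or 4
export EP_SIZE=1                     # assumption: plain TP, no expert parallelism
export CONC=64                       # concurrency sweep runs conc-start 4 .. conc-end 64
export ISL=1024
export OSL=1024
export MAX_MODEL_LEN=$((ISL + OSL))  # assumption: context window covers input + output
export RANDOM_RANGE_RATIO=0.8        # assumption: supplied by the harness in CI
export RESULT_FILENAME="minimaxm2.5_fp8_tp4_conc64.json"
./benchmarks/single_node/minimaxm2.5_fp8_b200.sh
```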
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 15d00da6d..130501642 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -679,6 +679,7 @@
     - "Image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260218"
     - "Uses triton attention backend, TP=8, concurrency 4-64"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/768
+
 - config-keys:
     - qwen3.5-bf16-b200-sglang
 
@@ -875,3 +876,10 @@
     - "Server: reasoning-parser qwen3, tool-call-parser qwen3_coder, enable-flashinfer-allreduce-fusion, mem-fraction-static 0.8"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/855
 
+- config-keys:
+    - minimaxm2.5-fp8-b200-vllm
+  description:
+    - "Add MiniMax-M2.5 FP8 vLLM benchmark for B200"
+    - "Model: MiniMaxAI/MiniMax-M2.5 with --trust-remote-code"
+    - "Image: vllm/vllm-openai:v0.16.0-cu130"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/757
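The script leans on wait_for_server_ready from benchmark_lib.sh, which is not part of this diff. As context only, a minimal sketch of that kind of readiness gate, assuming it polls vLLM's /health endpoint and aborts if the server process dies (the helper's three flags suggest exactly these inputs; the real implementation may differ):

```bash
# Sketch of a readiness gate like wait_for_server_ready; hypothetical, the
# actual helper lives in benchmark_lib.sh and is not shown in this diff.
wait_for_server_ready_sketch() {
  local port="$1" server_log="$2" server_pid="$3"
  # vLLM's OpenAI-compatible server exposes a /health endpoint.
  until curl -sf "http://localhost:${port}/health" > /dev/null; do
    # Bail out early if the server crashed instead of polling forever.
    if ! kill -0 "$server_pid" 2>/dev/null; then
      echo "vLLM server exited before becoming ready; see ${server_log}" >&2
      tail -n 20 "$server_log" >&2
      return 1
    fi
    sleep 5
  done
}
```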