25 changes: 25 additions & 0 deletions .github/configs/amd-master.yaml
@@ -314,6 +314,31 @@ kimik2.5-fp4-mi355x-vllm:
      search-space:
        - { tp: 8, conc-start: 4, conc-end: 64 }

minimaxm2.1-fp4-mi355x-vllm:
  image: vllm/vllm-openai-rocm:v0.16.0
  model: amd/MiniMax-M2.1-MXFP4
  model-prefix: minimaxm2.1
  runner: mi355x
  precision: fp4
  framework: vllm
  multinode: false
  seq-len-configs:
    - isl: 1024
      osl: 1024
      search-space:
        - { tp: 2, conc-start: 4, conc-end: 64 }
        - { tp: 4, conc-start: 4, conc-end: 64 }
    - isl: 1024
      osl: 8192
      search-space:
        - { tp: 2, conc-start: 4, conc-end: 64 }
        - { tp: 4, conc-start: 4, conc-end: 64 }
    - isl: 8192
      osl: 1024
      search-space:
        - { tp: 2, conc-start: 4, conc-end: 64 }
        - { tp: 4, conc-start: 4, conc-end: 64 }
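
Each search-space entry pairs a tensor-parallel size with a concurrency sweep. As a minimal sketch, one seq-len-config entry might expand into concrete runs like this, assuming concurrency doubles from conc-start to conc-end (the real expansion logic lives in the InferenceX harness and is not part of this diff):

```bash
#!/usr/bin/env bash
# Hypothetical expansion of the isl=1024/osl=1024 entry above.
# Assumption: concurrency doubles from conc-start to conc-end.
ISL=1024 OSL=1024
for TP in 2 4; do
    CONC=4
    while [ "$CONC" -le 64 ]; do
        echo "run: tp=$TP conc=$CONC isl=$ISL osl=$OSL"
        CONC=$((CONC * 2))
    done
done
```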

minimaxm2.5-fp8-mi355x-vllm:
  image: vllm/vllm-openai-rocm:v0.15.1
  model: MiniMaxAI/MiniMax-M2.5
71 changes: 71 additions & 0 deletions benchmarks/single_node/minimaxm2.1_fp4_mi355x.sh
@@ -0,0 +1,71 @@
#!/usr/bin/env bash

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
    MODEL \
    TP \
    CONC \
    ISL \
    OSL \
    MAX_MODEL_LEN \
    RANDOM_RANGE_RATIO \
    RESULT_FILENAME

if [[ -n "$SLURM_JOB_ID" ]]; then
    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

hf download "$MODEL"

# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+
if [ -n "$ROCR_VISIBLE_DEVICES" ]; then
    export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES"
fi

export VLLM_ROCM_USE_AITER=1
if [ "$TP" -ge 4 ]; then
# AITER CK fused MoE kernels lack compiled tiles for N=intermediate_size/TP
# when TP>=4 (TP=4, N=384). Disable AITER MoE to fall back to triton, but keep
# AITER attention. See: https://github.com/vllm-project/vllm/issues/35637
export VLLM_ROCM_USE_AITER_MOE=0
export VLLM_ATTENTION_BACKEND="ROCM_AITER_UNIFIED_ATTN"
pip install amd-quark 2>/dev/null || true
fi
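
To make the comment's arithmetic concrete: the failing kernel width is the per-rank MoE intermediate size, N = intermediate_size / TP, and the comment's N=384 at TP=4 implies intermediate_size=1536 for this model:

```bash
# N = intermediate_size / TP; TP=4 with N=384 (from the comment above)
# implies intermediate_size = 1536.
INTERMEDIATE_SIZE=1536
for TP in 1 2 4 8; do
    echo "TP=$TP -> per-rank N=$((INTERMEDIATE_SIZE / TP))"
done
# TP=4 -> per-rank N=384, a width the AITER CK fused MoE kernels have
# no compiled tile for, hence the Triton fallback.
```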

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

set -x
vllm serve "$MODEL" --port "$PORT" \
    --tensor-parallel-size="$TP" \
    --gpu-memory-utilization 0.95 \
    --max-model-len "$MAX_MODEL_LEN" \
    --block-size=32 \
    --disable-log-requests \
    --trust-remote-code > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

# Wait for server to be ready
wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
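
wait_for_server_ready is a benchmark_lib.sh helper whose implementation is not part of this diff; a typical readiness poll of this shape hits vLLM's /health endpoint and bails out if the server process dies, roughly:

```bash
# Sketch only; the real helper lives in benchmark_lib.sh.
until curl -sf "http://localhost:${PORT}/health" > /dev/null; do
    if ! kill -0 "$SERVER_PID" 2>/dev/null; then
        echo "vLLM server exited early; see $SERVER_LOG" >&2
        exit 1
    fi
    sleep 5
done
```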

run_benchmark_serving \
    --model "$MODEL" \
    --port "$PORT" \
    --backend vllm \
    --input-len "$ISL" \
    --output-len "$OSL" \
    --random-range-ratio "$RANDOM_RANGE_RATIO" \
    --num-prompts "$((CONC * 10))" \
    --max-concurrency "$CONC" \
    --result-filename "$RESULT_FILENAME" \
    --result-dir /workspace/ \
    --trust-remote-code

# After throughput, run evaluation only if RUN_EVAL is true
if [ "${RUN_EVAL}" = "true" ]; then
    run_eval --framework lm-eval --port "$PORT" --concurrent-requests "$CONC"
    append_lm_eval_summary
fi
set +x
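
run_eval and append_lm_eval_summary are likewise benchmark_lib.sh helpers. For orientation, an lm-eval run against an OpenAI-compatible vLLM server typically follows lm-evaluation-harness's documented local-completions backend; the task choice below is illustrative and the helper's actual arguments are not shown in this PR:

```bash
# Illustrative invocation, not the repo's run_eval implementation.
lm_eval --model local-completions \
    --model_args "model=${MODEL},base_url=http://localhost:${PORT}/v1/completions,num_concurrent=${CONC}" \
    --tasks gsm8k
```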
11 changes: 11 additions & 0 deletions perf-changelog.yaml
@@ -755,6 +755,17 @@
- "Key changes: AITER v0.1.10.post3 with FP8 Prefill/Decode/KV Cache, FP8 prefill attention kernel, MORI EP two-batch overlapping, OOM fix for DeepSeek weight loading"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/816

- config-keys:
    - minimaxm2.1-fp4-mi355x-vllm
  description:
    - "Add MiniMax M2.1 MXFP4 vLLM benchmark for MI355X"
    - "Model: amd/MiniMax-M2.1-MXFP4 with --trust-remote-code and --block-size=32"
    - "Image: vllm/vllm-openai-rocm:v0.16.0"
    - "Environment: VLLM_ROCM_USE_AITER=1"
    - "TP=2 and TP=4 (TP>=4 disables AITER MoE in favor of the Triton fallback to work around vLLM bug https://github.com/vllm-project/vllm/issues/35637)"
    - "Concurrency 4-64 for 1k1k, 1k8k, and 8k1k sequence lengths"
  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/827

- config-keys:
    - minimaxm2.5-fp8-h200-vllm
  description: