28 changes: 28 additions & 0 deletions .github/configs/nvidia-master.yaml
@@ -1804,6 +1804,34 @@ qwen3.5-bf16-b200-sglang:
search-space:
- { tp: 8, ep: 1, conc-start: 4, conc-end: 64 }

qwen3.5-fp4-b200-sglang:
image: lmsysorg/sglang:v0.5.9-cu129-amd64
model: nvidia/Qwen3.5-397B-A17B-NVFP4
model-prefix: qwen3.5
runner: b200
precision: fp4
framework: sglang
multinode: false
seq-len-configs:
- isl: 1024
osl: 1024
search-space:
- { tp: 4, ep: 1, conc-start: 4, conc-end: 64 }
- { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
- { tp: 8, ep: 8, conc-start: 64, conc-end: 128 }
- isl: 1024
osl: 8192
search-space:
- { tp: 4, ep: 1, conc-start: 4, conc-end: 64 }
- { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
- { tp: 8, ep: 1, conc-start: 64, conc-end: 128 }
- isl: 8192
osl: 1024
search-space:
- { tp: 4, ep: 1, conc-start: 4, conc-end: 64 }
- { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
- { tp: 8, ep: 1, conc-start: 64, conc-end: 128 }
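
Each search-space row pins a (tp, ep) sharding and a concurrency range for one ISL/OSL pair. The harness that consumes nvidia-master.yaml is not part of this diff; as a minimal sketch, assuming it sweeps concurrency in powers of two from conc-start to conc-end and exports the variables the launch script below validates (RANDOM_RANGE_RATIO=1.0 and the result filename are likewise assumptions), one row might expand to:

# Hypothetical expansion of one search-space row into benchmark runs.
# The real harness is not shown in this diff; the doubling sweep, the
# RANDOM_RANGE_RATIO value, and the result filename are assumptions.
run_row() {
  local isl=$1 osl=$2 tp=$3 ep=$4 conc_start=$5 conc_end=$6 conc
  for ((conc = conc_start; conc <= conc_end; conc *= 2)); do
    MODEL=nvidia/Qwen3.5-397B-A17B-NVFP4 ISL=$isl OSL=$osl \
    TP=$tp EP_SIZE=$ep CONC=$conc RANDOM_RANGE_RATIO=1.0 \
    RESULT_FILENAME="qwen3.5_fp4_tp${tp}_ep${ep}_${isl}x${osl}_c${conc}.json" \
      bash benchmarks/single_node/qwen3.5_fp4_b200.sh
  done
}
run_row 1024 1024 4 1 4 64   # { tp: 4, ep: 1, conc-start: 4, conc-end: 64 }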

kimik2.5-int4-b200-vllm:
image: vllm/vllm-openai:v0.15.1
model: moonshotai/Kimi-K2.5
71 changes: 71 additions & 0 deletions benchmarks/single_node/qwen3.5_fp4_b200.sh
@@ -0,0 +1,71 @@
#!/usr/bin/env bash

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
MODEL \
TP \
CONC \
ISL \
OSL \
RANDOM_RANGE_RATIO \
RESULT_FILENAME \
EP_SIZE
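
check_env_vars comes from benchmark_lib.sh, which is outside this diff. A minimal sketch consistent with this call site (the real helper may differ):

# Hypothetical sketch of the benchmark_lib.sh helper, not part of this diff:
# fail fast if any required environment variable is unset or empty.
check_env_vars() {
  local var missing=0
  for var in "$@"; do
    if [[ -z "${!var}" ]]; then
      echo "ERROR: required env var $var is not set" >&2
      missing=1
    fi
  done
  if (( missing )); then exit 1; fi
}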

if [[ -n "$SLURM_JOB_ID" ]]; then
echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

nvidia-smi

hf download "$MODEL"

# Enable NVLink SHARP (NVLS) collectives for intra-node allreduce
export NCCL_NVLS_ENABLE=1
# Route GEMMs through FlashInfer instead of JIT-compiled DeepGEMM kernels
export SGL_ENABLE_JIT_DEEPGEMM=false
export SGLANG_ENABLE_FLASHINFER_GEMM=true
# Flush Python stdout/stderr immediately so the server log streams in order
export PYTHONUNBUFFERED=1

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}
# 20 tokens of headroom beyond ISL + OSL for special/template tokens
CONTEXT_LENGTH=$((ISL + OSL + 20))
# Capture CUDA graphs up to the benchmark concurrency
CUDA_GRAPH_MAX_BS=$CONC
# max(CONC, 128): never cap the scheduler below 128 in-flight requests
MAX_RUNNING_REQUESTS=$((CONC > 128 ? CONC : 128))
MEM_FRAC_STATIC=0.85

echo "Config: ISL=$ISL, OSL=$OSL, CONC=$CONC, EP=$EP_SIZE, MEM=$MEM_FRAC_STATIC, CUDA_BS=$CUDA_GRAPH_MAX_BS, MAX_RR=$MAX_RUNNING_REQUESTS"

set -x
PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path="$MODEL" --host=0.0.0.0 --port="$PORT" \
    --trust-remote-code \
    --tensor-parallel-size="$TP" --data-parallel-size=1 --ep-size "$EP_SIZE" \
    --cuda-graph-max-bs "$CUDA_GRAPH_MAX_BS" --max-running-requests "$MAX_RUNNING_REQUESTS" \
    --mem-fraction-static "$MEM_FRAC_STATIC" --chunked-prefill-size 32768 --max-prefill-tokens 32768 \
    --context-length "$CONTEXT_LENGTH" --disable-radix-cache \
    --attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \
    --enable-flashinfer-allreduce-fusion --scheduler-recv-interval 30 \
    --stream-interval 30 --quantization modelopt_fp4 \
    --kv-cache-dtype fp8_e4m3 --fp4-gemm-backend flashinfer_cutlass > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
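
wait_for_server_ready is also provided by benchmark_lib.sh. A plausible sketch, assuming it polls an HTTP health endpoint and aborts if the server process dies (the endpoint path and poll interval are guesses):

# Hypothetical sketch of wait_for_server_ready; the real helper lives in
# benchmark_lib.sh and is not shown in this diff.
wait_for_server_ready() {
  local port server_log server_pid
  while [[ $# -gt 0 ]]; do
    case $1 in
      --port)       port=$2;       shift 2 ;;
      --server-log) server_log=$2; shift 2 ;;
      --server-pid) server_pid=$2; shift 2 ;;
    esac
  done
  until curl -sf "http://localhost:${port}/health" > /dev/null; do
    # If the server crashed, surface the tail of its log instead of hanging.
    if ! kill -0 "$server_pid" 2> /dev/null; then
      tail -n 50 "$server_log" >&2
      exit 1
    fi
    sleep 10
  done
}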

pip install -q datasets pandas

run_benchmark_serving \
--model "$MODEL" \
--port "$PORT" \
--backend vllm \
--input-len "$ISL" \
--output-len "$OSL" \
--random-range-ratio "$RANDOM_RANGE_RATIO" \
--num-prompts "$((CONC * 10))" \
--max-concurrency "$CONC" \
--result-filename "$RESULT_FILENAME" \
--result-dir /workspace/
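
run_benchmark_serving is another benchmark_lib.sh wrapper. Given --backend vllm against an OpenAI-compatible SGLang server, it plausibly maps onto sglang.bench_serving's random-dataset mode roughly as follows (the flag mapping is an assumption, shown only for orientation):

# Assumed rough equivalent of the wrapper call above; illustrative only.
python3 -m sglang.bench_serving --backend vllm \
  --model "$MODEL" --host localhost --port "$PORT" \
  --dataset-name random \
  --random-input-len "$ISL" --random-output-len "$OSL" \
  --random-range-ratio "$RANDOM_RANGE_RATIO" \
  --num-prompts "$((CONC * 10))" --max-concurrency "$CONC" \
  --output-file "/workspace/$RESULT_FILENAME"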

if [ "${RUN_EVAL}" = "true" ]; then
run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
append_lm_eval_summary
fi
set +x
9 changes: 9 additions & 0 deletions perf-changelog.yaml
@@ -824,3 +824,12 @@
- "Uses triton attention backend, TP=8, concurrency 4-64"
- "Following AMD Andy Luo's recipe"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/850

- config-keys:
- qwen3.5-fp4-b200-sglang
description:
- "Add Qwen3.5-397B-A17B NVFP4 B200 SGLang benchmark config and launch script"
- "Image: lmsysorg/sglang:v0.5.9-cu129-amd64"
- "Model: nvidia/Qwen3.5-397B-A17B-NVFP4"
- "Configs: 1k1k (conc 4-64 ep1, conc 128 ep8), 1k8k (conc 4-128 ep1), 8k1k (conc 4-128 ep1)"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/820