diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index fc837704c..52b80fae7 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -1829,6 +1829,72 @@ qwen3.5-fp8-b200-sglang:
         - { tp: 8, ep: 1, conc-start: 4, conc-end: 4}
         - { tp: 4, ep: 4, conc-start: 8, conc-end: 64 }
 
+qwen3.5-bf16-b200-sglang-mtp:
+  image: lmsysorg/sglang:v0.5.9-cu130
+  model: Qwen/Qwen3.5-397B-A17B
+  model-prefix: qwen3.5
+  runner: b200
+  precision: bf16
+  framework: sglang
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 8, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 8, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 8, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+
+qwen3.5-fp8-b200-sglang-mtp:
+  image: lmsysorg/sglang:v0.5.9-cu130
+  model: Qwen/Qwen3.5-397B-A17B-FP8
+  model-prefix: qwen3.5
+  runner: b200
+  precision: fp8
+  framework: sglang
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+
+qwen3.5-fp4-b200-sglang-mtp:
+  image: lmsysorg/sglang:v0.5.9-cu130
+  model: nvidia/Qwen3.5-397B-A17B-NVFP4
+  model-prefix: qwen3.5
+  runner: b200
+  precision: fp4
+  framework: sglang
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+    - isl: 1024
+      osl: 8192
+      search-space:
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 512, spec-decoding: mtp }
+
 kimik2.5-int4-b200-vllm:
   image: vllm/vllm-openai:v0.15.1
   model: moonshotai/Kimi-K2.5
diff --git a/benchmarks/single_node/qwen3.5_bf16_b200_mtp.sh b/benchmarks/single_node/qwen3.5_bf16_b200_mtp.sh
new file mode 100644
index 000000000..3c6ecb73f
--- /dev/null
+++ b/benchmarks/single_node/qwen3.5_bf16_b200_mtp.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+  MODEL \
+  TP \
+  CONC \
+  ISL \
+  OSL \
+  RANDOM_RANGE_RATIO \
+  RESULT_FILENAME \
+  EP_SIZE
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+  echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+nvidia-smi
+
+hf download "$MODEL"
+
+export NCCL_NVLS_ENABLE=1
+export SGL_ENABLE_JIT_DEEPGEMM=false
+export SGLANG_ENABLE_FLASHINFER_GEMM=true
+export PYTHONUNBUFFERED=1
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+# Default: recv every ~10 requests; if CONC >= 16, relax to ~30 requests between scheduler recv polls.
+if [[ $CONC -ge 16 ]]; then
+    SCHEDULER_RECV_INTERVAL=30
+else
+    SCHEDULER_RECV_INTERVAL=10
+fi
+
+MEM_FRAC_STATIC=0.8
+CHUNKED_PREFILL_SIZE=32768
+MAX_PREFILL_TOKENS=32768
+CUDA_GRAPH_MAX_BATCH_SIZE=$CONC
+MAX_RUNNING_REQUESTS=128
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+
+# MTP (Multi-Token Prediction) Config - EAGLE speculative decoding
+SPECULATIVE_NUM_STEPS=3
+SPECULATIVE_DRAFT_TOKENS=4
+SPECULATIVE_EAGLE_TOPK=1
+
+echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL"
+
+set -x
+PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
+--trust-remote-code \
+--tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \
+--cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \
+--mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \
+--context-length $CONTEXT_LENGTH --disable-radix-cache \
+--attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \
+--enable-flashinfer-allreduce-fusion --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \
+--tokenizer-worker-num 6 --stream-interval 30 \
+--speculative-algorithm EAGLE --speculative-num-steps $SPECULATIVE_NUM_STEPS --speculative-eagle-topk $SPECULATIVE_EAGLE_TOPK --speculative-num-draft-tokens $SPECULATIVE_DRAFT_TOKENS \
+> $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+pip install -q datasets pandas
+
+run_benchmark_serving \
+  --model "$MODEL" \
+  --port "$PORT" \
+  --backend vllm \
+  --input-len "$ISL" \
+  --output-len "$OSL" \
+  --random-range-ratio "$RANDOM_RANGE_RATIO" \
+  --num-prompts "$((CONC * 10))" \
+  --max-concurrency "$CONC" \
+  --result-filename "$RESULT_FILENAME" \
+  --result-dir /workspace/ \
+  --use-chat-template
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+  run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+  append_lm_eval_summary
+fi
+set +x
diff --git a/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh b/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh
new file mode 100644
index 000000000..48db1d66d
--- /dev/null
+++ b/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+  MODEL \
+  TP \
+  CONC \
+  ISL \
+  OSL \
+  RANDOM_RANGE_RATIO \
+  RESULT_FILENAME \
+  EP_SIZE
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+  echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+nvidia-smi
+
+hf download "$MODEL"
+
+export NCCL_NVLS_ENABLE=1
+export SGL_ENABLE_JIT_DEEPGEMM=false
+export SGLANG_ENABLE_FLASHINFER_GEMM=true
+export PYTHONUNBUFFERED=1
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+# Default: recv every ~10 requests; if CONC >= 16, relax to ~30 requests between scheduler recv polls.
+if [[ $CONC -ge 16 ]]; then
+    SCHEDULER_RECV_INTERVAL=30
+else
+    SCHEDULER_RECV_INTERVAL=10
+fi
+
+MEM_FRAC_STATIC=0.8
+CHUNKED_PREFILL_SIZE=32768
+MAX_PREFILL_TOKENS=32768
+CUDA_GRAPH_MAX_BATCH_SIZE=$CONC
+MAX_RUNNING_REQUESTS=128
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+
+# MTP (Multi-Token Prediction) Config - EAGLE speculative decoding
+SPECULATIVE_NUM_STEPS=3
+SPECULATIVE_DRAFT_TOKENS=4
+SPECULATIVE_EAGLE_TOPK=1
+
+echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL"
+
+set -x
+PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
+--trust-remote-code \
+--tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \
+--cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \
+--mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \
+--context-length $CONTEXT_LENGTH --disable-radix-cache \
+--attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm --fp4-gemm-backend flashinfer_cutlass --kv-cache-dtype fp8_e4m3 \
+--enable-flashinfer-allreduce-fusion --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \
+--tokenizer-worker-num 6 --stream-interval 30 \
+--speculative-algorithm EAGLE --speculative-num-steps $SPECULATIVE_NUM_STEPS --speculative-eagle-topk $SPECULATIVE_EAGLE_TOPK --speculative-num-draft-tokens $SPECULATIVE_DRAFT_TOKENS \
+> $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+pip install -q datasets pandas
+
+run_benchmark_serving \
+  --model "$MODEL" \
+  --port "$PORT" \
+  --backend vllm \
+  --input-len "$ISL" \
+  --output-len "$OSL" \
+  --random-range-ratio "$RANDOM_RANGE_RATIO" \
+  --num-prompts "$((CONC * 10))" \
+  --max-concurrency "$CONC" \
+  --result-filename "$RESULT_FILENAME" \
+  --result-dir /workspace/ \
+  --use-chat-template
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+  run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC
+  append_lm_eval_summary
+fi
+set +x
diff --git a/benchmarks/single_node/qwen3.5_fp8_b200_mtp.sh b/benchmarks/single_node/qwen3.5_fp8_b200_mtp.sh
new file mode 100644
index 000000000..c8aa7490f
--- /dev/null
+++ b/benchmarks/single_node/qwen3.5_fp8_b200_mtp.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+  MODEL \
+  TP \
+  CONC \
+  ISL \
+  OSL \
+  RANDOM_RANGE_RATIO \
+  RESULT_FILENAME \
+  EP_SIZE
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+  echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+nvidia-smi
+
+hf download "$MODEL"
+
+export NCCL_NVLS_ENABLE=1
+export SGL_ENABLE_JIT_DEEPGEMM=false
+export SGLANG_ENABLE_FLASHINFER_GEMM=true
+export PYTHONUNBUFFERED=1
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+# Default: recv every ~10 requests; if CONC >= 16, relax to ~30 requests between scheduler recv polls.
+if [[ $CONC -ge 16 ]]; then + SCHEDULER_RECV_INTERVAL=30 +else + SCHEDULER_RECV_INTERVAL=10 +fi + +MEM_FRAC_STATIC=0.8 +CHUNKED_PREFILL_SIZE=32768 +MAX_PREFILL_TOKENS=32768 +CUDA_GRAPH_MAX_BATCH_SIZE=$CONC +MAX_RUNNING_REQUESTS=128 +CONTEXT_LENGTH=$((ISL + OSL + 20)) + +# MTP (Multi-Token Prediction) Config - EAGLE speculative decoding +SPECULATIVE_NUM_STEPS=3 +SPECULATIVE_DRAFT_TOKENS=4 +SPECULATIVE_EAGLE_TOPK=1 + +echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL" + +set -x +PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \ +--trust-remote-code \ +--tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \ +--quantization fp8 --kv-cache-dtype fp8_e4m3 --mamba-ssm-dtype bfloat16 \ +--cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \ +--mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \ +--context-length $CONTEXT_LENGTH --disable-radix-cache \ +--attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \ +--enable-flashinfer-allreduce-fusion --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \ +--tokenizer-worker-num 6 --stream-interval 30 \ +--speculative-algorithm EAGLE --speculative-num-steps $SPECULATIVE_NUM_STEPS --speculative-eagle-topk $SPECULATIVE_EAGLE_TOPK --speculative-num-draft-tokens $SPECULATIVE_DRAFT_TOKENS \ +> $SERVER_LOG 2>&1 & + +SERVER_PID=$! + +# Wait for server to be ready +wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID" + +pip install -q datasets pandas + +run_benchmark_serving \ + --model "$MODEL" \ + --port "$PORT" \ + --backend vllm \ + --input-len "$ISL" \ + --output-len "$OSL" \ + --random-range-ratio "$RANDOM_RANGE_RATIO" \ + --num-prompts "$((CONC * 10))" \ + --max-concurrency "$CONC" \ + --result-filename "$RESULT_FILENAME" \ + --result-dir /workspace/ \ + --use-chat-template + +# After throughput, run evaluation only if RUN_EVAL is true +if [ "${RUN_EVAL}" = "true" ]; then + run_eval --framework lm-eval --port "$PORT" --concurrent-requests $CONC + append_lm_eval_summary +fi +set +x diff --git a/perf-changelog.yaml b/perf-changelog.yaml index c19ddbd1a..e86c630c3 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -931,4 +931,14 @@ - "Switch to --attention-backend ROCM_AITER_UNIFIED_ATTN and add fuse_rope_kvcache compilation pass" - "Remove deprecated VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION/VLLM_ROCM_USE_AITER_MHA env vars and compilation-config cudagraph_mode" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/867 - + +- config-keys: + - qwen3.5-bf16-b200-sglang-mtp + - qwen3.5-fp8-b200-sglang-mtp + - qwen3.5-fp4-b200-sglang-mtp + description: + - "Add Single Node Agg MTP configs for Qwen3.5 B200 SGLang (bf16, fp8, fp4)" + - "EAGLE speculative decoding: num-steps 3, draft-tokens 4, topk 1" + - "New scripts: benchmarks/single_node/qwen3.5_bf16_b200_mtp.sh, qwen3.5_fp8_b200_mtp.sh, qwen3.5_fp4_b200_mtp.sh" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/897 +
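Note on local reproduction: outside the CI harness, the three new scripts can be driven directly by exporting the variables they validate via check_env_vars and then invoking the script, assuming they run inside the lmsysorg/sglang:v0.5.9-cu130 container named in the configs (logs and results are written under /workspace/). A minimal sketch for the fp8 variant is below; MODEL, TP, EP_SIZE, ISL, OSL, and the CONC range mirror the qwen3.5-fp8-b200-sglang-mtp search-space above, while RANDOM_RANGE_RATIO, RESULT_FILENAME, and RUN_EVAL are illustrative values, not ones fixed by this PR.

# Hypothetical manual run of the new fp8 MTP benchmark script.
# MODEL/TP/EP_SIZE/ISL/OSL/CONC follow the qwen3.5-fp8-b200-sglang-mtp config;
# RANDOM_RANGE_RATIO, RESULT_FILENAME, and RUN_EVAL are example settings only.
export MODEL=Qwen/Qwen3.5-397B-A17B-FP8
export TP=4
export EP_SIZE=1
export CONC=64                        # any point in the conc-start..conc-end sweep (4..512)
export ISL=1024
export OSL=1024
export RANDOM_RANGE_RATIO=1.0         # assumed; the harness supplies the real value
export RESULT_FILENAME=qwen3.5_fp8_b200_mtp_tp4_conc64
export RUN_EVAL=false                 # set to "true" to run lm-eval after the throughput pass
bash benchmarks/single_node/qwen3.5_fp8_b200_mtp.sh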