Skip to content
24 changes: 24 additions & 0 deletions .github/configs/nvidia-master.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1851,6 +1851,30 @@ glm5-fp8-b200-sglang:
search-space:
- { tp: 8, ep: 1, conc-start: 4, conc-end: 128 }

# Qwen3.5-397B-A17B NVFP4 on B200, served with SGLang.
# Structure reconstructed: the pasted diff had flattened indentation, which is
# invalid YAML; nesting follows the sibling glm5-fp8-b200-sglang stanza.
qwen3.5-fp4-b200-sglang:
  image: lmsysorg/sglang:v0.5.9-cu129-amd64
  model: nvidia/Qwen3.5-397B-A17B-NVFP4
  model-prefix: qwen3.5
  runner: b200
  precision: fp4
  framework: sglang
  multinode: false
  seq-len-configs:
    # 1k in / 1k out
    - isl: 1024
      osl: 1024
      search-space:
        - { tp: 4, ep: 1, conc-start: 4, conc-end: 128 }
    # 1k in / 8k out — decode-heavy; tp8 probed at a single concurrency
    - isl: 1024
      osl: 8192
      search-space:
        - { tp: 4, ep: 1, conc-start: 4, conc-end: 128 }
        - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
    # 8k in / 1k out — prefill-heavy; tp8 probed at a single concurrency
    - isl: 8192
      osl: 1024
      search-space:
        - { tp: 4, ep: 1, conc-start: 4, conc-end: 128 }
        - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }

kimik2.5-int4-b200-vllm:
image: vllm/vllm-openai:v0.15.1
model: moonshotai/Kimi-K2.5
Expand Down
83 changes: 83 additions & 0 deletions benchmarks/single_node/qwen3.5_fp4_b200.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Benchmark Qwen3.5-397B-A17B NVFP4 on a single B200 node with SGLang:
# launch the server, run a serving benchmark, and optionally lm-eval.
#
# Required env vars are validated by check_env_vars below. Optional:
#   PORT     - server port (default 8888)
#   RUN_EVAL - set to "true" to run lm-eval after the benchmark

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
    MODEL \
    TP \
    CONC \
    ISL \
    OSL \
    RANDOM_RANGE_RATIO \
    RESULT_FILENAME \
    MAX_MODEL_LEN \
    EP_SIZE

if [[ -n "$SLURM_JOB_ID" ]]; then
    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

nvidia-smi

# Pre-fetch model weights so server startup time excludes the download.
hf download "$MODEL"

export NCCL_NVLS_ENABLE=1
export SGL_ENABLE_JIT_DEEPGEMM=false
export SGLANG_ENABLE_FLASHINFER_GEMM=true
export PYTHONUNBUFFERED=1

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}
MEM_FRAC_STATIC=0.85

# FIX: the original line printed $CUDA_GRAPH_MAX_BS and $MAX_RUNNING_REQUESTS,
# which are never set anywhere in this script — the server is launched with
# $CONC for both flags, so the log showed empty values. Print $CONC instead.
echo "Config: ISL=$ISL, OSL=$OSL, CONC=$CONC, EP=$EP_SIZE, MEM=$MEM_FRAC_STATIC, CUDA_BS=$CONC, MAX_RR=$CONC"

# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

set -x
# Cap cuda-graph batch size and running requests at the benchmark concurrency:
# anything larger only wastes memory for this fixed-concurrency run.
PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path="$MODEL" --host=0.0.0.0 --port="$PORT" \
    --trust-remote-code \
    --tensor-parallel-size "$TP" \
    --ep-size "$EP_SIZE" \
    --cuda-graph-max-bs "$CONC" \
    --max-running-requests "$CONC" \
    --mem-fraction-static "$MEM_FRAC_STATIC" \
    --chunked-prefill-size 32768 \
    --max-prefill-tokens 32768 \
    --context-length "$MAX_MODEL_LEN" \
    --attention-backend trtllm_mha \
    --moe-runner-backend flashinfer_trtllm \
    --fp4-gemm-backend flashinfer_cutlass \
    --quantization modelopt_fp4 \
    --kv-cache-dtype fp8_e4m3 \
    --mamba-ssm-dtype bfloat16 \
    --disable-radix-cache \
    --scheduler-recv-interval 30 --stream-interval 30 > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"

pip install -q datasets pandas

# --backend vllm: the benchmark client speaks the OpenAI-compatible API,
# which the SGLang server exposes.
run_benchmark_serving \
    --model "$MODEL" \
    --port "$PORT" \
    --backend vllm \
    --input-len "$ISL" \
    --output-len "$OSL" \
    --random-range-ratio "$RANDOM_RANGE_RATIO" \
    --num-prompts "$((CONC * 10))" \
    --max-concurrency "$CONC" \
    --result-filename "$RESULT_FILENAME" \
    --result-dir /workspace/

if [ "${RUN_EVAL}" = "true" ]; then
    run_eval --framework lm-eval --port "$PORT" --concurrent-requests "$CONC"
    append_lm_eval_summary
fi

# Stop GPU monitoring
stop_gpu_monitor
# NOTE(review): $SERVER_PID is never killed here — presumably container
# teardown reaps it; confirm if this script is ever run outside CI.
set +x
13 changes: 11 additions & 2 deletions perf-changelog.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -970,7 +970,7 @@
- "Replace old per-file recipes with resolved variants from consolidated 8k1k.yaml"
- "14 variants: STP/MTP x low-latency/max-throughput with updated concurrencies and scale points"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/907

- config-keys:
- glm5-fp8-h200-sglang
description:
Expand All @@ -979,10 +979,19 @@
- "Benchmark script: benchmarks/single_node/glm5_fp8_h200.sh"
- "Tool-call-parser glm47, reasoning-parser glm45, mem-fraction-static 0.85"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/914

- config-keys:
- glm5-fp8-b200-sglang
description:
- "Add GLM-5 FP8 SGLang benchmark for B200"
- "Supports TP8 (low latency) and DEP8 (high throughput) modes with NSA attention backend"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/915

- config-keys:
- qwen3.5-fp4-b200-sglang
description:
- "Add Qwen3.5-397B-A17B NVFP4 B200 SGLang benchmark config and launch script"
- "Image: lmsysorg/sglang:v0.5.9-cu129-amd64"
- "Model: nvidia/Qwen3.5-397B-A17B-NVFP4"
- "Configs: 1k1k (tp4 ep1 conc 4-128); 1k8k and 8k1k (tp4 ep1 conc 4-128, plus tp8 ep1 at conc 4)"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/820