From 87af61155b43992c20f75755560b8de0fae7f652 Mon Sep 17 00:00:00 2001 From: hshrivastava-droid Date: Mon, 4 May 2026 16:41:54 -0700 Subject: [PATCH 1/2] add qwen3.5_fp4_b200_trt --- .github/configs/nvidia-master.yaml | 38 ++++ .../single_node/qwen3.5_fp4_b200_trt.sh | 196 ++++++++++++++++++ perf-changelog.yaml | 10 + 3 files changed, 244 insertions(+) create mode 100755 benchmarks/single_node/qwen3.5_fp4_b200_trt.sh diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 042d9a5f8..8d0899c6c 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -2083,6 +2083,44 @@ qwen3.5-fp4-b200-sglang-mtp: search-space: - { tp: 4, ep: 1, conc-start: 4, conc-end: 128, spec-decoding: mtp } +qwen3.5-fp4-b200-trt: + image: nvcr.io#nvidia/tensorrt-llm/release:1.3.0rc12 + model: nvidia/Qwen3.5-397B-A17B-NVFP4 + model-prefix: qwen3.5 + runner: b200 + precision: fp4 + framework: trt + multinode: false + scenarios: + fixed-seq-len: + - isl: 1024 + osl: 1024 + search-space: + # TP-only (EP=1) + - { tp: 4, ep: 1, conc-start: 4, conc-end: 16 } + - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 } + # TP + EP (no DP-attn) + - { tp: 4, ep: 4, conc-start: 16, conc-end: 256 } + # TP + EP + DP-attn + - { tp: 4, ep: 4, dp-attn: true, conc-start: 1024, conc-end: 1024 } + - { tp: 8, ep: 8, dp-attn: true, conc-start: 512, conc-end: 1024 } + - isl: 8192 + osl: 1024 + search-space: + # TP-only (EP=1) + - { tp: 2, ep: 1, conc-start: 4, conc-end: 32 } + - { tp: 4, ep: 1, conc-start: 4, conc-end: 8 } + - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 } + # TP + EP (no DP-attn) + - { tp: 2, ep: 2, conc-start: 64, conc-end: 64 } + - { tp: 4, ep: 4, conc-start: 16, conc-end: 16 } + # TP + EP + DP-attn + # NOTE: the (tp:4, ep:4, dp-attn:true) point at conc 256 is a hand-tuned + # hybrid that runs with moe_config.backend=TRTLLM and max_num_tokens=24576; + # qwen3.5_fp4_b200_trt.sh applies that override in place for that one point. + - { tp: 4, ep: 4, dp-attn: true, conc-start: 256, conc-end: 1024 } + - { tp: 8, ep: 8, dp-attn: true, conc-start: 512, conc-end: 1024 } + glm5-fp8-b200-sglang: image: lmsysorg/sglang:nightly-dev-cu13-20260317-1eea7448 model: zai-org/GLM-5-FP8 diff --git a/benchmarks/single_node/qwen3.5_fp4_b200_trt.sh b/benchmarks/single_node/qwen3.5_fp4_b200_trt.sh new file mode 100755 index 000000000..70746854d --- /dev/null +++ b/benchmarks/single_node/qwen3.5_fp4_b200_trt.sh @@ -0,0 +1,196 @@ +#!/usr/bin/env bash + +source "$(dirname "$0")/../benchmark_lib.sh" + +check_env_vars \ + MODEL \ + TP \ + EP_SIZE \ + CONC \ + ISL \ + OSL \ + MAX_MODEL_LEN \ + RANDOM_RANGE_RATIO \ + RESULT_FILENAME \ + DP_ATTENTION + +if [[ -n "$SLURM_JOB_ID" ]]; then + echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME" +fi + +echo "TP: $TP, EP: $EP_SIZE, CONC: $CONC, ISL: $ISL, OSL: $OSL, DP_ATTENTION: $DP_ATTENTION" + +hf download "$MODEL" + +# Derive max_batch_size from (TP, DP_ATTENTION). For dp-attn the DEP-8 case uses 128, DEP-4 uses 256. +# For non-dp-attn, TP=2 uses 256 (retained for compat), else 512. +if [[ "$DP_ATTENTION" == "true" ]]; then + if [[ "$TP" == "8" ]]; then + MAX_BATCH_SIZE=128 + else + MAX_BATCH_SIZE=256 + fi +else + if [[ "$TP" == "2" ]]; then + MAX_BATCH_SIZE=256 + else + MAX_BATCH_SIZE=512 + fi +fi + +# cuda_graph batch_sizes: powers of 2 up to min(256, MAX_BATCH_SIZE), plus 384,512 when MAX_BATCH_SIZE=512. 
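# For reference, the derivation above only ever produces MAX_BATCH_SIZE of
# 128, 256, or 512, so the list below ends up as one of:
#   MAX_BATCH_SIZE=128 -> [1, 2, 4, 8, 16, 32, 64, 128]
#   MAX_BATCH_SIZE=256 -> [1, 2, 4, 8, 16, 32, 64, 128, 256]
#   MAX_BATCH_SIZE=512 -> [1, 2, 4, 8, 16, 32, 64, 128, 256, 384, 512]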
+CUDA_GRAPH_BATCH_SIZES="1, 2, 4, 8, 16, 32, 64, 128" +if (( MAX_BATCH_SIZE >= 256 )); then + CUDA_GRAPH_BATCH_SIZES="$CUDA_GRAPH_BATCH_SIZES, 256" +fi +if (( MAX_BATCH_SIZE >= 512 )); then + CUDA_GRAPH_BATCH_SIZES="$CUDA_GRAPH_BATCH_SIZES, 384, 512" +fi + +# MoE backend: CUTEDSL for dp-attn configs, TRTLLM otherwise. +if [[ "$DP_ATTENTION" == "true" ]]; then + MOE_BACKEND=CUTEDSL +else + MOE_BACKEND=TRTLLM +fi + +# max_num_tokens scales with input seq length. +case "$ISL" in + 8192) MAX_NUM_TOKENS=33792 ;; + *) MAX_NUM_TOKENS=16384 ;; +esac + +# Hand-tuned hybrid: 8k/1k DEP-4 at conc 256 wants TRTLLM MoE and a tighter +# token budget instead of the CUTEDSL default that other dp-attn points use. +if [[ "$ISL" == "8192" && "$TP" == "4" && "$EP_SIZE" == "4" \ + && "$DP_ATTENTION" == "true" && "$CONC" == "256" ]]; then + MOE_BACKEND=TRTLLM + MAX_NUM_TOKENS=24576 +fi + +# batch_wait_max_tokens_ratio (non-dp-attn only) scales with concurrency. +case "$CONC" in + 4|8|16) BATCH_WAIT_RATIO=0.0625 ;; + 32) BATCH_WAIT_RATIO=0.125 ;; + 64) BATCH_WAIT_RATIO=0.25 ;; + 128) BATCH_WAIT_RATIO=0.5 ;; + *) BATCH_WAIT_RATIO=0.75 ;; +esac + +EXTRA_CONFIG_FILE="$(pwd)/extra-llm-api-config.yml" + +cat > "$EXTRA_CONFIG_FILE" << EOF +max_batch_size: $MAX_BATCH_SIZE +max_num_tokens: $MAX_NUM_TOKENS +num_postprocess_workers: 4 +backend: pytorch +print_iter_log: true +enable_layerwise_nvtx_marker: false +disable_overlap_scheduler: false +enable_iter_perf_stats: true +enable_chunked_prefill: false +stream_interval: 20 +scheduler_config: + capacity_scheduler_policy: MAX_UTILIZATION + context_chunking_policy: FIRST_COME_FIRST_SERVED +kv_cache_config: + free_gpu_memory_fraction: 0.9 + enable_block_reuse: false + dtype: fp8 +cuda_graph_config: + enable_padding: true + batch_sizes: [$CUDA_GRAPH_BATCH_SIZES] +moe_config: + backend: $MOE_BACKEND +EOF + +if [[ "$DP_ATTENTION" == "true" ]]; then + cat >> "$EXTRA_CONFIG_FILE" << EOF +enable_attention_dp: true +attention_dp_config: + enable_balance: true + batching_wait_iters: 10 + timeout_iters: 500 +EOF +else + cat >> "$EXTRA_CONFIG_FILE" << EOF +batch_wait_timeout_iters: 50 +batch_wait_max_tokens_ratio: $BATCH_WAIT_RATIO +EOF +fi + +# Start GPU monitoring (power, temperature, clocks every second) +start_gpu_monitor + +set -x + +if [ "${EVAL_ONLY}" = "true" ]; then + setup_eval_context + MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN" +fi + +SERVER_LOG=/workspace/server.log +PORT=${PORT:-8888} + +# --- audit: dump env + generated config before launching the server --- +echo "=============== env (resolved) ===============" +printf ' %-22s = %s\n' \ + MODEL "$MODEL" \ + TP "$TP" \ + EP_SIZE "$EP_SIZE" \ + DP_ATTENTION "$DP_ATTENTION" \ + CONC "$CONC" \ + ISL "$ISL" \ + OSL "$OSL" \ + MAX_MODEL_LEN "$MAX_MODEL_LEN" \ + RANDOM_RANGE_RATIO "$RANDOM_RANGE_RATIO" \ + RESULT_FILENAME "$RESULT_FILENAME" \ + MAX_BATCH_SIZE "$MAX_BATCH_SIZE" \ + MAX_NUM_TOKENS "$MAX_NUM_TOKENS" \ + MOE_BACKEND "$MOE_BACKEND" \ + BATCH_WAIT_RATIO "$BATCH_WAIT_RATIO" \ + CUDA_GRAPH_BATCH_SIZES "$CUDA_GRAPH_BATCH_SIZES" \ + SERVER_LOG "$SERVER_LOG" \ + PORT "$PORT" \ + EVAL_ONLY "${EVAL_ONLY:-}" +echo "=============== $EXTRA_CONFIG_FILE ===============" +ls -la "$EXTRA_CONFIG_FILE" +cat "$EXTRA_CONFIG_FILE" +echo "==============================================" + +mpirun -n 1 --oversubscribe --allow-run-as-root \ + trtllm-serve "$MODEL" --port="$PORT" \ + --trust_remote_code \ + --backend=pytorch \ + --max_seq_len="$MAX_MODEL_LEN" \ + --max_num_tokens="$MAX_NUM_TOKENS" \ + --tp_size="$TP" --ep_size="$EP_SIZE" \ + 
    --extra_llm_api_options="$EXTRA_CONFIG_FILE" \
    > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

# Wait for server to be ready
wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"

run_benchmark_serving \
    --model "$MODEL" \
    --port "$PORT" \
    --backend openai \
    --input-len "$ISL" \
    --output-len "$OSL" \
    --random-range-ratio "$RANDOM_RANGE_RATIO" \
    --num-prompts $(( CONC * 10 )) \
    --max-concurrency "$CONC" \
    --result-filename "$RESULT_FILENAME" \
    --result-dir /workspace/

# After throughput, run evaluation only if RUN_EVAL is true
if [ "${RUN_EVAL}" = "true" ]; then
    run_eval --framework lm-eval --port "$PORT"
    append_lm_eval_summary
fi

# Stop GPU monitoring
stop_gpu_monitor
set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 98fa4e8b3..745d19505 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -2207,3 +2207,13 @@
     - "run_benchmark_serving uses --dsv4 (chat-formatted prompts) per the AGENTS.md MTP rule, since EAGLE-style speculative decoding regresses acceptance on raw random tokens"
     - "Search space mirrors the non-MTP H200 entry: TP=8, EP=8, DP-attn=true, CONC 4-64 for both 1k1k and 8k1k, with spec-decoding: mtp"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1222
+
+- config-keys:
+    - qwen3.5-fp4-b200-trt
+  description:
+    - "Add Qwen3.5-397B FP4 B200 TensorRT-LLM benchmark"
+    - "Image: nvcr.io#nvidia/tensorrt-llm/release:1.3.0rc12"
+    - "Model: nvidia/Qwen3.5-397B-A17B-NVFP4"
+    - "1k1k: TP-only (tp4 conc 4-16, tp8 conc 4), TEP (tp4ep4 conc 16-256), DEP (tp4ep4 dp-attn conc 1024, tp8ep8 dp-attn conc 512-1024)"
+    - "8k1k: TP-only (tp2 conc 4-32, tp4 conc 4-8, tp8 conc 4), TEP (tp2ep2 conc 64, tp4ep4 conc 16), DEP (tp4ep4 dp-attn conc 256-1024, tp8ep8 dp-attn conc 512-1024)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXX
From 4f20519978db19a8f98e86349e0f97ccf3048813 Mon Sep 17 00:00:00 2001
From: hshrivastava-droid
Date: Mon, 4 May 2026 16:48:09 -0700
Subject: [PATCH 2/2] update PR number and drop search-space comments

---
 .github/configs/nvidia-master.yaml | 9 ---------
 perf-changelog.yaml                | 2 +-
 2 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 8d0899c6c..cadf92c3a 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -2096,28 +2096,19 @@ qwen3.5-fp4-b200-trt:
       - isl: 1024
         osl: 1024
         search-space:
-          # TP-only (EP=1)
           - { tp: 4, ep: 1, conc-start: 4, conc-end: 16 }
           - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
-          # TP + EP (no DP-attn)
           - { tp: 4, ep: 4, conc-start: 16, conc-end: 256 }
-          # TP + EP + DP-attn
           - { tp: 4, ep: 4, dp-attn: true, conc-start: 1024, conc-end: 1024 }
           - { tp: 8, ep: 8, dp-attn: true, conc-start: 512, conc-end: 1024 }
       - isl: 8192
         osl: 1024
         search-space:
-          # TP-only (EP=1)
           - { tp: 2, ep: 1, conc-start: 4, conc-end: 32 }
           - { tp: 4, ep: 1, conc-start: 4, conc-end: 8 }
           - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
-          # TP + EP (no DP-attn)
           - { tp: 2, ep: 2, conc-start: 64, conc-end: 64 }
           - { tp: 4, ep: 4, conc-start: 16, conc-end: 16 }
-          # TP + EP + DP-attn
-          # NOTE: the (tp:4, ep:4, dp-attn:true) point at conc 256 is a hand-tuned
-          # hybrid that runs with moe_config.backend=TRTLLM and max_num_tokens=24576;
-          # qwen3.5_fp4_b200_trt.sh applies that override in place for that one point.
- { tp: 4, ep: 4, dp-attn: true, conc-start: 256, conc-end: 1024 } - { tp: 8, ep: 8, dp-attn: true, conc-start: 512, conc-end: 1024 } diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 745d19505..8384ff19a 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -2216,4 +2216,4 @@ - "Model: nvidia/Qwen3.5-397B-A17B-NVFP4" - "1k1k: TP-only (tp4 conc 4-16, tp8 conc 4), TEP (tp4ep4 conc 16-256), DEP (tp4ep4 dp-attn conc 1024, tp8ep8 dp-attn conc 512-1024)" - "8k1k: TP-only (tp2 conc 4-32, tp4 conc 4-8, tp8 conc 4), TEP (tp2ep2 conc 64, tp4ep4 conc 16), DEP (tp4ep4 dp-attn conc 256-1024, tp8ep8 dp-attn conc 512-1024)" - pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXX + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1280
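
Note on the config-to-script contract: each search-space entry expands into one
benchmark run per concurrency step, and the yaml keys reach
qwen3.5_fp4_b200_trt.sh as the environment variables it checks via
check_env_vars. A minimal sketch of one such expansion, assuming a hypothetical
driver loop, power-of-two concurrency steps between conc-start and conc-end,
and illustrative values for MAX_MODEL_LEN, RANDOM_RANGE_RATIO, and
RESULT_FILENAME (none of which are pinned down in this diff):

    # Hypothetical expansion of the 8k1k point
    # { tp: 4, ep: 4, dp-attn: true, conc-start: 256, conc-end: 1024 }.
    for CONC in 256 512 1024; do
        MODEL=nvidia/Qwen3.5-397B-A17B-NVFP4 \
        TP=4 EP_SIZE=4 DP_ATTENTION=true \
        CONC=$CONC ISL=8192 OSL=1024 \
        MAX_MODEL_LEN=10240 RANDOM_RANGE_RATIO=1.0 \
        RESULT_FILENAME="qwen3.5_trt_tp4ep4_dpattn_c${CONC}" \
        bash benchmarks/single_node/qwen3.5_fp4_b200_trt.sh
    done

The CONC=256 iteration of this point is the hand-tuned hybrid described in the
comment removed by PATCH 2/2: the script switches it to MOE_BACKEND=TRTLLM with
max_num_tokens=24576, while the 512 and 1024 iterations keep the CUTEDSL
dp-attn defaults.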