diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 042d9a5f8..e38cab781 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -2634,7 +2634,7 @@ dsv4-fp8-h200-vllm:
 # (the non-MTP entry above is still on the deepseekv4-cu129 tag) and adds
 # --speculative-config '{"method":"mtp","num_speculative_tokens":2}'.
 dsv4-fp8-h200-vllm-mtp:
-  image: vllm/vllm-openai:v0.20.1@sha256:9eff9734a30b6713a8566217d36f8277630fd2d31cec7f0a0292835901a23aa4
+  image: public.ecr.aws#q9t5s3a7/vllm-release-repo:fbd51e3dfc902364fddab316ef1337c4f261de1a-x86_64-cu129
   model: deepseek-ai/DeepSeek-V4-Pro
   model-prefix: dsv4
   runner: h200
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index b04ae1947..3259cdc26 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -2214,3 +2214,9 @@
     - "Bump --speculative-config num_speculative_tokens from 1 to 2 (`{\"method\":\"mtp\",\"num_speculative_tokens\":2}`)"
     - "Re-test whether H200 MTP kernels accept 2 draft tokens — Blackwell MTP runs at 2 (per @wzhao18's vLLM Blackwell MTP submission); checking if H200 has parity now"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1279
+
+- config-keys:
+  - dsv4-fp8-h200-vllm-mtp
+  description:
+    - "Switch image from vllm/vllm-openai:v0.20.1 (cu130) to public.ecr.aws#q9t5s3a7/vllm-release-repo:fbd51e3dfc902364fddab316ef1337c4f261de1a-x86_64-cu129 (vLLM nightly release-repo build, cu129; enroot URL form with `#` separating registry from path)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1284
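
The `#` in the new image reference follows enroot's docker:// URI convention (registry host before the `#`, repository path after it); it is not an OCI digest like the `@sha256:` suffix on the old tag. A minimal sketch of pulling such a reference, assuming enroot is the container runtime on the H200 runner; the output filename is illustrative and not taken from this repo's CI:

    # Hypothetical pull on a runner; the -o squashfs name is illustrative.
    enroot import -o vllm-dsv4-cu129.sqsh \
        'docker://public.ecr.aws#q9t5s3a7/vllm-release-repo:fbd51e3dfc902364fddab316ef1337c4f261de1a-x86_64-cu129'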