From 952b00fb372f14447dd48cabd55f6e8eb6ae7c67 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 5 Mar 2026 10:00:47 +0000 Subject: [PATCH 01/11] adding Jenkinsfile for Nightly build Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile | 321 ++++++++++++++++++++++++++++++ scripts/Nightly/PIPELINE_GUIDE.md | 179 +++++++++++++++++ 2 files changed, 500 insertions(+) create mode 100644 scripts/Nightly/Jenkinsfile create mode 100644 scripts/Nightly/PIPELINE_GUIDE.md diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile new file mode 100644 index 000000000..e69bc6ff5 --- /dev/null +++ b/scripts/Nightly/Jenkinsfile @@ -0,0 +1,321 @@ +pipeline { + agent { + node { + label 'qeff_node' + } + } + + description 'QEfficient Nightly Test Pipeline - Runs comprehensive test suite on gb-blr-62 QAIC environments' + + options { + disableConcurrentBuilds() + timeout(time: 1, unit: 'DAYS') + timestamps() + buildDiscarder(logRotator(numToKeepStr: '5', daysToKeepStr: '30')) + } + + // triggers { + // // Runs daily at 2:00 AM UTC + // cron('0 2 * * *') + // // Uncomment to also run on repository push + // // githubPush() + // } + + environment { + DOCKER_IMAGE = "${DOCKER_LATEST}:master_latest" + VENV_PATH = 'preflight_qeff' + TOKENIZERS_PARALLELISM = 'false' + HF_HUB_CACHE = '/huggingface_hub' + PYTEST_ARGS = '--durations=10' + DOCKER_USER = 'ubuntu' + } + + stages { + stage('Prepare Environment') { + description 'Setup Docker container and install dependencies' + steps { + script { + echo "Starting QEfficient Nightly Test Suite" + echo "Build Tag: ${BUILD_TAG}" + } + sh ''' + source ~/.bashrc + # Launch privileged Docker container with necessary mounts + sudo docker run --privileged -dit \ + --name ${BUILD_TAG} \ + -e HF_TOKEN=${HF_TOKEN} \ + -v ./:/efficient-transformers \ + -v ${HF_PATH}:${DOCKER_HF_PATH} \ + ${DOCKER_IMAGE} + + # Install QEfficient and dependencies + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + apt 
update && apt install -y python3.10-venv + python3.10 -m venv ${VENV_PATH} + . ${VENV_PATH}/bin/activate + + # Upgrade pip and core packages + pip install --upgrade pip setuptools wheel + pip install .[test] + pip install junitparser pytest-xdist + + # Audio processing libraries for speech-to-text models + pip install librosa==0.10.2 soundfile==0.13.1 + + # Vision and multimodal model dependencies + pip install --extra-index-url https://download.pytorch.org/whl/cpu \ + timm==1.0.14 torchvision==0.22.0+cpu einops==0.8.1 + + rm -rf QEfficient + " + ''' + } + } + stage('Unit & Integration Tests') { + description 'Run core model export, ONNX, and QAIC LLM tests' + parallel { + stage('Model Export & ONNX Tests') { + description 'Test model export and ONNX conversion' + steps { + timeout(time: 40, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . ${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_cli_qaic + export QEFF_HOME=$PWD/Non_cli_qaic + + pytest tests \ + -m '(not cli) and (not on_qaic) and (not finetune)' \ + --ignore tests/vllm \ + --ignore tests/transformers/models/image_text_to_text \ + ${PYTEST_ARGS} -n 4\ + --junitxml=tests/tests_log1.xml + + junitparser merge tests/tests_log1.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + + stage('QAIC LLM Tests') { + description 'Test QAIC-optimized large language models' + steps { + timeout(time: 120, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . 
${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_qaic_llm + export QEFF_HOME=$PWD/Non_qaic_llm + + pytest tests \ + -m '(not cli) and (on_qaic) and (llm_model) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log2.xml + + junitparser merge tests/tests_log2.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + + stage('QAIC Feature Tests') { + description 'Test QAIC-specific features and optimizations' + steps { + timeout(time: 80, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . ${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_qaic_feature + export QEFF_HOME=$PWD/Non_qaic_feature + + pytest tests \ + -m '(not cli) and (on_qaic) and (feature) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log2_feature.xml + + junitparser merge tests/tests_log2_feature.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + } + } + stage('QAIC MultiModal Tests') { + description 'Test QAIC multimodal models (VLM, image-text)' + steps { + timeout(time: 120, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . 
${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_cli_qaic_multimodal + export QEFF_HOME=$PWD/Non_cli_qaic_multimodal + + pytest tests \ + -m '(not cli) and (on_qaic) and (multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log6.xml + + junitparser merge tests/tests_log6.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + + stage('QAIC Diffusion Models Tests') { + description 'Test QAIC diffusion and generative models' + steps { + timeout(time: 120, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . ${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_cli_qaic_diffusion + export QEFF_HOME=$PWD/Non_cli_qaic_diffusion + export HF_HUB_CACHE=${HF_HUB_CACHE} + + pytest tests \ + -m '(not cli) and (on_qaic) and (diffusion_models) and (not wan) and (not qnn) and (not finetune)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log_diffusion.xml + + junitparser merge tests/tests_log_diffusion.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + + stage('CLI Inference Tests') { + description 'Test command-line interface for inference' + steps { + timeout(time: 120, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . ${VENV_PATH}/bin/activate + + mkdir -p $PWD/cli + export QEFF_HOME=$PWD/cli + + pytest tests \ + -m '(cli and not qnn) and (not finetune)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log3.xml + + junitparser merge tests/tests_log3.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + stage('Finetune CLI Tests') { + description 'Test fine-tuning capabilities via CLI' + steps { + timeout(time: 20, unit: 'MINUTES') { + sh ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . 
${VENV_PATH}/bin/activate + + # Install QAIC PyTorch integration + pip install /opt/qti-aic/integrations/torch_qaic/py310/torch_qaic-0.1.0-cp310-cp310-linux_x86_64.whl + pip install torch==2.9.0 torchvision==0.24.0 torchaudio==2.9.0 \ + --index-url https://download.pytorch.org/whl/cpu + + mkdir -p $PWD/cli_qaic_finetuning + export QEFF_HOME=$PWD/cli_qaic_finetuning + + pytest tests \ + -m '(cli) and (on_qaic) and (not qnn) and (not multimodal) and (finetune)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log_finetune.xml + + junitparser merge tests/tests_log_finetune.xml tests/tests_log.xml + deactivate + " + ''' + } + } + } + } + + post { + always { + script { + echo "========== Test Execution Summary ==========" + sh ''' + # Restore file ownership + sudo chown -R ${DOCKER_USER} . 2>/dev/null || true + ''' + } + + junit testResults: 'tests/tests_log.xml', + allowEmptyResults: true, + keepLongStdio: true + + script { + sh ''' + # Cleanup Docker container + echo "Cleaning up Docker container: ${BUILD_TAG}" + sudo docker rm -f ${BUILD_TAG} 2>/dev/null || true + ''' + } + + cleanWs( + deleteDirs: true, + ) + + echo "Pipeline cleanup completed" + } + + success { + echo "✓ QEfficient Nightly Test Suite completed successfully" + // Optionally trigger downstream jobs here + // build job: 'qefficient_downstream_job', wait: false + } + + failure { + echo "✗ QEfficient Nightly Test Suite failed" + echo "Check logs above for detailed error information" + } + + unstable { + echo "⚠ QEfficient Nightly Test Suite produced unstable results" + echo "Some tests may have been skipped or failed" + } + } +} \ No newline at end of file diff --git a/scripts/Nightly/PIPELINE_GUIDE.md b/scripts/Nightly/PIPELINE_GUIDE.md new file mode 100644 index 000000000..27f83340f --- /dev/null +++ b/scripts/Nightly/PIPELINE_GUIDE.md @@ -0,0 +1,179 @@ +# QEfficient Nightly Pipeline - Optimization & Configuration Guide + +## Overview +The Jenkinsfile has been optimized for 
professional standards with improved structure, error handling, and scheduled execution capabilities. + +## Key Optimizations + +### 1. **Scheduled Triggers** ✓ +The pipeline now automatically triggers at **2:00 AM UTC** daily: +```groovy +triggers { + cron('0 2 * * *') // Daily at 2:00 AM UTC +} +``` + +### 2. **Code Quality Improvements** +- **Environment Variables**: Centralized variable definitions to reduce duplication +- **Consistent Formatting**: Improved readability with proper indentation and structure +- **Descriptions**: Each stage has descriptive text for job dashboard +- **Error Handling**: Better error handling with graceful fallback mechanisms +- **Logging**: Enhanced logging with timestamps and status indicators + +### 3. **Structural Improvements** +- **Single Preparation Stage**: Consolidated container setup and dependency installation +- **Organized Test Stages**: Logically grouped test categories +- **Better Cleanup**: Selective workspace cleanup using patterns +- **Professional Post-Build**: Status-specific messages (success, failure, unstable) + +### 4. **Performance Optimizations** +- **Concurrent Test Execution**: Added parallel test argument (`-n 4`) +- **Selective Cleanup**: Only deletes test artifacts, preserves `.git` +- **Build Discard Policy**: Keeps last 30 builds or 15 days of history +- **Single Day Timeout**: Global timeout of 1 day to prevent hanging builds + +## Configuration Guide + +### Changing the Schedule + +Edit the `triggers` section to set a custom schedule. 
The cron format is: +``` +(minute) (hour) (day_of_month) (month) (day_of_week) +``` + +**Common Examples:** +```groovy +// Every day at 3:00 AM UTC +cron('0 3 * * *') + +// Every Monday-Friday at 2:00 AM UTC +cron('0 2 * * 1-5') + +// Every 6 hours +cron('0 */6 * * *') + +// Twice daily: 2:00 AM and 2:00 PM UTC +cron('0 2,14 * * *') + +// Every Sunday at midnight UTC +cron('0 0 * * 0') +``` + +### Additional Trigger Options + +To also trigger on repository push, uncomment in the triggers section: +```groovy +triggers { + cron('0 2 * * *') + githubPush() // Uncomment this line +} +``` + +### Environment Variables Configuration + +Key variables can be customized in the `environment` section: +```groovy +environment { + DOCKER_IMAGE = "${DOCKER_LATEST}:master_latest" // Docker image version + VENV_PATH = 'preflight_qeff' // Virtual environment path + TOKENIZERS_PARALLELISM = 'false' // Tokenizer parallelism + HF_HUB_CACHE = '/huggingface_hub' // HuggingFace cache location + PYTEST_ARGS = '--durations=10 -n 4' // Pytest arguments + DOCKER_USER = 'ubuntu' // Docker user for cleanup +} +``` + +## Pipeline Stages + +### 1. **Prepare Environment** (40 min) +- Launches Docker container with GPU/QAIC support +- Installs Python 3.10 virtual environment +- Installs core QEfficient library and dependencies +- Installs audio, vision, and ML packages + +### 2. **Unit & Integration Tests** (Parallel: 40-120 min) +- **Model Export & ONNX Tests**: Core model conversion testing +- **QAIC LLM Tests**: Language model optimization tests +- **QAIC Feature Tests**: QAIC-specific feature validation + +### 3. **QAIC MultiModal Tests** (120 min) +- Vision-language model testing +- Image-to-text inference validation + +### 4. **QAIC Diffusion Models Tests** (120 min) +- Diffusion model compilation and optimization +- Generative model testing + +### 5. **CLI Inference Tests** (120 min) +- Command-line interface testing +- End-to-end inference validation + +### 6. 
**Finetune CLI Tests** (20 min) +- Fine-tuning capability validation +- QAIC PyTorch integration testing + +## Pipeline Features + +### Health Checks & Reporting +✓ **JUnit Test Results**: Automatic collection and reporting +✓ **Build History**: Last 30 builds retained +✓ **Timestamped Logs**: All logs include timestamps +✓ **Concurrent Build Prevention**: Avoids duplicate test execution +✓ **Selective Cleanup**: Smart workspace cleanup + +### Status Indicators +- ✓ Success: All tests passed +- ✗ Failure: One or more stages failed +- ⚠ Unstable: Tests ran but some were skipped + +## Monitoring + +### Jenkins Dashboard +1. Navigate to the pipeline job in Jenkins +2. Check "Build History" for recent executions +3. Click on a build to view detailed logs +4. Check "Console Output" for real-time progress + +### Test Results +- Test reports are automatically parsed from `tests/tests_log.xml` +- Results appear in the "Test Result" section of each build +- Failed tests are highlighted for quick debugging + +## Troubleshooting + +### Pipeline Fails to Trigger +- Verify cron syntax is correct +- Check Jenkins system time matches UTC +- Ensure "Poll SCM" is not conflicting with cron trigger + +### Test Timeouts +- Increase timeout in specific stage: `timeout(time: XX, unit: 'MINUTES')` +- Or globally in options: `timeout(time: 2, unit: 'DAYS')` + +### Docker Container Issues +- Check Docker daemon is running: `sudo service docker status` +- Verify node label matches: `qeff_node` +- Check disk space: build artifacts can be large + +### Permission Errors +- Verify Ubuntu user exists on agent +- Check Docker socket permissions +- Ensure Jenkins user can run sudo commands + +## Best Practices + +1. **Monitor Regularly**: Check pipeline status at least weekly +2. **Archive Logs**: Keep build logs for audit trail +3. **Update Dependencies**: Review and update pip packages quarterly +4. **Scale Tests**: Add more parallel workers for faster execution +5. 
**Backup Tests**: Keep a copy of critical test files + +## Future Enhancements + +Consider implementing: +- [ ] Email notifications on build failure +- [ ] Performance metrics collection +- [ ] Test report artifacts archiving +- [ ] Slack integration for status updates +- [ ] Coverage report generation +- [ ] Performance regression detection From 2ab0f2faa23a481aeef18c3771965fc90c9ac5da Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 5 Mar 2026 10:02:33 +0000 Subject: [PATCH 02/11] adding Jenkinsfile for Nightly build Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/PIPELINE_GUIDE.md | 179 ------------------------------ 1 file changed, 179 deletions(-) delete mode 100644 scripts/Nightly/PIPELINE_GUIDE.md diff --git a/scripts/Nightly/PIPELINE_GUIDE.md b/scripts/Nightly/PIPELINE_GUIDE.md deleted file mode 100644 index 27f83340f..000000000 --- a/scripts/Nightly/PIPELINE_GUIDE.md +++ /dev/null @@ -1,179 +0,0 @@ -# QEfficient Nightly Pipeline - Optimization & Configuration Guide - -## Overview -The Jenkinsfile has been optimized for professional standards with improved structure, error handling, and scheduled execution capabilities. - -## Key Optimizations - -### 1. **Scheduled Triggers** ✓ -The pipeline now automatically triggers at **2:00 AM UTC** daily: -```groovy -triggers { - cron('0 2 * * *') // Daily at 2:00 AM UTC -} -``` - -### 2. **Code Quality Improvements** -- **Environment Variables**: Centralized variable definitions to reduce duplication -- **Consistent Formatting**: Improved readability with proper indentation and structure -- **Descriptions**: Each stage has descriptive text for job dashboard -- **Error Handling**: Better error handling with graceful fallback mechanisms -- **Logging**: Enhanced logging with timestamps and status indicators - -### 3. 
**Structural Improvements** -- **Single Preparation Stage**: Consolidated container setup and dependency installation -- **Organized Test Stages**: Logically grouped test categories -- **Better Cleanup**: Selective workspace cleanup using patterns -- **Professional Post-Build**: Status-specific messages (success, failure, unstable) - -### 4. **Performance Optimizations** -- **Concurrent Test Execution**: Added parallel test argument (`-n 4`) -- **Selective Cleanup**: Only deletes test artifacts, preserves `.git` -- **Build Discard Policy**: Keeps last 30 builds or 15 days of history -- **Single Day Timeout**: Global timeout of 1 day to prevent hanging builds - -## Configuration Guide - -### Changing the Schedule - -Edit the `triggers` section to set a custom schedule. The cron format is: -``` -(minute) (hour) (day_of_month) (month) (day_of_week) -``` - -**Common Examples:** -```groovy -// Every day at 3:00 AM UTC -cron('0 3 * * *') - -// Every Monday-Friday at 2:00 AM UTC -cron('0 2 * * 1-5') - -// Every 6 hours -cron('0 */6 * * *') - -// Twice daily: 2:00 AM and 2:00 PM UTC -cron('0 2,14 * * *') - -// Every Sunday at midnight UTC -cron('0 0 * * 0') -``` - -### Additional Trigger Options - -To also trigger on repository push, uncomment in the triggers section: -```groovy -triggers { - cron('0 2 * * *') - githubPush() // Uncomment this line -} -``` - -### Environment Variables Configuration - -Key variables can be customized in the `environment` section: -```groovy -environment { - DOCKER_IMAGE = "${DOCKER_LATEST}:master_latest" // Docker image version - VENV_PATH = 'preflight_qeff' // Virtual environment path - TOKENIZERS_PARALLELISM = 'false' // Tokenizer parallelism - HF_HUB_CACHE = '/huggingface_hub' // HuggingFace cache location - PYTEST_ARGS = '--durations=10 -n 4' // Pytest arguments - DOCKER_USER = 'ubuntu' // Docker user for cleanup -} -``` - -## Pipeline Stages - -### 1. 
**Prepare Environment** (40 min) -- Launches Docker container with GPU/QAIC support -- Installs Python 3.10 virtual environment -- Installs core QEfficient library and dependencies -- Installs audio, vision, and ML packages - -### 2. **Unit & Integration Tests** (Parallel: 40-120 min) -- **Model Export & ONNX Tests**: Core model conversion testing -- **QAIC LLM Tests**: Language model optimization tests -- **QAIC Feature Tests**: QAIC-specific feature validation - -### 3. **QAIC MultiModal Tests** (120 min) -- Vision-language model testing -- Image-to-text inference validation - -### 4. **QAIC Diffusion Models Tests** (120 min) -- Diffusion model compilation and optimization -- Generative model testing - -### 5. **CLI Inference Tests** (120 min) -- Command-line interface testing -- End-to-end inference validation - -### 6. **Finetune CLI Tests** (20 min) -- Fine-tuning capability validation -- QAIC PyTorch integration testing - -## Pipeline Features - -### Health Checks & Reporting -✓ **JUnit Test Results**: Automatic collection and reporting -✓ **Build History**: Last 30 builds retained -✓ **Timestamped Logs**: All logs include timestamps -✓ **Concurrent Build Prevention**: Avoids duplicate test execution -✓ **Selective Cleanup**: Smart workspace cleanup - -### Status Indicators -- ✓ Success: All tests passed -- ✗ Failure: One or more stages failed -- ⚠ Unstable: Tests ran but some were skipped - -## Monitoring - -### Jenkins Dashboard -1. Navigate to the pipeline job in Jenkins -2. Check "Build History" for recent executions -3. Click on a build to view detailed logs -4. 
Check "Console Output" for real-time progress - -### Test Results -- Test reports are automatically parsed from `tests/tests_log.xml` -- Results appear in the "Test Result" section of each build -- Failed tests are highlighted for quick debugging - -## Troubleshooting - -### Pipeline Fails to Trigger -- Verify cron syntax is correct -- Check Jenkins system time matches UTC -- Ensure "Poll SCM" is not conflicting with cron trigger - -### Test Timeouts -- Increase timeout in specific stage: `timeout(time: XX, unit: 'MINUTES')` -- Or globally in options: `timeout(time: 2, unit: 'DAYS')` - -### Docker Container Issues -- Check Docker daemon is running: `sudo service docker status` -- Verify node label matches: `qeff_node` -- Check disk space: build artifacts can be large - -### Permission Errors -- Verify Ubuntu user exists on agent -- Check Docker socket permissions -- Ensure Jenkins user can run sudo commands - -## Best Practices - -1. **Monitor Regularly**: Check pipeline status at least weekly -2. **Archive Logs**: Keep build logs for audit trail -3. **Update Dependencies**: Review and update pip packages quarterly -4. **Scale Tests**: Add more parallel workers for faster execution -5. 
**Backup Tests**: Keep a copy of critical test files - -## Future Enhancements - -Consider implementing: -- [ ] Email notifications on build failure -- [ ] Performance metrics collection -- [ ] Test report artifacts archiving -- [ ] Slack integration for status updates -- [ ] Coverage report generation -- [ ] Performance regression detection From 8b6ad81cbe97c22fdb8cba215e43d5e04ccea201 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 5 Mar 2026 10:32:02 +0000 Subject: [PATCH 03/11] adding Jenkinsfile for Nightly build I Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile | 121 ++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 66 deletions(-) diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index e69bc6ff5..d39b4d45b 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -5,8 +5,6 @@ pipeline { } } - description 'QEfficient Nightly Test Pipeline - Runs comprehensive test suite on gb-blr-62 QAIC environments' - options { disableConcurrentBuilds() timeout(time: 1, unit: 'DAYS') @@ -32,7 +30,6 @@ pipeline { stages { stage('Prepare Environment') { - description 'Setup Docker container and install dependencies' steps { script { echo "Starting QEfficient Nightly Test Suite" @@ -74,93 +71,88 @@ pipeline { } } stage('Unit & Integration Tests') { - description 'Run core model export, ONNX, and QAIC LLM tests' parallel { stage('Model Export & ONNX Tests') { - description 'Test model export and ONNX conversion' steps { timeout(time: 40, unit: 'MINUTES') { sh ''' - sudo docker exec ${BUILD_TAG} bash -c " - set -e - cd /efficient-transformers - . 
${VENV_PATH}/bin/activate - - mkdir -p $PWD/Non_cli_qaic - export QEFF_HOME=$PWD/Non_cli_qaic - - pytest tests \ - -m '(not cli) and (not on_qaic) and (not finetune)' \ - --ignore tests/vllm \ - --ignore tests/transformers/models/image_text_to_text \ - ${PYTEST_ARGS} -n 4\ - --junitxml=tests/tests_log1.xml - - junitparser merge tests/tests_log1.xml tests/tests_log.xml - deactivate - " - ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . ${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_cli_qaic + export QEFF_HOME=$PWD/Non_cli_qaic + + pytest tests \ + -m '(not cli) and (not on_qaic) and (not finetune)' \ + --ignore tests/vllm \ + --ignore tests/transformers/models/image_text_to_text \ + ${PYTEST_ARGS} -n 4\ + --junitxml=tests/tests_log1.xml + + junitparser merge tests/tests_log1.xml tests/tests_log.xml + deactivate + " + ''' } } } stage('QAIC LLM Tests') { - description 'Test QAIC-optimized large language models' steps { timeout(time: 120, unit: 'MINUTES') { sh ''' - sudo docker exec ${BUILD_TAG} bash -c " - set -e - cd /efficient-transformers - . ${VENV_PATH}/bin/activate - - mkdir -p $PWD/Non_qaic_llm - export QEFF_HOME=$PWD/Non_qaic_llm - - pytest tests \ - -m '(not cli) and (on_qaic) and (llm_model) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ - --ignore tests/vllm \ - ${PYTEST_ARGS} \ - --junitxml=tests/tests_log2.xml - - junitparser merge tests/tests_log2.xml tests/tests_log.xml - deactivate - " - ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . 
${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_qaic_llm + export QEFF_HOME=$PWD/Non_qaic_llm + + pytest tests \ + -m '(not cli) and (on_qaic) and (llm_model) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log2.xml + + junitparser merge tests/tests_log2.xml tests/tests_log.xml + deactivate + " + ''' } } } stage('QAIC Feature Tests') { - description 'Test QAIC-specific features and optimizations' steps { timeout(time: 80, unit: 'MINUTES') { sh ''' - sudo docker exec ${BUILD_TAG} bash -c " - set -e - cd /efficient-transformers - . ${VENV_PATH}/bin/activate - - mkdir -p $PWD/Non_qaic_feature - export QEFF_HOME=$PWD/Non_qaic_feature - - pytest tests \ - -m '(not cli) and (on_qaic) and (feature) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ - --ignore tests/vllm \ - ${PYTEST_ARGS} \ - --junitxml=tests/tests_log2_feature.xml - - junitparser merge tests/tests_log2_feature.xml tests/tests_log.xml - deactivate - " - ''' + sudo docker exec ${BUILD_TAG} bash -c " + set -e + cd /efficient-transformers + . 
${VENV_PATH}/bin/activate + + mkdir -p $PWD/Non_qaic_feature + export QEFF_HOME=$PWD/Non_qaic_feature + + pytest tests \ + -m '(not cli) and (on_qaic) and (feature) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + --ignore tests/vllm \ + ${PYTEST_ARGS} \ + --junitxml=tests/tests_log2_feature.xml + + junitparser merge tests/tests_log2_feature.xml tests/tests_log.xml + deactivate + " + ''' } } } } } stage('QAIC MultiModal Tests') { - description 'Test QAIC multimodal models (VLM, image-text)' steps { timeout(time: 120, unit: 'MINUTES') { sh ''' @@ -187,7 +179,6 @@ pipeline { } stage('QAIC Diffusion Models Tests') { - description 'Test QAIC diffusion and generative models' steps { timeout(time: 120, unit: 'MINUTES') { sh ''' @@ -215,7 +206,6 @@ pipeline { } stage('CLI Inference Tests') { - description 'Test command-line interface for inference' steps { timeout(time: 120, unit: 'MINUTES') { sh ''' @@ -241,7 +231,6 @@ pipeline { } } stage('Finetune CLI Tests') { - description 'Test fine-tuning capabilities via CLI' steps { timeout(time: 20, unit: 'MINUTES') { sh ''' From 750537e3847cfd37a8af3694c550906de1daaf5f Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 5 Mar 2026 16:59:22 +0000 Subject: [PATCH 04/11] Fixing issue Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index d39b4d45b..b01fffc61 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -36,7 +36,7 @@ pipeline { echo "Build Tag: ${BUILD_TAG}" } sh ''' - source ~/.bashrc + . 
~/.bashrc # Launch privileged Docker container with necessary mounts sudo docker run --privileged -dit \ --name ${BUILD_TAG} \ From d488ae9cbc93b82ab837d503766f7182785adf28 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 5 Mar 2026 17:09:50 +0000 Subject: [PATCH 05/11] Fixing issue Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index b01fffc61..dc4ac7efe 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -43,7 +43,7 @@ pipeline { -e HF_TOKEN=${HF_TOKEN} \ -v ./:/efficient-transformers \ -v ${HF_PATH}:${DOCKER_HF_PATH} \ - ${DOCKER_IMAGE} + ${DOCKER_LATEST}:master_latest # Install QEfficient and dependencies sudo docker exec ${BUILD_TAG} bash -c " From bad14c23730fb31335c57fb3b55a8ac92089685a Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Thu, 5 Mar 2026 17:31:29 +0000 Subject: [PATCH 06/11] triggerring 9PM UTC Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index dc4ac7efe..49618d16a 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -12,12 +12,12 @@ pipeline { buildDiscarder(logRotator(numToKeepStr: '5', daysToKeepStr: '30')) } - // triggers { - // // Runs daily at 2:00 AM UTC - // cron('0 2 * * *') - // // Uncomment to also run on repository push - // // githubPush() - // } + triggers { + // Runs daily at 9:00 PM UTC + cron('0 21 * * *') + // Uncomment to also run on repository push + // githubPush() + } environment { DOCKER_IMAGE = "${DOCKER_LATEST}:master_latest" From 213949c9820a2bee58965d07367f994880887c0a Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Fri, 6 Mar 2026 10:24:46 +0000 Subject: [PATCH 07/11] triggerring 9PM IST full models Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile 
| 9 ++-- .../test_continuous_batching.py | 6 +-- .../test_image_text_to_text_models.py | 6 +-- .../models/test_causal_lm_models.py | 17 ++++---- tests/transformers/models/test_disagg_mode.py | 43 +++++++++---------- .../models/test_embedding_models.py | 3 +- .../models/test_speech_seq2seq_models.py | 2 +- 7 files changed, 42 insertions(+), 44 deletions(-) diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index 49618d16a..aacff3030 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -12,11 +12,10 @@ pipeline { buildDiscarder(logRotator(numToKeepStr: '5', daysToKeepStr: '30')) } - triggers { - // Runs daily at 9:00 PM UTC - cron('0 21 * * *') - // Uncomment to also run on repository push - // githubPush() + triggers { + cron('''TZ=Asia/Kolkata + 0 21 * * *''') + } environment { diff --git a/tests/transformers/models/image_text_to_text/test_continuous_batching.py b/tests/transformers/models/image_text_to_text/test_continuous_batching.py index c1a31eaa3..d472b1ce4 100644 --- a/tests/transformers/models/image_text_to_text/test_continuous_batching.py +++ b/tests/transformers/models/image_text_to_text/test_continuous_batching.py @@ -90,7 +90,7 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100_CB( ctx_len: int, max_gen_len: int = 20, batch_size: int = 1, - n_layer: int = 1, + n_layer: int = -1, kv_offload: bool = False, num_devices: int = 1, enable_qnn: Optional[bool] = False, @@ -277,12 +277,12 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100_CB( compile_kwargs = { "num_cores": 16, - "num_devices": num_devices, "prefill_seq_len": prompt_len, "ctx_len": ctx_len, "batch_size": batch_size, "full_batch_size": full_batch_size, "mxfp6_matmul": False, + "num_devices": 4, } if is_intern_model: @@ -375,7 +375,7 @@ def test_image_text_to_text_pytorch_vs_ai100_continuous_batching(model_name, kv_ img_size=img_size, image_urls=model_config_dict[model_name]["img_url_list"], 
queries=model_config_dict[model_name]["text_prompt_list"], - n_layer=model_config_dict[model_name]["num_layers"], + # n_layer=model_config_dict[model_name]["num_layers"], batch_size=model_config_dict[model_name]["batch_size"], full_batch_size=model_config_dict[model_name]["full_batch_size"], kv_offload=kv_offload, diff --git a/tests/transformers/models/image_text_to_text/test_image_text_to_text_models.py b/tests/transformers/models/image_text_to_text/test_image_text_to_text_models.py index a2c72ba7a..b647a1a7f 100644 --- a/tests/transformers/models/image_text_to_text/test_image_text_to_text_models.py +++ b/tests/transformers/models/image_text_to_text/test_image_text_to_text_models.py @@ -115,7 +115,7 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100( ctx_len: int, max_gen_len: int = 20, batch_size: int = 1, - n_layer: int = 1, + n_layer: int = -1, kv_offload: bool = False, num_devices: int = 1, enable_qnn: Optional[bool] = False, @@ -302,12 +302,12 @@ def check_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100( # assert (pytorch_hf_tokens == ort_tokens).all(), "Tokens don't match for pytorch HF output and ORT output" compile_kwargs = { - "num_devices": num_devices, "prefill_seq_len": prompt_len, "ctx_len": ctx_len, "mxfp6": False, "enable_qnn": enable_qnn, "qnn_config": qnn_config, + "num_devices": 4, } if is_intern_model: @@ -367,7 +367,7 @@ def test_image_text_to_text_pytorch_vs_kv_vs_ort_vs_ai100(model_name, kv_offload img_size=img_size, img_url=model_config_dict[model_name]["img_url"], query=model_config_dict[model_name]["text_prompt"], - n_layer=model_config_dict[model_name]["num_layers"], + # n_layer=model_config_dict[model_name]["num_layers"], batch_size=model_config_dict[model_name]["batch_size"], kv_offload=kv_offload, ) diff --git a/tests/transformers/models/test_causal_lm_models.py b/tests/transformers/models/test_causal_lm_models.py index a87ac8efc..a2f6c63e3 100644 --- a/tests/transformers/models/test_causal_lm_models.py +++ 
b/tests/transformers/models/test_causal_lm_models.py @@ -72,11 +72,12 @@ def get_custom_n_layers(model_name): :return n_layer """ - if model_name in {"microsoft/Phi-3-mini-4k-instruct", "neuralmagic/Qwen2-0.5B-Instruct-FP8", "openai/gpt-oss-20b"}: - return 2 - elif model_name in ModelConfig.SWIFTKV_MODELS: - return None - return 1 + # if model_name in {"microsoft/Phi-3-mini-4k-instruct", "neuralmagic/Qwen2-0.5B-Instruct-FP8", "openai/gpt-oss-20b"}: + # return 2 + # elif model_name in ModelConfig.SWIFTKV_MODELS: + # return None + # return 1 + return None def load_causal_lm_model(model_name, n_layer=1, config=None): @@ -100,7 +101,7 @@ def load_causal_lm_model(model_name, n_layer=1, config=None): model_hf = AutoModelForCausalLM.from_pretrained( model_path, use_cache=True, - num_hidden_layers=n_layer, + # num_hidden_layers=n_layer, attn_implementation="eager", low_cpu_mem_usage=False, trust_remote_code=model_name in ModelConfig.EXTERNAL_MODELS, @@ -193,7 +194,7 @@ def check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( qpc_path = qeff_model.compile( prefill_seq_len=prompt_len, ctx_len=ctx_len, - num_cores=14, + num_devices=4, mxfp6=False, aic_enable_depth_first=False, num_speculative_tokens=num_speculative_tokens, @@ -268,7 +269,7 @@ def check_causal_lm_pytorch_vs_kv_vs_ort_vs_ai100( qpc_path = qeff_model.compile( prefill_seq_len=prompt_len, ctx_len=ctx_len, - num_cores=14, + num_devices=4, mxfp6=False, aic_enable_depth_first=False, batch_size=batch_size, diff --git a/tests/transformers/models/test_disagg_mode.py b/tests/transformers/models/test_disagg_mode.py index 5bd1e52c2..360fc3aaa 100644 --- a/tests/transformers/models/test_disagg_mode.py +++ b/tests/transformers/models/test_disagg_mode.py @@ -45,7 +45,7 @@ def test_disagg_mode_prefill(model_id, prompt): padded_len = num_chunks * PREFILL_SEQ_LEN # Convert to a multiple of prompt_len replace_transformers_quantizers() - model = AutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + model = 
AutoModelForCausalLM.from_pretrained(model_id) config = model.config inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) @@ -57,7 +57,7 @@ def test_disagg_mode_prefill(model_id, prompt): undo_transformers_quantizers() - qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id) qeff_model.prefill(True) config = qeff_model.model.config inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) @@ -82,10 +82,9 @@ def test_disagg_mode_prefill(model_id, prompt): prefill_qpc_path = qeff_model.compile( prefill_seq_len=PREFILL_SEQ_LEN, ctx_len=CTX_LEN, - num_cores=16, + num_devices=4, mxfp6_matmul=False, mxint8_kv_cache=False, - num_devices=1, mos=1, aic_enable_depth_first=True, num_speculative_tokens=None, @@ -208,7 +207,9 @@ def test_disagg_mode_prefill_only_and_decode_only(model_id, prompt): padded_len = num_chunks * PREFILL_SEQ_LEN # Convert to a multiple of prompt_len replace_transformers_quantizers() - model = AutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + model = AutoModelForCausalLM.from_pretrained( + model_id, + ) config = model.config inputs = tokenizer(prompt, return_tensors="np", padding="max_length", max_length=padded_len) inputs["position_ids"] = np.where(inputs.pop("attention_mask"), np.arange(padded_len), -1) @@ -242,7 +243,7 @@ def test_disagg_mode_prefill_only_and_decode_only(model_id, prompt): undo_transformers_quantizers() - prefill_qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + prefill_qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id) prefill_qeff_model.prefill(enable=True) config = prefill_qeff_model.model.config past_key_values = [] @@ -260,7 +261,9 @@ def test_disagg_mode_prefill_only_and_decode_only(model_id, prompt): 
# Check our pytorch implementation assert (prefill_qeff_out.logits - orig_out.logits[:, -1, :]).abs().max() < 1e-4 - decode_qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, num_hidden_layers=2) + decode_qeff_model = QEFFAutoModelForCausalLM.from_pretrained( + model_id, + ) decode_qeff_model.prefill(enable=False) qeff_out = prefill_qeff_out @@ -289,10 +292,9 @@ def test_disagg_mode_prefill_only_and_decode_only(model_id, prompt): prefill_qpc_path = prefill_qeff_model.compile( prefill_seq_len=PREFILL_SEQ_LEN, ctx_len=CTX_LEN, - num_cores=16, + num_devices=4, mxfp6_matmul=False, mxint8_kv_cache=False, - num_devices=1, mos=1, aic_enable_depth_first=True, num_speculative_tokens=None, @@ -312,10 +314,9 @@ def test_disagg_mode_prefill_only_and_decode_only(model_id, prompt): decode_qpc_path = decode_qeff_model.compile( prefill_seq_len=1, ctx_len=CTX_LEN, - num_cores=16, + num_devices=4, mxfp6_matmul=False, mxint8_kv_cache=False, - num_devices=1, mos=1, aic_enable_depth_first=True, num_speculative_tokens=None, @@ -371,18 +372,17 @@ def test_disagg_mode_prefill_only_and_decode_only(model_id, prompt): def test_disagg_mode_prefix_caching(model_id, prompt): PREFILL_SEQ_LEN = 128 CTX_LEN = 128 * 3 - config = AutoConfig.from_pretrained(model_id, num_hidden_layers=2) - prefill_qeff_model = QEFFAutoModelForCausalLM.from_pretrained( - model_id, num_hidden_layers=2, continuous_batching=True + config = AutoConfig.from_pretrained( + model_id, ) + prefill_qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, continuous_batching=True) prefill_qeff_model.prefill(enable=True, enable_chunking=True) prefill_qpc_path = prefill_qeff_model.compile( prefill_seq_len=PREFILL_SEQ_LEN, ctx_len=CTX_LEN, - num_cores=16, + num_devices=4, mxfp6_matmul=False, mxint8_kv_cache=False, - num_devices=1, mos=1, aic_enable_depth_first=True, num_speculative_tokens=None, @@ -392,17 +392,14 @@ def test_disagg_mode_prefix_caching(model_id, prompt): kv_cache_batch_size=2, ) - 
decode_qeff_model = QEFFAutoModelForCausalLM.from_pretrained( - model_id, num_hidden_layers=2, continuous_batching=True - ) + decode_qeff_model = QEFFAutoModelForCausalLM.from_pretrained(model_id, continuous_batching=True) decode_qeff_model.prefill(enable=False) decode_qpc_path = decode_qeff_model.compile( prefill_seq_len=1, ctx_len=CTX_LEN, - num_cores=16, + num_devices=4, mxfp6_matmul=False, mxint8_kv_cache=False, - num_devices=1, mos=1, aic_enable_depth_first=True, num_speculative_tokens=None, @@ -433,7 +430,9 @@ def test_disagg_mode_prefix_caching(model_id, prompt): def prefix_caching_inference(model_id, prefill_qpc_path, decode_qpc_path, prompt, decode_batch_id): PREFILL_SEQ_LEN = 128 tokenizer = AutoTokenizer.from_pretrained(model_id) - config = AutoConfig.from_pretrained(model_id, num_hidden_layers=2) + config = AutoConfig.from_pretrained( + model_id, + ) inputs = tokenizer(prompt, return_tensors="np", padding=True) padded_len = inputs["input_ids"].shape[1] num_chunks = -(padded_len // -PREFILL_SEQ_LEN) # ceil divide without float diff --git a/tests/transformers/models/test_embedding_models.py b/tests/transformers/models/test_embedding_models.py index 7eb09d911..2d36a7a99 100644 --- a/tests/transformers/models/test_embedding_models.py +++ b/tests/transformers/models/test_embedding_models.py @@ -41,7 +41,7 @@ def check_embed_pytorch_vs_ort_vs_ai100( # Original PyTorch model pt_model = AutoModel.from_pretrained( model_name, - num_hidden_layers=n_layer, + # num_hidden_layers=n_layer, attn_implementation="eager", trust_remote_code=True, ) @@ -85,7 +85,6 @@ def check_embed_pytorch_vs_ort_vs_ai100( assert mad <= 10**-5, f"MAD is too high for onnx and Pytorch: {mad}" qeff_model.compile( - num_cores=14, enable_qnn=enable_qnn, qnn_config=qnn_config, ) diff --git a/tests/transformers/models/test_speech_seq2seq_models.py b/tests/transformers/models/test_speech_seq2seq_models.py index 774802c83..5631c0d00 100644 --- 
a/tests/transformers/models/test_speech_seq2seq_models.py +++ b/tests/transformers/models/test_speech_seq2seq_models.py @@ -49,7 +49,7 @@ def load_seq2seq_model(model_config): model_hf = AutoModelForSpeechSeq2Seq.from_pretrained( model_path, use_cache=True, - num_hidden_layers=model_config["n_layer"], + # num_hidden_layers=model_config["n_layer"], attn_implementation="eager", low_cpu_mem_usage=False, ) # Run models for single layers only From d9845b55fb6129a18c8affa540f1cefebc76d2c9 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Fri, 6 Mar 2026 11:44:32 +0000 Subject: [PATCH 08/11] setting umlimited timeout Signed-off-by: Abukhoyer Shaik --- scripts/Nightly/Jenkinsfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index aacff3030..6a2da48fd 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -100,7 +100,7 @@ pipeline { stage('QAIC LLM Tests') { steps { - timeout(time: 120, unit: 'MINUTES') { + // timeout(time: 120, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " set -e @@ -120,13 +120,13 @@ pipeline { deactivate " ''' - } + // } } } stage('QAIC Feature Tests') { steps { - timeout(time: 80, unit: 'MINUTES') { + // timeout(time: 80, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " set -e @@ -146,14 +146,14 @@ pipeline { deactivate " ''' - } + // } } } } } stage('QAIC MultiModal Tests') { steps { - timeout(time: 120, unit: 'MINUTES') { + // timeout(time: 120, unit: 'MINUTES') { sh ''' sudo docker exec ${BUILD_TAG} bash -c " set -e @@ -173,7 +173,7 @@ pipeline { deactivate " ''' - } + // } } } From 51c483c38e3a3149b1debfe9df01bbeee4c2d823 Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Mon, 9 Mar 2026 10:32:50 +0000 Subject: [PATCH 09/11] fixing Signed-off-by: Abukhoyer Shaik --- tests/configs/causal_model_configs.json | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git 
a/tests/configs/causal_model_configs.json b/tests/configs/causal_model_configs.json index bf0fd642d..0d63bf60c 100644 --- a/tests/configs/causal_model_configs.json +++ b/tests/configs/causal_model_configs.json @@ -324,19 +324,7 @@ "num_key_value_heads": 1 } }, - { - "model_name": "hpcai-tech/grok-1", - "model_type": null, - "additional_params":{ - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 131072, - "num_key_value_heads": 1 - } - }, + { "model_name": "Snowflake/Llama-3.1-SwiftKV-8B-Instruct", "model_type": null, From 96273a1ba67f4ad06b42e2cc019d2581cdd6a3ab Mon Sep 17 00:00:00 2001 From: Abukhoyer Shaik Date: Mon, 16 Mar 2026 11:21:16 +0000 Subject: [PATCH 10/11] enabling full model tests for nightly and comapring results Signed-off-by: Abukhoyer Shaik --- QEfficient/utils/test_utils.py | 4 + scripts/Nightly/Jenkinsfile | 9 +- tests/configs/causal_model_configs.json | 320 ------------------ tests/transformers/__init__.py | 0 tests/transformers/models/__init__.py | 0 .../models/check_model_results.py | 179 ++++++++++ .../models/test_audio_embedding_models.py | 43 ++- .../models/test_causal_lm_models.py | 117 ++++++- .../models/test_embedding_models.py | 52 ++- .../models/test_seq_classification.py | 66 +++- .../models/test_speech_seq2seq_models.py | 65 +++- 11 files changed, 488 insertions(+), 367 deletions(-) create mode 100644 tests/transformers/__init__.py create mode 100644 tests/transformers/models/__init__.py create mode 100644 tests/transformers/models/check_model_results.py diff --git a/QEfficient/utils/test_utils.py b/QEfficient/utils/test_utils.py index 3cf560266..c125a317f 100644 --- a/QEfficient/utils/test_utils.py +++ b/QEfficient/utils/test_utils.py @@ -229,3 +229,7 @@ class ModelConfig: SWIFTKV_MODELS = { "Snowflake/Llama-3.1-SwiftKV-8B-Instruct", } + + FULL_MODEL_TESTS_TO_SKIP = { + "hpcai-tech/grok-1", + } diff --git 
a/scripts/Nightly/Jenkinsfile b/scripts/Nightly/Jenkinsfile index 6a2da48fd..b9c9687f3 100644 --- a/scripts/Nightly/Jenkinsfile +++ b/scripts/Nightly/Jenkinsfile @@ -12,10 +12,9 @@ pipeline { buildDiscarder(logRotator(numToKeepStr: '5', daysToKeepStr: '30')) } - triggers { + triggers { cron('''TZ=Asia/Kolkata - 0 21 * * *''') - + 0 21 * * 6''') } environment { @@ -111,7 +110,7 @@ pipeline { export QEFF_HOME=$PWD/Non_qaic_llm pytest tests \ - -m '(not cli) and (on_qaic) and (llm_model) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + -m '(not cli) and (on_qaic) and (llm_model) and (not custom_layers) and (not dummy_model) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ --ignore tests/vllm \ ${PYTEST_ARGS} \ --junitxml=tests/tests_log2.xml @@ -137,7 +136,7 @@ pipeline { export QEFF_HOME=$PWD/Non_qaic_feature pytest tests \ - -m '(not cli) and (on_qaic) and (feature) and (not regular) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ + -m '(not cli) and (on_qaic) and (feature) and (not multimodal) and (not qnn) and (not finetune) and (not diffusion_models)' \ --ignore tests/vllm \ ${PYTEST_ARGS} \ --junitxml=tests/tests_log2_feature.xml diff --git a/tests/configs/causal_model_configs.json b/tests/configs/causal_model_configs.json index 0d63bf60c..b17e57336 100644 --- a/tests/configs/causal_model_configs.json +++ b/tests/configs/causal_model_configs.json @@ -25,326 +25,6 @@ "vocab_size": 50257, "num_key_value_heads": 1 } - }, - { - "model_name": "allenai/OLMo-2-0425-1B", - "model_type": "olmo2", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 100352, - "num_key_value_heads": 1 - } - }, - { - "model_name": "Salesforce/codegen-350M-mono", - "model_type": "codegen", - "additional_params": { - "max_position_embeddings": 
128, - "num_hidden_layers": 1, - "num_attention_heads": 4, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 51200, - "num_key_value_heads": 1, - "rotary_dim": 16 - } - }, - { - "model_name": "ibm-granite/granite-3.1-1b-a400m-base", - "model_type": "granitemoe", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 49155, - "num_key_value_heads": 1 - } - }, - { - "model_name": "microsoft/Phi-3-mini-4k-instruct", - "model_type": "phi3", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 32064, - "num_key_value_heads": 1 - } - }, - { - "model_name": "tiiuae/falcon-7b", - "model_type": "falcon", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 65024, - "num_key_value_heads": 1 - } - }, - { - "model_name": "Qwen/Qwen3-30B-A3B-Instruct-2507", - "model_type": "qwen3_moe", - "additional_params": { - "hidden_size": 256, - "intermediate_size": 256, - "max_position_embeddings": 128, - "max_window_layers": 48, - "moe_intermediate_size": 768, - "num_attention_heads": 2, - "num_experts": 4, - "num_experts_per_tok": 2, - "num_hidden_layers": 1, - "num_key_value_heads": 1, - "vocab_size": 151936 - } - }, - { - "model_name": "Qwen/Qwen2-0.5B", - "model_type": "qwen2", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 151936, - "num_key_value_heads": 1 - } - }, - { - "model_name": "bigcode/starcoder2-3b", - "model_type": "starcoder2", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - 
"intermediate_size": 256, - "vocab_size": 49152, - "num_key_value_heads": 1 - } - }, - { - "model_name": "Felladrin/Minueza-32M-Base", - "model_type": "mistral", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 32002, - "num_key_value_heads": 1 - } - }, - { - "model_name": "wtang06/mpt-125m-c4", - "model_type": "mpt", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 50368 - } - }, - { - "model_name": "hakurei/gpt-j-random-tinier", - "model_type": "gptj", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 50400, - "num_key_value_heads": 1, - "rotary_dim": 16 - } - }, - { - "model_name": "mistralai/Mixtral-8x7B-Instruct-v0.1", - "model_type": "mixtral", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 32000, - "num_key_value_heads": 1 - } - }, - { - "model_name": "meta-llama/Llama-3.2-1B", - "model_type": "llama", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 128256, - "num_key_value_heads": 1, - "rope_scaling": { - "factor": 32.0, - "high_freq_factor": 4.0, - "low_freq_factor": 1.0, - "original_max_position_embeddings": 8192, - "rope_type": "llama3" - } - } - }, - { - "model_name": "unsloth/gemma-2b", - "model_type": "gemma", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 256000, - "num_key_value_heads": 
1 - } - }, - { - "model_name": "unsloth/gemma-2-2b", - "model_type": "gemma2", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 256000, - "num_key_value_heads": 1 - } - }, - { - "model_name": "TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ", - "model_type": "llama", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 32003 - } - }, - { - "model_name": "TheBloke/Llama-2-7B-GPTQ", - "model_type": "llama", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 32000 - } - }, - { - "model_name": "ibm-granite/granite-20b-code-base", - "model_type": "gpt_bigcode", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 49152, - "num_key_value_heads": 1, - "activation_function": "gelu", - "architectures": [ - "GPTBigCodeForCausalLM" - ] - } - }, - { - "model_name": "neuralmagic/Llama-3.2-3B-Instruct-FP8", - "model_type": "llama", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 128256 - } - }, - { - "model_name": "neuralmagic/Qwen2-0.5B-Instruct-FP8", - "model_type": "qwen2", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 2, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 151936 - } - }, - { - "model_name": "ibm-granite/granite-3.1-2b-instruct", - "model_type": "granite", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - 
"hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 49155, - "num_key_value_heads": 1 - } - }, - { - "model_name": "ibm-granite/granite-guardian-3.1-2b", - "model_type": "granite", - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 1, - "num_attention_heads": 2, - "hidden_size": 64, - "intermediate_size": 256, - "vocab_size": 49155, - "num_key_value_heads": 1 - } - }, - - { - "model_name": "Snowflake/Llama-3.1-SwiftKV-8B-Instruct", - "model_type": null, - "additional_params": { - "max_position_embeddings": 128, - "num_hidden_layers": 2, - "num_attention_heads": 2, - "hidden_size": 256, - "intermediate_size": 256, - "vocab_size": 128256, - "num_key_value_layers": 1, - "num_key_value_heads": 1, - "rope_scaling": { - "factor": 8.0, - "high_freq_factor": 4.0, - "low_freq_factor": 1.0, - "original_max_position_embeddings": 8192, - "rope_type": "llama3" - } - } } ], diff --git a/tests/transformers/__init__.py b/tests/transformers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/transformers/models/__init__.py b/tests/transformers/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/transformers/models/check_model_results.py b/tests/transformers/models/check_model_results.py new file mode 100644 index 000000000..73f198001 --- /dev/null +++ b/tests/transformers/models/check_model_results.py @@ -0,0 +1,179 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import json +import os +from datetime import datetime + +import numpy as np + + +def parse_exec_info_metrics(exec_info_str): + """ + Parse performance metrics from exec_info string. 
+ + :exec_info_str: str - The exec_info string containing performance stats + :return: dict - Dictionary containing parsed metrics + """ + import re + + metrics = { + "prefill_time_sec": None, + "decode_throughput_tokens_per_sec": None, + "total_throughput_tokens_per_sec": None, + "e2e_inference_time_sec": None, + } + + exec_info_text = str(exec_info_str) + + # Parse Average Prefill time (TTFT) + if "Average Prefill time" in exec_info_text or "TTFT" in exec_info_text: + match = re.search(r"Average Prefill time.*?is=\s*([\d.]+)\s*sec", exec_info_text) + if match: + metrics["prefill_time_sec"] = float(match.group(1)) + + # Parse Decode throughput + if "Decode" in exec_info_text: + match = re.search(r"Decode\s+is=\s*([\d.]+)\s*tokens/sec", exec_info_text) + if match: + metrics["decode_throughput_tokens_per_sec"] = float(match.group(1)) + + # Parse Total throughput + if "Total is=" in exec_info_text: + match = re.search(r"Total\s+is=\s*([\d.]+)\s*tokens/sec", exec_info_text) + if match: + metrics["total_throughput_tokens_per_sec"] = float(match.group(1)) + + # Parse Total E2E inference time + if "Total (E2E) inference time" in exec_info_text: + match = re.search(r"Total \(E2E\) inference time\s+is=\s*([\d.]+)\s*sec", exec_info_text) + if match: + metrics["e2e_inference_time_sec"] = float(match.group(1)) + + return metrics + + +def dump_and_compare_results( + model_name, + compile_params, + json_file_path, + cloud_ai_100_tokens, + exec_info=None, + pytorch_hf_tokens=None, + pytorch_kv_tokens=None, + ort_tokens=None, +): + """ + Function to dump the test results to JSON file and compare the performance and output results with previous runs if available + + :model_name: str + :pytorch_hf_tokens: list + :pytorch_kv_tokens: list + :ort_tokens: list + :cloud_ai_100_tokens: list + :exec_info: object + :compile_params: dict + :return None + """ + + current_logs_dir = os.environ.get("NIGHTLY_LOG_DIR") + if current_logs_dir is None: + current_logs_dir = 
os.path.expanduser("~/.cache/Nightly_Logs/build_tag") + os.makedirs(current_logs_dir, exist_ok=True) + # original_logs_dir = Path(current_logs_dir).parent + original_logs_dir = current_logs_dir + current_results_json_file_path = os.path.join(current_logs_dir, json_file_path) + original_results_json_file_path = os.path.join(original_logs_dir, json_file_path) + + def convert_to_serializable(obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, list): + return [convert_to_serializable(item) for item in obj] + elif isinstance(obj, dict): + return {k: convert_to_serializable(v) for k, v in obj.items()} + return obj + + exec_info_metrics = parse_exec_info_metrics(exec_info) + + test_data = { + "model_name": model_name, + "timestamp": datetime.now().isoformat(), + "compile_params": compile_params, + "pytorch_hf_tokens": convert_to_serializable(pytorch_hf_tokens) if pytorch_hf_tokens is not None else None, + "pytorch_kv_tokens": convert_to_serializable(pytorch_kv_tokens), + "ort_tokens": convert_to_serializable(ort_tokens), + "cloud_ai_100_tokens": convert_to_serializable(cloud_ai_100_tokens), + "exec_info_metrics": exec_info_metrics, + "exec_info_raw_string": str(exec_info), + } + + # Load existing results if file exists + all_results = {} + if os.path.exists(current_results_json_file_path): + with open(current_results_json_file_path, "r") as f: + all_results = json.load(f) + print(f"Loaded existing model results from {current_results_json_file_path}") + else: + with open(current_results_json_file_path, "w", encoding="utf-8") as f: + json.dump({}, f) + print(f"Created new results file at {current_results_json_file_path}") + + model_name_safe = model_name.replace("/", "_").replace("-", "_") + all_results[model_name_safe] = test_data + + with open(current_results_json_file_path, "w") as f: + json.dump(all_results, f, indent=4, 
default=str) + print(f"Successfully saved test results to {current_results_json_file_path}") + + with open(original_results_json_file_path, "r") as f: + previous_results = json.load(f) + print(f"Loaded Previous model results from {original_results_json_file_path}") + + previous_data = previous_results[model_name_safe] + + # Compare performance metrics with 5% tolerance + previous_metrics = previous_data.get("exec_info_metrics", {}) + current_metrics = exec_info_metrics + + for metric_name in [ + "prefill_time_sec", + "decode_throughput_tokens_per_sec", + "total_throughput_tokens_per_sec", + "e2e_inference_time_sec", + ]: + prev_val = previous_metrics[metric_name] + curr_val = current_metrics[metric_name] + + if prev_val is not None and curr_val is not None and prev_val != 0: + percent_diff = abs((curr_val - prev_val) / prev_val) * 100 + assert percent_diff <= 5.0, ( + f"Performance metric {metric_name} exceeds 5% tolerance: " + f"previous={prev_val}, current={curr_val}, diff={percent_diff:.2f}%" + ) + print(f"✓ {metric_name}: {percent_diff:.2f}% difference (within 5% tolerance)") + + # Compare output tokens using Mean Absolute Deviation (MAD) with 10^-2 tolerance + previous_tokens = previous_data.get("cloud_ai_100_tokens", None) + + if previous_tokens is not None and isinstance(previous_tokens, list): + if previous_tokens and isinstance(previous_tokens[0], str): + print("⊘ Output tokens: Skipping Tokens check (previous data contains strings)") + else: + prev_tokens_arr = np.array(previous_tokens, dtype=np.float32) + curr_tokens_arr = np.array(cloud_ai_100_tokens, dtype=np.float32) + + mad = np.mean(np.abs(curr_tokens_arr - prev_tokens_arr)) + tolerance = 1e-2 + + assert mad <= tolerance, f"Output tokens MAD exceeds 10^-2 tolerance: MAD={mad:.6f}, tolerance={tolerance}" + print(f"✓ Output tokens MAD: {mad:.6f} (within 10^-2 tolerance)") + return True diff --git a/tests/transformers/models/test_audio_embedding_models.py 
b/tests/transformers/models/test_audio_embedding_models.py index 998546853..669dd272c 100644 --- a/tests/transformers/models/test_audio_embedding_models.py +++ b/tests/transformers/models/test_audio_embedding_models.py @@ -22,7 +22,8 @@ from QEfficient.utils import hf_download from QEfficient.utils._utils import create_json, load_hf_processor from QEfficient.utils.constants import WAV2VEC2_MAX_SEQ_LEN, QnnConstants -from QEfficient.utils.device_utils import get_available_device_id + +from .check_model_results import dump_and_compare_results CONFIG_PATH = "tests/configs/embedding_model_configs.json" @@ -129,9 +130,10 @@ def run_ctc_ort(onnx_path, config, processor: AutoProcessor, inputs: np.ndarray, def check_ctc_pytorch_vs_kv_vs_ort_vs_ai100( model_name: str, - n_layer: int = 1, + n_layer: int = -1, enable_qnn: Optional[bool] = False, qnn_config: Optional[str] = None, + compare_results: Optional[bool] = False, ): """ Validate the PyTorch model, the PyTorch model after ONNX model and the Cloud AI 100 model @@ -162,10 +164,8 @@ def check_ctc_pytorch_vs_kv_vs_ort_vs_ai100( predicted_ids = torch.argmax(ort_tokens, dim=-1) ort_output = processor.batch_decode(predicted_ids) assert pytorch_output == ort_output, "Tokens don't match for pytorch output and ORT output." - if not get_available_device_id(): - pytest.skip("No available devices to run model on Cloud AI 100") + qeff_model.compile( - num_cores=16, batch_size=batch_size, enable_qnn=enable_qnn, qnn_config=qnn_config, @@ -173,8 +173,25 @@ def check_ctc_pytorch_vs_kv_vs_ort_vs_ai100( cloud_ai_100_output = qeff_model.generate(processor, data) assert pytorch_output == cloud_ai_100_output, "Tokens don't match for pytorch output and Cloud AI 100 output." 
assert os.path.isfile(os.path.join(os.path.dirname(qeff_model.qpc_path), "qconfig.json")) + if compare_results is False: + return + + compile_params = { + "batch_size": batch_size, + "enable_qnn": enable_qnn, + "qnn_config": qnn_config, + } + assert dump_and_compare_results( + model_name, + compile_params, + "ctc_model_results.json", + cloud_ai_100_output, + pytorch_hf_tokens=pytorch_output, + ort_tokens=ort_output, + ) +@pytest.mark.custom_layers @pytest.mark.on_qaic @pytest.mark.llm_model @pytest.mark.parametrize("model_name", test_models) @@ -187,6 +204,22 @@ def test_ctc_pytorch_vs_kv_vs_ort_vs_ai100(model_name): check_ctc_pytorch_vs_kv_vs_ort_vs_ai100(model_name=model_name, n_layer=4) +@pytest.mark.full_model +@pytest.mark.on_qaic +@pytest.mark.llm_model +@pytest.mark.parametrize("model_name", test_models) +def test_full_ctc_pytorch_vs_kv_vs_ort_vs_ai100(model_name): + """ + Test function to validate the PyTorch model, the PyTorch model the ONNX model, and the Cloud AI 100 model. + ``Mandatory`` Args: + :model_name (str): Hugging Face Model Card name, Example: ``gpt2`` + """ + check_ctc_pytorch_vs_kv_vs_ort_vs_ai100( + model_name=model_name, + compare_results=True, + ) + + @pytest.mark.on_qaic @pytest.mark.llm_model @pytest.mark.qnn diff --git a/tests/transformers/models/test_causal_lm_models.py b/tests/transformers/models/test_causal_lm_models.py index a2f6c63e3..72501c387 100644 --- a/tests/transformers/models/test_causal_lm_models.py +++ b/tests/transformers/models/test_causal_lm_models.py @@ -25,6 +25,8 @@ from QEfficient.utils.run_utils import ApiRunner from QEfficient.utils.test_utils import ModelConfig +from .check_model_results import dump_and_compare_results + CONFIG_PATH = "tests/configs/causal_model_configs.json" with open(CONFIG_PATH, "r") as f: @@ -80,7 +82,7 @@ def get_custom_n_layers(model_name): return None -def load_causal_lm_model(model_name, n_layer=1, config=None): +def load_causal_lm_model(model_name, n_layer=None, config=None): """ 
def check_full_causal_lm_and_compare_results(model_name):
    """
    Function to check the full model and compare results between PyTorch, ONNX
    Runtime and Cloud AI 100. Compare the performance and tokens with the
    previous results.

    :model_name: str

    :return None
    """
    prompt_len: int = Constants.PROMPT_LEN
    ctx_len: int = Constants.CTX_LEN
    # Full decode run: prefill-only mode and full-KV retention are both disabled.
    prefill_only = None
    retain_full_kv = None
    pytorch_hf_tokens = None
    pytorch_kv_tokens = None

    # Load the model with the default layer count (full model) and its tokenizer.
    model_hf, _ = load_causal_lm_model(model_name)
    tokenizer = load_hf_tokenizer(pretrained_model_name_or_path=model_name)
    config = model_hf.config
    batch_size = len(Constants.INPUT_STR)
    api_runner = ApiRunner(
        batch_size,
        tokenizer,
        config,
        Constants.INPUT_STR,
        Constants.PROMPT_LEN,
        Constants.CTX_LEN,
    )

    # The plain-HF reference run is skipped for SwiftKV and external models.
    if model_name not in ModelConfig.SWIFTKV_MODELS and model_name not in ModelConfig.EXTERNAL_MODELS:
        pytorch_hf_tokens = api_runner.run_hf_model_on_pytorch(model_hf)
        print(f"HF PyTorch tokens: {pytorch_hf_tokens}")

    qeff_model = QEFFAutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=model_name,
    )
    pytorch_kv_tokens = api_runner.run_kv_model_on_pytorch(qeff_model.model)
    print(f"KV PyTorch tokens: {pytorch_kv_tokens}")

    if model_name not in ModelConfig.SWIFTKV_MODELS and model_name not in ModelConfig.EXTERNAL_MODELS:
        assert (pytorch_hf_tokens == pytorch_kv_tokens).all(), (
            "Tokens don't match for HF PyTorch model output and KV PyTorch model output"
        )

    onnx_model_path = qeff_model.export()
    ort_tokens = api_runner.run_kv_model_on_ort(
        onnx_model_path,
    )
    print(f"ONNX tokens: {ort_tokens}")
    # Generation length produced by the ORT run; used to truncate the AI 100 output.
    gen_len = ort_tokens.shape[-1]

    assert (pytorch_kv_tokens == ort_tokens).all(), "Tokens don't match for ONNXRT output and PyTorch output."

    qpc_path = qeff_model.compile(
        prefill_seq_len=prompt_len,
        ctx_len=ctx_len,
        num_devices=1,
        mxfp6=False,
        aic_enable_depth_first=False,
        prefill_only=prefill_only,
        retain_full_kv=retain_full_kv,
    )
    exec_info = qeff_model.generate(tokenizer, prompts=Constants.INPUT_STR)
    print(f"exec_info: {exec_info}")
    print(f"Cloud AI 100 tokens: {exec_info.generated_ids}")
    cloud_ai_100_tokens = exec_info.generated_ids[0][
        :, :gen_len
    ]  # Because we always run for single input and single batch size
    if prefill_only:
        assert (ort_tokens[0][0] == cloud_ai_100_tokens[0][0]).all(), (
            "prefill run output tokens don't match for ONNXRT output and Cloud AI 100 output."
        )
    else:
        assert (ort_tokens == cloud_ai_100_tokens).all(), (
            "Tokens don't match for ONNXRT output and Cloud AI 100 output."
        )
    # compile() is expected to have dumped qconfig.json next to the generated QPC.
    assert os.path.isfile(os.path.join(os.path.dirname(qpc_path), "qconfig.json"))

    compile_params = {
        "prefill_seq_len": prompt_len,
        "ctx_len": ctx_len,
        "num_devices": 1,
        "mxfp6": False,
        "aic_enable_depth_first": False,
        "prefill_only": prefill_only,
        "retain_full_kv": retain_full_kv,
    }
    # Dump current tokens/perf and compare against the stored reference results.
    assert dump_and_compare_results(
        model_name,
        compile_params,
        "causal_lm_model_results.json",
        cloud_ai_100_tokens,
        exec_info,
        pytorch_hf_tokens,
        pytorch_kv_tokens,
        ort_tokens,
    )
b/tests/transformers/models/test_embedding_models.py index 2d36a7a99..b4aacc757 100644 --- a/tests/transformers/models/test_embedding_models.py +++ b/tests/transformers/models/test_embedding_models.py @@ -19,6 +19,8 @@ from QEfficient.utils._utils import create_json from QEfficient.utils.constants import Constants, QnnConstants +from .check_model_results import dump_and_compare_results + CONFIG_PATH = "tests/configs/embedding_model_configs.json" with open(CONFIG_PATH, "r") as f: @@ -29,22 +31,31 @@ def check_embed_pytorch_vs_ort_vs_ai100( model_name: str, seq_len: int = Constants.CTX_LEN, - n_layer: int = 1, + n_layer: int = -1, enable_qnn: Optional[bool] = False, qnn_config: Optional[str] = None, pooling: Optional[str] = None, + compare_results: Optional[bool] = False, ): # Prepare input tokenizer = AutoTokenizer.from_pretrained(model_name) inputs = tokenizer("My name is", return_tensors="pt") # Original PyTorch model - pt_model = AutoModel.from_pretrained( - model_name, - # num_hidden_layers=n_layer, - attn_implementation="eager", - trust_remote_code=True, - ) + pt_model = None + if n_layer == -1: + pt_model = AutoModel.from_pretrained( + model_name, + attn_implementation="eager", + trust_remote_code=True, + ) + else: + pt_model = AutoModel.from_pretrained( + model_name, + num_hidden_layers=n_layer, + attn_implementation="eager", + trust_remote_code=True, + ) # Original PyTorch model output pt_outputs = pt_model(**inputs) @@ -99,6 +110,20 @@ def check_embed_pytorch_vs_ort_vs_ai100( assert mad <= 10**-2, f"MAD is too high for onnx and Pytorch: {mad}" assert os.path.isfile(os.path.join(os.path.dirname(qeff_model.qpc_path), "qconfig.json")) + if compare_results is False: + return + + compile_params = {"enable_qnn": enable_qnn, "qnn_config": qnn_config, "pooling": pooling, "seq_len": seq_len} + assert dump_and_compare_results( + model_name, + compile_params, + "embedding_model_results.json", + qeff_ai100_embeddings, + pytorch_hf_tokens=pt_embeddings, + 
@pytest.mark.full_model
@pytest.mark.on_qaic
@pytest.mark.llm_model
@pytest.mark.parametrize("model", embed_test_models)
def test_full_embed_model_pytorch_vs_onnx_vs_ai100_pooling(model):
    """
    Test function to validate the full embedding model output with pooling at a
    fixed ``seq_len=32`` across the PyTorch, ONNX and AI 100 runtimes, and
    compare against previously dumped reference results
    (``compare_results=True``).
    """
    check_embed_pytorch_vs_ort_vs_ai100(
        model_name=model["model_name"], seq_len=32, pooling=model["pooling"], compare_results=True
    )
@pytest.mark.full_model
@pytest.mark.on_qaic
@pytest.mark.parametrize("model_name", seq_classification_test_models)
def test_full_seq_classification_pytorch_vs_ai100(model_name):
    """
    Test function to validate the full sequence-classification model on Cloud
    AI 100 against the PyTorch model at a fixed ``seq_len=32``, and compare
    against previously dumped reference results (``compare_results=True``).
    """
    check_seq_classification_pytorch_vs_ai100(
        model_name=model_name,
        seq_len=32,
        compare_results=True,
    )
attn_implementation="eager", + low_cpu_mem_usage=False, + ) params = sum(p.numel() for p in model_hf.parameters()) model_hf.eval() return model_hf, params @@ -290,9 +300,10 @@ def run_seq2seq_ort( def check_seq2seq_pytorch_vs_kv_vs_ort_vs_ai100( model_name: str, ctx_len: int = Constants.CTX_LEN, - n_layer: int = 1, + n_layer: int = -1, enable_qnn: Optional[bool] = False, qnn_config: Optional[str] = None, + compare_results: Optional[bool] = False, ): """ Validate the PyTorch model, the PyTorch model after KV changes, ONNX model and the Cloud AI 100 model @@ -307,6 +318,7 @@ def check_seq2seq_pytorch_vs_kv_vs_ort_vs_ai100( model_hf, _ = load_seq2seq_model(model_config) + print(model_hf) processor = load_hf_processor(pretrained_model_name_or_path=model_name) batch_size = 1 @@ -314,26 +326,19 @@ def check_seq2seq_pytorch_vs_kv_vs_ort_vs_ai100( data = ds[0]["audio"]["array"] data = data.reshape(-1) sample_rate = ds[0]["audio"]["sampling_rate"] - pytorch_hf_tokens = run_seq2seq_pytorch_hf(model_hf, processor, data, sample_rate, ctx_len) qeff_model = QEFFAutoModelForSpeechSeq2Seq(model_hf, pretrained_model_name_or_path=model_name) pytorch_kv_tokens = run_seq2seq_pytorch_with_kv(qeff_model, processor, data, sample_rate, ctx_len) - assert (pytorch_hf_tokens == pytorch_kv_tokens).all(), ( "Tokens don't match for HF PyTorch model output and KV PyTorch model output" ) qeff_model.export() - ort_tokens = run_seq2seq_ort(qeff_model.onnx_path, qeff_model.model.config, processor, data, sample_rate, ctx_len) - assert (pytorch_kv_tokens == ort_tokens).all(), "Tokens don't match for pytorch output and ort output" - if not get_available_device_id(): - pytest.skip("No available devices to run model on Cloud AI 100") - qeff_model.compile( ctx_len=ctx_len, num_cores=16, @@ -341,7 +346,6 @@ def check_seq2seq_pytorch_vs_kv_vs_ort_vs_ai100( enable_qnn=enable_qnn, qnn_config=qnn_config, ) - exec_info = qeff_model.generate( inputs=processor(data, sampling_rate=sample_rate, 
@pytest.mark.full_model
@pytest.mark.on_qaic
@pytest.mark.llm_model
@pytest.mark.parametrize("model_name", test_models)
def test_full_seq2seq_pytorch_vs_kv_vs_ort_vs_ai100(model_name):
    """
    Test function to validate the full speech seq2seq model across the PyTorch,
    KV PyTorch, ONNX and Cloud AI 100 runtimes, comparing against previously
    dumped reference results (``compare_results=True``).
    """
    check_seq2seq_pytorch_vs_kv_vs_ort_vs_ai100(model_name=model_name, compare_results=True)
+# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- diff --git a/tests/transformers/models/__init__.py b/tests/transformers/models/__init__.py index e69de29bb..d647b73a6 100644 --- a/tests/transformers/models/__init__.py +++ b/tests/transformers/models/__init__.py @@ -0,0 +1,6 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# -----------------------------------------------------------------------------