diff --git a/.github/actions/aws_s3_helper/action.yml b/.github/actions/aws_s3_helper/action.yml new file mode 100644 index 000000000..c0a3be7d7 --- /dev/null +++ b/.github/actions/aws_s3_helper/action.yml @@ -0,0 +1,53 @@ +name: Build Workspace +description: | + Builds kernel and video-driver using a Docker image. + +inputs: + docker_image: + description: Docker image to use + required: true + workspace_path: + description: Path to workspace directory + required: true + +runs: + using: "composite" + steps: + - name: Build kernel + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + WORKSPACE_PATH: ${{ inputs.workspace_path }} + run: | + set -euo pipefail + + docker run --rm \ + -v "$WORKSPACE_PATH:$WORKSPACE_PATH" \ + -w "$WORKSPACE_PATH/kernel" \ + --user "$(id -u):$(id -g)" \ + "$DOCKER_IMAGE" \ + bash -c ' + make O=../kobj ARCH=arm64 defconfig && + make O=../kobj -j$(nproc) && + make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1 + ' + + - name: Build video-driver + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + WORKSPACE_PATH: ${{ inputs.workspace_path }} + run: | + set -euo pipefail + + docker run --rm \ + -v "$WORKSPACE_PATH:$WORKSPACE_PATH" \ + -w "$WORKSPACE_PATH/video-driver" \ + --user "$(id -u):$(id -g)" \ + -e WORKSPACE_PATH="$WORKSPACE_PATH" \ + "$DOCKER_IMAGE" \ + bash -c ' + make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \ + -C "$WORKSPACE_PATH/kobj" \ + M="$(pwd)" VIDEO_KERNEL_ROOT="$(pwd)" modules + ' \ No newline at end of file diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml new file mode 100644 index 000000000..0b56fc84a --- /dev/null +++ b/.github/actions/build/action.yml @@ -0,0 +1,42 @@ +name: Build Workspace +description: | + Builds kernel and video-driver using a Docker image. 
+ +inputs: + docker_image: + description: Docker image to use + required: true + workspace_path: + description: Path to workspace directory + required: true + +runs: + using: "composite" + steps: + - name: Build kernel + shell: bash + run: | + docker run --rm \ + -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \ + -w "${{ inputs.workspace_path }}/kernel" \ + --user $(id -u):$(id -g) \ + ${{ inputs.docker_image }} \ + bash -c " + make O=../kobj ARCH=arm64 defconfig && + make O=../kobj -j\$(nproc) && + make O=../kobj -j\$(nproc) dir-pkg INSTALL_MOD_STRIP=1 + " + + - name: Build video-driver + shell: bash + run: | + docker run --rm \ + -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \ + -w "${{ inputs.workspace_path }}/video-driver" \ + --user $(id -u):$(id -g) \ + ${{ inputs.docker_image }} \ + bash -c " + make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \ + -C ${{ inputs.workspace_path }}/kobj \ + M=\$(pwd) VIDEO_KERNEL_ROOT=\$(pwd) modules + " \ No newline at end of file diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml new file mode 100644 index 000000000..6803abb7a --- /dev/null +++ b/.github/actions/lava_job_render/action.yml @@ -0,0 +1,229 @@ +name: LAVA Job Render +inputs: + docker_image: + description: Docker image + required: true + default: kmake-image:ver.1.0 + +runs: + using: "composite" + steps: + - name: Process presigned_urls.json + id: process_urls + uses: actions/github-script@v7 + env: + MACHINE: ${{ env.MACHINE }} + with: + script: | + const fs = require('fs'); + const p = require('path'); + + const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json'); + if (!fs.existsSync(filePath)) { + core.setFailed(`File not found: ${filePath}`); + return; + } + + // Read JSON mapping of uploaded file paths -> presigned URLs + const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + + function findUrlByFilename(filename) { + for 
(const [path, url] of Object.entries(data)) { + if (path.endsWith(filename)) return url; + } + return null; + } + + const modulesTarUrl = findUrlByFilename('modules.tar.xz'); + const imageUrl = findUrlByFilename('Image'); + const mergedRamdiskUrl = findUrlByFilename('video-merged.cpio.gz'); + const vmlinuxUrl = findUrlByFilename('vmlinux'); + + // DTB is expected to be ".dtb" + const dtbFilename = `${process.env.MACHINE}.dtb`; + const dtbUrl = findUrlByFilename(dtbFilename); + + core.setOutput('modules_url', modulesTarUrl || ''); + core.setOutput('image_url', imageUrl || ''); + core.setOutput('vmlinux_url', vmlinuxUrl || ''); + core.setOutput('dtb_url', dtbUrl || ''); + core.setOutput('merged_ramdisk_url', mergedRamdiskUrl || ''); + + console.log(`Modules URL: ${modulesTarUrl}`); + console.log(`Image URL: ${imageUrl}`); + console.log(`Vmlinux URL: ${vmlinuxUrl}`); + console.log(`Dtb URL: ${dtbUrl}`); + console.log(`Merged Ramdisk URL: ${mergedRamdiskUrl}`); + + - name: Create metadata.json + id: create_metadata + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + DTB_URL: ${{ steps.process_urls.outputs.dtb_url }} + MACHINE: ${{ env.MACHINE }} + run: | + set -euo pipefail + echo "Creating metadata.json from job_render templates" + cd ../job_render + + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e DTB_URL="$DTB_URL" \ + -e MACHINE="$MACHINE" \ + "$DOCKER_IMAGE" \ + jq '.artifacts["dtbs/qcom/\(env.MACHINE).dtb"] = env.DTB_URL' data/metadata.json > temp.json + + mv temp.json data/metadata.json + + - name: Upload metadata.json + id: upload_metadata + uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.main + with: + local_file: ../job_render/data/metadata.json + s3_bucket: qli-prd-video-gh-artifacts + mode: single-upload + upload_location: ${{ env.UPLOAD_LOCATION }} + + - name: Create template json cloudData.json + shell: bash + env: + DOCKER_IMAGE: ${{ 
inputs.docker_image }} + METADATA_URL: ${{ steps.upload_metadata.outputs.presigned_url }} + IMAGE_URL: ${{ steps.process_urls.outputs.image_url }} + VMLINUX_URL: ${{ steps.process_urls.outputs.vmlinux_url }} + MODULES_URL: ${{ steps.process_urls.outputs.modules_url }} + MERGED_RAMDISK_URL: ${{ steps.process_urls.outputs.merged_ramdisk_url }} + run: | + set -euo pipefail + echo "Populating cloudData.json with kernel, vmlinux, modules, metadata, ramdisk" + cd ../job_render + + # metadata + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e METADATA_URL="$METADATA_URL" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.metadata = env.METADATA_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + + # kernel Image + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e IMAGE_URL="$IMAGE_URL" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.kernel = env.IMAGE_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + + # vmlinux (set only if present) + if [ -n "$VMLINUX_URL" ]; then + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e VMLINUX_URL="$VMLINUX_URL" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.vmlinux = env.VMLINUX_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + fi + + # modules + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e MODULES_URL="$MODULES_URL" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.modules = env.MODULES_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + + # ramdisk: use merged only here (fallback added in next step if missing) + if [ -n "$MERGED_RAMDISK_URL" ]; then + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e 
MERGED_RAMDISK_URL="$MERGED_RAMDISK_URL" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.ramdisk = env.MERGED_RAMDISK_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + fi + + - name: Update firmware and ramdisk + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + MERGED_RAMDISK_URL: ${{ steps.process_urls.outputs.merged_ramdisk_url }} + FIRMWARE: ${{ env.FIRMWARE }} + run: | + set -euo pipefail + cd ../job_render + + # Fallback to stable kerneltest ramdisk only if merged ramdisk is not available + if [ -z "$MERGED_RAMDISK_URL" ]; then + echo "Merged ramdisk not found. Using stable kerneltest ramdisk fallback." + ramdisk_url="$(aws s3 presign s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)" + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e RAMDISK_URL="$ramdisk_url" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.ramdisk = env.RAMDISK_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + else + echo "Ramdisk set from merged source; skipping kerneltest fallback." 
+ fi + + # Optional board-specific firmware initramfs + if [ -n "$FIRMWARE" ]; then + case "$FIRMWARE" in + sm8750-mtp) + FW_FILE="initramfs-firmware-dragonboard410c-image-sm8750-mtp.cpio.gz" + ;; + *) + FW_FILE="initramfs-firmware-${FIRMWARE}-image-qcom-armv8a.cpio.gz" + ;; + esac + + echo "Using firmware file: $FW_FILE" + + firmware_url="$(aws s3 presign "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/${FW_FILE}" --expires 7600)" + + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e FIRMWARE_URL="$firmware_url" \ + "$DOCKER_IMAGE" \ + jq '.artifacts.firmware = env.FIRMWARE_URL' data/cloudData.json > temp.json + mv temp.json data/cloudData.json + else + echo "No FIRMWARE provided; skipping firmware artifact update." + fi + + - name: Create lava_job_definition + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + TARGET: ${{ env.LAVA_NAME }} + TARGET_DTB: ${{ env.MACHINE }} + run: | + set -euo pipefail + cd ../job_render + mkdir -p renders + + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir "$PWD" \ + -v "$(dirname "$PWD"):$(dirname "$PWD")" \ + -e TARGET="$TARGET" \ + -e TARGET_DTB="$TARGET_DTB" \ + "$DOCKER_IMAGE" \ + sh -c 'export BOOT_METHOD=fastboot && python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --video_pre-merge' \ No newline at end of file diff --git a/.github/actions/loading/action.yml b/.github/actions/loading/action.yml new file mode 100644 index 000000000..cd5c7e083 --- /dev/null +++ b/.github/actions/loading/action.yml @@ -0,0 +1,68 @@ +--- +name: Load Parameters +description: Load parameters for the build job + +outputs: + build_matrix: + description: Build matrix + value: ${{ steps.set-matrix.outputs.build_matrix }} + full_matrix: + description: Full matrix containing lava details + value: ${{ steps.set-matrix.outputs.full_matrix }} + +runs: + using: "composite" + steps: + - name: Set 
Build Matrix + id: set-matrix + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const path = require('path'); + + // 1. Define possible paths for MACHINES.json + // Path A: Workspace/video-driver/ci/MACHINES.json (Nested) + const pathNested = path.join(process.env.GITHUB_WORKSPACE, 'video-driver', 'ci', 'MACHINES.json'); + // Path B: Workspace/ci/MACHINES.json (Root) + const pathRoot = path.join(process.env.GITHUB_WORKSPACE, 'ci', 'MACHINES.json'); + + let targetsPath = ''; + + // 2. Check which path exists + if (fs.existsSync(pathNested)) { + console.log(`Found config at nested path: ${pathNested}`); + targetsPath = pathNested; + } else if (fs.existsSync(pathRoot)) { + console.log(`Found config at root path: ${pathRoot}`); + targetsPath = pathRoot; + } else { + // 3. Debugging: If neither exists, list files to help us see what is happening + console.log('!!! Error: MACHINES.json not found in expected locations.'); + console.log(`Checked: ${pathNested}`); + console.log(`Checked: ${pathRoot}`); + + console.log('--- Workspace Root Contents ---'); + try { + console.log(fs.readdirSync(process.env.GITHUB_WORKSPACE)); + } catch (e) { console.log(e.message); } + + core.setFailed(`MACHINES.json not found.`); + return; + } + + // 4. Parse the file + let targets; + try { + targets = JSON.parse(fs.readFileSync(targetsPath, 'utf-8')); + } catch (err) { + core.setFailed(`Failed to parse MACHINES.json: ${err.message}`); + return; + } + + // 5. 
Generate Outputs + const build_matrix = Object.values(targets).map(({ machine, firmware }) => ({ machine, firmware })); + core.setOutput('build_matrix', JSON.stringify(build_matrix)); + + const full_matrix = Object.values(targets).map(({ machine, firmware, lavaname }) => ({ machine, firmware, lavaname })); + core.setOutput('full_matrix', JSON.stringify(full_matrix)); \ No newline at end of file diff --git a/.github/actions/sync/action.yml b/.github/actions/sync/action.yml new file mode 100644 index 000000000..770df835b --- /dev/null +++ b/.github/actions/sync/action.yml @@ -0,0 +1,66 @@ +--- +name: Sync Action +description: Checks out the correct code depending on event and repo context. + +inputs: + event_name: + description: Event type that triggered the workflow (e.g., pull_request_target, push, workflow_call) + required: true + pr_ref: + description: PR branch ref (e.g., feature/my-feature) + required: false + pr_repo: + description: PR repo full name (e.g., org/repo or user/repo) + required: false + base_ref: + description: Base branch ref (e.g., master) + required: false + caller_workflow: + description: Name of the workflow calling this actions + required: false + default: None + +runs: + using: 'composite' + steps: + - name: Checkout PR code (pull_request_target) + if: ${{ (inputs.event_name == 'pull_request_target' || inputs.event_name == 'workflow_call') && inputs.pr_ref != '' && inputs.pr_repo != '' }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ inputs.pr_ref }} + repository: ${{ inputs.pr_repo }} + path: video-driver + + - name: Checkout master (push) + if: ${{ (inputs.event_name == 'push' || inputs.event_name == 'workflow_call') && inputs.base_ref != '' }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ inputs.base_ref }} + path: video-driver + + - name: Checkout current repo and ref (Fallback) + if: ${{ (inputs.event_name == 'push' || inputs.event_name == 'workflow_call') && (inputs.base_ref == '' || inputs.pr_repo 
== '') }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ github.ref }} + path: video-driver + + - name: Checkout for workflow_dispatch + if: ${{ inputs.event_name == 'workflow_dispatch' }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ inputs.base_ref }} + path: video-driver + + - name: Clone kernel sources + if: ${{ inputs.caller_workflow == 'build' }} + uses: actions/checkout@v5 + with: + repository: qualcomm-linux/kernel + ref: qcom-next + path: kernel + fetch-depth: 0 \ No newline at end of file diff --git a/.github/workflows/loading.yml b/.github/workflows/loading.yml new file mode 100644 index 000000000..a0967aaec --- /dev/null +++ b/.github/workflows/loading.yml @@ -0,0 +1,52 @@ +--- +name: _loading +description: Load required parameters for the subsequent jobs + +on: + workflow_call: + inputs: + target_branch: + description: "Branch to checkout (optional)" + required: false + type: string + default: "" + outputs: + build_matrix: + description: Build matrix + value: ${{ jobs.loading.outputs.build_matrix }} + full_matrix: + description: Full Matrix containing lava description + value: ${{ jobs.loading.outputs.full_matrix }} + +jobs: + loading: + runs-on: ubuntu-latest + outputs: + build_matrix: ${{ steps.loading.outputs.build_matrix }} + full_matrix: ${{ steps.loading.outputs.full_matrix }} + steps: + # SCENARIO 1: Pull Request (Pre-Merge) + # Uses your custom sync action to merge PR code with base + - name: Sync codebase (PR) + if: github.event_name == 'pull_request' + uses: qualcomm-linux/video-driver/.github/actions/sync@video.qclinux.main + with: + event_name: ${{ github.event_name }} + pr_ref: ${{ github.event.pull_request.head.ref }} + pr_repo: ${{ github.event.pull_request.head.repo.full_name }} + base_ref: ${{ github.ref_name }} + + # SCENARIO 2: Schedule or Manual (Post-Merge) + # Uses standard checkout because there is no PR to sync + - name: Checkout Code (Schedule) + if: github.event_name != 'pull_request' + uses: 
actions/checkout@v4 + with: + # Use the input branch if provided, otherwise default to current ref + ref: ${{ inputs.target_branch || github.ref_name }} + # Check out into 'video-driver' folder so the script finds the nested path + path: video-driver + + - name: Load Parameters + id: loading + uses: qualcomm-linux/video-driver/.github/actions/loading@video.qclinux.main \ No newline at end of file diff --git a/.github/workflows/post_merge.yml b/.github/workflows/post_merge.yml new file mode 100644 index 000000000..7575f3076 --- /dev/null +++ b/.github/workflows/post_merge.yml @@ -0,0 +1,31 @@ +name: Post Merge Weekly +description: | + Runs post-merge CI for the video-driver repository on a weekly schedule. + Reuses loading, build and test workflows. + +on: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + +jobs: + loading: + uses: qualcomm-linux/video-driver/.github/workflows/loading.yml@video.qclinux.main + secrets: inherit + + build: + needs: loading + uses: qualcomm-linux/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.main + secrets: inherit + with: + docker_image: kmake-image:ver.1.0 + build_matrix: ${{ needs.loading.outputs.build_matrix }} + + lava-test: + needs: [loading, build] + uses: qualcomm-linux/video-driver/.github/workflows/test.yml@video.qclinux.main + secrets: inherit + with: + docker_image: kmake-image:ver.1.0 + build_matrix: ${{ needs.loading.outputs.build_matrix }} + full_matrix: ${{ needs.loading.outputs.full_matrix }} \ No newline at end of file diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml new file mode 100644 index 000000000..dcc0bb2a1 --- /dev/null +++ b/.github/workflows/pre_merge.yml @@ -0,0 +1,36 @@ +name: pre_merge +description: | + Orchestrates pre-merge CI for the video-driver repository a matrix + in the caller workflow. Builds and tests using reusable workflows. 
+ +on: + push: + branches: + - video.qclinux.main + workflow_dispatch: + pull_request_target: + types: [opened, synchronize, reopened] + branches: + - video.qclinux.main + +jobs: + loading: + uses: qualcomm-linux/video-driver/.github/workflows/loading.yml@video.qclinux.main + secrets: inherit + + build: + needs: loading + uses: qualcomm-linux/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.main + secrets: inherit + with: + docker_image: kmake-image:ver.1.0 + build_matrix: ${{ needs.loading.outputs.build_matrix }} + + lava-test: + needs: [loading, build] + uses: qualcomm-linux/video-driver/.github/workflows/test.yml@video.qclinux.main + secrets: inherit + with: + docker_image: kmake-image:ver.1.0 + build_matrix: ${{ needs.loading.outputs.build_matrix }} + full_matrix: ${{ needs.loading.outputs.full_matrix }} \ No newline at end of file diff --git a/.github/workflows/qcom-preflight-checks.yml b/.github/workflows/qcom-preflight-checks.yml new file mode 100644 index 000000000..8ca84014c --- /dev/null +++ b/.github/workflows/qcom-preflight-checks.yml @@ -0,0 +1,24 @@ +name: QC Preflight Checks + +on: + pull_request: + push: + branches: [video.qclinux.main] + workflow_dispatch: + +jobs: + preflight: + name: Run QC Preflight Checks + uses: qualcomm/qcom-reusable-workflows/.github/workflows/reusable-qcom-preflight-checks-orchestrator.yml@v2 + with: + enable-semgrep-scan: true + enable-dependency-review: true + enable-repolinter-check: true + enable-copyright-license-check: true + enable-commit-email-check: true + enable-commit-msg-check: true + commit-msg-check-extra-options: '{"body-char-limit": 72, "sub-char-limit": 72, "check-blank-line": true}' + enable-armor-checkers: false + permissions: + contents: read + security-events: write diff --git a/.github/workflows/stale-issues.yaml b/.github/workflows/stale-issues.yaml new file mode 100644 index 000000000..f5c83c608 --- /dev/null +++ b/.github/workflows/stale-issues.yaml @@ -0,0 +1,24 @@ +name: Qualcomm 
Preflight Checks +on: + pull_request_target: + branches: [ video.qclinux.main ] + push: + branches: [ video.qclinux.main ] + workflow_dispatch: + +permissions: + contents: read + security-events: write + +jobs: + qcom-preflight-checks: + uses: qualcomm/qcom-reusable-workflows/.github/workflows/qcom-preflight-checks-reusable-workflow.yml@v1.1.4 + with: + # ✅ Preflight Checkers + repolinter: true # default: true + semgrep: true # default: true + copyright-license-detector: true # default: true + pr-check-emails: true # default: true + dependency-review: true # default: true + secrets: + SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/sync-and-build.yml b/.github/workflows/sync-and-build.yml new file mode 100644 index 000000000..1314fb57b --- /dev/null +++ b/.github/workflows/sync-and-build.yml @@ -0,0 +1,342 @@ +name: Sync and Build + +on: + workflow_dispatch: + workflow_call: + inputs: + docker_image: + description: Docker image to use for the build + required: false + type: string + default: kmake-image:ver.1.0 + build_matrix: + description: Build matrix for multi target builds + type: string + required: true + +permissions: + packages: read + +jobs: + sync-and-build: + runs-on: + group: GHA-video-Prd-SelfHosted-RG + labels: [self-hosted, video-prd-u2204-x64-large-od-ephem] + + steps: + - name: Pull Docker image + uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main + with: + image: ${{ inputs.docker_image }} + + - name: Checkout Video Driver + uses: actions/checkout@v4 + with: + path: video-driver + fetch-depth: 0 + + - name: Sync codebase + uses: qualcomm-linux/video-driver/.github/actions/sync@video.qclinux.main + with: + event_name: ${{ github.event_name }} + pr_ref: ${{ github.event.pull_request.head.ref }} + pr_repo: ${{ github.event.pull_request.head.repo.full_name }} + base_ref: ${{ github.ref_name }} + caller_workflow: build + + - name: Build workspace + id: build_workspace 
+ uses: qualcomm-linux/video-driver/.github/actions/build@video.qclinux.main + with: + docker_image: kmake-image:ver.1.0 + workspace_path: ${{ github.workspace }} + + - name: Fix Workspace Ownership + if: always() + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + echo "🔧 Fixing file ownership (root -> runner user)..." + sudo chown -R "$(id -u):$(id -g)" "$WORKSPACE" + + - name: Download iris_test_app from the s3 + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + echo "$WORKSPACE" + mkdir -p "$WORKSPACE/v4l-video-test-app/build/" + echo "syncing files from s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/" + aws s3 cp \ + "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/iris_test_app/" \ + "$WORKSPACE/v4l-video-test-app/build/" \ + --recursive + echo "✅ Download complete" + ls "$WORKSPACE/v4l-video-test-app/build/" + + - name: Download firmware file from S3 + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + mkdir -p "$WORKSPACE/downloads" + echo "📥 Syncing files from S3 path: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/" + aws s3 cp \ + "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/vpu20_1v.mbn" \ + "$WORKSPACE/downloads/" + echo "✅ Download complete" + [ -f "$WORKSPACE/downloads/vpu20_1v.mbn" ] || { echo "❌ Missing vpu20_1v.mbn"; exit 1; } + + - name: Download the video-contents for testing + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + mkdir -p "$WORKSPACE/downloads" + echo "Downloading the video-content files" + wget -q \ + "https://github.com/qualcomm-linux/qcom-linux-testkit/releases/download/IRIS-Video-Files-v1.0/video_clips_iris.tar.gz" \ + -O "$WORKSPACE/downloads/video_clips_iris.tar.gz" + [ -f "$WORKSPACE/downloads/video_clips_iris.tar.gz" ] || { echo "❌ Failed to download video_clips_iris.tar.gz"; exit 1; } + + - 
name: Prepare /data/vendor/iris_test_app and list contents + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + data_dir="$WORKSPACE/kobj/tar-install/data/vendor/iris_test_app" + data_dir2="$WORKSPACE/kobj/tar-install/data/vendor/iris_test_app/firmware" + + mkdir -p "$data_dir" + mkdir -p "$data_dir2" + + firmware_version="$(ls "$WORKSPACE/kobj/tar-install/lib/modules/")" + mkdir -p "$WORKSPACE/kobj/tar-install/lib/modules/$firmware_version/updates" + + cp "$WORKSPACE/video-driver/video/iris_vpu.ko" \ + "$WORKSPACE/kobj/tar-install/lib/modules/$firmware_version/updates/" + cp "$WORKSPACE/v4l-video-test-app/build/iris_v4l2_test" "$data_dir/" + cp "$WORKSPACE/downloads/vpu20_1v.mbn" "$data_dir2/" + cp "$WORKSPACE/downloads/video_clips_iris.tar.gz" "$data_dir/" + + echo "📂 Contents of $data_dir:" + ls -lh "$data_dir" + + - name: Create compressed kernel ramdisk archives + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + cd "$WORKSPACE/kobj/tar-install" + find lib/modules data | cpio -o -H newc --owner=0:0 | gzip -9 > "$WORKSPACE/local-kernel-ramdisk.cpio.gz" + ls -lh "$WORKSPACE/local-kernel-ramdisk.cpio.gz" + + - name: Download meta-qcom stable initramfs artifacts from S3 + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + mkdir -p "$WORKSPACE/downloads" + echo "🔍 Fetching initramfs files from S3 bucket: s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/" + aws s3 cp \ + "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz" \ + "$WORKSPACE/downloads/" + echo "Initramfs files downloaded to: $WORKSPACE/downloads" + + - name: Decompress ramdisk files and rename .cpio.gz files + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + cd "$WORKSPACE/downloads" + echo "Decompressing and renaming .cpio.gz files..." 
+ gunzip -c initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz > kerneltest.cpio + + - name: Merge and repackage initramfs + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + echo "🔧 Starting repackaging process" + + mkdir -p "$WORKSPACE/combineramdisk" + cp "$WORKSPACE/local-kernel-ramdisk.cpio.gz" "$WORKSPACE/combineramdisk/" + + cd "$WORKSPACE/combineramdisk" + + mv local-kernel-ramdisk.cpio.gz local-kernel-ramdisk.cpio.gz.bak + gunzip -c local-kernel-ramdisk.cpio.gz.bak > local-kernel-ramdisk.cpio + + cp "$WORKSPACE/downloads/kerneltest.cpio" . + + cat kerneltest.cpio local-kernel-ramdisk.cpio > video-merged.cpio + gzip -9 video-merged.cpio + + mkdir -p temp_merge + cd temp_merge + cpio -id --no-absolute-filenames < ../kerneltest.cpio + cpio -id --no-absolute-filenames < ../local-kernel-ramdisk.cpio + cd .. + + rm -f video-merged.cpio.gz + + cd temp_merge + find . | cpio -o -H newc --owner=0:0 > ../video-merged.cpio + cd .. + + gzip -9 video-merged.cpio + + rm -rf temp_merge kerneltest.cpio local-kernel-ramdisk.cpio + + echo "Final archive: $WORKSPACE/combineramdisk/video-merged.cpio.gz" + ls -lh "$WORKSPACE/combineramdisk/video-merged.cpio.gz" + + - name: Validate build_matrix and jq + shell: bash + env: + BUILD_MATRIX: ${{ inputs.build_matrix }} + run: | + set -euo pipefail + machines_json="$BUILD_MATRIX" + + if ! command -v jq >/dev/null 2>&1; then + echo "❌ jq is not installed on this runner. Please install jq." + exit 1 + fi + + echo "$machines_json" | jq -e . 
>/dev/null + [ "$(echo "$machines_json" | jq length)" -gt 0 ] || { echo "❌ build_matrix is empty"; exit 1; } + + echo "✅ build_matrix is valid JSON" + + - name: Append artifacts to S3 upload list + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + BUILD_MATRIX: ${{ inputs.build_matrix }} + run: | + set -euo pipefail + + workspace="$WORKSPACE" + file_list="$workspace/artifacts/file_list.txt" + mkdir -p "$workspace/artifacts" + + : > "$file_list" + + mod_root="$workspace/kobj/tar-install/lib/modules" + [ -d "$mod_root" ] || { echo "❌ Missing directory: $mod_root"; exit 1; } + + tar -C "$workspace/kobj/tar-install" \ + --exclude='lib/modules/*/build' \ + --exclude='lib/modules/*/source' \ + --numeric-owner --owner=0 --group=0 \ + -cJf "$workspace/modules.tar.xz" lib/modules + + if tar -Jtvf "$workspace/modules.tar.xz" | grep -q ' -> '; then + echo "❌ Symlinks found in modules archive (should be none)" + exit 1 + fi + + if tar -Jtf "$workspace/modules.tar.xz" | grep -Eq '^/|(^|/)\.\.(/|$)'; then + echo "❌ Unsafe paths found in modules archive" + exit 1 + fi + + echo "$workspace/modules.tar.xz" >> "$file_list" + echo "✅ Queued for upload: $workspace/modules.tar.xz" + + IMAGE_PATH="$workspace/kobj/arch/arm64/boot/Image" + VMLINUX_PATH="$workspace/kobj/vmlinux" + MERGED_PATH="$workspace/combineramdisk/video-merged.cpio.gz" + + [ -f "$IMAGE_PATH" ] || { echo "❌ Missing expected file: $IMAGE_PATH"; exit 1; } + [ -f "$VMLINUX_PATH" ] || { echo "❌ Missing expected file: $VMLINUX_PATH"; exit 1; } + [ -f "$MERGED_PATH" ] || { echo "❌ Missing merged cpio: $MERGED_PATH"; exit 1; } + + echo "$IMAGE_PATH" >> "$file_list" + echo "✅ Queued for upload: $IMAGE_PATH" + + echo "$VMLINUX_PATH" >> "$file_list" + echo "✅ Queued for upload: $VMLINUX_PATH" + + echo "$MERGED_PATH" >> "$file_list" + echo "✅ Queued for upload: $MERGED_PATH" + + while IFS= read -r machine; do + dtb="$workspace/kobj/arch/arm64/boot/dts/qcom/${machine}.dtb" + if [ -f "$dtb" ]; then + echo "$dtb" >> 
"$file_list" + echo "✅ Queued for upload: $dtb" + else + echo "❌ Missing DTB: $dtb" + exit 1 + fi + done < <(printf '%s' "$BUILD_MATRIX" | jq -r '.[].machine') + + echo "----- Files queued for S3 upload -----" + cat "$file_list" + + - name: Upload all artifacts to S3 + uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.main + with: + s3_bucket: qli-prd-video-gh-artifacts + local_file: ${{ github.workspace }}/artifacts/file_list.txt + mode: multi-upload + upload_location: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.run_id }}-${{ github.run_attempt }} + + - name: Clean up + if: always() + shell: bash + env: + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + ws="$WORKSPACE" + rm -rf "$ws/artifacts" || true + rm -rf "$ws/combineramdisk" || true + rm -rf "$ws/downloads" || true + rm -rf "$ws/kobj" || true + rm -f "$ws/modules.tar.xz" || true + rm -f "$ws/local-kernel-ramdisk.cpio.gz" || true + + - name: Update summary + if: success() || failure() + shell: bash + env: + BUILD_STATUS: ${{ steps.build_workspace.outcome }} + WORKSPACE: ${{ github.workspace }} + run: | + set -euo pipefail + + status="$BUILD_STATUS" + if [ "$status" = "success" ]; then + summary=":heavy_check_mark: Build Success" + else + summary=":x: Build Failed" + fi + + ws="$WORKSPACE" + file_list="$ws/artifacts/file_list.txt" + + { + echo "
Build Summary" + echo "$summary" + if [ -f "$file_list" ]; then + echo "" + echo "Artifacts queued for upload:" + while IFS= read -r line; do + echo "- $line" + done < "$file_list" + fi + echo "
" + } >> "$GITHUB_STEP_SUMMARY" \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 000000000..8f61e9e4b --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,370 @@ +name: _test + +on: + workflow_call: + inputs: + docker_image: + description: Docker image + type: string + required: true + default: kmake-image:ver.1.0 + build_matrix: + description: Build matrix for multi target builds (stringified JSON) + type: string + required: true + full_matrix: + description: Full matrix containing lava description (stringified JSON) + type: string + required: true + +jobs: + test: + runs-on: + group: GHA-video-Prd-SelfHosted-RG + labels: [self-hosted, video-prd-u2204-x64-large-od-ephem] + + strategy: + fail-fast: false + matrix: + build_matrix: ${{ fromJson(inputs.build_matrix) }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Pull docker image + uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main + with: + image: ${{ inputs.docker_image }} + + - name: Download URLs list (presigned_urls.json) + uses: actions/download-artifact@v4 + with: + name: presigned_urls.json + merge-multiple: true + path: ${{ github.workspace }} + + - name: Clone lava job render scripts + shell: bash + run: | + set -euo pipefail + cd .. 
+ git clone https://github.com/qualcomm-linux/job_render + + - name: Extract the LAVA machine name + id: get_lavaname + uses: actions/github-script@v7 + env: + FULL_MATRIX: ${{ inputs.full_matrix }} + CURRENT_MACHINE: ${{ matrix.build_matrix.machine }} + with: + script: | + const fullMatrix = JSON.parse(process.env.FULL_MATRIX); + const currentMachine = process.env.CURRENT_MACHINE; + const entry = fullMatrix.find(item => item.machine === currentMachine); + + if (!entry) { + core.setFailed(`No entry found in full matrix for machine: ${currentMachine}`); + return; + } + + const lavaname = entry.lavaname; + console.log(`Lavaname for ${currentMachine} is ${lavaname}`); + core.setOutput("LAVANAME", lavaname); + + - name: Create lava job definition + id: create_job_definition + uses: qualcomm-linux/video-driver/.github/actions/lava_job_render@video.qclinux.main + with: + docker_image: ${{ inputs.docker_image }} + env: + FIRMWARE: ${{ matrix.build_matrix.firmware }} + MACHINE: ${{ matrix.build_matrix.machine }} + LAVA_NAME: ${{ steps.get_lavaname.outputs.LAVANAME }} + + - name: Submit lava job + id: submit_job + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + LAVA_OSS_TOKEN: ${{ secrets.LAVA_OSS_TOKEN }} + LAVA_OSS_USER: ${{ secrets.LAVA_OSS_USER }} + run: | + set -euo pipefail + cd ../job_render + + job_id="$( + docker run -i --rm \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + "$DOCKER_IMAGE" \ + sh -c "lavacli identities add --token \"$LAVA_OSS_TOKEN\" --uri https://lava-oss.qualcomm.com/RPC2 --username \"$LAVA_OSS_USER\" production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml" + )" + + job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id" + + echo "job_id=$job_id" >> "$GITHUB_OUTPUT" + echo "job_url=$job_url" >> "$GITHUB_OUTPUT" + echo "Lava Job: $job_url" + echo "JOB_ID=$job_id" >> "$GITHUB_ENV" + + - name: Save Job ID for Reporting + if: always() + shell: bash + env: + MACHINE: ${{ 
matrix.build_matrix.machine }} + run: | + set -euo pipefail + printf '{"id":"%s","machine":"%s"}\n' "$JOB_ID" "$MACHINE" > "lava-job-$MACHINE.json" + + - name: Upload Job ID Artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: lava-job-data-${{ matrix.build_matrix.machine }} + path: lava-job-${{ matrix.build_matrix.machine }}.json + + - name: Check lava job results + id: check_job + shell: bash + env: + DOCKER_IMAGE: ${{ inputs.docker_image }} + LAVA_OSS_TOKEN: ${{ secrets.LAVA_OSS_TOKEN }} + LAVA_OSS_USER: ${{ secrets.LAVA_OSS_USER }} + run: | + set -euo pipefail + + STATE="" + HEALTH="" + START_TIME=$(date +%s) + TIMEOUT_SECONDS=$((2 * 60 * 60)) + + get_lava_field() { + local content="$1" + local field="$2" + printf '%s\n' "$content" \ + | sed -nE "s/^[[:space:]]*-?[[:space:]]*${field}[[:space:]]*:[[:space:]]*(.*)[[:space:]]*$/\1/pI" \ + | head -n 1 \ + | xargs + } + + echo "Waiting for Job $JOB_ID to finish..." + + while [ "$STATE" != "Finished" ]; do + JOB_INFO="$( + docker run -i --rm \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + "$DOCKER_IMAGE" \ + sh -c "lavacli identities add --token \"$LAVA_OSS_TOKEN\" --uri https://lava-oss.qualcomm.com/RPC2 --username \"$LAVA_OSS_USER\" production > /dev/null 2>&1 || true; lavacli -i production jobs show \"$JOB_ID\"" + )" + + STATE=$(get_lava_field "$JOB_INFO" "state") + HEALTH=$(get_lava_field "$JOB_INFO" "Health") + + echo "Current status: ${STATE:-WAITING_FOR_METADATA} (Health: ${HEALTH:-Unknown})" + + if [ "$STATE" = "Finished" ]; then + break + fi + + if [ $(( $(date +%s) - START_TIME )) -ge $TIMEOUT_SECONDS ]; then + echo "::error::Timeout reached (2 hours)." + exit 1 + fi + + sleep 30 + done + + if [ "${HEALTH:-}" != "Complete" ]; then + echo "::error::Job health is $HEALTH (not Complete). Check LAVA logs for infrastructure failure." + echo "summary=:warning: LAVA Job $HEALTH" >> "$GITHUB_OUTPUT" + exit 1 + fi + + echo "Fetching detailed results..." 
+ docker run -i --rm \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + "$DOCKER_IMAGE" \ + sh -c "lavacli identities add --token \"$LAVA_OSS_TOKEN\" --uri https://lava-oss.qualcomm.com/RPC2 --username \"$LAVA_OSS_USER\" production > /dev/null 2>&1 || true; lavacli -i production results \"$JOB_ID\"" \ + > lava_results.txt + + RESULTS=$(grep -E '^\* 0_video_pre-merge-tests\.Video_V4L2_Runner \[(pass|fail)\]' lava_results.txt || true) + + if [ -z "$RESULTS" ]; then + echo "::error::Testcase 0_video_pre-merge-tests.Video_V4L2_Runner not found in results!" + echo "summary=:x: Video_V4L2_Runner result not found" >> "$GITHUB_OUTPUT" + exit 1 + fi + + echo "Found result lines:" + echo "$RESULTS" + + if echo "$RESULTS" | grep -q "\[fail\]"; then + echo "FAILURE: Video_V4L2_Runner detected a fail result." + echo "summary=:x: Video_V4L2_Runner Failed" >> "$GITHUB_OUTPUT" + exit 1 + else + echo "SUCCESS: Video_V4L2_Runner passed." + echo "summary=:heavy_check_mark: Video_V4L2_Runner Passed" >> "$GITHUB_OUTPUT" + exit 0 + fi + + generate-summary: + needs: test + if: always() + runs-on: ubuntu-latest + + steps: + - name: Download all Job IDs + uses: actions/download-artifact@v4 + with: + pattern: lava-job-data-* + merge-multiple: true + path: artifacts + + - name: Install dependencies + shell: bash + run: | + set -euo pipefail + sudo apt-get update + sudo apt-get install -y jq curl + + - name: Generate LAVA Test Job Summary + id: generate + shell: bash + env: + LAVA_URL: https://lava-oss.qualcomm.com + LAVA_TOKEN: ${{ secrets.LAVA_OSS_TOKEN }} + SUMMARY_FILE: step-summary.md + run: | + set -euo pipefail + + INPUT="$( + find artifacts -name 'lava-job-*.json' -print0 | while IFS= read -r -d '' TESTJOB; do + JOB_ID=$(jq -r '.id' "$TESTJOB") + AUTH_HEADER="Authorization: Token $LAVA_TOKEN" + + JOB_DETAILS=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/") + JOB_STATE=$(echo "$JOB_DETAILS" | jq -r '.state // empty') + JOB_DEVICE_TYPE=$(echo 
"$JOB_DETAILS" | jq -r '.requested_device_type // empty') + + if [ -z "$JOB_DEVICE_TYPE" ] || [ "$JOB_DEVICE_TYPE" = "null" ]; then + JOB_DEVICE_TYPE="unknown-$JOB_ID" + fi + + TEST_RESULTS="{}" + + if [ "$JOB_STATE" = "Finished" ]; then + JOB_SUITES=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/suites/") + + TEST_RESULTS="$( + echo "$JOB_SUITES" | jq -c '.results[]?' | while IFS= read -r SUITE; do + SUITE_NAME=$(echo "$SUITE" | jq -r '.name') + SUITE_ID=$(echo "$SUITE" | jq -r '.id') + + if [ "$SUITE_NAME" != "lava" ]; then + SUITE_TESTS=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/suites/$SUITE_ID/tests/") + + echo "$SUITE_TESTS" | jq -c '.results[]?' | while IFS= read -r TEST_ITEM; do + T_NAME=$(echo "$TEST_ITEM" | jq -r '.name') + T_RESULT=$(echo "$TEST_ITEM" | jq -r '.result') + TEST_URL="$LAVA_URL/results/$JOB_ID/$SUITE_ID" + + if [ -n "$T_NAME" ] && [ "$T_NAME" != "null" ]; then + jq -n -c \ + --arg key "$T_NAME" \ + --arg url "$TEST_URL" \ + --arg result "$T_RESULT" \ + '{key: $key, value: {url: $url, result: $result}}' + fi + done + else + TEST_NAME="boot" + TEST_URL="$LAVA_URL/results/$JOB_ID" + jq -n -c \ + --arg key "$TEST_NAME" \ + --arg url "$TEST_URL" \ + --arg result "pass" \ + '{key: $key, value: {url: $url, result: $result}}' + fi + done | jq -s -c 'from_entries' + )" + fi + + jq -n -c \ + --arg key "$JOB_DEVICE_TYPE" \ + --argjson value "$TEST_RESULTS" \ + '{key: $key, value: $value}' + done | jq -s -c 'reduce .[] as $i ({}; .[$i.key] = ((.[$i.key] // {}) + $i.value))' + )" + + DEVICES=$(echo "$INPUT" | jq -r 'keys[]' | sort) + RESULTS=$(echo "$INPUT" | jq -r '.[] | keys[]?' 
| sort -u) + + echo "### LAVA Test Summary" > "$SUMMARY_FILE" + echo "" >> "$SUMMARY_FILE" + + printf "| Test Case |" >> "$SUMMARY_FILE" + for D in $DEVICES; do + printf " %s |" "$D" >> "$SUMMARY_FILE" + done + echo "" >> "$SUMMARY_FILE" + + printf "| :--- |" >> "$SUMMARY_FILE" + for _ in $DEVICES; do + printf " :---: |" >> "$SUMMARY_FILE" + done + echo "" >> "$SUMMARY_FILE" + + for R in $RESULTS; do + printf "| **%s** |" "$R" >> "$SUMMARY_FILE" + + for D in $DEVICES; do + VALUE=$(echo "$INPUT" | jq -r --arg d "$D" --arg r "$R" '.[$d][$r].result // ""') + URL=$(echo "$INPUT" | jq -r --arg d "$D" --arg r "$R" '.[$d][$r].url // ""') + + ICON=":no_entry_sign:" + if [ "$VALUE" = "pass" ]; then ICON=":white_check_mark:"; fi + if [ "$VALUE" = "fail" ]; then ICON=":x:"; fi + if [ "$VALUE" = "skip" ]; then ICON=":warning:"; fi + + if [ -n "$URL" ] && [ "$URL" != "null" ]; then + printf " [%s](%s) |" "$ICON" "$URL" >> "$SUMMARY_FILE" + else + printf " %s |" "$ICON" >> "$SUMMARY_FILE" + fi + done + + echo "" >> "$SUMMARY_FILE" + done + + echo "" >> "$SUMMARY_FILE" + echo "#### Job Details" >> "$SUMMARY_FILE" + echo "| Job ID | Device | State | Health | Link |" >> "$SUMMARY_FILE" + echo "| :--- | :--- | :--- | :--- | :--- |" >> "$SUMMARY_FILE" + + find artifacts -name 'lava-job-*.json' -print0 | while IFS= read -r -d '' TESTJOB; do + JOB_ID=$(jq -r '.id' "$TESTJOB") + AUTH_HEADER="Authorization: Token $LAVA_TOKEN" + JOB_DETAILS=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/") + + HEALTH=$(echo "$JOB_DETAILS" | jq -r '.health') + STATE=$(echo "$JOB_DETAILS" | jq -r '.state') + DEVICE=$(echo "$JOB_DETAILS" | jq -r '.requested_device_type') + URL="$LAVA_URL/results/$JOB_ID" + + echo "| $JOB_ID | $DEVICE | $STATE | $HEALTH | [View]($URL) |" >> "$SUMMARY_FILE" + done + + cat "$SUMMARY_FILE" >> "$GITHUB_STEP_SUMMARY" + + - name: Upload Summary Artifact + uses: actions/upload-artifact@v4 + with: + name: lava-summary-report + path: step-summary.md \ No newline at end 
of file
diff --git a/.github/workflows/uapi-check.yml b/.github/workflows/uapi-check.yml
new file mode 100644
index 000000000..c9a2b6e57
--- /dev/null
+++ b/.github/workflows/uapi-check.yml
@@ -0,0 +1,30 @@
+name: UAPI and Driver Checks
+
+on:
+  pull_request:
+    # Optional: limit to main branch (branch filters must be a YAML sequence)
+    branches: [video.qclinux.main]
+
+jobs:
+  uapi-check:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository with full history
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Determine base and head SHAs
+        id: shas
+        run: |
+          # PR base commit
+          echo "base_sha=${{ github.event.pull_request.base.sha }}" >> "$GITHUB_OUTPUT"
+          # PR head (current) commit — NOT github.sha, which is the synthetic merge commit on pull_request events
+          echo "head_sha=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_OUTPUT"
+
+      - name: Run UAPI + driver checks
+        run: |
+          export BASE_SHA="${{ steps.shas.outputs.base_sha }}"
+          export HEAD_SHA="${{ steps.shas.outputs.head_sha }}"
+          ./ci/check-uapi-and-driver.sh
\ No newline at end of file
diff --git a/ci/MACHINES.json b/ci/MACHINES.json
new file mode 100644
index 000000000..ff3253812
--- /dev/null
+++ b/ci/MACHINES.json
@@ -0,0 +1,42 @@
+{
+  "qcs6490-rb3gen2": {
+    "machine": "qcs6490-rb3gen2",
+    "firmware": "rb3gen2",
+    "lavaname": "qcs6490",
+    "target": "qcs6490-rb3gen2",
+    "buildid": "QCM6490.LE.1.0-00376-STD.PROD-1",
+    "firmwareid": "rb3gen2"
+  },
+  "qcs9100-ride-r3": {
+    "machine": "qcs9100-ride-r3",
+    "firmware": "sa8775p-ride",
+    "lavaname": "qcs9100-ride",
+    "target": "qcs9100-ride-r3",
+    "buildid": "QCS9100.LE.1.0-00243-STD.PROD-1",
+    "firmwareid": "sa8775p-ride"
+  },
+  "qcs8300-ride": {
+    "machine": "qcs8300-ride",
+    "firmware": "qcs8300-ride",
+    "lavaname": "qcs8300-ride",
+    "target": "qcs8300-ride",
+    "buildid": "QCS8300.LE.1.0-00137-STD.PROD-1",
+    "firmwareid": "qcs8300-ride"
+  },
+  "qcs615-ride": {
+    "machine": "qcs615-ride",
+    "firmware": "qcs615-ride",
+    "lavaname": "qcs615-ride",
+    "target": "qcs615-ride",
+    "buildid": "QCS615.LE.1.0-00016-STD.PROD-1",
+    "firmwareid": "qcs615-ride"
+  },
+  "sm8750-mtp": {
+ "machine": "sm8750-mtp", + "firmware": "sm8750-mtp", + "lavaname": "sm8750-mtp", + "target": "sm8750-mtp", + "buildid": "YOUR_BUILD_ID", + "firmwareid": "sm8750-mtp" +} +} diff --git a/ci/check-uapi-and-driver.sh b/ci/check-uapi-and-driver.sh new file mode 100755 index 000000000..742d13331 --- /dev/null +++ b/ci/check-uapi-and-driver.sh @@ -0,0 +1,107 @@ +#!/bin/bash +set -euo pipefail + +# BASE_SHA = commit to compare against (PR base) +# HEAD_SHA = commit being tested (PR head) +BASE_SHA="${BASE_SHA:-}" +HEAD_SHA="${HEAD_SHA:-HEAD}" + +if [[ -z "$BASE_SHA" ]]; then + echo "ERROR: BASE_SHA is not set. Set BASE_SHA to the base commit to diff against." + exit 1 +fi + +echo "Running UAPI + driver checks between:" +echo " base = $BASE_SHA" +echo " head = $HEAD_SHA" +echo + +changed_files=$(git diff --name-only --diff-filter=AM "$BASE_SHA" "$HEAD_SHA") +if [[ -z "$changed_files" ]]; then + echo "No changed files; skipping checks." + exit 0 +fi + +exit_status=0 + +############################################################################### +# 1. UAPI header checks (v4l2_vidc_extensions.h) +############################################################################### + +uapi_header="include/uapi/vidc/media/v4l2_vidc_extensions.h" + +if echo "$changed_files" | grep -q "^$uapi_header$"; then + echo "UAPI header changed: $uapi_header" + echo "Checking for removed struct/enums/defines that may break ABI ..." + + pre_uapi=$(mktemp) + post_uapi=$(mktemp) + + git show "$BASE_SHA:$uapi_header" > "$pre_uapi" 2>/dev/null || true + git show "$HEAD_SHA:$uapi_header" > "$post_uapi" 2>/dev/null || true + + if [[ ! -s "$pre_uapi" ]]; then + echo "Note: UAPI header appears to be newly added; skipping ABI removal check." 
+ else + removed_lines=$(diff -u "$pre_uapi" "$post_uapi" | grep '^-' | grep -v '^---' || true) + + if echo "$removed_lines" \ + | grep -E '^\-.*(struct|enum|#define|V4L2_.*|VIDC_.*)' >/dev/null; then + echo "ERROR: Potential ABI break in $uapi_header – definitions removed:" + echo "$removed_lines" + exit_status=1 + fi + fi + + rm -f "$pre_uapi" "$post_uapi" + echo +fi + +############################################################################### +# 2. Sysfs usage checks in modified C files +############################################################################### + +echo "Checking for sysfs usage in modified C files ..." + +while read -r f; do + [[ -z "$f" ]] && continue + [[ "$f" != *.c ]] && continue + + if grep -qE 'sysfs_create_file|device_create_file|sysfs_remove_file' "$f"; then + echo "ERROR: sysfs interface usage detected in modified file: $f" + echo " Policy: avoid adding/modifying sysfs in this driver." + exit_status=1 + fi +done <<< "$changed_files" + +echo + +############################################################################### +# 3. Optional: module_param checks +############################################################################### + +echo "Checking module_param definitions ..." + +if echo "$changed_files" | grep -q '\.c$'; then + tmp_diff=$(mktemp) + + git diff "$BASE_SHA" "$HEAD_SHA" -- '*.c' \ + | grep -E '^[\+\-].*module_param' > "$tmp_diff" 2>/dev/null || true + + if grep -E '^\-.*module_param' "$tmp_diff" >/dev/null; then + echo "ERROR: module_param removed/modified in diff:" + grep -E '^\-.*module_param' "$tmp_diff" + exit_status=1 + fi + + rm -f "$tmp_diff" +fi + +echo +if [[ "$exit_status" -ne 0 ]]; then + echo "UAPI/driver checks FAILED." +else + echo "UAPI/driver checks PASSED." 
+fi
+
+exit "$exit_status"
diff --git a/video_kernel_headers.py b/video_kernel_headers.py
index bfb5507a5..a41632b6d 100644
--- a/video_kernel_headers.py
+++ b/video_kernel_headers.py
@@ -12,6 +12,11 @@
 # You should have received a copy of the GNU General Public License along with
 # this program. If not, see .
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+#
+
 import argparse
 import filecmp
 import os