Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 53 additions & 0 deletions .github/actions/aws_s3_helper/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
---
name: Build Workspace
# NOTE(review): the diff places this content at
# .github/actions/aws_s3_helper/action.yml, yet it defines a *build* action,
# and lava_job_render invokes aws_s3_helper with inputs (local_file,
# s3_bucket, mode, upload_location) that are not declared here — confirm the
# intended contents of this file.
description: |
  Builds kernel and video-driver using a Docker image.

inputs:
  docker_image:
    description: Docker image to use
    required: true
  workspace_path:
    description: Path to workspace directory
    required: true

runs:
  using: "composite"
  steps:
    - name: Build kernel
      shell: bash
      # Inputs are forwarded through env so the script never embeds ${{ }}
      # expressions directly (avoids quoting and injection pitfalls).
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        WORKSPACE_PATH: ${{ inputs.workspace_path }}
      run: |
        set -euo pipefail

        # Single-quoted bash -c body: $(nproc) expands inside the container.
        docker run --rm \
          --user "$(id -u):$(id -g)" \
          -v "$WORKSPACE_PATH:$WORKSPACE_PATH" \
          -w "$WORKSPACE_PATH/kernel" \
          "$DOCKER_IMAGE" \
          bash -c '
            make O=../kobj ARCH=arm64 defconfig &&
            make O=../kobj -j$(nproc) &&
            make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1
          '

    - name: Build video-driver
      shell: bash
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        WORKSPACE_PATH: ${{ inputs.workspace_path }}
      run: |
        set -euo pipefail

        # WORKSPACE_PATH is exported into the container (-e) because the
        # single-quoted bash -c body expands it in-container to locate the
        # kernel objects produced by the previous step.
        docker run --rm \
          --user "$(id -u):$(id -g)" \
          -v "$WORKSPACE_PATH:$WORKSPACE_PATH" \
          -w "$WORKSPACE_PATH/video-driver" \
          -e WORKSPACE_PATH="$WORKSPACE_PATH" \
          "$DOCKER_IMAGE" \
          bash -c '
            make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \
              -C "$WORKSPACE_PATH/kobj" \
              M="$(pwd)" VIDEO_KERNEL_ROOT="$(pwd)" modules
          '
42 changes: 42 additions & 0 deletions .github/actions/build/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
---
name: Build Workspace
description: |
  Builds kernel and video-driver using a Docker image.

inputs:
  docker_image:
    description: Docker image to use
    required: true
  workspace_path:
    description: Path to workspace directory
    required: true

runs:
  using: "composite"
  steps:
    - name: Build kernel
      shell: bash
      # Fix: pass inputs via env instead of interpolating ${{ }} into the
      # run script — direct interpolation is a shell-injection vector and
      # breaks on paths containing spaces. Also fail fast on any error.
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        WORKSPACE_PATH: ${{ inputs.workspace_path }}
      run: |
        set -euo pipefail

        # Single-quoted bash -c body: $(nproc) expands inside the container,
        # so no backslash-escaping is needed.
        docker run --rm \
          -v "$WORKSPACE_PATH:$WORKSPACE_PATH" \
          -w "$WORKSPACE_PATH/kernel" \
          --user "$(id -u):$(id -g)" \
          "$DOCKER_IMAGE" \
          bash -c '
            make O=../kobj ARCH=arm64 defconfig &&
            make O=../kobj -j$(nproc) &&
            make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1
          '

    - name: Build video-driver
      shell: bash
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        WORKSPACE_PATH: ${{ inputs.workspace_path }}
      run: |
        set -euo pipefail

        # WORKSPACE_PATH must be exported into the container (-e) because
        # the single-quoted bash -c body references it in-container to find
        # the kernel build objects from the previous step.
        docker run --rm \
          -v "$WORKSPACE_PATH:$WORKSPACE_PATH" \
          -w "$WORKSPACE_PATH/video-driver" \
          --user "$(id -u):$(id -g)" \
          -e WORKSPACE_PATH="$WORKSPACE_PATH" \
          "$DOCKER_IMAGE" \
          bash -c '
            make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \
              -C "$WORKSPACE_PATH/kobj" \
              M="$(pwd)" VIDEO_KERNEL_ROOT="$(pwd)" modules
          '
229 changes: 229 additions & 0 deletions .github/actions/lava_job_render/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,229 @@
---
name: LAVA Job Render
description: |
  Resolves presigned artifact URLs, populates metadata/cloudData JSON
  (jq runs inside the tool container), and renders a LAVA job definition.

inputs:
  docker_image:
    description: Docker image
    required: true
    default: kmake-image:ver.1.0

runs:
  using: "composite"
  steps:
    - name: Process presigned_urls.json
      id: process_urls
      uses: actions/github-script@v7
      env:
        MACHINE: ${{ env.MACHINE }}
      with:
        script: |
          const fs = require('fs');
          const p = require('path');

          const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
          if (!fs.existsSync(filePath)) {
            core.setFailed(`File not found: ${filePath}`);
            return;
          }

          // Read JSON mapping of uploaded file paths -> presigned URLs
          const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));

          // Return the URL whose uploaded path ends with `filename`, or null.
          function findUrlByFilename(filename) {
            for (const [path, url] of Object.entries(data)) {
              if (path.endsWith(filename)) return url;
            }
            return null;
          }

          const modulesTarUrl = findUrlByFilename('modules.tar.xz');
          const imageUrl = findUrlByFilename('Image');
          const mergedRamdiskUrl = findUrlByFilename('video-merged.cpio.gz');
          const vmlinuxUrl = findUrlByFilename('vmlinux');

          // DTB is expected to be "<MACHINE>.dtb"
          const dtbFilename = `${process.env.MACHINE}.dtb`;
          const dtbUrl = findUrlByFilename(dtbFilename);

          // Missing artifacts are emitted as '' so downstream shell steps
          // can test with [ -n ... ] / [ -z ... ].
          core.setOutput('modules_url', modulesTarUrl || '');
          core.setOutput('image_url', imageUrl || '');
          core.setOutput('vmlinux_url', vmlinuxUrl || '');
          core.setOutput('dtb_url', dtbUrl || '');
          core.setOutput('merged_ramdisk_url', mergedRamdiskUrl || '');

          console.log(`Modules URL: ${modulesTarUrl}`);
          console.log(`Image URL: ${imageUrl}`);
          console.log(`Vmlinux URL: ${vmlinuxUrl}`);
          console.log(`Dtb URL: ${dtbUrl}`);
          console.log(`Merged Ramdisk URL: ${mergedRamdiskUrl}`);

    - name: Create metadata.json
      id: create_metadata
      shell: bash
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        DTB_URL: ${{ steps.process_urls.outputs.dtb_url }}
        MACHINE: ${{ env.MACHINE }}
      run: |
        set -euo pipefail
        echo "Creating metadata.json from job_render templates"
        # NOTE(review): assumes a sibling checkout at ../job_render relative
        # to the workflow CWD — confirm against the calling workflow.
        cd ../job_render

        # jq runs in the container; the redirection happens on the host.
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir "$PWD" \
          -v "$(dirname "$PWD"):$(dirname "$PWD")" \
          -e DTB_URL="$DTB_URL" \
          -e MACHINE="$MACHINE" \
          "$DOCKER_IMAGE" \
          jq '.artifacts["dtbs/qcom/\(env.MACHINE).dtb"] = env.DTB_URL' data/metadata.json > temp.json

        mv temp.json data/metadata.json

    - name: Upload metadata.json
      id: upload_metadata
      uses: qualcomm-linux/video-driver/.github/actions/aws_s3_helper@video.qclinux.main
      with:
        local_file: ../job_render/data/metadata.json
        s3_bucket: qli-prd-video-gh-artifacts
        mode: single-upload
        upload_location: ${{ env.UPLOAD_LOCATION }}

    - name: Create template json cloudData.json
      shell: bash
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        METADATA_URL: ${{ steps.upload_metadata.outputs.presigned_url }}
        IMAGE_URL: ${{ steps.process_urls.outputs.image_url }}
        VMLINUX_URL: ${{ steps.process_urls.outputs.vmlinux_url }}
        MODULES_URL: ${{ steps.process_urls.outputs.modules_url }}
        MERGED_RAMDISK_URL: ${{ steps.process_urls.outputs.merged_ramdisk_url }}
      run: |
        set -euo pipefail
        echo "Populating cloudData.json with kernel, vmlinux, modules, metadata, ramdisk"
        cd ../job_render

        # jq_set VAR FILTER: run FILTER in the tool container with host env
        # var VAR passed through (-e VAR), rewriting data/cloudData.json.
        # Factors out the five previously duplicated docker+jq invocations.
        jq_set() {
          docker run -i --rm \
            --user "$(id -u):$(id -g)" \
            --workdir "$PWD" \
            -v "$(dirname "$PWD"):$(dirname "$PWD")" \
            -e "$1" \
            "$DOCKER_IMAGE" \
            jq "$2" data/cloudData.json > temp.json
          mv temp.json data/cloudData.json
        }

        jq_set METADATA_URL '.artifacts.metadata = env.METADATA_URL'
        jq_set IMAGE_URL '.artifacts.kernel = env.IMAGE_URL'

        # vmlinux (set only if present)
        if [ -n "$VMLINUX_URL" ]; then
          jq_set VMLINUX_URL '.artifacts.vmlinux = env.VMLINUX_URL'
        fi

        jq_set MODULES_URL '.artifacts.modules = env.MODULES_URL'

        # ramdisk: use merged only here (fallback added in next step if missing)
        if [ -n "$MERGED_RAMDISK_URL" ]; then
          jq_set MERGED_RAMDISK_URL '.artifacts.ramdisk = env.MERGED_RAMDISK_URL'
        fi

    - name: Update firmware and ramdisk
      shell: bash
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        MERGED_RAMDISK_URL: ${{ steps.process_urls.outputs.merged_ramdisk_url }}
        FIRMWARE: ${{ env.FIRMWARE }}
      run: |
        set -euo pipefail
        cd ../job_render

        # Same containerized-jq helper as the previous step (steps cannot
        # share shell functions, so it is redefined here).
        jq_set() {
          docker run -i --rm \
            --user "$(id -u):$(id -g)" \
            --workdir "$PWD" \
            -v "$(dirname "$PWD"):$(dirname "$PWD")" \
            -e "$1" \
            "$DOCKER_IMAGE" \
            jq "$2" data/cloudData.json > temp.json
          mv temp.json data/cloudData.json
        }

        # Fallback to stable kerneltest ramdisk only if merged ramdisk is not available
        if [ -z "$MERGED_RAMDISK_URL" ]; then
          echo "Merged ramdisk not found. Using stable kerneltest ramdisk fallback."
          # Fix: `aws s3 presign` takes --expires-in (seconds); --expires is
          # an `s3 cp` option and makes this command fail.
          RAMDISK_URL="$(aws s3 presign s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
          export RAMDISK_URL
          jq_set RAMDISK_URL '.artifacts.ramdisk = env.RAMDISK_URL'
        else
          echo "Ramdisk set from merged source; skipping kerneltest fallback."
        fi

        # Optional board-specific firmware initramfs
        if [ -n "$FIRMWARE" ]; then
          case "$FIRMWARE" in
            sm8750-mtp)
              FW_FILE="initramfs-firmware-dragonboard410c-image-sm8750-mtp.cpio.gz"
              ;;
            *)
              FW_FILE="initramfs-firmware-${FIRMWARE}-image-qcom-armv8a.cpio.gz"
              ;;
          esac

          echo "Using firmware file: $FW_FILE"

          # Fix: --expires-in (see above).
          FIRMWARE_URL="$(aws s3 presign "s3://qli-prd-video-gh-artifacts/qualcomm-linux/video-driver/artifacts/initramfs/${FW_FILE}" --expires-in 7600)"
          export FIRMWARE_URL
          jq_set FIRMWARE_URL '.artifacts.firmware = env.FIRMWARE_URL'
        else
          echo "No FIRMWARE provided; skipping firmware artifact update."
        fi

    - name: Create lava_job_definition
      shell: bash
      env:
        DOCKER_IMAGE: ${{ inputs.docker_image }}
        TARGET: ${{ env.LAVA_NAME }}
        TARGET_DTB: ${{ env.MACHINE }}
      run: |
        set -euo pipefail
        cd ../job_render
        mkdir -p renders

        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir "$PWD" \
          -v "$(dirname "$PWD"):$(dirname "$PWD")" \
          -e TARGET="$TARGET" \
          -e TARGET_DTB="$TARGET_DTB" \
          "$DOCKER_IMAGE" \
          sh -c 'export BOOT_METHOD=fastboot && python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --video_pre-merge'
68 changes: 68 additions & 0 deletions .github/actions/loading/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
---
name: Load Parameters
description: Load parameters for the build job

outputs:
  build_matrix:
    description: Build matrix
    value: ${{ steps.set-matrix.outputs.build_matrix }}
  full_matrix:
    description: Full matrix containing lava details
    value: ${{ steps.set-matrix.outputs.full_matrix }}

runs:
  using: "composite"
  steps:
    - name: Set Build Matrix
      id: set-matrix
      uses: actions/github-script@v7
      with:
        script: |
          const fs = require('fs');
          const path = require('path');

          // MACHINES.json may live under video-driver/ci (nested checkout)
          // or directly under ci (repo-root checkout); probe both.
          const candidates = [
            { label: 'nested', file: path.join(process.env.GITHUB_WORKSPACE, 'video-driver', 'ci', 'MACHINES.json') },
            { label: 'root', file: path.join(process.env.GITHUB_WORKSPACE, 'ci', 'MACHINES.json') },
          ];

          const hit = candidates.find((c) => fs.existsSync(c.file));
          if (!hit) {
            // Dump diagnostics so a missing/misplaced checkout is obvious
            // from the job log.
            console.log('!!! Error: MACHINES.json not found in expected locations.');
            for (const c of candidates) {
              console.log(`Checked: ${c.file}`);
            }
            console.log('--- Workspace Root Contents ---');
            try {
              console.log(fs.readdirSync(process.env.GITHUB_WORKSPACE));
            } catch (e) { console.log(e.message); }

            core.setFailed(`MACHINES.json not found.`);
            return;
          }
          console.log(`Found config at ${hit.label} path: ${hit.file}`);

          // Parse the file
          let targets;
          try {
            targets = JSON.parse(fs.readFileSync(hit.file, 'utf-8'));
          } catch (err) {
            core.setFailed(`Failed to parse MACHINES.json: ${err.message}`);
            return;
          }

          // Emit both matrix flavours from one pass over the entries:
          // build (machine/firmware) and full (adds the LAVA device name).
          const entries = Object.values(targets);
          const build_matrix = entries.map(({ machine, firmware }) => ({ machine, firmware }));
          core.setOutput('build_matrix', JSON.stringify(build_matrix));

          const full_matrix = entries.map(({ machine, firmware, lavaname }) => ({ machine, firmware, lavaname }));
          core.setOutput('full_matrix', JSON.stringify(full_matrix));
Loading
Loading