diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..526c8a38 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.sh text eol=lf \ No newline at end of file diff --git a/.github/actions/run-unity-test-batch/action.yml b/.github/actions/run-unity-test-batch/action.yml new file mode 100644 index 00000000..56dc7838 --- /dev/null +++ b/.github/actions/run-unity-test-batch/action.yml @@ -0,0 +1,85 @@ +name: Run Unity UTP Test Batch +description: Runs a batch of Unity UTP tests in a given Unity project. +inputs: + unity-project-path: + description: Absolute path to the Unity project. + required: true + build-target: + description: Build target to use. + required: true + build-args: + description: Additional build args. + required: false + default: "" + artifact-name: + description: Artifact name for uploaded test artifacts (UTP logs, Unity Editor/Player logs, and test results XML; must be unique per matrix job). + required: false + default: unity-tests-batch-utp-logs + test-profile: + description: Predefined UTP scenario profile to run (normal|negative|all). + required: false + default: normal + tests-input: + description: Optional explicit comma-separated test list override. When provided, this takes precedence over test-profile. 
+ required: false + default: "" +runs: + using: composite + steps: + - name: Prepare test list and install packages + shell: bash + working-directory: ${{ inputs.unity-project-path }} + run: | + set -euo pipefail + tests_input="${{ inputs.tests-input }}" + test_profile="${{ inputs.test-profile }}" + + if [ -z "$tests_input" ]; then + case "$test_profile" in + normal) + tests_input="CompilerWarnings,BuildWarnings" + ;; + negative) + tests_input="CompilerErrors,BuildErrors" + ;; + all) + tests_input="CompilerWarnings,CompilerErrors,BuildWarnings,BuildErrors,PlaymodeTestsErrors,EditmodeTestsErrors,EditmodeTestsPassing,EditmodeTestsSkipped,PlaymodeTestsPassing,PlaymodeTestsSkipped,EditmodeSuite,PlaymodeSuite" + ;; + *) + echo "::error::Unknown test-profile '$test_profile'. Expected one of: normal, negative, all." + exit 1 + ;; + esac + fi + + echo "Using UTP tests list: ${tests_input}" + echo "TESTS_INPUT=$tests_input" >> $GITHUB_ENV + + needs_test_framework=false + if [[ "$tests_input" == *"PlaymodeTests"* || "$tests_input" == *"EditmodeTests"* || "$tests_input" == *"EditmodeSuite"* || "$tests_input" == *"PlaymodeSuite"* ]]; then + needs_test_framework=true + fi + + npm install -g openupm-cli + openupm add com.utilities.buildpipeline + if [ "$needs_test_framework" = true ]; then + openupm add com.unity.test-framework + openupm add com.unity.test-framework.utp-reporter || true + fi + + - name: Run tests + shell: bash + env: + UNITY_PROJECT_PATH: ${{ inputs.unity-project-path }} + BUILD_TARGET: ${{ inputs.build-target }} + BUILD_ARGS: ${{ inputs.build-args }} + run: | + bash "${GITHUB_WORKSPACE}/.github/actions/scripts/run-utp-tests.sh" + + - name: Upload test artifacts + if: always() + uses: actions/upload-artifact@v7 + with: + name: ${{ inputs.artifact-name }} + path: utp-artifacts/ + if-no-files-found: ignore diff --git a/.github/actions/scripts/run-utp-tests.sh b/.github/actions/scripts/run-utp-tests.sh new file mode 100755 index 00000000..892cfdfd --- /dev/null +++ 
b/.github/actions/scripts/run-utp-tests.sh @@ -0,0 +1,394 @@ +#!/usr/bin/env bash +set -uo pipefail + +_UTP_HELPERS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/utp-ci-assertion-helpers.sh" +# shellcheck source=utp-ci-assertion-helpers.sh +source "$_UTP_HELPERS" + +UNITY_PROJECT_PATH=${UNITY_PROJECT_PATH:?UNITY_PROJECT_PATH is required} +BUILD_TARGET=${BUILD_TARGET:?BUILD_TARGET is required} +BUILD_ARGS=${BUILD_ARGS:-} +TESTS_INPUT=${TESTS_INPUT:-} + +if printf '%s' "$BUILD_ARGS" | grep -qE '[;&`|]'; then + echo "::error::BUILD_ARGS contains disallowed shell metacharacters" + exit 1 +fi + +declare -a build_args=() +if [ -n "$BUILD_ARGS" ]; then + # Split on whitespace into an array without invoking the shell + read -r -a build_args <<< "$BUILD_ARGS" +fi + +IFS=',' read -ra tests <<< "$TESTS_INPUT" +failures=0 + +declare -A known_tests=( + [CompilerWarnings]=1 + [CompilerErrors]=1 + [BuildWarnings]=1 + [BuildErrors]=1 + [PlaymodeTestsErrors]=1 + [EditmodeTestsErrors]=1 + [EditmodeTestsPassing]=1 + [EditmodeTestsSkipped]=1 + [PlaymodeTestsPassing]=1 + [PlaymodeTestsSkipped]=1 + [EditmodeSuite]=1 + [PlaymodeSuite]=1 +) + +effective_tests=0 +declare -a selected_tests=() +for raw_test in "${tests[@]}"; do + tname="$(echo "$raw_test" | xargs)" + if [ -n "$tname" ] && [ "$tname" != "None" ]; then + if [ -z "${known_tests[$tname]+x}" ]; then + echo "::error::TESTS_INPUT includes unknown test selection '$tname'" + exit 1 + fi + selected_tests+=("$tname") + effective_tests=$((effective_tests + 1)) + fi +done +if [ "$effective_tests" -eq 0 ]; then + echo "::error::TESTS_INPUT is empty or contains no runnable test entries" + exit 1 +fi + +clean_tests() { + rm -f "$UNITY_PROJECT_PATH/Assets/UnityCliTests"/*.cs 2>/dev/null || true + rm -f "$UNITY_PROJECT_PATH/Assets/Editor/UnityCliTests"/*.cs 2>/dev/null || true + rm -f "$UNITY_PROJECT_PATH/Assets/Tests/PlayMode/UnityCliTests"/*.cs 2>/dev/null || true + rm -f "$UNITY_PROJECT_PATH/Assets/Tests/EditMode/UnityCliTests"/*.cs 
2>/dev/null || true + rm -f "$UNITY_PROJECT_PATH/Assets/Tests/EditMode/UnityCliTests"/*.asmdef 2>/dev/null || true + rm -f "$UNITY_PROJECT_PATH/Assets/Tests/EditMode/Editor/UnityCliTests"/*.cs 2>/dev/null || true +} + +clean_build_outputs() { + rm -rf "$UNITY_PROJECT_PATH/Builds" 2>/dev/null || true + mkdir -p "$UNITY_PROJECT_PATH/Builds/Logs" +} + +# Expectations for each synthetic test +# expected_status: 0 = should succeed, 1 = should fail +expected_status_for() { + case "$1" in + CompilerWarnings|BuildWarnings) echo 0 ;; + CompilerErrors|BuildErrors) echo 1 ;; + PlaymodeTestsErrors|EditmodeTestsErrors) echo 1 ;; + EditmodeSuite|PlaymodeSuite) echo 1 ;; + EditmodeTestsPassing|EditmodeTestsSkipped|PlaymodeTestsPassing|PlaymodeTestsSkipped) echo 0 ;; + *) echo 0 ;; + esac +} + +expected_message_for() { + case "$1" in + CompilerErrors) echo "Intentional compiler error" ;; + BuildErrors) echo "Intentional build failure" ;; + PlaymodeTestsErrors|PlaymodeSuite) echo "Intentional playmode failure" ;; + EditmodeTestsErrors|EditmodeSuite) echo "Intentional editmode failure" ;; + CompilerWarnings) echo "Intentional warning" ;; + BuildWarnings) echo "Intentional build warning" ;; + *) echo "" ;; + esac +} + +echo "UTP preflight: selected ${effective_tests} scenario(s): ${selected_tests[*]}" +for tname in "${selected_tests[@]}"; do + echo " - ${tname}: expected_status=$(expected_status_for "$tname") expected_message='$(expected_message_for "$tname")'" +done + +mkdir -p "$GITHUB_WORKSPACE/utp-artifacts" + +for raw_test in "${tests[@]}"; do + test_name="$(echo "$raw_test" | xargs)" + if [ -z "$test_name" ] || [ "$test_name" = "None" ]; then + echo "Skipping empty/None test entry" + continue + fi + + src="$GITHUB_WORKSPACE/unity-tests/${test_name}.cs" + is_suite=0 + case "$test_name" in + EditmodeSuite|PlaymodeSuite) is_suite=1 ;; + esac + if [ "$is_suite" -eq 0 ] && [ ! 
-f "$src" ]; then + echo "::error::Requested test '$test_name' not found at $src" + failures=$((failures+1)) + continue + fi + + clean_tests + clean_build_outputs + + asmdef_src="" + + case "$test_name" in + CompilerWarnings|CompilerErrors) + dest="$UNITY_PROJECT_PATH/Assets/UnityCliTests" + ;; + BuildWarnings|BuildErrors) + dest="$UNITY_PROJECT_PATH/Assets/Editor/UnityCliTests" + ;; + PlaymodeTestsErrors|PlaymodeTestsPassing|PlaymodeTestsSkipped) + dest="$UNITY_PROJECT_PATH/Assets/Tests/PlayMode/UnityCliTests" + asmdef_src="$GITHUB_WORKSPACE/unity-tests/UnityCliTests.PlayMode.asmdef" + ;; + EditmodeTestsErrors|EditmodeTestsPassing|EditmodeTestsSkipped) + dest="$UNITY_PROJECT_PATH/Assets/Tests/EditMode/UnityCliTests" + asmdef_src="$GITHUB_WORKSPACE/unity-tests/UnityCliTests.EditMode.Editor.asmdef" + ;; + EditmodeSuite) + dest="$UNITY_PROJECT_PATH/Assets/Tests/EditMode/UnityCliTests" + asmdef_src="$GITHUB_WORKSPACE/unity-tests/UnityCliTests.EditMode.Editor.asmdef" + suite_sources="EditmodeTestsErrors,EditmodeTestsPassing,EditmodeTestsSkipped" + ;; + PlaymodeSuite) + dest="$UNITY_PROJECT_PATH/Assets/Tests/PlayMode/UnityCliTests" + asmdef_src="$GITHUB_WORKSPACE/unity-tests/UnityCliTests.PlayMode.asmdef" + suite_sources="PlaymodeTestsErrors,PlaymodeTestsPassing,PlaymodeTestsSkipped" + ;; + *) + echo "::error::Unknown test selection '$test_name'" + failures=$((failures+1)) + continue + ;; + esac + + mkdir -p "$dest" + if [ -n "$asmdef_src" ]; then + if [ ! 
-f "$asmdef_src" ]; then + echo "::error::Assembly definition for tests not found at $asmdef_src" + failures=$((failures+1)) + continue + fi + cp "$asmdef_src" "$dest/" + fi + + if [ -n "${suite_sources:-}" ]; then + IFS=',' read -ra suite_files <<< "$suite_sources" + for f in "${suite_files[@]}"; do + f="${f// /}" + suite_src="$GITHUB_WORKSPACE/unity-tests/${f}.cs" + if [ -f "$suite_src" ]; then + cp "$suite_src" "$dest/" + fi + done + unset suite_sources + echo "Running suite: $test_name (copied ${#suite_files[@]} test files to $dest)" + elif [ -f "$src" ]; then + cp "$src" "$dest/" + echo "Running test: $test_name (copied to $dest)" + else + echo "::error::Requested test '$test_name' not found at $src" + failures=$((failures+1)) + continue + fi + + validate_rc=0 + build_rc=0 + + ran_custom_flow=0 + expected_for_flow=$(expected_status_for "$test_name") + + if [ "$test_name" = "EditmodeTestsErrors" ] || [ "$test_name" = "EditmodeTestsPassing" ] || [ "$test_name" = "EditmodeTestsSkipped" ] || [ "$test_name" = "EditmodeSuite" ]; then + unity_rc=0 + unity-cli run --log-name "${test_name}-EditMode" -runTests -testPlatform editmode -assemblyNames "UnityCli.EditMode.EditorTests" -testResults "$UNITY_PROJECT_PATH/Builds/Logs/${test_name}-results.xml" -quit || unity_rc=$? 
+ + results_xml="" + if results_xml="$(find_nunit_results_xml "$test_name")"; then + : + else + results_xml="" + fi + + xml_ok=0 + if [ -n "$results_xml" ] && [ -f "$results_xml" ] && grep -q "]" "$results_xml" 2>/dev/null; then + xml_ok=1 + fi + + validate_rc=$unity_rc + if [ "$xml_ok" -eq 0 ]; then + if [ "$unity_rc" -ne 0 ]; then + validate_rc=$unity_rc + elif [ "$expected_for_flow" -eq 0 ] && edit_play_log_suggests_tests_completed_ok "$test_name" "EditMode"; then + validate_rc=0 + echo "::notice::${test_name}: using log-based test completion evidence (no NUnit XML with at expected path)" + elif [ "$expected_for_flow" -eq 0 ]; then + validate_rc=1 + echo "::warning::${test_name}: no NUnit XML with and no trustworthy log completion markers (unity_rc=$unity_rc)" + else + validate_rc=$unity_rc + fi + fi + + build_rc=$validate_rc + ran_custom_flow=1 + fi + + if [ "$test_name" = "PlaymodeTestsErrors" ] || [ "$test_name" = "PlaymodeTestsPassing" ] || [ "$test_name" = "PlaymodeTestsSkipped" ] || [ "$test_name" = "PlaymodeSuite" ]; then + unity_rc=0 + unity-cli run --log-name "${test_name}-PlayMode" -runTests -testPlatform playmode -assemblyNames "UnityCli.PlayMode.Tests" -testResults "$UNITY_PROJECT_PATH/Builds/Logs/${test_name}-results.xml" -quit || unity_rc=$? 
+ + results_xml="" + if results_xml="$(find_nunit_results_xml "$test_name")"; then + : + else + results_xml="" + fi + + xml_ok=0 + if [ -n "$results_xml" ] && [ -f "$results_xml" ] && grep -q "]" "$results_xml" 2>/dev/null; then + xml_ok=1 + fi + + validate_rc=$unity_rc + if [ "$xml_ok" -eq 0 ]; then + if [ "$unity_rc" -ne 0 ]; then + validate_rc=$unity_rc + elif [ "$expected_for_flow" -eq 0 ] && edit_play_log_suggests_tests_completed_ok "$test_name" "PlayMode"; then + validate_rc=0 + echo "::notice::${test_name}: using log-based test completion evidence (no NUnit XML with at expected path)" + elif [ "$expected_for_flow" -eq 0 ]; then + validate_rc=1 + echo "::warning::${test_name}: no NUnit XML with and no trustworthy log completion markers (unity_rc=$unity_rc)" + else + validate_rc=$unity_rc + fi + fi + + build_rc=$validate_rc + ran_custom_flow=1 + fi + + if [ "$ran_custom_flow" -eq 0 ]; then + unity-cli run --log-name "${test_name}-Validate" -quit -executeMethod Utilities.Editor.BuildPipeline.UnityPlayerBuildTools.ValidateProject -importTMProEssentialsAsset || validate_rc=$? + + build_cmd=( + unity-cli run + --log-name "${test_name}-Build" + -buildTarget "$BUILD_TARGET" + -quit + -executeMethod Utilities.Editor.BuildPipeline.UnityPlayerBuildTools.StartCommandLineBuild + -sceneList Assets/Scenes/SampleScene.unity + ) + + if [ ${#build_args[@]} -gt 0 ]; then + build_cmd+=("${build_args[@]}") + fi + + "${build_cmd[@]}" || build_rc=$? 
+ fi + + expected=$(expected_status_for "$test_name") + exp_msg=$(expected_message_for "$test_name") + + test_failed=0 + message_found=0 + utp_error_found=0 + utp_any_signal=0 + + if [ -n "$exp_msg" ]; then + while IFS= read -r log_file; do + if [ -z "$log_file" ]; then + continue + fi + if grep -qi -- "$exp_msg" "$log_file" 2>/dev/null; then + message_found=1 + break + fi + done < <(find "$UNITY_PROJECT_PATH/Builds/Logs" -maxdepth 1 -type f -name "*${test_name}*.log") + fi + + # UTP: severity rules differ for warning-only scenarios vs everything else. + while IFS= read -r utp_file; do + if [ -z "$utp_file" ]; then + continue + fi + if utp_signals_failure_for_expected_success "$test_name" "$utp_file"; then + utp_error_found=1 + break + fi + done < <(find "$UNITY_PROJECT_PATH/Builds/Logs" -maxdepth 1 -type f -name "*${test_name}*-utp-json.log") + + while IFS= read -r utp_file; do + if [ -z "$utp_file" ]; then + continue + fi + if utp_signals_any_severity_problem "$utp_file"; then + utp_any_signal=1 + break + fi + done < <(find "$UNITY_PROJECT_PATH/Builds/Logs" -maxdepth 1 -type f -name "*${test_name}*-utp-json.log") + + if [ "$expected" -eq 0 ]; then + if [ "$validate_rc" -ne 0 ] || [ "$build_rc" -ne 0 ]; then + echo "::error::Test $test_name was expected to succeed but failed (validate_rc=$validate_rc, build_rc=$build_rc)" + test_failed=1 + fi + if [ "$utp_error_found" -eq 1 ]; then + echo "::error::Test $test_name produced UTP errors but was expected to succeed" + test_failed=1 + fi + if [ -n "$exp_msg" ] && [ "$message_found" -eq 0 ]; then + echo "::error::Test $test_name did not emit expected message '$exp_msg'" + test_failed=1 + fi + else + if [ "$validate_rc" -ne 0 ] || [ "$build_rc" -ne 0 ] || [ "$message_found" -eq 1 ] || [ "$utp_any_signal" -eq 1 ]; then + : # Expected failure observed + else + echo "::error::Test $test_name was expected to fail but succeeded" + test_failed=1 + fi + + # Only insist on the expected message if both invocations claimed 
success. + if [ -n "$exp_msg" ] && [ "$message_found" -eq 0 ] && [ "$validate_rc" -eq 0 ] && [ "$build_rc" -eq 0 ]; then + echo "::error::Test $test_name did not emit expected message '$exp_msg'" + test_failed=1 + fi + fi + + if [ "$test_failed" -eq 0 ]; then + echo "::notice::Test $test_name behaved as expected (validate_rc=$validate_rc, build_rc=$build_rc)" + else + failures=$((failures+1)) + fi + + test_artifacts="$GITHUB_WORKSPACE/utp-artifacts/$test_name" + mkdir -p "$test_artifacts" + logs_dir="$UNITY_PROJECT_PATH/Builds/Logs" + utp_pattern="*${test_name}*-utp-json.log" + # Primary: project Builds/Logs; fallback: workspace (e.g. alternate log roots). Exclude staging and .git. + { + if [ -d "$logs_dir" ]; then + find "$logs_dir" -maxdepth 1 -type f -name "$utp_pattern" -print + fi + if [ -n "${GITHUB_WORKSPACE:-}" ] && [ -d "$GITHUB_WORKSPACE" ]; then + find "$GITHUB_WORKSPACE" \( -path '*/utp-artifacts/*' -o -path '*/.git/*' \) -prune -o -type f -name "$utp_pattern" -print 2>/dev/null + fi + } | sort -u | while IFS= read -r utp_src; do + [ -z "$utp_src" ] && continue + dest_file="$test_artifacts/$(basename "$utp_src")" + if [ ! 
-f "$dest_file" ]; then + cp "$utp_src" "$dest_file" || true + fi + done || true + # Copy test results XML when present (Edit/Play mode) for later analysis + if nunit_copy="$(find_nunit_results_xml "$test_name")" && [ -n "$nunit_copy" ] && [ -f "$nunit_copy" ]; then + cp "$nunit_copy" "$test_artifacts/" || true + fi + # Copy all Unity Editor/Player logs for this scenario + find "$UNITY_PROJECT_PATH/Builds/Logs" -maxdepth 1 -type f -name "*${test_name}*.log" -exec cp {} "$test_artifacts/" \; 2>/dev/null || true + +done + +if [ "$failures" -gt 0 ]; then + echo "::error::One or more tests did not meet expectations ($failures)" + exit 1 +fi + +exit 0 diff --git a/.github/actions/scripts/utp-ci-assertion-helpers.sh b/.github/actions/scripts/utp-ci-assertion-helpers.sh new file mode 100644 index 00000000..004c34f3 --- /dev/null +++ b/.github/actions/scripts/utp-ci-assertion-helpers.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +# Shared helpers for UTP CI batch validation (.github/actions/scripts/run-utp-tests.sh). +# Keep behavior in sync with contract tests: tests/run-utp-tests-contract.sh + +# Returns 0 (true) if this UTP JSON log should fail an *expected-success* scenario. +utp_signals_failure_for_expected_success() { + local test_name="$1" + local utp_file="$2" + case "$test_name" in + CompilerWarnings|BuildWarnings) + # Engine / allocator assert telemetry is common here; only treat Error/Exception as hard failures. + grep -qi '"severity"[[:space:]]*:[[:space:]]*"\(Error\|Exception\)"' "$utp_file" 2>/dev/null + ;; + *) + grep -qi '"severity"[[:space:]]*:[[:space:]]*"\(Error\|Exception\|Assert\)"' "$utp_file" 2>/dev/null + ;; + esac +} + +# Returns 0 if UTP log contains any Error/Exception/Assert (used for expected-failure scenarios). +utp_signals_any_severity_problem() { + local utp_file="$1" + grep -qi '"severity"[[:space:]]*:[[:space:]]*"\(Error\|Exception\|Assert\)"' "$utp_file" 2>/dev/null +} + +# Prints first path to an NUnit results file containing , or nothing. 
+find_nunit_results_xml() { + local test_name="$1" + local f + + for f in \ + "$UNITY_PROJECT_PATH/Builds/Logs/${test_name}-results.xml" \ + "$UNITY_PROJECT_PATH/Builds/Logs/${test_name}-Results.xml"; do + if [ -f "$f" ] && grep -q "]" "$f" 2>/dev/null; then + printf '%s\n' "$f" + return 0 + fi + done + + while IFS= read -r f; do + [ -n "$f" ] || continue + case "$f" in + */PackageCache/*|*/.git/*) continue ;; + esac + if grep -q "]" "$f" 2>/dev/null; then + printf '%s\n' "$f" + return 0 + fi + done < <( + find "$UNITY_PROJECT_PATH" -type f \( \ + -name "${test_name}-results.xml" -o \ + -name "${test_name}-Results.xml" -o \ + -name "*${test_name}*results.xml" -o \ + -name "*${test_name}*Results.xml" \ + \) ! -path "*/PackageCache/*" ! -path "*/.git/*" 2>/dev/null | head -n 80 + ) + + return 1 +} + +# Heuristic: Unity wrote no usable XML but logs show the test runner finished successfully. +edit_play_log_suggests_tests_completed_ok() { + local test_name="$1" + local mode="$2" + local logf + local saw_success=0 + + while IFS= read -r logf; do + [ -z "$logf" ] && continue + [ -f "$logf" ] || continue + # Any explicit failure marker across matching logs should fail the heuristic. 
+ if grep -qiE 'test run failed|one or more child tests failed|failures:[[:space:]]*[1-9]|errors:[[:space:]]*[1-9]' "$logf" 2>/dev/null; then + return 1 + fi + if grep -qiE \ + 'test run completed|tests run:.*passed|total tests:.*failed:[[:space:]]*0(\>|[^0-9]|$)|Executed[[:space:]]+[0-9]+[[:space:]]+tests|Test run[[:space:]]+\[.*\][[:space:]]+finished|NUnit[[:space:]]+Engine|UnityEditor\.TestTools\.TestRunner' \ + "$logf" 2>/dev/null; then + saw_success=1 + fi + done < <(find "$UNITY_PROJECT_PATH/Builds/Logs" -maxdepth 1 -type f -name "*${test_name}*${mode}*.log" 2>/dev/null) + + [ "$saw_success" -eq 1 ] +} diff --git a/.github/scripts/scan-utp-artifacts.cjs b/.github/scripts/scan-utp-artifacts.cjs new file mode 100644 index 00000000..65131c32 --- /dev/null +++ b/.github/scripts/scan-utp-artifacts.cjs @@ -0,0 +1,99 @@ +/** + * CI / local maintenance: scan *-utp-json.log trees and verify every object only uses + * top-level keys that normalizeTelemetryEntry (UTP_SUPPORTED_TOP_LEVEL_PROPERTIES) recognizes. + * Exits non-zero on JSON parse errors or unknown keys. + * + * Kept as CommonJS so `node .github/scripts/scan-utp-artifacts.cjs` can require `dist/utp.js` + * after `npm run build` without ts-node or compiling this file. 
+ * + * Usage: node .github/scripts/scan-utp-artifacts.cjs [directory] + * Default directory: $GITHUB_WORKSPACE/utp-artifacts, else ./utp-artifacts + */ +const fs = require('fs'); +const path = require('path'); +const { normalizeTelemetryEntry } = require(path.join(__dirname, '..', '..', 'dist', 'utp.js')); + +function defaultScanRoot() { + if (process.argv[2]) { + return process.argv[2]; + } + if (process.env.GITHUB_WORKSPACE) { + return path.join(process.env.GITHUB_WORKSPACE, 'utp-artifacts'); + } + return path.join(process.cwd(), 'utp-artifacts'); +} + +const root = path.resolve(defaultScanRoot()); +if (!fs.existsSync(root)) { + console.warn(`scan-utp-artifacts: directory not found (skipping): ${root}`); + process.exit(0); +} +if (!fs.statSync(root).isDirectory()) { + console.warn(`scan-utp-artifacts: not a directory (skipping): ${root}`); + process.exit(0); +} + +const typeCount = new Map(); +const unknownKeyOccurrences = new Map(); +let totalObjects = 0; +const parseErrors = []; + +function walk(dir) { + for (const e of fs.readdirSync(dir, { withFileTypes: true })) { + const p = path.join(dir, e.name); + if (e.isDirectory()) { + walk(p); + } else if (e.name.endsWith('-utp-json.log')) { + const raw = fs.readFileSync(p, 'utf8').trim(); + if (!raw) { + continue; + } + let data; + try { + data = JSON.parse(raw); + } catch (err) { + parseErrors.push(`${path.relative(root, p)}: ${err.message}`); + continue; + } + const arr = Array.isArray(data) ? data : [data]; + for (const o of arr) { + if (!o || typeof o !== 'object') { + continue; + } + totalObjects++; + const t = o.type ?? 
'(missing)'; + typeCount.set(t, (typeCount.get(t) || 0) + 1); + const { unknownTopLevelKeys } = normalizeTelemetryEntry(o); + for (const k of unknownTopLevelKeys) { + unknownKeyOccurrences.set(k, (unknownKeyOccurrences.get(k) || 0) + 1); + } + } + } + } +} + +walk(root); + +const out = { + artifactRoot: root, + totalObjects, + types: Object.fromEntries([...typeCount.entries()].sort((a, b) => b[1] - a[1])), + unknownTopLevelKeys: Object.fromEntries([...unknownKeyOccurrences.entries()].sort((a, b) => b[1] - a[1])), + parseErrorCount: parseErrors.length, + parseErrorsSample: parseErrors.slice(0, 50), +}; +console.log(JSON.stringify(out, null, 2)); + +let code = 0; +if (parseErrors.length > 0) { + console.error(`scan-utp-artifacts: ${parseErrors.length} JSON parse error(s)`); + code = 1; +} +if (unknownKeyOccurrences.size > 0) { + console.error( + 'scan-utp-artifacts: unknown top-level key(s) on UTP objects — extend UTP_SUPPORTED_TOP_LEVEL_PROPERTIES in src/utp.ts:', + [...unknownKeyOccurrences.keys()].join(', ') + ); + code = 1; +} +process.exit(code); diff --git a/.github/workflows/build-options.json b/.github/workflows/build-options.json index 9178fb3a..4fd42a4f 100644 --- a/.github/workflows/build-options.json +++ b/.github/workflows/build-options.json @@ -5,20 +5,12 @@ "macos-latest" ], "unity-version": [ - "4.7.2", - "5.6.7f1 (e80cc3114ac1)", - "2017.4.40f1", - "2018", - "2019.x", - "2020.*", - "2021.3.x", - "2022.3.*", - "6000.0.x", - "6000.1.*", + "6000.1", "6000.2", "6000.3", "6000.4", - "6000.5" + "6000.5", + "6000.6" ], "include": [ { diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 06d1bd8e..776b545c 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -10,11 +10,21 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: + utp-batch-contract: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + 
permissions: + contents: read + steps: + - uses: actions/checkout@v6 + - name: UTP batch assertion helpers (contract) + run: bash tests/run-utp-tests-contract.sh setup: if: github.event.pull_request.draft == false runs-on: ubuntu-latest permissions: contents: read + checks: write # to publish unit test results via checks github api steps: - uses: actions/checkout@v6 with: @@ -32,6 +42,7 @@ jobs: name: build ${{ matrix.jobs.name }} permissions: contents: read + checks: write # required by nested unity-build workflow strategy: matrix: ${{ fromJSON(needs.setup.outputs.jobs) }} fail-fast: false @@ -40,12 +51,24 @@ jobs: uses: ./.github/workflows/unity-build.yml with: matrix: ${{ toJSON(matrix.jobs.matrix) }} + utp-test-profile: normal + validate-negative-scenarios: + if: github.event.pull_request.draft == false + name: build negative-scenarios + permissions: + contents: read + checks: write + secrets: inherit + uses: ./.github/workflows/unity-build.yml + with: + matrix: '{"include":[{"os":"ubuntu-latest","unity-version":"6000.1","build-target":"StandaloneLinux64","name":"negative-scenarios / ubuntu-latest StandaloneLinux64"}]}' + utp-test-profile: negative timeline: - needs: [setup, validate] + needs: [setup, validate, validate-negative-scenarios] if: always() runs-on: ubuntu-latest permissions: contents: read steps: - - uses: Kesin11/actions-timeline@c2f474758e8e9ac6f37ec64a6442dead7fd1dad2 # v2.2.5 + - uses: Kesin11/actions-timeline@44c9c178ffb2fb1d9859614a3ffa79ccfb77565e # v3.1.0 continue-on-error: true diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 564f9015..62ed36d8 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,4 +1,4 @@ -name: Publish +name: publish on: push: branches: [main] diff --git a/.github/workflows/unity-build.yml b/.github/workflows/unity-build.yml index fae4712e..f1556ecf 100644 --- a/.github/workflows/unity-build.yml +++ b/.github/workflows/unity-build.yml @@ -5,6 +5,10 @@ on: 
matrix: required: true type: string + utp-test-profile: + required: false + type: string + default: normal secrets: UNITY_USERNAME: required: true @@ -17,6 +21,9 @@ jobs: strategy: matrix: ${{ fromJSON(inputs.matrix) }} fail-fast: false + permissions: + contents: read + checks: write # to publish unit test results via checks github api defaults: run: shell: bash @@ -25,7 +32,7 @@ jobs: steps: - name: Free Disk Space if: ${{ matrix.os == 'ubuntu-latest' && (matrix.unity-version != '2018' && matrix.unity-version != '2017.4.40f1') }} - uses: endersonmenezes/free-disk-space@e6ed9b02e683a3b55ed0252f1ee469ce3b39a885 # v3.1.0 + uses: endersonmenezes/free-disk-space@7901478139cff6e9d44df5972fd8ab8fcade4db1 # v3.2.2 with: remove_android: true remove_dotnet: false @@ -77,7 +84,22 @@ jobs: if: ${{ matrix.unity-version != 'none' }} run: | unity-cli list-project-templates --unity-editor "${UNITY_EDITOR_PATH}" --json - unity-cli create-project --name "Unity Project" --unity-editor "${UNITY_EDITOR_PATH}" --json + create_rc=1 + for attempt in 1 2 3; do + echo "Create Unity Project attempt ${attempt}/3" + if unity-cli create-project --name "Unity Project" --unity-editor "${UNITY_EDITOR_PATH}" --json; then + create_rc=0 + break + fi + if [ "$attempt" -lt 3 ]; then + echo "::warning::create-project failed on attempt ${attempt}; retrying after short backoff" + sleep 10 + fi + done + if [ "$create_rc" -ne 0 ]; then + echo "::error::Failed to create Unity project after 3 attempts" + exit 1 + fi - name: Verify UNITY_PROJECT_PATH variable if: ${{ matrix.unity-version != 'none' }} id: verify-project-path @@ -101,28 +123,94 @@ jobs: else echo "Skipping build: Unity version $version does not support the build pipeline package (requires 2019.4+)" fi - - name: Install OpenUPM and build pipeline package - if: ${{ steps.verify-project-path.outputs.RUN_BUILD == 'true' }} - working-directory: ${{ env.UNITY_PROJECT_PATH }} - run: | - npm install -g openupm-cli - openupm add 
com.utilities.buildpipeline - name: Update Android Target Sdk Version - if: ${{ matrix.build-target == 'Android' }} + if: ${{ matrix.build-target == 'Android' && matrix.unity-version != 'none' }} run: | # update AndroidTargetSdkVersion to 32 in ProjectSettings/ProjectSettings.asset sed -i 's/AndroidTargetSdkVersion: [0-9]*/AndroidTargetSdkVersion: 32/' "${UNITY_PROJECT_PATH}/ProjectSettings/ProjectSettings.asset" - # ensure android dependencies are installed + # ensure android dependencies are installed before UTP/build batches unity-cli setup-unity -p "${UNITY_PROJECT_PATH}" -m android - - name: Build Project + - name: Compute safe artifact name + id: artifact-name + env: + MATRIX_NAME: ${{ matrix.name }} + run: | + set -euo pipefail + unity_version="${{ matrix.unity-version }}" + unity_version="${unity_version//'*'/x}" + bt="${{ matrix.build-target }}" + bt="${bt:-none}" + # Per-job slug so parallel matrix rows never collide (name encodes the job-builder row). + base="${{ matrix.os }}-${unity_version}-${bt}" + ba="${{ matrix.build-args }}" + if [ -n "${ba}" ]; then + if command -v sha256sum >/dev/null 2>&1; then + bah=$(printf '%s' "$ba" | sha256sum | awk '{print $1}' | cut -c1-12) + else + bah=$(printf '%s' "$ba" | shasum -a 256 2>/dev/null | awk '{print $1}' | cut -c1-12) + fi + base="${base}-ba${bah}" + fi + mname="${MATRIX_NAME:-}" + if [ -n "$mname" ]; then + slug=$(printf '%s' "$mname" | sed 's/[^a-zA-Z0-9._-]/_/g' | cut -c1-100) + base="${slug}__${base}" + fi + echo "name=${base}-utp-batch-logs" >> $GITHUB_OUTPUT + shell: bash + - name: Run Unity UTP test batches + if: ${{ steps.verify-project-path.outputs.RUN_BUILD == 'true' }} + uses: ./.github/actions/run-unity-test-batch + with: + unity-project-path: ${{ env.UNITY_PROJECT_PATH }} + build-target: ${{ matrix.build-target }} + build-args: ${{ matrix.build-args }} + test-profile: ${{ inputs.utp-test-profile }} + artifact-name: ${{ steps.artifact-name.outputs.name }} + - name: Verify UTP JSON keys + if: ${{ 
steps.verify-project-path.outputs.RUN_BUILD == 'true' }} + run: node .github/scripts/scan-utp-artifacts.cjs "${GITHUB_WORKSPACE}/utp-artifacts" + - name: Guardrail hidden UTP failures if: ${{ steps.verify-project-path.outputs.RUN_BUILD == 'true' }} - timeout-minutes: 60 run: | - # we don't have to specify the project path or unity editor path as unity-cli will use the environment variables - unity-cli run --log-name Validate -quit -executeMethod Utilities.Editor.BuildPipeline.UnityPlayerBuildTools.ValidateProject -importTMProEssentialsAsset - unity-cli run --log-name Build -buildTarget ${{ matrix.build-target }} -quit -executeMethod Utilities.Editor.BuildPipeline.UnityPlayerBuildTools.StartCommandLineBuild -sceneList Assets/Scenes/SampleScene.unity ${{ matrix.build-args }} + set -euo pipefail + # Keep this alternation in sync with hard failures from .github/actions/scripts/run-utp-tests.sh + failure_markers='One or more tests did not meet expectations|was expected to succeed but failed|produced UTP errors but was expected to succeed' + log_dir="${UNITY_PROJECT_PATH}/Builds/Logs" + artifacts_dir="${GITHUB_WORKSPACE}/utp-artifacts" + + marker_found=0 + + scan_markers() { + local target_dir="$1" + if command -v rg >/dev/null 2>&1; then + rg -n --no-ignore -S "$failure_markers" "$target_dir" + else + grep -RInE "$failure_markers" "$target_dir" + fi + } + + if [ -d "$log_dir" ]; then + if scan_markers "$log_dir"; then + echo "::error::Hidden UTP failure marker detected in ${log_dir}" + marker_found=1 + fi + fi + + if [ -d "$artifacts_dir" ]; then + if scan_markers "$artifacts_dir"; then + echo "::error::Hidden UTP failure marker detected in ${artifacts_dir}" + marker_found=1 + fi + fi + + if [ "$marker_found" -ne 0 ]; then + exit 1 + fi - name: Uninstall Editor if: ${{ matrix.unity-version != 'none' }} + timeout-minutes: 5 + continue-on-error: true run: | if [ -z "${UNITY_EDITOR_PATH}" ]; then echo "UNITY_EDITOR_PATH is not set, skipping uninstall" @@ -136,12 +224,12 @@ 
jobs: PACKAGE_MANAGER_LOG_PATH=$(unity-cli package-manager-logs) LICENSING_CLIENT_LOG_PATH=$(unity-cli licensing-client-logs) LICENSING_AUDIT_LOG_PATH=$(unity-cli licensing-audit-logs) - + echo "Hub Log Path: ${HUB_LOG_PATH}" echo "Package Manager Log Path: ${PACKAGE_MANAGER_LOG_PATH}" echo "Licensing Client Log Path: ${LICENSING_CLIENT_LOG_PATH}" echo "Licensing Audit Log Path: ${LICENSING_AUDIT_LOG_PATH}" - + if [ ! -f "${HUB_LOG_PATH}" ]; then echo "::warning:: Hub log file does not exist at ${HUB_LOG_PATH}" # find all info-log.json files in ~/.config/unity3d/ - print their paths @@ -155,15 +243,15 @@ jobs: find ~/.config/ -type f -exec echo "{}" \; echo "::warning:: Hub log file does not exist at any known location" fi - + if [ ! -f "${PACKAGE_MANAGER_LOG_PATH}" ]; then echo "::warning::Package Manager log file does not exist at ${PACKAGE_MANAGER_LOG_PATH}" fi - + if [ ! -f "${LICENSING_CLIENT_LOG_PATH}" ]; then echo "::error::Licensing Client log file does not exist at ${LICENSING_CLIENT_LOG_PATH}" fi - + if [ ! 
-f "${LICENSING_AUDIT_LOG_PATH}" ]; then echo "::error::Licensing Audit log file does not exist at ${LICENSING_AUDIT_LOG_PATH}" fi diff --git a/.gitignore b/.gitignore index 2e92ab99..f2017871 100644 --- a/.gitignore +++ b/.gitignore @@ -137,4 +137,6 @@ dist # Vite logs files vite.config.js.timestamp-* vite.config.ts.timestamp-* -_temp + +.artifacts/ +_temp/ diff --git a/package-lock.json b/package-lock.json index 8bb40984..5bea9286 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@rage-against-the-pixel/unity-cli", - "version": "1.8.3", + "version": "1.9.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@rage-against-the-pixel/unity-cli", - "version": "1.8.3", + "version": "1.9.0", "license": "MIT", "dependencies": { "@electron/asar": "^4.2.0", diff --git a/package.json b/package.json index 8da8d46d..4210bb8e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@rage-against-the-pixel/unity-cli", - "version": "1.8.3", + "version": "1.9.0", "description": "A command line utility for the Unity Game Engine.", "author": "RageAgainstThePixel", "license": "MIT", @@ -44,6 +44,8 @@ "build": "tsc", "dev": "tsc --watch", "tests": "jest --roots tests", + "test:utp-batch-contract": "bash tests/run-utp-tests-contract.sh", + "scan-utp-artifacts": "node .github/scripts/scan-utp-artifacts.cjs", "link": "npm link", "unlink": "npm unlink @rage-against-the-pixel/unity-cli" }, diff --git a/src/github-actions-ci.ts b/src/github-actions-ci.ts new file mode 100644 index 00000000..7324a9db --- /dev/null +++ b/src/github-actions-ci.ts @@ -0,0 +1,102 @@ +import * as fs from 'fs'; +import { + ILoggerProvider, + LoggerAnnotationOptions, + LoggerProviderLevel, + MarkdownTarget +} from './logger-provider'; + +export enum GitHubAnnotationLevel { + Notice = 'notice', + Warning = 'warning', + Error = 'error', +} + +export class GitHubActionsLoggerProvider implements ILoggerProvider { + public readonly isCi = 
process.env.GITHUB_ACTIONS === 'true'; + + public log(level: LoggerProviderLevel, message: any, optionalParams: any[] = []): void { + switch (level) { + case 'debug': { + message.toString().split('\n').forEach((line: string) => { + process.stdout.write(`::debug::${line}\n`, ...optionalParams); + }); + break; + } + case 'ci': + case 'info': + process.stdout.write(`${message}\n`, ...optionalParams); + break; + default: + process.stdout.write(`::${level}::${message}\n`, ...optionalParams); + break; + } + } + + public startGroup(message: any, optionalParams: any[] = []): void { + const firstLine: string = message.toString().split('\n')[0]; + process.stdout.write(`::group::${firstLine}\n`, ...optionalParams); + } + + public endGroup(): void { + process.stdout.write('::endgroup::\n'); + } + + public annotate(level: GitHubAnnotationLevel, message: string, options?: LoggerAnnotationOptions): void { + const parts: string[] = []; + const appendPart = (key: string, value?: string | number): void => { + if (value === undefined || value === null) { return; } + const stringValue = value.toString(); + if (stringValue.length === 0) { return; } + parts.push(`${key}=${this.escapeGitHubCommandValue(stringValue)}`); + }; + + appendPart('file', options?.file); + if (options?.line !== undefined && options.line > 0) appendPart('line', options.line); + if (options?.endLine !== undefined && options.endLine > 0) appendPart('endLine', options.endLine); + if (options?.column !== undefined && options.column > 0) appendPart('col', options.column); + if (options?.endColumn !== undefined && options.endColumn > 0) appendPart('endColumn', options.endColumn); + appendPart('title', options?.title); + + const metadata = parts.length > 0 ? 
` ${parts.join(',')}` : ''; + process.stdout.write(`::${level}${metadata}::${this.escapeGitHubCommandValue(message)}\n`); + } + + public mask(message: string): void { + process.stdout.write(`::add-mask::${message}\n`); + } + + public setEnvironmentVariable(name: string, value: string): void { + const githubEnv = process.env.GITHUB_ENV; + if (githubEnv) { + fs.appendFileSync(githubEnv, `${name}=${value}\n`, { encoding: 'utf8' }); + } + } + + public setOutput(name: string, value: string): void { + const githubOutput = process.env.GITHUB_OUTPUT; + if (githubOutput) { + fs.appendFileSync(githubOutput, `${name}=${value}\n`, { encoding: 'utf8' }); + } + } + + public appendStepSummary(summary: string): void { + const githubSummary = process.env.GITHUB_STEP_SUMMARY; + if (!githubSummary) { return; } + fs.appendFileSync(githubSummary, summary, { encoding: 'utf8' }); + } + + public getMarkdownByteLimit(target: MarkdownTarget): number { + if (target === 'workflow-summary') { + return 1024 * 1024; + } + return Number.POSITIVE_INFINITY; + } + + private escapeGitHubCommandValue(value: string): string { + return value + .replace(/%/g, '%25') + .replace(/\r/g, '%0D') + .replace(/\n/g, '%0A'); + } +} diff --git a/src/logger-provider.ts b/src/logger-provider.ts new file mode 100644 index 00000000..70f42e3f --- /dev/null +++ b/src/logger-provider.ts @@ -0,0 +1,78 @@ +export type MarkdownTarget = 'workflow-summary' | 'stdout'; +export type LoggerProviderLevel = 'debug' | 'ci' | 'utp' | 'info' | 'warning' | 'error'; +export type LoggerProviderAnnotationLevel = 'notice' | 'warning' | 'error'; + +export interface LoggerAnnotationOptions { + file?: string; + line?: number; + endLine?: number; + column?: number; + endColumn?: number; + title?: string; +} + +export interface ILoggerProvider { + readonly isCi: boolean; + log(level: LoggerProviderLevel, message: any, optionalParams?: any[]): void; + startGroup(message: any, optionalParams?: any[]): void; + endGroup(): void; + annotate(level: 
LoggerProviderAnnotationLevel, message: string, options?: LoggerAnnotationOptions): void;
    mask(message: string): void;
    setEnvironmentVariable(name: string, value: string): void;
    setOutput(name: string, value: string): void;
    appendStepSummary(summary: string): void;
    getMarkdownByteLimit(target: MarkdownTarget): number;
}

/** Logger provider for plain local terminals: ANSI colors, no CI integrations. */
export class LocalCliLoggerProvider implements ILoggerProvider {
    public readonly isCi = false;

    /** Writes one line to stdout, colorized by level (purple debug, yellow warning, red error). */
    public log(level: LoggerProviderLevel, message: any, optionalParams: any[] = []): void {
        let color: string | undefined;
        switch (level) {
            case 'debug': color = '\x1b[35m'; break;
            case 'warning': color = '\x1b[33m'; break;
            case 'error': color = '\x1b[31m'; break;
            default: color = undefined; break;
        }
        if (color && color.length > 0) {
            process.stdout.write(`${color}${message}\x1b[0m\n`, ...optionalParams);
            return;
        }
        process.stdout.write(`${message}\n`, ...optionalParams);
    }

    /** Local terminals have no collapsible groups; the title is logged as info. */
    public startGroup(message: any, optionalParams: any[] = []): void {
        this.log('info', message, optionalParams);
    }

    public endGroup(): void {
        // no-op for local terminal
    }

    /** Annotations degrade to plain colorized log lines locally. */
    public annotate(level: LoggerProviderAnnotationLevel, message: string): void {
        const mapped: LoggerProviderLevel = level === 'error' ? 'error' : (level === 'warning' ?
'warning' : 'info'); + this.log(mapped, message); + } + + public mask(_message: string): void { + // no-op for local terminal + } + + public setEnvironmentVariable(_name: string, _value: string): void { + // no-op for local terminal + } + + public setOutput(_name: string, _value: string): void { + // no-op for local terminal + } + + public appendStepSummary(_summary: string): void { + // no-op for local terminal + } + + public getMarkdownByteLimit(_target: MarkdownTarget): number { + return Number.POSITIVE_INFINITY; + } +} diff --git a/src/logging.ts b/src/logging.ts index 54f9e863..cd953082 100644 --- a/src/logging.ts +++ b/src/logging.ts @@ -1,4 +1,517 @@ -import * as fs from 'fs'; +import { UTP, Severity } from './utp'; +import { GitHubActionsLoggerProvider, GitHubAnnotationLevel } from './github-actions-ci'; +import { ILoggerProvider, LocalCliLoggerProvider, LoggerAnnotationOptions, MarkdownTarget } from './logger-provider'; + +/** Severity order for display: Error first, then Warning, then Info. Undefined treats as Warning. */ +function severityRank(s: string | undefined): number { + if (s === Severity.Error || s === Severity.Exception || s === Severity.Assert) return 0; + if (s === Severity.Warning || s === undefined) return 1; + return 2; // Info +} + +function dedupeKey(e: UTP): string { + const msg = (e.message || '').trim(); + const file = (e.file || (e as { fileName?: string }).fileName || '').replace(/\\/g, '/'); + const line = e.line ?? (e as { lineNumber?: number }).lineNumber ?? 0; + return `${msg}\n${file}\n${line}`; +} + +/** + * Returns true if the path looks absolute (Unix / or Windows X:/). + */ +function isAbsolutePath(file: string): boolean { + const norm = file.replace(/\\/g, '/'); + if (norm.startsWith('/')) return true; + return /^[a-zA-Z]:\//.test(norm); +} + +/** + * Returns true if the entry's file is under the project path (or entry has no file). + * Relative paths (e.g. Assets/..., Packages/...) 
are always kept so Unity UTP log/compiler + * entries with relative file paths still appear in the summary. + */ +function isEntryUnderProjectPath(e: UTP, projectPath: string): boolean { + const file = (e.file || (e as { fileName?: string }).fileName || '').trim(); + if (!file) return true; + const normFile = file.replace(/\\/g, '/'); + if (!isAbsolutePath(normFile)) return true; + const normProject = projectPath.replace(/\\/g, '/'); + const base = normProject.endsWith('/') ? normProject : normProject + '/'; + return normFile === normProject || normFile.startsWith(base); +} + +/** + * Returns true if the entry's file looks like a Unity engine path (should be omitted when not using projectPath). + */ +function isUnityEnginePath(file: string): boolean { + const norm = file.replace(/\\/g, '/'); + if (UNITY_ENGINE_PATH_PREFIXES.some(p => norm.startsWith(p))) return true; + if (norm.includes('/Runtime/') || norm.includes('\\Runtime\\')) return true; + if (!norm.endsWith('.cpp')) return false; + const underProject = norm.includes('/Assets/') || norm.includes('/Packages/') || norm.includes('/Library/PackageCache/'); + return !underProject; +} + +/** + * Merges LogEntry/Compiler rows by message+file+line; on collision keeps the more severe entry. + * Exported for unit tests. + */ +export function mergeLogEntriesPreferringSeverity(candidates: UTP[]): UTP[] { + const byKey = new Map(); + for (const e of candidates) { + const key = dedupeKey(e); + const existing = byKey.get(key); + if (!existing || severityRank(e.severity) < severityRank(existing.severity)) { + byKey.set(key, e); + } + } + const merged = [...byKey.values()]; + merged.sort((a, b) => severityRank(a.severity) - severityRank(b.severity)); + return merged; +} + +/** + * Builds one merged list from LogEntry and Compiler entries. + * Deduplicated by message+file+line (keeping worse severity on collision), sorted by severity. 
+ */ +function buildMergedLogList(filtered: UTP[]): UTP[] { + const candidates = filtered.filter(e => e.type === 'LogEntry' || e.type === 'Compiler'); + return mergeLogEntriesPreferringSeverity(candidates); +} + +/** + * Filters merged list to project-relevant entries only. + * When projectPath is set: keep entries with no file or file under projectPath. + * When projectPath is not set: exclude Unity engine paths only (keep PackageCache and project paths). + */ +function filterMergedByPath(merged: UTP[], options: { projectPath?: string } | undefined): UTP[] { + if (options?.projectPath != null && options.projectPath !== '') { + return merged.filter(e => isEntryUnderProjectPath(e, options.projectPath!)); + } + return merged.filter(e => { + const file = (e.file || (e as { fileName?: string }).fileName || '').trim(); + if (!file) return true; + return !isUnityEnginePath(file); + }); +} + +/** Groups merged log by severity for foldouts (Error, Warning, Info). Missing severity is grouped as Warning. */ +function groupBySeverity(merged: UTP[]): { errorCritical: UTP[]; warning: UTP[]; info: UTP[] } { + const errorCritical: UTP[] = []; + const warning: UTP[] = []; + const info: UTP[] = []; + for (const e of merged) { + if (e.severity === Severity.Error || e.severity === Severity.Exception || e.severity === Severity.Assert) { + errorCritical.push(e); + } else if (e.severity === Severity.Warning || e.severity === undefined) { + warning.push(e); + } else { + info.push(e); + } + } + return { errorCritical, warning, info }; +} + +/** Single test result row for summary and CLI table. */ +export interface TestResultSummary { + status: string; + durationMs: number; + description: string; + message?: string; + file?: string; + line?: number; +} + +/** Maps UTPTestStatus.state to display status (Unity/NUnit-style: 0 Inconclusive, 1 Passed, 2 Failed, 3 Skipped). 
*/ +export function testStatusFromState(state: number | undefined): string { + switch (state) { + case 1: return '✅'; + case 2: return '❌'; + case 3: return '⏭️'; + case 0: + default: return '◯'; + } +} + +/** Converts a single TestStatus UTP to TestResultSummary. Exported for CLI use. */ +export function utpToTestResultSummary(e: UTP): TestResultSummary { + const state = (e as { state?: number }).state; + const durationMs = e.duration ?? (e.durationMicroseconds != null ? e.durationMicroseconds / 1000 : 0); + const description = (e.name || e.description || '-').trim(); + const msg = (e.message || '').trim(); + const summary: TestResultSummary = { + status: testStatusFromState(state), + durationMs, + description, + }; + if (msg !== '') { + summary.message = msg; + } + const file = (e.file || (e as { fileName?: string }).fileName || '').trim(); + const line = e.line ?? (e as { lineNumber?: number }).lineNumber; + if (file !== '') { + summary.file = file.replace(/\\/g, '/'); + } + if (line !== undefined && line > 0) { + summary.line = line; + } + return summary; +} + +/** Collects TestStatus entries from telemetry into TestResultSummary rows. */ +function collectTestResults(filtered: UTP[]): TestResultSummary[] { + return filtered.filter(e => e.type === 'TestStatus').map(utpToTestResultSummary); +} + +function escapeMarkdownTableCell(value: string): string { + return value + .replace(/\\/g, '\\\\') + .replace(/\|/g, '\\|'); +} + +/** Builds a markdown table string for test results (Status | Duration | Test). Exported for CLI use. */ +export function buildTestResultsTableMarkdown(testResults: TestResultSummary[], byteLimit: number, prefix?: string): string { + if (testResults.length === 0) return ''; + const p = prefix ?? ''; + let out = p + `### Test results\n\n`; + out += `| Status | Duration | Test |\n`; + out += `|--------|----------|------|\n`; + let shown = 0; + for (const row of testResults) { + const durationStr = row.durationMs >= 1000 + ? 
`${(row.durationMs / 1000).toFixed(1)}s`
            : `${Math.round(row.durationMs)} ms`;
        // Long test names are clipped to keep rows readable.
        const rawDesc = row.description.length > 80 ? row.description.slice(0, 77) + '…' : row.description;
        const desc = escapeMarkdownTableCell(rawDesc);
        const line = `| ${escapeMarkdownTableCell(row.status)} | ${escapeMarkdownTableCell(durationStr)} | ${desc} |\n`;
        // Stop before the row that would blow the byte budget.
        if (Buffer.byteLength(out + line, 'utf8') > byteLimit) break;
        out += line;
        shown++;
    }
    if (shown < testResults.length) {
        out += `| … | … | … and ${testResults.length - shown} more |\n`;
    }
    out += `\n`;
    return out;
}

/** Tallies pass/fail/skip/inconclusive counts and total duration from summary rows. */
function summarizeTestOutcomes(testResults: TestResultSummary[]): { passed: number; failed: number; skipped: number; inconclusive: number; totalDurationMs: number } {
    const tally = { passed: 0, failed: 0, skipped: 0, inconclusive: 0, totalDurationMs: 0 };
    for (const t of testResults) {
        tally.totalDurationMs += t.durationMs;
        switch (t.status) {
            case '✅': tally.passed++; break;
            case '❌': tally.failed++; break;
            case '⏭️': tally.skipped++; break;
            default: tally.inconclusive++; break;
        }
    }
    return tally;
}

/**
 * Rich unit-test markdown block used by workflow summary and stdout.
 * Keeps byte-budget behavior and truncation hints.
 */
export function buildUnitTestJobSummaryMarkdown(testResults: TestResultSummary[], byteLimit: number, prefix?: string): string {
    if (testResults.length === 0) return '';
    const p = prefix ?? '';
    let out = p + '### Unit test results\n\n';
    const counts = summarizeTestOutcomes(testResults);
    const durationStr = counts.totalDurationMs >= 1000
        ?
`${(counts.totalDurationMs / 1000).toFixed(1)}s`
        : `${Math.round(counts.totalDurationMs)} ms`;
    out += `**${testResults.length}** tests - **${counts.passed}** ✓, **${counts.failed}** ✗, **${counts.skipped}** skipped, **${counts.inconclusive}** inconclusive - **${durationStr}** total\n\n`;
    out += '| Test | Result | Time | Message |\n';
    out += '| --- | --- | --- | --- |\n';

    // Failures first, then slowest first, so the interesting rows survive truncation.
    const ordered = [...testResults].sort((a, b) => {
        const aFail = a.status === '❌' ? 0 : 1;
        const bFail = b.status === '❌' ? 0 : 1;
        if (aFail !== bFail) return aFail - bFail;
        return b.durationMs - a.durationMs;
    });

    let shown = 0;
    for (const row of ordered) {
        const durationText = row.durationMs >= 1000 ? `${(row.durationMs / 1000).toFixed(1)}s` : `${Math.round(row.durationMs)} ms`;
        const loc = row.file && row.line ? ` (${row.file}:${row.line})` : '';
        const rawName = `${row.description}${loc}`;
        const name = escapeMarkdownTableCell(rawName.length > 90 ? `${rawName.slice(0, 87)}…` : rawName);
        const msgRaw = (row.message ?? '').replace(/\r?\n/g, ' ').trim();
        const msg = escapeMarkdownTableCell(msgRaw.length > 120 ? `${msgRaw.slice(0, 117)}…` : msgRaw);
        const line = `| ${name} | ${escapeMarkdownTableCell(row.status)} | ${escapeMarkdownTableCell(durationText)} | ${msg} |\n`;
        if (Buffer.byteLength(out + line, 'utf8') > byteLimit) break;
        out += line;
        shown++;
    }
    if (shown < ordered.length) {
        out += `| … | … | … | … and ${ordered.length - shown} more |\n`;
    }
    out += '\n';
    return out;
}

/**
 * Renders completed build actions as a markdown table within a byte budget.
 * Reports whether rows were truncated so the caller can fall back to a codeblock.
 */
function buildActionTimelineTableMarkdown(
    completedActions: UTP[],
    byteLimit: number,
    prefix?: string
): { markdown: string; truncated: boolean } {
    if (completedActions.length === 0) return { markdown: '', truncated: false };
    const p = prefix ?? '';
    let out = p + '| Status | Duration | Errors | Action |\n';
    out += '| --- | --- | --- | --- |\n';

    let shown = 0;
    for (const a of completedActions) {
        const durationMs = a.duration ??
(a.durationMicroseconds != null ? a.durationMicroseconds / 1000 : undefined); + const errCount = Array.isArray(a.errors) ? a.errors.length : 0; + const status = errCount > 0 ? '❌' : '✅'; + const action = truncateStr(toSingleLineText(a.description || a.name || '-'), 120); + const row = `| ${escapeMarkdownTableCell(status)} | ${escapeMarkdownTableCell(formatDurationMsForSummary(durationMs))} | ${errCount} | ${escapeMarkdownTableCell(action)} |\n`; + if (Buffer.byteLength(out + row, 'utf8') > byteLimit) break; + out += row; + shown++; + } + + const truncated = shown < completedActions.length; + if (truncated) { + out += `| ... | ... | ... | ... and ${completedActions.length - shown} more actions |\n`; + } + out += '\n'; + return { markdown: out, truncated }; +} + +function buildActionTimelineCodeblockMarkdown(completedActions: UTP[], byteLimit: number, prefix?: string): string { + if (completedActions.length === 0) return ''; + const p = prefix ?? ''; + let out = p + '```text\n'; + let timelineShown = 0; + for (const a of completedActions) { + const durationMs = a.duration ?? (a.durationMicroseconds != null ? a.durationMicroseconds / 1000 : undefined); + const errCount = Array.isArray(a.errors) ? a.errors.length : 0; + const status = errCount > 0 ? '❌' : '✅'; + const desc = toSingleLineText(a.description || a.name || '-'); + const durationStr = formatDurationMsForSummary(durationMs); + const row = `${status} ${durationStr} ${errCount} - ${desc}\n`; + if (Buffer.byteLength(out + row, 'utf8') > byteLimit) break; + out += row; + timelineShown++; + } + if (timelineShown < completedActions.length) { + out += `... and ${completedActions.length - timelineShown} more actions\n`; + } + out += '```\n\n'; + return out; +} + +function truncateStr(s: string, max: number): string { + return s.length <= max ? s : s.slice(0, max) + '…'; +} + +/** + * Truncates s to fit within maxBytes in UTF-8. If truncated, appends an ellipsis (…). + * If s already fits, returns s unchanged. 
+ * Exported for unit tests. + */ +export function truncateStringToUtf8ByteLength(s: string, maxBytes: number): string { + if (maxBytes <= 0) return ''; + const ellipsis = '…'; + const ellBytes = Buffer.byteLength(ellipsis, 'utf8'); + if (Buffer.byteLength(s, 'utf8') <= maxBytes) return s; + if (maxBytes <= ellBytes) { + let end = 0; + for (let i = 1; i <= s.length; i++) { + const sub = s.slice(0, i); + if (Buffer.byteLength(sub, 'utf8') > maxBytes) break; + end = i; + } + return s.slice(0, end); + } + let low = 0; + let high = s.length; + while (low < high) { + const mid = Math.floor((low + high + 1) / 2); + const sub = s.slice(0, mid); + if (Buffer.byteLength(sub, 'utf8') + ellBytes <= maxBytes) low = mid; + else high = mid - 1; + } + return s.slice(0, low) + ellipsis; +} + +/** + * Appends one formatted log line per entry, truncating each line only when it would exceed the + * remaining bytes in the workflow summary (byteLimit is total cap for the final string starting from out). + */ +function appendWorkflowSummaryLogLines(out: string, entries: UTP[], byteLimit: number): { out: string; shown: number; omitted: number } { + let o = out; + let shown = 0; + const newline = '\n'; + const nlBytes = Buffer.byteLength(newline, 'utf8'); + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + if (entry === undefined) { + return { out: o, shown, omitted: entries.length - shown }; + } + const room = byteLimit - Buffer.byteLength(o, 'utf8'); + if (room < nlBytes) { + return { out: o, shown, omitted: entries.length - shown }; + } + const rawLine = formatLogEntryLine(entry, Number.POSITIVE_INFINITY).replace(/\n$/, ''); + const maxContentBytes = room - nlBytes; + const lineBody = Buffer.byteLength(rawLine, 'utf8') <= maxContentBytes + ? 
rawLine + : truncateStringToUtf8ByteLength(rawLine, maxContentBytes); + o += lineBody + newline; + shown++; + } + return { out: o, shown, omitted: 0 }; +} + +function toSingleLineText(value: string): string { + return value + .replace(/\r?\n+/g, ' ') + .replace(/\s+/g, ' ') + .trim(); +} + +function formatDurationMsForSummary(ms: number | undefined): string { + if (ms === undefined || !Number.isFinite(ms)) { return '-'; } + if (ms < 1000) { return `${Math.round(ms)}ms`; } + return `${(ms / 1000).toFixed(1)}s`; +} + +/** Unity/CI noise shown in logs; omit from workflow summary foldouts and counts. */ +const SUMMARY_NOISE_ACCESS_TOKEN = 'Access token is unavailable; failed to update'; + +/** + * Removes known noise phrases from a log message for summary display. + * Exported for unit tests. + */ +export function stripSummaryNoiseFromLogMessage(message: string): string { + const flat = toSingleLineText(message); + if (!flat) return ''; + const pattern = SUMMARY_NOISE_ACCESS_TOKEN.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const out = flat.replace(new RegExp(pattern, 'gi'), ' ').replace(/\s+/g, ' ').trim(); + return out; +} + +function filterNoiseFromSummaryLogEntries(entries: UTP[]): UTP[] { + const out: UTP[] = []; + for (const e of entries) { + const stripped = stripSummaryNoiseFromLogMessage(e.message || ''); + if (stripped === '') continue; + const originalFlat = toSingleLineText(e.message || ''); + if (stripped !== originalFlat) { + out.push({ ...e, message: stripped }); + } else { + out.push(e); + } + } + return out; +} + +function renderBuildActionsFoldoutMarkdown(completedActions: UTP[], maxBytes: number): string { + const n = completedActions.length; + const open = `
Build actions (${n})\n\n`; + const close = `
\n\n`; + const overhead = Buffer.byteLength(open + close, 'utf8'); + const innerBudget = Math.max(0, maxBytes - overhead); + const table = buildActionTimelineTableMarkdown(completedActions, innerBudget, ''); + const inner = !table.truncated + ? table.markdown + : buildActionTimelineCodeblockMarkdown(completedActions, innerBudget, ''); + return open + inner + close; +} + +/** Paths to treat as Unity engine (omit from summary when using heuristic filter). */ +const UNITY_ENGINE_PATH_PREFIXES = [ + 'Runtime/', + './Runtime/', + 'Modules/', + './Modules/', +]; + +/** + * Normalizes a log message for display by stripping a redundant file:line prefix + * when it matches the entry's file/line so the path appears only once. + * Returns the normalized message and optional column if present in the prefix. + */ +function normalizeMessageForDisplay( + message: string, + file: string, + line: number | undefined +): { message: string; column?: number } { + const trimmed = message.trim(); + const normFile = file.replace(/\\/g, '/'); + if (!normFile && line === undefined) return { message: trimmed }; + + // path(line,col): e.g. Assets/File.cs(2,8): error ... + const parenColon = trimmed.match(/^(.+?)\((\d+),(\d+)\):\s*/); + if (parenColon && parenColon[1] != null && parenColon[2] != null && parenColon[3] != null) { + const fullMatch = parenColon[0]; + const msgPath = parenColon[1].replace(/\\/g, '/'); + const msgLine = parseInt(parenColon[2], 10); + const msgCol = parseInt(parenColon[3], 10); + const pathMatches = msgPath === normFile || normFile.endsWith(msgPath) || msgPath.endsWith(normFile); + if (pathMatches && (line === undefined || line === msgLine)) { + return { message: trimmed.slice(fullMatch.length).trim(), column: msgCol }; + } + } + + // path(line): e.g. Assets/File.cs(2): ... 
+ const parenOnly = trimmed.match(/^(.+?)\((\d+)\):\s*/); + if (parenOnly && parenOnly[1] != null && parenOnly[2] != null) { + const fullMatch = parenOnly[0]; + const msgPath = parenOnly[1].replace(/\\/g, '/'); + const msgLine = parseInt(parenOnly[2], 10); + const pathMatches = msgPath === normFile || normFile.endsWith(msgPath) || msgPath.endsWith(normFile); + if (pathMatches && (line === undefined || line === msgLine)) { + return { message: trimmed.slice(fullMatch.length).trim() }; + } + } + + // path:line: e.g. path/to/file.cs:10: + const pathLineColon = trimmed.match(/^(.+?):(\d+):\s*/); + if (pathLineColon && pathLineColon[1] != null && pathLineColon[2] != null) { + const fullMatch = pathLineColon[0]; + const msgPath = pathLineColon[1].replace(/\\/g, '/'); + const msgLine = parseInt(pathLineColon[2], 10); + const pathMatches = msgPath === normFile || normFile.endsWith(msgPath) || msgPath.endsWith(normFile); + if (pathMatches && (line === undefined || line === msgLine)) { + return { message: trimmed.slice(fullMatch.length).trim() }; + } + } + + return { message: trimmed }; +} + +/** + * One line per entry: path(line,col): <message> or path(line): <message> when column is missing. + * When file/line are missing, outputs: - <message>. + */ +function formatLogEntryLine(e: UTP, maxMsgLen: number = Number.POSITIVE_INFINITY): string { + const file = (e.file || (e as { fileName?: string }).fileName || '').replace(/\\/g, '/'); + const line = e.line ?? (e as { lineNumber?: number }).lineNumber; + const hasLocation = file && (line !== undefined && line > 0); + const rawMsg = toSingleLineText(e.message || ''); + const { message: normalizedMsg, column } = hasLocation + ? normalizeMessageForDisplay(rawMsg, file, line) + : { message: rawMsg, column: undefined as number | undefined }; + const msg = Number.isFinite(maxMsgLen) && maxMsgLen >= 0 && maxMsgLen < Number.POSITIVE_INFINITY + ? 
truncateStr(normalizedMsg, maxMsgLen) + : normalizedMsg; + + if (hasLocation) { + const loc = column !== undefined ? `${file}(${line},${column})` : `${file}(${line})`; + return `${loc}: ${msg}\n`; + } + return `${msg}\n`; +} export enum LogLevel { DEBUG = 'debug', @@ -11,24 +524,18 @@ export enum LogLevel { export class Logger { public logLevel: LogLevel = LogLevel.INFO; - private readonly _ci: string | undefined; + private readonly _provider: ILoggerProvider; static readonly instance: Logger = new Logger(); private constructor() { + this._provider = process.env.GITHUB_ACTIONS === 'true' + ? new GitHubActionsLoggerProvider() + : new LocalCliLoggerProvider(); if (process.env.GITHUB_ACTIONS === 'true') { - this._ci = 'GITHUB_ACTIONS'; this.logLevel = process.env.ACTIONS_STEP_DEBUG === 'true' ? LogLevel.DEBUG : LogLevel.CI; } } - private printLine(message: any, lineColor: string | undefined, optionalParams: any[] = []): void { - if (lineColor && lineColor.length > 0) { - process.stdout.write(`${lineColor}${message}\x1b[0m\n`, ...optionalParams); - } else { - process.stdout.write(`${message}\n`, ...optionalParams); - } - } - /** * Logs a message to the console. * @param level The log level for this message. 
@@ -37,40 +544,7 @@ export class Logger { */ public log(level: LogLevel, message: any, optionalParams: any[] = []): void { if (this.shouldLog(level)) { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - switch (level) { - case LogLevel.DEBUG: { - message.toString().split('\n').forEach((line: string) => { - process.stdout.write(`::debug::${line}\n`, ...optionalParams); - }); - break; - } - case LogLevel.CI: - case LogLevel.INFO: { - process.stdout.write(`${message}\n`, ...optionalParams); - break; - } - default: { - process.stdout.write(`::${level}::${message}\n`, ...optionalParams); - break; - } - } - break; - } - default: { - const stringColor: string | undefined = { - [LogLevel.DEBUG]: '\x1b[35m', // Purple - [LogLevel.INFO]: undefined, // No color / White - [LogLevel.CI]: undefined, // No color / White - [LogLevel.UTP]: undefined, // No color / White - [LogLevel.WARN]: '\x1b[33m', // Yellow - [LogLevel.ERROR]: '\x1b[31m', // Red - }[level] || undefined; // Default to no color / White - this.printLine(message, stringColor, optionalParams); - break; - } - } + this._provider.log(level, message, optionalParams); } } @@ -78,39 +552,18 @@ export class Logger { * Starts a log group. In CI environments that support grouping, this will create a collapsible group. 
*/ public startGroup(message: any, optionalParams: any[] = [], logLevel: LogLevel = LogLevel.INFO): void { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - // if there is newline in message, only use the first line for group title - // then print the rest of the lines inside the group in cyan color - const firstLine: string = message.toString().split('\n')[0]; - const restLines: string[] = message.toString().split('\n').slice(1); - process.stdout.write(`::group::${firstLine}\n`, ...optionalParams); - restLines.forEach(line => { - this.printLine(line, '\x1b[36m', ...optionalParams); - }); - break; - } - default: { - // No grouping in standard console - this.log(logLevel, message, optionalParams); - break; - } + if (this._provider.isCi) { + this._provider.startGroup(message, optionalParams); + return; } + this.log(logLevel, message, optionalParams); } /** * Ends a log group. In CI environments that support grouping, this will end the current group. */ public endGroup(): void { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - process.stdout.write(`::endgroup::\n`); - break; - } - default: { - break; // No grouping in standard console - } - } + this._provider.endGroup(); } /** @@ -150,60 +603,25 @@ export class Logger { * @param title The title of the annotation. */ public annotate(logLevel: LogLevel, message: string, file?: string, line?: number, endLine?: number, column?: number, endColumn?: number, title?: string): void { - let annotation = ''; - - switch (this._ci) { - case 'GITHUB_ACTIONS': { - const level = { - [LogLevel.CI]: 'notice', - [LogLevel.INFO]: 'notice', - [LogLevel.DEBUG]: 'notice', - [LogLevel.UTP]: 'notice', - [LogLevel.WARN]: 'warning', - [LogLevel.ERROR]: 'error', - }[logLevel] ?? 
'notice'; - - const parts: string[] = []; - const appendPart = (key: string, value?: string | number): void => { - if (value === undefined || value === null) { return; } - const stringValue = value.toString(); - if (stringValue.length === 0) { return; } - parts.push(`${key}=${this.escapeGitHubCommandValue(stringValue)}`); - }; - - appendPart('file', file); - if (line !== undefined && line > 0) { - appendPart('line', line); - } - if (endLine !== undefined && endLine > 0) { - appendPart('endLine', endLine); - } - if (column !== undefined && column > 0) { - appendPart('col', column); - } - if (endColumn !== undefined && endColumn > 0) { - appendPart('endColumn', endColumn); - } - appendPart('title', title); - - const metadata = parts.length > 0 ? ` ${parts.join(',')}` : ''; - annotation = `::${level}${metadata}::${this.escapeGitHubCommandValue(message)}`; - break; - } - } - - if (annotation.length > 0) { - process.stdout.write(`${annotation}\n`); - } else { - this.log(logLevel, message); - } - } - - private escapeGitHubCommandValue(value: string): string { - return value - .replace(/%/g, '%25') - .replace(/\r/g, '%0D') - .replace(/\n/g, '%0A'); + const level = { + [LogLevel.CI]: 'notice', + [LogLevel.INFO]: 'notice', + [LogLevel.DEBUG]: 'notice', + [LogLevel.UTP]: 'notice', + [LogLevel.WARN]: 'warning', + [LogLevel.ERROR]: 'error', + }[logLevel] ?? 'notice'; + const options: LoggerAnnotationOptions = {}; + if (file !== undefined && file !== '') { options.file = file; } + if (line !== undefined) { options.line = line; } + if (endLine !== undefined) { options.endLine = endLine; } + if (column !== undefined) { options.column = column; } + if (endColumn !== undefined) { options.endColumn = endColumn; } + if (title !== undefined && title !== '') { options.title = title; } + const backendLevel = level === 'error' + ? GitHubAnnotationLevel.Error + : (level === 'warning' ? 
GitHubAnnotationLevel.Warning : GitHubAnnotationLevel.Notice); + this._provider.annotate(backendLevel, message, options); } private shouldLog(level: LogLevel): boolean { @@ -217,12 +635,7 @@ export class Logger { * @param message The string to mask. */ public CI_mask(message: string): void { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - process.stdout.write(`::add-mask::${message}\n`); - break; - } - } + this._provider.mask(message); } /** @@ -312,47 +725,210 @@ export class Logger { * @param value The value of the environment variable. */ public CI_setEnvironmentVariable(name: string, value: string): void { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - // needs to be appended to the temporary file specified in the GITHUB_ENV environment variable - const githubEnv = process.env.GITHUB_ENV; - // echo "MY_ENV_VAR=myValue" >> $GITHUB_ENV - if (githubEnv) { - fs.appendFileSync(githubEnv, `${name}=${value}\n`, { encoding: 'utf8' }); - } - break; - } - } + this._provider.setEnvironmentVariable(name, value); } public CI_setOutput(name: string, value: string): void { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - // needs to be appended to the temporary file specified in the GITHUB_OUTPUT environment variable - const githubOutput = process.env.GITHUB_OUTPUT; - // echo "myOutput=myValue" >> $GITHUB_OUTPUT - if (githubOutput) { - fs.appendFileSync(githubOutput, `${name}=${value}\n`, { encoding: 'utf8' }); - } - break; - } + this._provider.setOutput(name, value); + } + + private static formatDurationMs(ms: number | undefined): string { + if (ms === undefined || !Number.isFinite(ms)) { return '-'; } + if (ms < 1000) { return `${Math.round(ms)}ms`; } + return `${(ms / 1000).toFixed(1)}s`; + } + + private static truncateStr(s: string, max: number): string { + return s.length <= max ? 
s : s.slice(0, max) + '…'; + } + + private static truncateSummaryToByteLimit(summary: string, byteLimit: number): string { + const footer = `\n***Summary truncated due to size limits.***\n`; + const footerSize = Buffer.byteLength(footer, 'utf8'); + const lines = summary.split('\n'); + let rebuilt = ''; + for (const line of lines) { + const nextSize = Buffer.byteLength(rebuilt + line + '\n', 'utf8') + footerSize; + if (nextSize > byteLimit) { break; } + rebuilt += `${line}\n`; } + return rebuilt + footer; } - public CI_appendWorkflowSummary(telemetry: any[]) { - switch (this._ci) { - case 'GITHUB_ACTIONS': { - const githubSummary = process.env.GITHUB_STEP_SUMMARY; + /** + * Returns the markdown byte limit for a given output target. + * Workflow summary may be backend constrained; stdout is intentionally uncapped. + */ + public getMarkdownByteLimit(target: MarkdownTarget): number { + return this._provider.getMarkdownByteLimit(target); + } - if (githubSummary) { - let table = `| Key | Value |\n| --- | ----- |\n`; - telemetry.forEach(item => { - table += `| ${item.key} | ${item.value} |\n`; - }); + public CI_appendWorkflowSummary(name: string, telemetry: UTP[], options?: { projectPath?: string; additionalLogEntries?: UTP[] }) { + if (telemetry.length === 0) { return; } + if (this.getMarkdownByteLimit('workflow-summary') === Number.POSITIVE_INFINITY) { + return; + } + const excludedTypes = new Set(['MemoryLeaks', 'MemoryLeak']); + const filtered = telemetry.filter(entry => !excludedTypes.has(entry.type || '')); + if (filtered.length === 0) { return; } + + const completedActions = filtered.filter( + e => e.type === 'Action' && e.phase === 'End' + ); + const testResults = collectTestResults(filtered); + const additional = options?.additionalLogEntries ?? 
[]; + const merged = mergeLogEntriesPreferringSeverity([ + ...buildMergedLogList(filtered), + ...additional.filter(e => e.type === 'LogEntry' || e.type === 'Compiler'), + ]); + const pathFiltered = filterMergedByPath(merged, options); + const summaryLogs = filterNoiseFromSummaryLogEntries(pathFiltered); + const bySeverity = groupBySeverity(summaryLogs); + const limit = this.getMarkdownByteLimit('workflow-summary'); + + const builders: (() => string)[] = [ + () => this.buildSummaryTimelineAndMergedLog(name, completedActions, bySeverity, testResults, limit), + () => this.buildSummaryCollapsibleWithMergedLog(name, completedActions, bySeverity, testResults, limit), + () => this.buildSummaryTimelineAndCounts(name, completedActions, summaryLogs.length, testResults, limit), + ]; + let summary = ''; + for (const build of builders) { + summary = build(); + if (Buffer.byteLength(summary, 'utf8') <= limit) { break; } + } + if (Buffer.byteLength(summary, 'utf8') > limit) { + summary = Logger.truncateSummaryToByteLimit(summary, limit); + } + this._provider.appendStepSummary(summary); + } - fs.appendFileSync(githubSummary, table, { encoding: 'utf8' }); - } + /** + * Builds summary: stats + action table + unit-test block + severity foldouts. + */ + private buildSummaryTimelineAndMergedLog( + name: string, + completedActions: UTP[], + bySeverity: { errorCritical: UTP[]; warning: UTP[]; info: UTP[] }, + testResults: TestResultSummary[], + byteLimit: number + ): string { + let out = `## ${name} Summary\n\n`; + + const totalDurationMs = completedActions.reduce( + (sum, a) => sum + (a.duration ?? (a.durationMicroseconds != null ? a.durationMicroseconds / 1000 : 0)), + 0 + ); + const totalSec = totalDurationMs / 1000; + const totalStr = totalSec >= 60 ? 
`${Math.round(totalSec / 60)}m ${Math.round(totalSec % 60)}s` : `${totalSec.toFixed(1)}s`; + out += `Errors: ${bySeverity.errorCritical.length}\n`; + out += `Warnings: ${bySeverity.warning.length}\n`; + out += `Total duration: ${totalStr}\n`; + out += `Actions: ${completedActions.length}\n`; + if (testResults.length > 0) { + out += `Tests: ${testResults.length}\n`; + } + out += '\n'; + + if (completedActions.length > 0) { + const remaining = byteLimit - Buffer.byteLength(out, 'utf8'); + out += renderBuildActionsFoldoutMarkdown(completedActions, remaining); + } + + if (testResults.length > 0) { + const remaining = byteLimit - Buffer.byteLength(out, 'utf8'); + out += buildUnitTestJobSummaryMarkdown(testResults, remaining, ''); + } + + const limit = byteLimit; + const appendFoldout = (title: string, entries: UTP[], dropSuffix: string, openByDefault?: boolean): void => { + if (entries.length === 0) return; + const openAttr = openByDefault ? ' open' : ''; + out += `${title} (${entries.length})\n\n`; + out += '```text\n'; + const appended = appendWorkflowSummaryLogLines(out, entries, limit); + out = appended.out; + if (appended.omitted > 0) { + out += `... and ${appended.omitted} more ${dropSuffix}\n`; } + out += '```\n\n'; + out += `\n\n`; + }; + + appendFoldout('Error', bySeverity.errorCritical, '(see annotations).', true); + appendFoldout('Warning', bySeverity.warning, '(truncated; see full log).'); + appendFoldout('Info', bySeverity.info, '(truncated; see full log).'); + + return out; + } + + /** + * Builds summary with timeline in a
and merged log foldouts by severity. + * Used when primary builder would exceed size limit. + */ + private buildSummaryCollapsibleWithMergedLog( + name: string, + completedActions: UTP[], + bySeverity: { errorCritical: UTP[]; warning: UTP[]; info: UTP[] }, + testResults: TestResultSummary[], + byteLimit: number + ): string { + let out = `## ${name} Summary\n\n`; + + if (completedActions.length > 0) { + const remaining = byteLimit - Buffer.byteLength(out, 'utf8'); + out += renderBuildActionsFoldoutMarkdown(completedActions, remaining); } + + if (testResults.length > 0) { + const remaining = byteLimit - Buffer.byteLength(out, 'utf8'); + out += buildUnitTestJobSummaryMarkdown(testResults, remaining, ''); + } + + const limit = byteLimit; + const appendFoldout = (title: string, entries: UTP[], dropSuffix: string, openByDefault?: boolean): void => { + if (entries.length === 0) return; + const openAttr = openByDefault ? ' open' : ''; + out += `${title} (${entries.length})\n\n`; + out += '```text\n'; + const appended = appendWorkflowSummaryLogLines(out, entries, limit); + out = appended.out; + if (appended.omitted > 0) out += `... and ${appended.omitted} more ${dropSuffix}\n`; + out += '```\n\n'; + out += `
\n\n`; + }; + appendFoldout('Error', bySeverity.errorCritical, '(see annotations).', true); + appendFoldout('Warning', bySeverity.warning, '(truncated; see full log).'); + appendFoldout('Info', bySeverity.info, '(truncated; see full log).'); + + return out; + } + + /** + * Fallback: list timeline (when actions exist) + unit-test block (when present) + compact count lines. + * Used when even collapsible summary would exceed 1 MB. + */ + private buildSummaryTimelineAndCounts( + name: string, + completedActions: UTP[], + logCount: number, + testResults: TestResultSummary[], + byteLimit: number + ): string { + let out = `## ${name} Summary\n\n`; + if (completedActions.length > 0) { + const remaining = byteLimit - Buffer.byteLength(out, 'utf8'); + out += renderBuildActionsFoldoutMarkdown(completedActions, remaining); + } + if (testResults.length > 0) { + const remaining = byteLimit - Buffer.byteLength(out, 'utf8'); + out += buildUnitTestJobSummaryMarkdown(testResults, remaining, ''); + } + out += `Log entries: ${logCount}\n`; + out += `Actions: ${completedActions.length}\n`; + if (testResults.length > 0) { + out += `Tests: ${testResults.length}\n`; + } + out += `\nSee annotations for details.\n`; + return out; } -} \ No newline at end of file +} diff --git a/src/unity-editor.ts b/src/unity-editor.ts index 60da2e63..e8eea3d2 100644 --- a/src/unity-editor.ts +++ b/src/unity-editor.ts @@ -306,6 +306,7 @@ export class UnityEditor { const baseEditorEnv: NodeJS.ProcessEnv = { ...process.env, UNITY_THISISABUILDMACHINE: '1', + DISABLE_EMBEDDED_BUILD_PIPELINE_PLUGIN_LOGGING: '1', ...(linuxEnvOverrides ?? 
{}) }; diff --git a/src/unity-hub.ts b/src/unity-hub.ts index 35b5cf58..62fe06b1 100644 --- a/src/unity-hub.ts +++ b/src/unity-hub.ts @@ -380,6 +380,7 @@ wget -qO - https://hub.unity3d.com/linux/keys/public | gpg --dearmor | sudo tee sudo sh -c 'echo "deb [signed-by=/usr/share/keyrings/Unity_Technologies_ApS.gpg] https://hub.unity3d.com/linux/repos/deb stable main" > /etc/apt/sources.list.d/unityhub.list' sudo apt-get update --allow-releaseinfo-change sudo apt-get install -y --no-install-recommends --only-upgrade unityhub${version ? '=' + version : ''}`]); + this.logger.info(`Unity Hub updated successfully.`); } else { throw new Error(`Unsupported platform: ${process.platform}`); } diff --git a/src/unity-logging.ts b/src/unity-logging.ts index 52cbb9a2..699de95e 100644 --- a/src/unity-logging.ts +++ b/src/unity-logging.ts @@ -1,6 +1,12 @@ import * as fs from 'fs'; import * as path from 'path'; -import { LogLevel, Logger } from './logging'; +import { + LogLevel, + Logger, + buildUnitTestJobSummaryMarkdown, + TestResultSummary, + utpToTestResultSummary +} from './logging'; import { Delay, WaitForFileToBeUnlocked } from './utilities'; import { Phase, @@ -9,8 +15,9 @@ import { UTPBase, UTPMemoryLeak, UTPPlayerBuildInfo, + UTPTestStatus, normalizeTelemetryEntry -} from './utp/utp'; +} from './utp'; /** * Result of the tailLogFile function containing cleanup resources. 
@@ -24,8 +31,8 @@ export interface LogTailResult { telemetry: UTP[]; } -// Detects GitHub-style annotation markers to avoid emitting duplicates -const githubAnnotationPrefixRegex = /\n::[a-z]+::/i; +// Detects workflow command markers to avoid emitting duplicate annotations +const annotationCommandPrefixRegex = /\n::[a-z]+::/i; // Matches ANSI escape sequences (CSI and single-character) const ansiEscapeSequenceRegex = /\u001b(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g; @@ -82,16 +89,208 @@ export function sanitizeTelemetryJson(raw: string | undefined): string | undefin return sanitized; } +/** Builds the warning when a `##utp:` payload includes unrecognized root properties. Exported for tests. */ +export function formatUtpUnrecognizedTopLevelPropertiesMessage( + unknownTopLevelKeys: string[], + fullTelemetryLine: string +): string { + return `UTP entry contains unrecognized top-level properties: ${unknownTopLevelKeys.join(', ')}\nFull line: ${fullTelemetryLine}`; +} + +/** + * Single-line debug text for `--log-level UTP` for telemetry types that do not use the action / memory / player-build tables. + * Returns `undefined` when the type should fall back to unknown-type handling (warn + raw JSON). + */ +export function describeUtpForUtpLogLevel(utp: UTP): string | undefined { + switch (utp.type) { + case 'Compiler': + case 'LogEntry': { + const u = utp as UTPBase; + const loc = u.file != null && u.line != null ? `${u.file}:${u.line}` : (u.file ?? ''); + const sev = u.severity != null ? String(u.severity) : ''; + const msg = (u.message ?? ''); + return `[UTP] ${utp.type} ${sev} ${loc} ${msg}`.replace(/\s+/gu, ' ').trim(); + } + case 'TestStatus': { + const u = utp as UTPTestStatus; + const name = (u.name ?? u.description ?? '—').trim(); + const dur = u.duration ?? (u.durationMicroseconds != null ? u.durationMicroseconds / 1000 : 0); + const msg = (u.message ?? ''); + return `[UTP] TestStatus state=${u.state ?? 
'?'} durMs=${dur} ${name} ${msg}`.replace(/\s+/gu, ' ').trim(); + } + case 'TestPlan': + case 'ScreenSettings': + case 'PlayerSettings': + case 'BuildSettings': + case 'PlayerSystemInfo': + case 'QualitySettings': + return `[UTP] ${utp.type} ${JSON.stringify(utp)}`; + default: + return undefined; + } +} + function sanitizeStackTrace(raw: string | undefined): string | undefined { if (!raw) { return undefined; } const sanitized = raw - .replace(githubAnnotationPrefixRegex, '') + .replace(annotationCommandPrefixRegex, '') .replace(ansiEscapeSequenceRegex, '') .trim(); if (sanitized === '') { return undefined; } return sanitized; } +interface StackFrame { + file: string; + line: number; + title: string; +} + +const MAX_STACK_FRAME_ANNOTATIONS = 5; +const MAX_PLAIN_SCAN_ANNOTATIONS = 100; + +interface PlainLogIssue { + severity: Severity.Error | Severity.Warning; + message: string; + file?: string; + line?: number; +} + +export interface NormalizedAnnotationPath { + absoluteFile?: string; + annotationFile?: string; +} + +function normalizePathSlashes(filePath: string): string { + return path.normalize(filePath).replace(/\\/g, '/'); +} + +/** + * Normalizes a candidate issue file path for annotation and project-path checks. + * - absoluteFile: used for `isFileUnderProjectPath` gating. + * - annotationFile: project-relative path preferred for GitHub annotation rendering. + */ +export function normalizeAnnotationPath(filePath: string | undefined, projectPath: string | undefined): NormalizedAnnotationPath { + if (!filePath) { + return {}; + } + + const trimmed = filePath.trim(); + if (!trimmed) { + return {}; + } + + const projectRootAbsolute = projectPath ? path.resolve(projectPath) : undefined; + const normalizedProject = projectRootAbsolute ? normalizePathSlashes(projectRootAbsolute) : undefined; + const isAbsolute = path.isAbsolute(trimmed); + const absoluteFile = normalizePathSlashes(isAbsolute + ? trimmed + : (projectRootAbsolute ? 
path.resolve(projectRootAbsolute, trimmed) : trimmed)); + + if (!normalizedProject) { + return { absoluteFile, annotationFile: normalizePathSlashes(trimmed) }; + } + + if (!isFileUnderProjectPath(absoluteFile, normalizedProject)) { + return { absoluteFile }; + } + + const relative = normalizePathSlashes(path.relative(normalizedProject, absoluteFile)); + if (!relative || relative.startsWith('../')) { + return { absoluteFile }; + } + + return { + absoluteFile, + annotationFile: relative, + }; +} + +function parsePlainLogIssue(line: string): PlainLogIssue | undefined { + const paren = line.match(/^(.+?)\((\d+)(?:,\d+)?\):\s*(warning|error)\b[:\s-]*(.*)$/i); + if (paren && paren[1] && paren[2] && paren[3]) { + const severity = paren[3].toLowerCase() === 'warning' ? Severity.Warning : Severity.Error; + const file = paren[1].trim().replace(/\\/g, '/'); + const lineNum = parseInt(paren[2], 10); + const remainder = (paren[4] ?? '').trim(); + const message = remainder.length > 0 ? remainder : line.trim(); + const issue: PlainLogIssue = { severity, file, message }; + if (Number.isFinite(lineNum)) { + issue.line = lineNum; + } + return issue; + } + + const colon = line.match(/^(.+?):(\d+):\s*(warning|error)\b[:\s-]*(.*)$/i); + if (colon && colon[1] && colon[2] && colon[3]) { + const severity = colon[3].toLowerCase() === 'warning' ? Severity.Warning : Severity.Error; + const file = colon[1].trim().replace(/\\/g, '/'); + const lineNum = parseInt(colon[2], 10); + const remainder = (colon[4] ?? '').trim(); + const message = remainder.length > 0 ? remainder : line.trim(); + const issue: PlainLogIssue = { severity, file, message }; + if (Number.isFinite(lineNum)) { + issue.line = lineNum; + } + return issue; + } + + const generic = line.match(/\b(error|warning)\b[:\s-]+(.+)/i); + if (generic && generic[1] && generic[2]) { + const severity = generic[1].toLowerCase() === 'warning' ? 
Severity.Warning : Severity.Error; + return { severity, message: generic[2].trim() }; + } + + return undefined; +} + +/** + * True if filePath is the project root or under it. Normalizes separators; on Windows compares case-insensitively. + * Exported for unit tests. + */ +export function isFileUnderProjectPath(filePath: string, projectRoot: string): boolean { + const normFile = normalizePathSlashes(filePath); + const normRoot = normalizePathSlashes(projectRoot); + const base = normRoot.endsWith('/') ? normRoot : `${normRoot}/`; + if (process.platform === 'win32') { + const f = normFile.toLowerCase(); + const r = normRoot.toLowerCase(); + const b = base.toLowerCase(); + return f === r || f.startsWith(b); + } + return normFile === normRoot || normFile.startsWith(base); +} + +function parseStackFrames(stackTrace: string, projectPath: string | undefined): StackFrame[] { + const frames: StackFrame[] = []; + const lines = stackTrace.split(/\r?\n/).map(l => l.trim()).filter(Boolean); + for (const stackLine of lines) { + const inMatch = stackLine.match(/\s+in\s+([^\s]+):(\d+)\s*$/); + const parenMatch = stackLine.match(/\(([^)]+):(\d+)\)\s*$/); + const plainMatch = stackLine.match(/^(.+):(\d+)\s*$/); + let file: string | undefined; + let lineNum: number | undefined; + if (inMatch && inMatch[1] != null && inMatch[2] != null) { + file = inMatch[1].replace(/\\/g, '/'); + lineNum = parseInt(inMatch[2], 10); + } else if (parenMatch && parenMatch[1] != null && parenMatch[2] != null) { + file = parenMatch[1].replace(/\\/g, '/'); + lineNum = parseInt(parenMatch[2], 10); + } else if (plainMatch && plainMatch[1] != null && plainMatch[2] != null) { + file = plainMatch[1].replace(/\\/g, '/'); + lineNum = parseInt(plainMatch[2], 10); + } + const line = lineNum !== undefined && Number.isFinite(lineNum) ? 
lineNum : undefined; + if (file != null && line != null && line > 0) { + const normalized = normalizeAnnotationPath(file, projectPath); + if (projectPath != null && normalized.absoluteFile && normalized.annotationFile) { + frames.push({ file: normalized.annotationFile, line, title: stackLine }); + } + } + } + return frames; +} + const MIN_DESCRIPTION_COLUMN_WIDTH = 16; const DEFAULT_TERMINAL_WIDTH = 120; const TERMINAL_WIDTH_SAFETY_MARGIN = 2; @@ -956,11 +1155,24 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L const logPollingInterval = 250; let pendingPartialLine = ''; const telemetry: UTP[] = []; + const testResults: TestResultSummary[] = []; + const scannedLogEntries: UTP[] = []; + const seenIssueKeys = new Set(); + const seenAnnotationKeys = new Set(); + let plainScanAnnotations = 0; + /** Dedupe stdout test table rows when Unity emits duplicate TestStatus lines (key: name + state + description). */ + const seenTestStatusKeys = new Set(); const logger = Logger.instance; const actionAccumulator = new ActionTelemetryAccumulator(); const actionTableRenderer = new ActionTableRenderer(process.stdout.isTTY === true && process.env.CI !== 'true'); const utpLogPath = buildUtpLogPath(logPath); let telemetryFlushed = false; + const buildIssueKey = (file: string | undefined, lineNo: number | undefined, message: string): string => { + const normalized = normalizeAnnotationPath(file, projectPath); + const canonicalFile = (normalized.absoluteFile ?? normalizePathSlashes(file ?? '')).toLowerCase(); + const canonicalLine = lineNo ?? 
0; + return `${canonicalFile}\u0000${canonicalLine}\u0000${message}`; + }; const renderActionTable = (): void => { const snapshot = actionAccumulator.snapshot(); @@ -973,6 +1185,17 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L if (telemetryFlushed) { return; } telemetryFlushed = true; await writeUtpTelemetryLog(utpLogPath, telemetry, logger); + const parsed = path.parse(logPath); + Logger.instance.CI_appendWorkflowSummary( + parsed.name, + telemetry, + projectPath != null && projectPath !== '' ? { projectPath, additionalLogEntries: scannedLogEntries } : { additionalLogEntries: scannedLogEntries } + ); + if (testResults.length > 0) { + const limit = logger.getMarkdownByteLimit('stdout'); + const summary = buildUnitTestJobSummaryMarkdown(testResults, limit, '\n'); + process.stdout.write(summary); + } }; const writeStdoutThenTableContent = (content: string, restoreTable: boolean = true): void => { @@ -995,8 +1218,36 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L if (!sanitizedJson) { return; } const utpJson = JSON.parse(sanitizedJson); - const utp = normalizeTelemetryEntry(utpJson); + const { utp, unknownTopLevelKeys } = normalizeTelemetryEntry(utpJson); + if (unknownTopLevelKeys.length > 0) { + logger.warn(formatUtpUnrecognizedTopLevelPropertiesMessage(unknownTopLevelKeys, line)); + } telemetry.push(utp); + const utpMsg = (utp.message ?? '').trim(); + if ((utp.type === 'LogEntry' || utp.type === 'Compiler') && utpMsg !== '') { + seenIssueKeys.add(buildIssueKey(utp.file, utp.line, utpMsg)); + } + if (utp.type === 'TestStatus') { + const ts = utp as UTP & { name?: string; state?: number; description?: string }; + const dedupeKey = `${ts.name ?? ''}\u0000${ts.state ?? ''}\u0000${ts.description ?? 
''}`; + if (!seenTestStatusKeys.has(dedupeKey)) { + seenTestStatusKeys.add(dedupeKey); + const result = utpToTestResultSummary(utp); + testResults.push(result); + } + if ((ts.state === 2 || ts.state === 0) && ts.message && !annotationCommandPrefixRegex.test(ts.message)) { + const normalizedPath = normalizeAnnotationPath(utp.file, projectPath); + const lineNumber = utp.line; + const title = (ts.name ?? ts.description ?? 'Test failure').trim(); + if (normalizedPath.annotationFile && lineNumber) { + const key = buildIssueKey(normalizedPath.annotationFile, lineNumber, ts.message); + if (!seenAnnotationKeys.has(key)) { + seenAnnotationKeys.add(key); + logger.annotate(ts.state === 2 ? LogLevel.ERROR : LogLevel.WARN, ts.message, normalizedPath.annotationFile, lineNumber, undefined, undefined, undefined, title); + } + } + } + } if (utp.message && 'severity' in utp && (utp.severity === Severity.Error || utp.severity === Severity.Exception || utp.severity === Severity.Assert)) { @@ -1007,14 +1258,22 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L messageLevel = remappedLevel; } - const file = utp.file ? utp.file.replace(/\\/g, '/') : undefined; + const normalizedPath = normalizeAnnotationPath(utp.file, projectPath); const stacktrace = sanitizeStackTrace(utp.stackTrace); const message = stacktrace == undefined ? 
utp.message : `${utp.message}\n${stacktrace}`; - if (!githubAnnotationPrefixRegex.test(message)) { + if (!annotationCommandPrefixRegex.test(message)) { // only annotate if the file is within the current project - if (projectPath && file && file.startsWith(projectPath)) { - logger.annotate(LogLevel.ERROR, message, file, utp.line); + if (normalizedPath.annotationFile) { + logger.annotate(LogLevel.ERROR, message, normalizedPath.annotationFile, utp.line); + // Link stack trace to annotations: emit one annotation per frame (capped) for clickable stack in Checks + if (stacktrace && projectPath) { + const frames = parseStackFrames(stacktrace, projectPath); + const toEmit = frames.slice(0, MAX_STACK_FRAME_ANNOTATIONS); + for (const frame of toEmit) { + logger.annotate(LogLevel.ERROR, frame.title, frame.file, frame.line, undefined, undefined, undefined, 'Stack frame'); + } + } } else { switch (messageLevel) { case LogLevel.WARN: @@ -1037,6 +1296,31 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L logger.warn(`Failed to parse telemetry JSON: ${error} -- raw: ${jsonPart}`); } } else { + const scan = parsePlainLogIssue(line); + if (scan) { + const key = buildIssueKey(scan.file, scan.line, scan.message); + if (!seenIssueKeys.has(key)) { + seenIssueKeys.add(key); + scannedLogEntries.push({ + type: 'Compiler', + severity: scan.severity, + message: scan.message, + file: scan.file, + line: scan.line, + } as UTP); + } + if (!annotationCommandPrefixRegex.test(scan.message) && plainScanAnnotations < MAX_PLAIN_SCAN_ANNOTATIONS) { + const normalizedPath = normalizeAnnotationPath(scan.file, projectPath); + const annotationKey = buildIssueKey(normalizedPath.annotationFile ?? scan.file, scan.line, scan.message); + if (!seenAnnotationKeys.has(annotationKey)) { + if (normalizedPath.annotationFile && scan.line) { + seenAnnotationKeys.add(annotationKey); + plainScanAnnotations++; + logger.annotate(scan.severity === Severity.Warning ? 
LogLevel.WARN : LogLevel.ERROR, scan.message, normalizedPath.annotationFile, scan.line); + } + } + } + } if (Logger.instance.logLevel !== LogLevel.UTP) { process.stdout.write(`${line}\n`); } @@ -1057,6 +1341,7 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L break; } case 'MemoryLeaks': + case 'MemoryLeak': logger.debug(formatMemoryLeakTable(utp as UTPMemoryLeak)); break; case 'PlayerBuildInfo': { @@ -1069,11 +1354,16 @@ export function TailLogFile(logPath: string, projectPath: string | undefined): L break; } - default: + default: { + const desc = describeUtpForUtpLogLevel(utp); + if (desc !== undefined) { + logger.debug(desc); + break; + } logger.warn(`UTP entry has unknown type: ${utp.type ?? 'undefined'}`); - // Print raw JSON for unhandled UTP types writeStdoutThenTableContent(`${JSON.stringify(utp)}\n`); break; + } } } diff --git a/src/utp/utp.ts b/src/utp.ts similarity index 79% rename from src/utp/utp.ts rename to src/utp.ts index a549304a..f917ad53 100644 --- a/src/utp/utp.ts +++ b/src/utp.ts @@ -1,4 +1,4 @@ -import { Logger } from "../logging"; +import { Logger } from "./logging"; export class UTPBase { type?: string; @@ -20,13 +20,19 @@ export class UTPBase { errors?: unknown[]; } +export class UTPAction extends UTPBase { } + export class UTPMemoryLeak extends UTPBase { allocatedMemory?: number; memoryLabels?: Record | Array>; } +export class UTPMemoryLeaks extends UTPMemoryLeak { } + export class UTPLogEntry extends UTPBase { } +export class UTPCompiler extends UTPBase { } + export class UTPTestPlan extends UTPBase { tests?: string[]; } @@ -113,10 +119,13 @@ export interface PlayerBuildInfoStep { } export class UTPPlayerBuildInfo extends UTPBase { + success?: boolean; steps?: PlayerBuildInfoStep[]; } export type UTP = + | UTPAction + | UTPCompiler | UTPBase | UTPLogEntry | UTPTestPlan @@ -127,6 +136,7 @@ export type UTP = | UTPQualitySettings | UTPTestStatus | UTPMemoryLeak + | UTPMemoryLeaks | UTPPlayerBuildInfo; 
export enum Phase { @@ -143,7 +153,11 @@ export enum Severity { Assert = 'Assert' } -const allowedUtpKeys = new Set([ +/** + * Root-level JSON keys on UTP objects that this CLI recognizes. Other keys are still parsed + * but reported via {@link normalizeTelemetryEntry}'s `unknownTopLevelKeys` for logging. + */ +export const UTP_SUPPORTED_TOP_LEVEL_PROPERTIES = new Set([ 'allocatedMemory', 'BuildSettings', 'description', @@ -165,6 +179,7 @@ const allowedUtpKeys = new Set([ 'QualitySettings', 'ScreenSettings', 'severity', + 'success', 'stacktrace', 'stackTrace', 'state', @@ -175,12 +190,19 @@ const allowedUtpKeys = new Set([ 'version', ]); +export interface NormalizeTelemetryResult { + utp: UTP; + /** Top-level property names present in the payload but not in {@link UTP_SUPPORTED_TOP_LEVEL_PROPERTIES}. */ + unknownTopLevelKeys: string[]; +} + /** - * Normalizes UTP telemetry entries to canonical shapes and reports unexpected properties. + * Normalizes UTP telemetry entries to canonical shapes. Unknown top-level keys are listed + * for the caller to log (with the raw `##utp:` line when tailing logs). 
*/ -export function normalizeTelemetryEntry(entry: unknown): UTP { +export function normalizeTelemetryEntry(entry: unknown): NormalizeTelemetryResult { if (!entry || typeof entry !== 'object') { - return entry as UTP; + return { utp: entry as UTP, unknownTopLevelKeys: [] }; } const utp = entry as UTP; @@ -216,16 +238,12 @@ export function normalizeTelemetryEntry(entry: unknown): UTP { Logger.instance.warn('UTP entry missing type property; telemetry entry may be ignored.'); } - const extras: string[] = []; + const unknownTopLevelKeys: string[] = []; for (const key of Object.keys(record)) { - if (!allowedUtpKeys.has(key)) { - extras.push(key); + if (!UTP_SUPPORTED_TOP_LEVEL_PROPERTIES.has(key)) { + unknownTopLevelKeys.push(key); } } - if (extras.length > 0) { - Logger.instance.warn(`UTP entry contains unrecognized properties: ${extras.join(', ')}`); - } - - return utp; + return { utp, unknownTopLevelKeys }; } \ No newline at end of file diff --git a/tests/fixtures/utp-ci/README.md b/tests/fixtures/utp-ci/README.md new file mode 100644 index 00000000..cc06a40a --- /dev/null +++ b/tests/fixtures/utp-ci/README.md @@ -0,0 +1,5 @@ +# UTP CI fixtures + +Synthetic files used by `tests/run-utp-tests-contract.sh` are generated inline in that script. + +Real CI artifacts for regression reviews are stored under repository `_temp/` (gitignored) when downloaded locally, e.g. `_temp/-artifacts-full/`. 
diff --git a/tests/fixtures/utp/compiler-and-logentry.json b/tests/fixtures/utp/compiler-and-logentry.json new file mode 100644 index 00000000..fa1c7176 --- /dev/null +++ b/tests/fixtures/utp/compiler-and-logentry.json @@ -0,0 +1,32 @@ +[ + { + "type": "Compiler", + "version": 2, + "phase": "Immediate", + "time": 1776545848626, + "processId": 11122, + "severity": "Error", + "message": "Assets/UnityCliTests/CompilerErrors.cs(2,8): error CS1029: #error: 'Intentional compiler error: CS1029'", + "stacktrace": "", + "line": 2, + "file": "Assets/UnityCliTests/CompilerErrors.cs", + "stackTrace": "", + "fileName": "Assets/UnityCliTests/CompilerErrors.cs", + "lineNumber": 2 + }, + { + "type": "LogEntry", + "version": 2, + "phase": "Immediate", + "time": 1776545849441, + "processId": 11122, + "severity": "Error", + "message": "Scripts have compiler errors.", + "stacktrace": "", + "line": 376, + "file": "./Runtime/Utilities/Argv.cpp", + "stackTrace": "", + "fileName": "./Runtime/Utilities/Argv.cpp", + "lineNumber": 376 + } +] diff --git a/tests/fixtures/utp/memory-leaks.json b/tests/fixtures/utp/memory-leaks.json new file mode 100644 index 00000000..61447a42 --- /dev/null +++ b/tests/fixtures/utp/memory-leaks.json @@ -0,0 +1,16 @@ +[ + { + "type": "MemoryLeaks", + "version": 2, + "phase": "Immediate", + "time": 1776560249323, + "processId": 11224, + "allocatedMemory": 11431386, + "memoryLabels": [ + { "Default": 22013 }, + { "Permanent": 16136 }, + { "Thread": 1084656 }, + { "GfxDevice": 36008 } + ] + } +] diff --git a/tests/fixtures/utp/player-build-info.json b/tests/fixtures/utp/player-build-info.json new file mode 100644 index 00000000..1c7ad9b5 --- /dev/null +++ b/tests/fixtures/utp/player-build-info.json @@ -0,0 +1,16 @@ +[ + { + "success": true, + "type": "PlayerBuildInfo", + "version": 2, + "phase": "Immediate", + "time": 1776554398641, + "processId": 14606, + "steps": [ + { "description": "Preprocess Player", "duration": 473 }, + { "description": "Prepare For Build", 
"duration": 2023 }, + { "description": "Postprocess built player", "duration": 559541 } + ], + "duration": 743780 + } +] diff --git a/tests/fixtures/utp/test-status.json b/tests/fixtures/utp/test-status.json new file mode 100644 index 00000000..4a6263a9 --- /dev/null +++ b/tests/fixtures/utp/test-status.json @@ -0,0 +1,47 @@ +[ + { + "type": "TestStatus", + "version": 2, + "phase": "Immediate", + "time": 1000, + "processId": 1, + "name": "EditMode.Foo.Passes", + "state": 1, + "duration": 12, + "description": "Passing test" + }, + { + "type": "TestStatus", + "version": 2, + "phase": "Immediate", + "time": 1001, + "processId": 1, + "name": "EditMode.Foo.Fails", + "state": 2, + "durationMicroseconds": 5000000, + "message": "Expected 1 Was 2", + "description": "Failing test" + }, + { + "type": "TestStatus", + "version": 2, + "phase": "Immediate", + "time": 1002, + "processId": 1, + "name": "EditMode.Foo.Skipped", + "state": 3, + "duration": 0, + "description": "Skipped test" + }, + { + "type": "TestStatus", + "version": 2, + "phase": "Immediate", + "time": 1003, + "processId": 1, + "name": "EditMode.Foo.Inconclusive", + "state": 0, + "duration": 5, + "description": "Inconclusive" + } +] diff --git a/tests/hub-cdn-urls.test.ts b/tests/hub-cdn-urls.test.ts new file mode 100644 index 00000000..f04b5785 --- /dev/null +++ b/tests/hub-cdn-urls.test.ts @@ -0,0 +1,41 @@ +/** + * Locks Hub CDN URL contracts against Unity's public CDN (HEAD, short timeouts). + * Fails if Unity removes or reshuffles artifacts we rely on. 
+ */ +jest.setTimeout(90_000); + +async function httpStatus(url: string, method: 'HEAD' | 'GET' = 'HEAD'): Promise { + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), 60_000); + try { + const res = await fetch(url, { method, redirect: 'follow', signal: controller.signal }); + return res.status; + } finally { + clearTimeout(timer); + } +} + +describe('Unity Hub public CDN URLs', () => { + it('serves prod Windows arch-specific installers (not legacy UnityHubSetup.exe)', async () => { + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/UnityHubSetup-x64.exe')).toBe(200); + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/UnityHubSetup-arm64.exe')).toBe(200); + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/UnityHubSetup.exe')).toBe(404); + }); + + it('serves pinned Hub semver Windows layout (single UnityHubSetup.exe)', async () => { + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/3.12.0/UnityHubSetup.exe')).toBe(200); + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/3.12.0/UnityHubSetup-x64.exe')).toBe(404); + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/3.12.0/UnityHubSetup-arm64.exe')).toBe(404); + }); + + it('serves prod and pinned Hub macOS arm64 dmgs (installer path used by unity-cli)', async () => { + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/UnityHubSetup-arm64.dmg')).toBe(200); + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/3.12.0/UnityHubSetup-arm64.dmg')).toBe(200); + }); + + it('serves latest.yml for Hub version discovery (latest-linux.yml is not published)', async () => { + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/latest.yml')).toBe(200); + expect(await httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/latest-mac.yml')).toBe(200); + expect(await 
httpStatus('https://public-cdn.cloud.unity3d.com/hub/prod/latest-linux.yml')).toBe(404); + }); +}); diff --git a/tests/logger-provider.test.ts b/tests/logger-provider.test.ts new file mode 100644 index 00000000..d78a3e09 --- /dev/null +++ b/tests/logger-provider.test.ts @@ -0,0 +1,66 @@ +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { GitHubActionsLoggerProvider, GitHubAnnotationLevel } from '../src/github-actions-ci'; +import { LocalCliLoggerProvider } from '../src/logger-provider'; + +describe('logger providers', () => { + afterEach(() => { + jest.restoreAllMocks(); + delete process.env.GITHUB_ENV; + delete process.env.GITHUB_OUTPUT; + delete process.env.GITHUB_STEP_SUMMARY; + }); + + it('github provider enforces 1MB workflow summary limit and uncapped stdout', () => { + const provider = new GitHubActionsLoggerProvider(); + expect(provider.getMarkdownByteLimit('workflow-summary')).toBe(1024 * 1024); + expect(provider.getMarkdownByteLimit('stdout')).toBe(Number.POSITIVE_INFINITY); + }); + + it('local provider is safe no-op for CI side effects and uncapped markdown', () => { + const provider = new LocalCliLoggerProvider(); + expect(provider.getMarkdownByteLimit('workflow-summary')).toBe(Number.POSITIVE_INFINITY); + expect(provider.getMarkdownByteLimit('stdout')).toBe(Number.POSITIVE_INFINITY); + expect(() => provider.mask('secret')).not.toThrow(); + expect(() => provider.setEnvironmentVariable('A', 'B')).not.toThrow(); + expect(() => provider.setOutput('A', 'B')).not.toThrow(); + expect(() => provider.appendStepSummary('hello')).not.toThrow(); + }); + + it('github provider formats annotations with metadata and escaping', () => { + const writeSpy = jest.spyOn(process.stdout, 'write').mockImplementation(() => true as any); + const provider = new GitHubActionsLoggerProvider(); + provider.annotate(GitHubAnnotationLevel.Error, 'line1\nline2', { + file: 'Assets/Test.cs', + line: 10, + title: 'Compiler', + }); + 
expect(writeSpy).toHaveBeenCalled(); + const output = String(writeSpy.mock.calls[0][0]); + expect(output).toContain('::error '); + expect(output).toContain('file=Assets/Test.cs'); + expect(output).toContain('line=10'); + expect(output).toContain('title=Compiler'); + expect(output).toContain('line1%0Aline2'); + }); + + it('github provider appends env/output/summary files when configured', () => { + const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'unity-cli-provider-')); + const envFile = path.join(tempDir, 'env'); + const outputFile = path.join(tempDir, 'output'); + const summaryFile = path.join(tempDir, 'summary'); + process.env.GITHUB_ENV = envFile; + process.env.GITHUB_OUTPUT = outputFile; + process.env.GITHUB_STEP_SUMMARY = summaryFile; + const provider = new GitHubActionsLoggerProvider(); + + provider.setEnvironmentVariable('KEY', 'VALUE'); + provider.setOutput('OUT', '123'); + provider.appendStepSummary('summary'); + + expect(fs.readFileSync(envFile, 'utf8')).toBe('KEY=VALUE\n'); + expect(fs.readFileSync(outputFile, 'utf8')).toBe('OUT=123\n'); + expect(fs.readFileSync(summaryFile, 'utf8')).toBe('summary'); + }); +}); diff --git a/tests/logging-summary.test.ts b/tests/logging-summary.test.ts new file mode 100644 index 00000000..cef432a8 --- /dev/null +++ b/tests/logging-summary.test.ts @@ -0,0 +1,142 @@ +import { Severity } from '../src/utp'; +import { + mergeLogEntriesPreferringSeverity, + buildTestResultsTableMarkdown, + buildUnitTestJobSummaryMarkdown, + stripSummaryNoiseFromLogMessage, + truncateStringToUtf8ByteLength, + utpToTestResultSummary, +} from '../src/logging'; + +describe('truncateStringToUtf8ByteLength', () => { + it('returns the string unchanged when it fits', () => { + expect(truncateStringToUtf8ByteLength('hello', 100)).toBe('hello'); + }); + + it('truncates with ellipsis when UTF-8 length exceeds the budget', () => { + const long = 'a'.repeat(200); + const out = truncateStringToUtf8ByteLength(long, 20); + 
expect(out.endsWith('…')).toBe(true); + expect(Buffer.byteLength(out, 'utf8')).toBeLessThanOrEqual(20); + }); +}); + +describe('stripSummaryNoiseFromLogMessage', () => { + it('removes access token noise and trims', () => { + expect(stripSummaryNoiseFromLogMessage('Scripts have compiler errors.\nAccess token is unavailable; failed to update')).toBe( + 'Scripts have compiler errors.' + ); + expect(stripSummaryNoiseFromLogMessage('Access token is unavailable; failed to update')).toBe(''); + }); +}); + +describe('mergeLogEntriesPreferringSeverity', () => { + it('keeps Error over Info when dedupe key matches', () => { + const info = { + type: 'LogEntry', + message: 'dup', + file: 'Assets/Foo.cs', + line: 3, + severity: Severity.Info, + }; + const err = { + type: 'LogEntry', + message: 'dup', + file: 'Assets/Foo.cs', + line: 3, + severity: Severity.Error, + }; + const merged = mergeLogEntriesPreferringSeverity([info, err]); + expect(merged).toHaveLength(1); + expect(merged[0].severity).toBe(Severity.Error); + }); + + it('keeps first entry when severities tie', () => { + const a = { + type: 'Compiler', + message: 'm', + file: 'Assets/Foo.cs', + line: 1, + severity: Severity.Warning, + }; + const b = { + type: 'Compiler', + message: 'm', + file: 'Assets/Foo.cs', + line: 1, + severity: Severity.Warning, + }; + const merged = mergeLogEntriesPreferringSeverity([a, b]); + expect(merged).toHaveLength(1); + expect(merged[0]).toBe(a); + }); +}); + +describe('buildTestResultsTableMarkdown', () => { + it('escapes pipe characters in cells', () => { + const rows = [ + utpToTestResultSummary({ + type: 'TestStatus', + name: 'A|B', + state: 1, + duration: 10, + } as any), + ]; + const md = buildTestResultsTableMarkdown(rows, 1024 * 1024, ''); + expect(md).toContain('A\\|B'); + expect(md.split('\n').filter(l => l.startsWith('|')).length).toBeGreaterThanOrEqual(3); + }); + + it('escapes backslashes before pipes so markdown cells stay well-formed', () => { + const rows = [ + 
utpToTestResultSummary({ + type: 'TestStatus', + name: 'a\\b|c', + state: 1, + duration: 10, + } as any), + ]; + const md = buildTestResultsTableMarkdown(rows, 1024 * 1024, ''); + expect(md).toMatch(/a\\\\b\\|c/); + }); +}); + +describe('buildUnitTestJobSummaryMarkdown', () => { + it('renders aggregate counts and failure-first rows', () => { + const rows = [ + utpToTestResultSummary({ + type: 'TestStatus', + name: 'Pass.Test', + state: 1, + duration: 10, + } as any), + utpToTestResultSummary({ + type: 'TestStatus', + name: 'Fail.Test', + state: 2, + duration: 20, + message: 'assert fail', + file: 'Assets/Tests/Fail.cs', + line: 42, + } as any), + ]; + const md = buildUnitTestJobSummaryMarkdown(rows, 1024 * 1024, ''); + expect(md).toContain('### Unit test results'); + expect(md).toContain('**2** tests'); + expect(md).toContain('Fail.Test (Assets/Tests/Fail.cs:42)'); + }); +}); + +describe('utpToTestResultSummary', () => { + it('preserves file and line when available', () => { + const summary = utpToTestResultSummary({ + type: 'TestStatus', + name: 'A.Test', + state: 2, + file: 'Assets/A.cs', + line: 12, + } as any); + expect(summary.file).toBe('Assets/A.cs'); + expect(summary.line).toBe(12); + }); +}); diff --git a/tests/logging-workflow-summary.test.ts b/tests/logging-workflow-summary.test.ts new file mode 100644 index 00000000..2459e088 --- /dev/null +++ b/tests/logging-workflow-summary.test.ts @@ -0,0 +1,180 @@ +import { Logger } from '../src/logging'; +import type { UTP } from '../src/utp'; + +describe('workflow summary formatting', () => { + it('renders build timeline as a table when budget allows', () => { + const summaryWrites: string[] = []; + const logger = Logger.instance as unknown as { + _provider: { + appendStepSummary: (summary: string) => void; + getMarkdownByteLimit: (target: 'workflow-summary' | 'stdout') => number; + }; + }; + + logger._provider = { + appendStepSummary: (summary: string) => summaryWrites.push(summary), + getMarkdownByteLimit: () => 
1024 * 1024, + }; + + const telemetry: UTP[] = [ + { + type: 'Action', + phase: 'End', + description: 'Build Player', + duration: 1234, + errors: [], + } as UTP, + { + type: 'Compiler', + severity: 'Error', + file: 'Assets/UnityCliTests/CompilerErrors.cs', + line: 2, + message: "error CS1029: #error: 'Intentional compiler error: CS1029'", + } as UTP, + ]; + + Logger.instance.CI_appendWorkflowSummary('Build-Unity', telemetry); + + expect(summaryWrites).toHaveLength(1); + const summary = summaryWrites[0]; + expect(summary).toContain('
Build actions (1)'); + expect(summary).toContain('| Status | Duration | Errors | Action |'); + expect(summary).toContain('| ✅ | 1.2s | 0 | Build Player |'); + expect(summary).toContain(`Assets/UnityCliTests/CompilerErrors.cs(2): error CS1029: #error: 'Intentional compiler error: CS1029'`); + expect(summary).not.toContain('```text\n✅'); + }); + + it('does not cap log lines at a fixed character length when under byte budget', () => { + const summaryWrites: string[] = []; + const logger = Logger.instance as unknown as { + _provider: { + appendStepSummary: (summary: string) => void; + getMarkdownByteLimit: (target: 'workflow-summary' | 'stdout') => number; + }; + }; + + logger._provider = { + appendStepSummary: (summary: string) => summaryWrites.push(summary), + getMarkdownByteLimit: () => 1024 * 1024, + }; + + const longTail = 'Z'.repeat(250); + const telemetry: UTP[] = [ + { + type: 'LogEntry', + severity: 'Warning', + message: `Overlay.png (TextureImporter) -> artifact tail ${longTail}`, + } as UTP, + ]; + + Logger.instance.CI_appendWorkflowSummary('Build-Unity', telemetry); + + expect(summaryWrites).toHaveLength(1); + expect(summaryWrites[0]).toContain(longTail); + expect(summaryWrites[0]).not.toMatch(/artifact tail Z+…/); + }); + + it('collapses multiline log messages into one summary line', () => { + const summaryWrites: string[] = []; + const logger = Logger.instance as unknown as { + _provider: { + appendStepSummary: (summary: string) => void; + getMarkdownByteLimit: (target: 'workflow-summary' | 'stdout') => number; + }; + }; + + logger._provider = { + appendStepSummary: (summary: string) => summaryWrites.push(summary), + getMarkdownByteLimit: () => 1024 * 1024, + }; + + const telemetry: UTP[] = [ + { + type: 'Compiler', + severity: 'Error', + file: 'Assets/UnityCliTests/CompilerErrors.cs', + line: 2, + message: 'Scripts have compiler errors.\nAccess token is unavailable; failed to update', + } as UTP, + ]; + + 
Logger.instance.CI_appendWorkflowSummary('Build-Unity', telemetry); + + expect(summaryWrites).toHaveLength(1); + const summary = summaryWrites[0]; + expect(summary).toContain('Scripts have compiler errors.'); + expect(summary).not.toContain('Access token is unavailable; failed to update'); + expect(summary).not.toContain('\n- Access token is unavailable; failed to update'); + }); + + it('omits access-token noise-only log lines from the summary', () => { + const summaryWrites: string[] = []; + const logger = Logger.instance as unknown as { + _provider: { + appendStepSummary: (summary: string) => void; + getMarkdownByteLimit: (target: 'workflow-summary' | 'stdout') => number; + }; + }; + + logger._provider = { + appendStepSummary: (summary: string) => summaryWrites.push(summary), + getMarkdownByteLimit: () => 1024 * 1024, + }; + + const telemetry: UTP[] = [ + { + type: 'LogEntry', + severity: 'Warning', + message: 'Access token is unavailable; failed to update', + } as UTP, + ]; + + Logger.instance.CI_appendWorkflowSummary('Build-Unity', telemetry); + + expect(summaryWrites).toHaveLength(1); + const summary = summaryWrites[0]; + expect(summary).toContain('Errors: 0'); + expect(summary).not.toContain('Access token is unavailable'); + }); + + it('drops action table and uses plaintext timeline when near byte limit', () => { + const summaryWrites: string[] = []; + const logger = Logger.instance as unknown as { + _provider: { + appendStepSummary: (summary: string) => void; + getMarkdownByteLimit: (target: 'workflow-summary' | 'stdout') => number; + }; + }; + + logger._provider = { + appendStepSummary: (summary: string) => summaryWrites.push(summary), + getMarkdownByteLimit: () => 380, + }; + + const telemetry: UTP[] = [ + ...Array.from({ length: 12 }, (_, i) => ({ + type: 'Action', + phase: 'End', + description: `Build Player step ${i} with a very long action description to consume summary bytes quickly`, + duration: 1234 + i, + errors: [], + } as UTP)), + { + type: 
'Compiler', + severity: 'Error', + file: 'Assets/UnityCliTests/CompilerErrors.cs', + line: 2, + message: "error CS1029: #error: 'Intentional compiler error: CS1029'", + } as UTP, + ]; + + Logger.instance.CI_appendWorkflowSummary('Build-Unity', telemetry); + + expect(summaryWrites).toHaveLength(1); + const summary = summaryWrites[0]; + expect(summary).toContain('
Build actions (12)'); + expect(summary).not.toContain('| Status | Duration | Errors | Action |'); + expect(summary).toContain('```text'); + expect(summary).toContain('Build Player step'); + }); +}); diff --git a/tests/run-utp-tests-contract.sh b/tests/run-utp-tests-contract.sh new file mode 100644 index 00000000..e4531d83 --- /dev/null +++ b/tests/run-utp-tests-contract.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Contract tests for UTP CI assertion helpers (bash; run on Linux CI or Git Bash). +set -euo pipefail + +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +# shellcheck source=../.github/actions/scripts/utp-ci-assertion-helpers.sh +source "$ROOT/.github/actions/scripts/utp-ci-assertion-helpers.sh" + +fail() { + echo "::error::$1" >&2 + exit 1 +} + +tmpdir="$(mktemp -d)" +trap 'rm -rf "$tmpdir"' EXIT + +# --- UTP severity: warning scenarios ignore Assert-only noise --- +printf '%s\n' '{"type":"Log","severity":"Assert","message":"StackAllocator"}' >"$tmpdir/warn-assert.json" +if utp_signals_failure_for_expected_success CompilerWarnings "$tmpdir/warn-assert.json"; then + fail "CompilerWarnings + Assert-only should not signal failure for expected-success check" +fi + +printf '%s\n' '{"type":"Log","severity":"Error","message":"boom"}' >"$tmpdir/warn-err.json" +if ! utp_signals_failure_for_expected_success CompilerWarnings "$tmpdir/warn-err.json"; then + fail "CompilerWarnings + Error should signal failure for expected-success check" +fi + +printf '%s\n' '{"severity":"Assert"}' >"$tmpdir/nonwarn-assert.json" +if ! utp_signals_failure_for_expected_success EditmodeTestsPassing "$tmpdir/nonwarn-assert.json"; then + fail "Non-warning scenario should still treat Assert as failure for expected-success check" +fi + +# --- UTP any-signal (expected-failure branch) --- +if ! 
utp_signals_any_severity_problem "$tmpdir/nonwarn-assert.json"; then + fail "utp_signals_any_severity_problem should match Assert" +fi + +# --- NUnit XML discovery --- +export UNITY_PROJECT_PATH="$tmpdir/proj" +mkdir -p "$UNITY_PROJECT_PATH/Builds/Logs" +printf '\n' >"$UNITY_PROJECT_PATH/Builds/Logs/EditmodeTestsPassing-results.xml" +found="$(find_nunit_results_xml EditmodeTestsPassing)" +if [ "$found" != "$UNITY_PROJECT_PATH/Builds/Logs/EditmodeTestsPassing-results.xml" ]; then + fail "find_nunit_results_xml should resolve default Builds/Logs path (got: $found)" +fi + +mkdir -p "$UNITY_PROJECT_PATH/Builds/Alt" +printf '\n' >"$UNITY_PROJECT_PATH/Builds/Alt/EditmodeTestsPassing-results.xml" +rm -f "$UNITY_PROJECT_PATH/Builds/Logs/EditmodeTestsPassing-results.xml" +found2="$(find_nunit_results_xml EditmodeTestsPassing)" +if [ "$found2" != "$UNITY_PROJECT_PATH/Builds/Alt/EditmodeTestsPassing-results.xml" ]; then + fail "find_nunit_results_xml should discover alternate path under project (got: $found2)" +fi + +# --- Log completion heuristic --- +printf '%s\n' 'Some noise' 'Test run completed.' 'more' >"$UNITY_PROJECT_PATH/Builds/Logs/EditmodeTestsPassing-EditMode-Unity-1.log" +if ! edit_play_log_suggests_tests_completed_ok EditmodeTestsPassing EditMode; then + fail "edit_play_log_suggests_tests_completed_ok should match Test run completed marker" +fi + +printf '%s\n' 'Test run completed.' 
'test run failed' >"$UNITY_PROJECT_PATH/Builds/Logs/EditmodeTestsPassing-EditMode-Unity-2.log" +if edit_play_log_suggests_tests_completed_ok EditmodeTestsPassing EditMode; then + fail "edit_play_log_suggests_tests_completed_ok should reject logs that also contain failure markers" +fi + +echo "run-utp-tests-contract: OK" diff --git a/tests/unity-logging-project-path.test.ts b/tests/unity-logging-project-path.test.ts new file mode 100644 index 00000000..9cb87116 --- /dev/null +++ b/tests/unity-logging-project-path.test.ts @@ -0,0 +1,57 @@ +import { isFileUnderProjectPath, normalizeAnnotationPath } from '../src/unity-logging'; +import * as path from 'path'; + +describe('isFileUnderProjectPath', () => { + const origPlatform = process.platform; + + afterEach(() => { + Object.defineProperty(process, 'platform', { value: origPlatform }); + }); + + it('returns true for file under unix-style project root', () => { + expect(isFileUnderProjectPath('/home/runner/proj/Assets/a.cs', '/home/runner/proj')).toBe(true); + }); + + it('returns false when file is outside project', () => { + expect(isFileUnderProjectPath('/other/Assets/a.cs', '/home/runner/proj')).toBe(false); + }); + + it('on win32 matches case-insensitively', () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + expect(isFileUnderProjectPath('D:/Work/MyProj/Assets/Foo.cs', 'd:/work/myproj')).toBe(true); + }); +}); + +describe('normalizeAnnotationPath', () => { + const origPlatform = process.platform; + + afterEach(() => { + Object.defineProperty(process, 'platform', { value: origPlatform }); + }); + + it('resolves relative project file to project-relative annotation path', () => { + const out = normalizeAnnotationPath('Assets/Scripts/Foo.cs', '/home/runner/proj'); + const expectedAbsolute = path.resolve('/home/runner/proj', 'Assets/Scripts/Foo.cs').replace(/\\/g, '/'); + expect(out.absoluteFile).toBe(expectedAbsolute); + expect(out.annotationFile).toBe('Assets/Scripts/Foo.cs'); + }); + + 
it('returns only absolute path when file is outside project root', () => { + const out = normalizeAnnotationPath('/other/Foo.cs', '/home/runner/proj'); + expect(out.absoluteFile).toBe('/other/Foo.cs'); + expect(out.annotationFile).toBeUndefined(); + }); + + it('normalizes windows relative paths for annotation output', () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + const out = normalizeAnnotationPath('Assets\\UnityCliTests\\CompilerErrors.cs', 'D:\\Work\\MyProj'); + expect(out.absoluteFile).toBe('D:/Work/MyProj/Assets/UnityCliTests/CompilerErrors.cs'); + expect(out.annotationFile).toBe('Assets/UnityCliTests/CompilerErrors.cs'); + }); + + it('supports windows case-insensitive project roots', () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + const out = normalizeAnnotationPath('D:\\WORK\\MYPROJ\\Assets\\Bar.cs', 'd:/work/myproj'); + expect(out.annotationFile).toBe('Assets/Bar.cs'); + }); +}); diff --git a/tests/unity-logging.test.ts b/tests/unity-logging.test.ts index 21748711..08b81e5c 100644 --- a/tests/unity-logging.test.ts +++ b/tests/unity-logging.test.ts @@ -1,4 +1,11 @@ -import { type ActionTableSnapshot, formatActionTimelineTable, sanitizeTelemetryJson, stringDisplayWidth } from '../src/unity-logging'; +import { + type ActionTableSnapshot, + describeUtpForUtpLogLevel, + formatActionTimelineTable, + normalizeAnnotationPath, + sanitizeTelemetryJson, + stringDisplayWidth +} from '../src/unity-logging'; describe('sanitizeTelemetryJson', () => { it('removes trailing null characters that break JSON.parse', () => { @@ -145,3 +152,54 @@ describe('formatActionTimelineTable', () => { expect(formatted?.text).toContain('# of Errors'); }); }); + +describe('describeUtpForUtpLogLevel', () => { + it('returns a one-line debug string for Compiler', () => { + const s = describeUtpForUtpLogLevel({ + type: 'Compiler', + severity: 'Error', + message: 'bad', + file: 'Assets/A.cs', + line: 3, + } as any); + 
expect(s).toContain('[UTP] Compiler'); + expect(s).toContain('Assets/A.cs:3'); + expect(s).toContain('bad'); + }); + + it('returns a one-line debug string for TestStatus', () => { + const s = describeUtpForUtpLogLevel({ + type: 'TestStatus', + name: 'T.Name', + state: 1, + duration: 42, + } as any); + expect(s).toContain('TestStatus'); + expect(s).toContain('state=1'); + expect(s).toContain('T.Name'); + }); + + it('returns JSON for settings-like types', () => { + const s = describeUtpForUtpLogLevel({ + type: 'BuildSettings', + BuildSettings: { Platform: 'Android' }, + } as any); + expect(s).toContain('BuildSettings'); + expect(s).toContain('Android'); + }); + + it('returns undefined for an unknown type string', () => { + expect(describeUtpForUtpLogLevel({ type: 'FutureUnityType', x: 1 } as any)).toBeUndefined(); + }); +}); + +describe('normalizeAnnotationPath edge cases', () => { + it('returns empty result for undefined file', () => { + expect(normalizeAnnotationPath(undefined, '/tmp/proj')).toEqual({}); + }); + + it('keeps normalized relative path without project path', () => { + const out = normalizeAnnotationPath('Assets\\X.cs', undefined); + expect(out.annotationFile).toBe('Assets/X.cs'); + }); +}); diff --git a/tests/utp-telemetry-fixtures.test.ts b/tests/utp-telemetry-fixtures.test.ts new file mode 100644 index 00000000..ef705f75 --- /dev/null +++ b/tests/utp-telemetry-fixtures.test.ts @@ -0,0 +1,82 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import { normalizeTelemetryEntry, UTP_SUPPORTED_TOP_LEVEL_PROPERTIES } from '../src/utp'; +import { buildTestResultsTableMarkdown, utpToTestResultSummary } from '../src/logging'; +import { formatUtpUnrecognizedTopLevelPropertiesMessage } from '../src/unity-logging'; + +const fixturesDir = path.join(__dirname, 'fixtures', 'utp'); + +function loadFixture(name: string): unknown[] { + const p = path.join(fixturesDir, name); + const raw = fs.readFileSync(p, 'utf8'); + const data = JSON.parse(raw) as unknown; 
+ return Array.isArray(data) ? data : [data]; +} + +describe('UTP telemetry fixtures', () => { + const fixtureFiles = fs.readdirSync(fixturesDir).filter(f => f.endsWith('.json')); + + it.each(fixtureFiles)('%s has only supported top-level keys and normalizes cleanly', fileName => { + for (const obj of loadFixture(fileName)) { + const { utp, unknownTopLevelKeys } = normalizeTelemetryEntry(obj); + expect(unknownTopLevelKeys).toEqual([]); + expect(utp).toBeDefined(); + for (const k of Object.keys(obj as object)) { + expect(UTP_SUPPORTED_TOP_LEVEL_PROPERTIES.has(k)).toBe(true); + } + } + }); + + it('merges legacy stacktrace and file/line fields on Compiler', () => { + const [first] = loadFixture('compiler-and-logentry.json'); + const { utp } = normalizeTelemetryEntry(first); + expect(utp.type).toBe('Compiler'); + expect(utp.stackTrace).toBe(''); + expect(utp.file).toBe('Assets/UnityCliTests/CompilerErrors.cs'); + expect(utp.fileName).toBe('Assets/UnityCliTests/CompilerErrors.cs'); + expect(utp.line).toBe(2); + expect(utp.lineNumber).toBe(2); + }); + + it('maps TestStatus fixtures to summaries and markdown', () => { + const rows = loadFixture('test-status.json').map(e => { + const { utp } = normalizeTelemetryEntry(e); + return utpToTestResultSummary(utp); + }); + expect(rows[0].status).toBe('✅'); + expect(rows[1].status).toBe('❌'); + expect(rows[2].status).toBe('⏭️'); + expect(rows[3].status).toBe('◯'); + expect(rows[1].durationMs).toBe(5000); + const md = buildTestResultsTableMarkdown(rows, 1024 * 1024, ''); + expect(md).toContain('### Test results'); + expect(md).toContain('EditMode.Foo.Passes'); + }); + + it('reports unknown top-level keys without failing normalization', () => { + const payload = { + type: 'Compiler', + version: 2, + phase: 'Immediate', + time: 1, + processId: 1, + severity: 'Warning', + message: 'm', + file: 'Assets/X.cs', + line: 1, + futureUnityOnlyField: 'surprise', + }; + const { utp, unknownTopLevelKeys } = normalizeTelemetryEntry(payload); + 
expect(unknownTopLevelKeys).toEqual(['futureUnityOnlyField']); + expect(utp.type).toBe('Compiler'); + }); +}); + +describe('formatUtpUnrecognizedTopLevelPropertiesMessage', () => { + it('includes unknown key names and the full ##utp line', () => { + const line = '##utp:{"type":"Action","extra":1}'; + const msg = formatUtpUnrecognizedTopLevelPropertiesMessage(['extra'], line); + expect(msg).toContain('unrecognized top-level properties: extra'); + expect(msg).toContain(`Full line: ${line}`); + }); +}); diff --git a/tests/utp-workflow-profiles.test.ts b/tests/utp-workflow-profiles.test.ts new file mode 100644 index 00000000..e33ca17a --- /dev/null +++ b/tests/utp-workflow-profiles.test.ts @@ -0,0 +1,35 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import { parse } from 'yaml'; + +function loadYaml(filePath: string): any { + return parse(fs.readFileSync(filePath, 'utf8')); +} + +describe('UTP workflow profiles', () => { + const repoRoot = path.resolve(__dirname, '..'); + + it('defines profile-aware test selection in run-unity-test-batch action', () => { + const actionPath = path.join(repoRoot, '.github', 'actions', 'run-unity-test-batch', 'action.yml'); + const action = loadYaml(actionPath); + + expect(action.inputs['test-profile'].default).toBe('normal'); + expect(action.inputs['tests-input'].default).toBe(''); + + const prepareStep = action.runs.steps.find((step: any) => step.name === 'Prepare test list and install packages'); + expect(prepareStep).toBeDefined(); + expect(prepareStep.run).toContain('case "$test_profile" in'); + expect(prepareStep.run).toContain('normal)'); + expect(prepareStep.run).toContain('negative)'); + expect(prepareStep.run).toContain('all)'); + }); + + it('wires integration workflow to normal matrix plus dedicated negative scenario run', () => { + const workflowPath = path.join(repoRoot, '.github', 'workflows', 'integration-tests.yml'); + const workflow = loadYaml(workflowPath); + + 
expect(workflow.jobs.validate.with['utp-test-profile']).toBe('normal'); + expect(workflow.jobs['validate-negative-scenarios']).toBeDefined(); + expect(workflow.jobs['validate-negative-scenarios'].with['utp-test-profile']).toBe('negative'); + }); +}); diff --git a/unity-tests/BuildErrors.cs b/unity-tests/BuildErrors.cs new file mode 100644 index 00000000..dc344195 --- /dev/null +++ b/unity-tests/BuildErrors.cs @@ -0,0 +1,20 @@ +using UnityEditor; +using UnityEditor.Build; +using UnityEditor.Build.Reporting; + +namespace UnityCli.UtpSamples +{ + /// + /// Forces the build pipeline to fail by throwing a BuildFailedException. + /// Place under an Editor folder when copying into a project. + /// + public class BuildErrors : IPreprocessBuildWithReport + { + public int callbackOrder => 0; + + public void OnPreprocessBuild(BuildReport report) + { + throw new BuildFailedException("Intentional build failure."); + } + } +} diff --git a/unity-tests/BuildWarnings.cs b/unity-tests/BuildWarnings.cs new file mode 100644 index 00000000..e4c2a3d7 --- /dev/null +++ b/unity-tests/BuildWarnings.cs @@ -0,0 +1,20 @@ +using UnityEditor; +using UnityEditor.Build; +using UnityEditor.Build.Reporting; + +namespace UnityCli.UtpSamples +{ + /// + /// Emits a build-time warning via the build pipeline (no custom UTP JSON logging). + /// Place under an Editor folder when copying into a project. + /// + public class BuildWarnings : IPreprocessBuildWithReport + { + public int callbackOrder => 0; + + public void OnPreprocessBuild(BuildReport report) + { + UnityEngine.Debug.LogWarning("Intentional build warning."); + } + } +} diff --git a/unity-tests/CompilerErrors.cs b/unity-tests/CompilerErrors.cs new file mode 100644 index 00000000..056f16ee --- /dev/null +++ b/unity-tests/CompilerErrors.cs @@ -0,0 +1,4 @@ +// Intentional compiler error for matrix scenario coverage. +#error Intentional compiler error: CS1029 + +// Note: file is kept minimal so it can be copied into a project to force a build failure.
diff --git a/unity-tests/CompilerWarnings.cs b/unity-tests/CompilerWarnings.cs new file mode 100644 index 00000000..ae35bd22 --- /dev/null +++ b/unity-tests/CompilerWarnings.cs @@ -0,0 +1,20 @@ +using UnityEngine; + +namespace UnityCli.UtpSamples +{ + /// + /// Introduces a benign compiler warning (call to an obsolete member, CS0618) without emitting custom logs. + /// + public class CompilerWarnings : MonoBehaviour + { + private void Awake() + { + ObsoleteApi(); // CS0618: call to obsolete member + } + + [System.Obsolete("Intentional warning", false)] + private static void ObsoleteApi() + { + } + } +} diff --git a/unity-tests/EditmodeTestsErrors.cs b/unity-tests/EditmodeTestsErrors.cs new file mode 100644 index 00000000..a0304d8d --- /dev/null +++ b/unity-tests/EditmodeTestsErrors.cs @@ -0,0 +1,16 @@ +using NUnit.Framework; + +namespace UnityCli.UtpSamples +{ + /// + /// Editmode test that intentionally fails to produce real test failure output. + /// + public class EditmodeTestsErrors + { + [Test] + public void FailsEditmodeSuite() + { + Assert.Fail("Intentional editmode failure"); + } + } +} diff --git a/unity-tests/EditmodeTestsPassing.cs b/unity-tests/EditmodeTestsPassing.cs new file mode 100644 index 00000000..48c093e4 --- /dev/null +++ b/unity-tests/EditmodeTestsPassing.cs @@ -0,0 +1,16 @@ +using NUnit.Framework; + +namespace UnityCli.UtpSamples +{ + /// + /// Editmode test that passes for test matrix and summary table coverage. + /// + public class EditmodeTestsPassing + { + [Test] + public void PassesEditmodeSuite() + { + Assert.Pass("Intentional editmode pass"); + } + } +} diff --git a/unity-tests/EditmodeTestsSkipped.cs b/unity-tests/EditmodeTestsSkipped.cs new file mode 100644 index 00000000..4d7e6d88 --- /dev/null +++ b/unity-tests/EditmodeTestsSkipped.cs @@ -0,0 +1,17 @@ +using NUnit.Framework; + +namespace UnityCli.UtpSamples +{ + /// + /// Editmode test that is skipped for test matrix and summary table coverage.
+ /// + public class EditmodeTestsSkipped + { + [Test] + [Ignore("Intentional editmode skip")] + public void SkippedEditmodeSuite() + { + Assert.Fail("Should not run"); + } + } +} diff --git a/unity-tests/PlaymodeTestsErrors.cs b/unity-tests/PlaymodeTestsErrors.cs new file mode 100644 index 00000000..4c08ce9e --- /dev/null +++ b/unity-tests/PlaymodeTestsErrors.cs @@ -0,0 +1,20 @@ +using System.Collections; +using UnityEngine; +using NUnit.Framework; +using UnityEngine.TestTools; + +namespace UnityCli.UtpSamples +{ + /// + /// Playmode test that intentionally fails to generate real test failure output. + /// + public class PlaymodeTestsErrors + { + [UnityTest] + public IEnumerator FailsPlaymodeSuite() + { + yield return null; + Assert.Fail("Intentional playmode failure for test matrix coverage."); + } + } +} diff --git a/unity-tests/PlaymodeTestsPassing.cs b/unity-tests/PlaymodeTestsPassing.cs new file mode 100644 index 00000000..144fcf34 --- /dev/null +++ b/unity-tests/PlaymodeTestsPassing.cs @@ -0,0 +1,20 @@ +using System.Collections; +using UnityEngine; +using NUnit.Framework; +using UnityEngine.TestTools; + +namespace UnityCli.UtpSamples +{ + /// + /// Playmode test that passes for test matrix and summary table coverage. + /// + public class PlaymodeTestsPassing + { + [UnityTest] + public IEnumerator PassesPlaymodeSuite() + { + yield return null; + Assert.Pass("Intentional playmode pass"); + } + } +} diff --git a/unity-tests/PlaymodeTestsSkipped.cs b/unity-tests/PlaymodeTestsSkipped.cs new file mode 100644 index 00000000..a60dbedb --- /dev/null +++ b/unity-tests/PlaymodeTestsSkipped.cs @@ -0,0 +1,21 @@ +using System.Collections; +using UnityEngine; +using NUnit.Framework; +using UnityEngine.TestTools; + +namespace UnityCli.UtpSamples +{ + /// + /// Playmode test that is skipped for test matrix and summary table coverage.
+ /// + public class PlaymodeTestsSkipped + { + [UnityTest] + [Ignore("Intentional playmode skip")] + public IEnumerator SkippedPlaymodeSuite() + { + yield return null; + Assert.Fail("Should not run"); + } + } +} diff --git a/unity-tests/UnityCliTests.EditMode.Editor.asmdef b/unity-tests/UnityCliTests.EditMode.Editor.asmdef new file mode 100644 index 00000000..fc0a73b9 --- /dev/null +++ b/unity-tests/UnityCliTests.EditMode.Editor.asmdef @@ -0,0 +1,18 @@ +{ + "name": "UnityCli.EditMode.EditorTests", + "references": [], + "optionalUnityReferences": [ + "TestAssemblies" + ], + "includePlatforms": [ + "Editor" + ], + "excludePlatforms": [], + "allowUnsafeCode": false, + "overrideReferences": false, + "precompiledReferences": [], + "autoReferenced": true, + "defineConstraints": [], + "versionDefines": [], + "noEngineReferences": false +} \ No newline at end of file diff --git a/unity-tests/UnityCliTests.PlayMode.asmdef b/unity-tests/UnityCliTests.PlayMode.asmdef new file mode 100644 index 00000000..ab7fb34e --- /dev/null +++ b/unity-tests/UnityCliTests.PlayMode.asmdef @@ -0,0 +1,23 @@ +{ + "name": "UnityCli.PlayMode.Tests", + "references": [], + "optionalUnityReferences": [ + "TestAssemblies" + ], + "includePlatforms": [ + "Editor", + "WindowsStandalone64", + "LinuxStandalone64", + "macOSStandalone", + "Android", + "iOS" + ], + "excludePlatforms": [], + "allowUnsafeCode": false, + "overrideReferences": false, + "precompiledReferences": [], + "autoReferenced": true, + "defineConstraints": [], + "versionDefines": [], + "noEngineReferences": false +} \ No newline at end of file