diff --git a/.clang-format b/.clang-format index 1ceb0920ba..6f95591db7 100644 --- a/.clang-format +++ b/.clang-format @@ -1,3 +1,4 @@ +--- Language: Cpp # BasedOnStyle: Google AccessModifierOffset: -1 @@ -8,7 +9,7 @@ AlignEscapedNewlines: Left AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: true +AllowShortBlocksOnASingleLine: Empty AllowShortCaseLabelsOnASingleLine: true AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: true @@ -17,9 +18,10 @@ AllowShortLoopsOnASingleLine: true AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: true -AlwaysBreakTemplateDeclarations: Yes -BinPackArguments: false +AlwaysBreakTemplateDeclarations: Yes # yamllint disable-line rule:truthy +BinPackArguments: false BinPackParameters: false +# yamllint disable rule:colons BraceWrapping: AfterClass: false AfterControlStatement: false @@ -37,6 +39,7 @@ BraceWrapping: SplitEmptyFunction: false SplitEmptyRecord: false SplitEmptyNamespace: false +# yamllint enable rule:colons BreakBeforeBinaryOperators: None BreakBeforeBraces: WebKit BreakBeforeInheritanceComma: false @@ -59,24 +62,52 @@ DisableFormat: false ExperimentalAutoDetectBinPacking: false FixNamespaceComments: true ForEachMacros: -IncludeBlocks: Preserve +IncludeBlocks: Regroup +# yamllint disable rule:colons IncludeCategories: - - Regex: '^<.*\.h>' + - Regex: '^("|<)legate_defines\.h("|>)' Priority: 1 - - Regex: '^<.*' + - Regex: '^("|<)legate\.h("|>)' Priority: 2 - - Regex: '.*' - Priority: 3 + - Regex: '^("|<)legate/.*\.(cu)?hp*("|>)' + Priority: 10 + - Regex: '^("|<)stl/.*\.h("|>)' + Priority: 20 + - Regex: '^("|<)(legion|realm).*' + Priority: 30 + - Regex: '^("|<)kvikio.*' + Priority: 40 + - Regex: '^("|<)highfive.*' + Priority: 51 + - Regex: '^("|<)hdf5.*' + Priority: 62 + - Regex: '^("|<)fmt.*' + Priority: 63 + - Regex: '^("|<)cpptrace.*' + Priority: 64 + - Regex: '^("|<)(gtest|gmock).*' + Priority: 65 + - Regex: '^".*' + Priority: 100 + - Regex: '^<.*' + Priority: 200 IncludeIsMainRegex: '([-_](test|unittest))?$' +MainIncludeChar: Any +# yamllint enable rule:colons IndentCaseLabels: true IndentPPDirectives: None -IndentWidth: 2 +IndentWidth: 2 IndentWrappedFunctionNames: false +InsertBraces: true +InsertNewlineAtEOF: true JavaScriptQuotes: Leave JavaScriptWrapImports: true -KeepEmptyLinesAtTheStartOfBlocks: false +KeepEmptyLines: + AtEndOfFile: false + AtStartOfBlock: false + AtStartOfFile: false MacroBlockBegin: '' -MacroBlockEnd: '' +MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBinPackProtocolList: Never @@ -127,3 +158,9 @@ StatementMacros: # Be consistent with indent-width, even for people who use tab for indentation! 
 TabWidth: 2
 UseTab: Never
+Macros:
+  - LEGATE_SCOPE_GUARD(x)=do { x } while (0)
+  - LEGATE_SCOPE_FAIL(x)=do { x } while (0)
+  - LEGATE_STL_UNSPECIFIED(...)=__VA_ARGS__
+  - CPPTRACE_TRY=try
+  - CPPTRACE_CATCH(x)=catch(x)
diff --git a/.clang-tidy b/.clang-tidy
new file mode 100644
index 0000000000..cec6ee25b7
--- /dev/null
+++ b/.clang-tidy
@@ -0,0 +1,186 @@
+---
+Checks:
+  # enable everything first
+  - 'performance*'
+  - 'modernize*'
+  - 'readability*'
+  - 'clang-analyzer*'
+  - 'clang-diagnostic-*'
+  - 'bugprone*'
+  - 'misc*'
+  - 'core*'
+  - 'mpi*'
+  - 'cert-*'
+  - 'portability-*'
+  - 'google*'
+  - 'cppcoreguidelines-pro-type-cstyle-cast'
+  # then disable the stuff we don't want
+  - '-cert-dcl21-cpp' # returning non-const from operator-- or operator++
+  - '-cert-dcl50-cpp' # allow c-style variadics
+  # No reserved identifiers; both of these are aliased to bugprone-reserved-identifier,
+  # which we do enable. Leaving these enabled, however, leads to needing to specify all
+  # three (bugprone-reserved-identifier, cert-dcl51-cpp, and cert-dcl37-c) in NOLINT lines,
+  # which is a hassle. Since bugprone-reserved-identifier is enabled, the check still
+  # fires.
+  - '-cert-dcl51-cpp,-cert-dcl37-c,-cert-oop54-cpp'
+  - '-modernize-use-trailing-return-type'
+  - '-readability-function-cognitive-complexity'
+  - '-readability-implicit-bool-conversion'
+  - '-readability-braces-around-statements'
+  - '-readability-qualified-auto'
+  - '-readability-isolate-declaration'
+  - '-modernize-avoid-c-arrays'
+  - '-readability-named-parameter'
+  - '-readability-identifier-length'
+  - '-misc-non-private-member-variables-in-classes'
+  - '-bugprone-easily-swappable-parameters'
+  - '-bugprone-implicit-widening-of-multiplication-result'
+  - '-misc-include-cleaner'
+  - '-misc-header-include-cycle'
+  - '-modernize-macro-to-enum'
+  - '-misc-no-recursion'
+  # Generally speaking, if something is static then it has an express reason to be. 99% of
+  # the candidates identified by this check were flagged because they just so happened not
+  # to touch any member variables, not because they are logically static. So we disable
+  # the check.
+  - '-readability-convert-member-functions-to-static'
+  # When running in a conda env, ignore options that may have been added by conda but are unused
+  - '-clang-diagnostic-unused-command-line-argument'
+  # Given
+  #
+  #   std::make_pair(...)
+  #
+  # Results in: error: for C++11-compatibility, use pair directly. But we don't care about
+  # C++11, and so we don't care about this warning.
+  - '-google-build-explicit-make-pair'
+  # This check is incredibly expensive for absolutely no reason, and since we a) use
+  # modern google-test and b) don't have clang-tidy enabled on our testing code, we don't
+  # need to enable it!
+  - '-google-upgrade-googletest-case'
+  # This check warns if you have redundant default-initializers for class
+  # members. For example:
+  #
+  #   class Foo
+  #   {
+  #     int x_{}; // WARNING: redundant initializer here
+  #
+  #    public:
+  #     Foo(int x) : x_{x} { }
+  #   };
+  #
+  # Since Foo can only ever be constructed with an explicit value for x_ via its
+  # constructor, the default initializer is technically redundant. However, if we change
+  # the definition of Foo to also allow a default ctor, then the initializer becomes
+  # non-redundant again. It is easier to just follow the rule of "always explicitly
+  # default initialize members" than to remember to change 2 places at once.
+ - '-readability-redundant-member-init' + # Alias for readability-enum-initial-value, disable this one because the readability- + # name is easier to understand, and we don't want to silence 2 things for the same + # warning. + - '-cert-int09-c' + # This one is potentially controversial. This check warns when iterating over unordered + # containers of pointers: + # + # { + # int a = 1, b = 2; + # std::unordered_set set = {&a, &b}; + # + # for (auto *i : set) { // iteration order not deterministic + # f(i); + # } + # } + # + # On the one hand, this is a clear case of non-determinism. But on the other hand, I + # feel it is obvious that a user does not care about the order of iteration + # because... they are using unordered containers! + - '-bugprone-nondeterministic-pointer-iteration-order' +WarningsAsErrors: '*' +HeaderFileExtensions: + - '' + - h + - hh + - hpp + - hxx + - cuh + - inl +ImplementationFileExtensions: + - c + - cc + - cpp + - cxx + - cu +HeaderFilterRegex: '.*/src/.*' +SystemHeaders: false +CheckOptions: + cert-dcl16-c.NewSuffixes: 'L;LL;LU;LLU' + cert-err33-c.CheckedFunctions: '::aligned_alloc;::asctime_s;::at_quick_exit;::atexit;::bsearch;::bsearch_s;::btowc;::c16rtomb;::c32rtomb;::calloc;::clock;::cnd_broadcast;::cnd_init;::cnd_signal;::cnd_timedwait;::cnd_wait;::ctime_s;::fclose;::fflush;::fgetc;::fgetpos;::fgets;::fgetwc;::fopen;::fopen_s;::fprintf;::fprintf_s;::fputc;::fputs;::fputwc;::fputws;::fread;::freopen;::freopen_s;::fscanf;::fscanf_s;::fseek;::fsetpos;::ftell;::fwprintf;::fwprintf_s;::fwrite;::fwscanf;::fwscanf_s;::getc;::getchar;::getenv;::getenv_s;::gets_s;::getwc;::getwchar;::gmtime;::gmtime_s;::localtime;::localtime_s;::malloc;::mbrtoc16;::mbrtoc32;::mbsrtowcs;::mbsrtowcs_s;::mbstowcs;::mbstowcs_s;::memchr;::mktime;::mtx_init;::mtx_lock;::mtx_timedlock;::mtx_trylock;::mtx_unlock;::printf_s;::putc;::putwc;::raise;::realloc;::remove;::rename;::scanf;::scanf_s;::setlocale;::setvbuf;::signal;::snprintf;::snprintf_s;::sprintf;::sprintf_s;::sscanf;::sscanf_s;::strchr;::strerror_s;::strftime;::strpbrk;::strrchr;::strstr;::strtod;::strtof;::strtoimax;::strtok;::strtok_s;::strtol;::strtold;::strtoll;::strtoul;::strtoull;::strtoumax;::strxfrm;::swprintf;::swprintf_s;::swscanf;::swscanf_s;::thrd_create;::thrd_detach;::thrd_join;::thrd_sleep;::time;::timespec_get;::tmpfile;::tmpfile_s;::tmpnam;::tmpnam_s;::tss_create;::tss_get;::tss_set;::ungetc;::ungetwc;::vfprintf;::vfprintf_s;::vfscanf;::vfscanf_s;::vfwprintf;::vfwprintf_s;::vfwscanf;::vfwscanf_s;::vprintf_s;::vscanf;::vscanf_s;::vsnprintf;::vsnprintf_s;::vsprintf;::vsprintf_s;::vsscanf;::vsscanf_s;::vswprintf;::vswprintf_s;::vswscanf;::vswscanf_s;::vwprintf_s;::vwscanf;::vwscanf_s;::wcrtomb;::wcschr;::wcsftime;::wcspbrk;::wcsrchr;::wcsrtombs;::wcsrtombs_s;::wcsstr;::wcstod;::wcstof;::wcstoimax;::wcstok;::wcstok_s;::wcstol;::wcstold;::wcstoll;::wcstombs;::wcstombs_s;::wcstoul;::wcstoull;::wcstoumax;::wcsxfrm;::wctob;::wctrans;::wctype;::wmemchr;::wprintf_s;::wscanf;::wscanf_s;' + llvm-else-after-return.WarnOnUnfixable: 'false' + cert-str34-c.DiagnoseSignedUnsignedCharComparisons: 'false' + cppcoreguidelines-non-private-member-variables-in-classes.IgnoreClassesWithAllMemberVariablesBeingPublic: 'true' + google-readability-braces-around-statements.ShortStatementLines: '1' + llvm-qualified-auto.AddConstToQualified: 'false' + llvm-else-after-return.WarnOnConditionVariables: 'false' + cert-oop54-cpp.WarnOnlyIfThisHasSuspiciousField: 'false' + performance-move-const-arg.CheckTriviallyCopyableMove: 'false' + # The 
following is needed because clang tidy complains that TaskContext is always + # copied: + # + # class MyTask + # { + # // clang-tidy warning: ctx copied every time, but only used as a reference. + # // Consider making it a const reference + # void cpu_task(legate::TaskContext ctx) + # { + # ... + # } + # }; + # + # But we can't do that, because the `task_wrapper` expects to find exactly the signature + # above. So we can either add `NOLINT` everywhere, or we can just ignore `TaskContext` + # copies wholesale (which is OK because they are just pimpls, and thus, very cheap to + # copy). + performance-unnecessary-value-param.AllowedTypes: 'legate::TaskContext' + performance-inefficient-vector-operation.VectorLikeClasses: '::std::vector;legate::tuple;legate::Shape' + performance-inefficient-string-concatenation.StrictMode: 'true' + readability-simplify-boolean-expr.ChainedConditionalReturn: 'true' + readability-simplify-boolean-expr.ChainedConditionalAssignment: 'true' + bugprone-dangling-handle.HandleClasses: 'legate::Span;::std::span' + bugprone-misplaced-widening-cast.CheckImplicitCasts: 'true' + bugprone-unused-return-value.AllowCastToVoid: 'true' + readability-enum-initial-value.AllowExplicitZeroFirstInitialValue: 'false' + readability-enum-initial-value.AllowExplicitSequentialInitialValues: 'false' + readability-redundant-access-specifiers.CheckFirstDeclaration: 'true' + bugprone-lambda-function-name.IgnoreMacros: 'true' + readability-identifier-naming.ClassCase: 'CamelCase' + readability-identifier-naming.UnionCase: 'CamelCase' + readability-identifier-naming.ClassConstantCase: 'UPPER_CASE' + readability-identifier-naming.ClassIgnoredRegexp: 'tuple|has_.*|is_.*|as_.*|.*_tag|tag|.*_of' + readability-identifier-naming.ConstantMemberCase: 'UPPER_CASE' + readability-identifier-naming.ConstantMemberIgnoredRegexp: 'value' + readability-identifier-naming.EnumCase: 'CamelCase' + readability-identifier-naming.EnumConstantCase: 'UPPER_CASE' + readability-identifier-naming.FunctionCase: 'lower_case' + # We want to allow legate::Shape SHAPE_4D() + readability-identifier-naming.FunctionIgnoredRegexp: '[A-Z_0-9]+' + readability-identifier-naming.GlobalConstantCase: 'UPPER_CASE' + readability-identifier-naming.GlobalConstantIgnoredRegexp: '.*_v' + readability-identifier-naming.LocalVariableCase: 'lower_case' + # We want to allow constexpr auto MY_VAL1 + readability-identifier-naming.LocalVariableIgnoredRegexp: '[A-Z_0-9]+' + readability-identifier-naming.MacroDefinitionCase: 'UPPER_CASE' + # We want to allow MY_MACRO_PRIVATE_1_ + readability-identifier-naming.MacroDefinitionIgnoredRegexp: '[A-Z_0-9]+' + readability-identifier-naming.NamespaceCase: 'lower_case' + readability-identifier-naming.PrivateMemberCase: 'lower_case' + readability-identifier-naming.PrivateMemberSuffix: '_' + readability-identifier-naming.PrivateMethodCase: 'lower_case' + readability-identifier-naming.PrivateMethodSuffix: '_' + readability-identifier-naming.ProtectedMemberCase: 'lower_case' + readability-identifier-naming.ProtectedMemberSuffix: '_' + readability-identifier-naming.ProtectedMethodCase: 'lower_case' + readability-identifier-naming.ProtectedMethodSuffix: '_' + readability-identifier-naming.PublicMethodCase: 'lower_case' + readability-identifier-naming.ScopedEnumCase: 'CamelCase' + readability-identifier-naming.ScopedEnumConstantCase: 'UPPER_CASE' + bugprone-suspicious-stringview-data-usage.AllowedCallees: 'legate::detail::ZStringView' +... 
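For reference, the `readability-identifier-naming` options above translate to conventions like the following minimal C++ sketch. This is a hypothetical illustration only (none of these names come from the legate codebase); each comment names the rule that line exercises:

```cpp
// Hypothetical example illustrating the identifier-naming rules configured above.
#include <cstdint>

namespace my_library {  // NamespaceCase: lower_case

enum class StoreKind : std::uint8_t {  // ScopedEnumCase: CamelCase
  REGION,                              // ScopedEnumConstantCase: UPPER_CASE
  FUTURE,
};

class StorePartition {  // ClassCase: CamelCase
 public:
  static constexpr std::int32_t MAX_DIM = 4;  // ClassConstantCase: UPPER_CASE

  std::int32_t dim() const { return dim_; }  // PublicMethodCase: lower_case

 private:
  bool is_valid_() const { return dim_ >= 0; }  // PrivateMethodCase + PrivateMethodSuffix: '_'

  std::int32_t dim_{};  // PrivateMemberCase: lower_case + PrivateMemberSuffix: '_'
};

constexpr std::int32_t ALIGNMENT = 16;  // GlobalConstantCase: UPPER_CASE

std::int32_t compute_volume(std::int32_t extent)  // FunctionCase: lower_case
{
  const std::int32_t local_volume = extent * extent;  // LocalVariableCase: lower_case
  return local_volume;
}

}  // namespace my_library
```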
diff --git a/.dir-locals.el b/.dir-locals.el
new file mode 100644
index 0000000000..42469a8b39
--- /dev/null
+++ b/.dir-locals.el
@@ -0,0 +1,7 @@
+;;; legate specific configuration for Emacs
+((nil . ((indent-tabs-mode . nil)
+         (tab-width . 4)
+         (eval . (add-hook 'before-save-hook #'delete-trailing-whitespace))))
+ (python-mode . ((eval . (setq-local python-interpreter (seq-find #'executable-find '("python3" "python"))))
+                 (python-indent-offset . 4)
+                 (fill-column . 79))))
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..791480c215
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,39 @@
+# Global
+* @magnatelee @Jacobfaib
+
+# Python/Cython
+*.py @nv-legate/python-reviewers
+*.pyi @nv-legate/python-reviewers
+*.pyx @nv-legate/python-reviewers
+*.pxd @nv-legate/python-reviewers
+src/python @nv-legate/python-reviewers
+src/python/legate/jupyter @eddy16112
+pyproject.toml @nv-legate/build-reviewers
+setup.cfg @nv-legate/build-reviewers
+
+# cmake
+src/cmake @nv-legate/build-reviewers
+*.cmake @nv-legate/build-reviewers
+CMakeLists.txt @nv-legate/build-reviewers
+
+# configure
+config/ @nv-legate/python-reviewers @nv-legate/build-reviewers
+configure @nv-legate/python-reviewers @nv-legate/build-reviewers
+makefile @nv-legate/build-reviewers
+
+# CI
+.github/ @nv-legate/devops-reviewers
+continuous_integration @nv-legate/devops-reviewers
+conda @nv-legate/devops-reviewers
+scripts/ @nv-legate/devops-reviewers
+
+# cpp
+src/cpp @nv-legate/cpp-reviewers
+
+# docs
+docs/ @nv-legate/docs-reviewers
+*.md @nv-legate/docs-reviewers
+*.rst @nv-legate/docs-reviewers
+
+# Override for CODEOWNERS - last rule wins.
+CODEOWNERS @magnatelee @manopapad @Jacobfaib
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 34a08c5c7d..19cfdd6a21 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -1,3 +1,4 @@
+---
 name: Bug report
 description: Submit a bug report
 title: "[BUG] "
@@ -29,7 +30,7 @@ body:
         Platform : Linux-5.14.0-1042-oem-x86_64-with-glibc2.31
         Legion : v23.11.00.dev-16-g2499f878
         Legate : 23.11.00.dev+17.gb7b50313
-        Cunumeric : (ImportError: cannot import name 'LogicalArray' from 'legate.core')
+        Cunumeric : (ImportError: cannot import name 'LogicalArray' from 'legate')
         Numpy : 1.24.4
         Scipy : 1.10.1
         Numba : (not installed)
@@ -76,10 +77,9 @@ body:
     attributes:
      label: Example code or instructions
      description: >
-        Please provide detailed instructions to reproduce the issue. Ideally this includes a 
+        Please provide detailed instructions to reproduce the issue. Ideally this includes a
        [Complete, minimal, self-contained example code](https://stackoverflow.com/help/minimal-reproducible-example)
-        given here or as a link to code in another repository.
-      render: Python
+        given here or as a link to code in another repository.
validations: required: true - type: markdown diff --git a/.github/copy-pr-bot.yaml b/.github/copy-pr-bot.yaml index 895ba83ee5..59873746e1 100644 --- a/.github/copy-pr-bot.yaml +++ b/.github/copy-pr-bot.yaml @@ -1,3 +1,4 @@ +--- # Configuration file for `copy-pr-bot` GitHub App # https://docs.gha-runners.nvidia.com/apps/copy-pr-bot/ diff --git a/.github/release.yml b/.github/release.yml index 0a37704fb5..4bd4c0fb00 100644 --- a/.github/release.yml +++ b/.github/release.yml @@ -1,7 +1,8 @@ +--- changelog: exclude: labels: - - category:task + - category:housekeeping categories: - title: 🐛 Bug Fixes labels: @@ -14,4 +15,4 @@ changelog: - category:improvement - title: 📖 Documentation labels: - - category:documentation \ No newline at end of file + - category:documentation diff --git a/.github/workflows/ci-gh-build-release.yml b/.github/workflows/ci-gh-build-release.yml deleted file mode 100644 index 0fcd284fd5..0000000000 --- a/.github/workflows/ci-gh-build-release.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Build conda release packages - -concurrency: - group: ci-release-on-${{ github.event_name }}-from-${{ github.ref_name }} - cancel-in-progress: true - -on: - push: - workflow_dispatch: - branches: - - "pull-request/[0-9]+" - - "branch-*" - -jobs: - build: - uses: - ./.github/workflows/gh-build.yml - with: - build-target: all - repos-name: ${{ github.event.repository.name }} - runs-on: ${{ github.repository_owner == 'nv-legate' && 'linux-amd64-cpu16' || 'ubuntu-latest' }} - sha: ${{ github.sha }} - build-type: release - use-container: true - ucx-config: no_ucx - ucx-string: '' \ No newline at end of file diff --git a/.github/workflows/ci-gh-docs.yml b/.github/workflows/ci-gh-docs.yml index 22e33c5bc3..00b1a1be92 100644 --- a/.github/workflows/ci-gh-docs.yml +++ b/.github/workflows/ci-gh-docs.yml @@ -1,89 +1,44 @@ -name: Build legate.core documentation +--- +name: Docs + +concurrency: + group: ${{ startsWith(github.ref_name, 'cpp-branch') && format('unique-{0}', github.run_id) || format('ci-build-docs-on-{0}-from-{1}', github.event_name, github.ref_name) }} + cancel-in-progress: true + on: push: branches: - "pull-request/[0-9]+" - "branch-*" -env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - COMMIT: ${{ github.event.pull_request.head.sha || github.sha }} - PROJECT: github-core-ci - REF: ${{ github.event.pull_request.head.ref || github.ref }} - EVENT_NAME: ${{ github.event_name }} - LABEL: ${{ github.event.pull_request.head.label }} - REPO_URL: ${{ github.event.pull_request.head.repo.html_url || github.event.repository.html_url }} - ARTIFACTS_DIR: "${{ github.workspace }}/.artifacts" - ARTIFACT_NAME: "legate-core-docs" - # Prevent output buffering - PYTHONUNBUFFERED: 1 + - "main" + merge_group: jobs: - build: - permissions: - id-token: write # This is required for configure-aws-credentials - contents: read # This is required for actions/checkout - packages: write # This is required to push docker image to ghcr.io + build-and-test: + name: Build documentation (${{ matrix.platform }}, ${{ matrix.target-device }}, ${{ matrix.build-mode }}, ucx enabled) + strategy: + fail-fast: false + matrix: + platform: + - linux + target-device: + - gpu + build-mode: + - release + uses: + ./.github/workflows/gh-build-docs.yml + with: + platform: ${{ matrix.platform }} + target-device: ${{ matrix.target-device }} + build-mode: ${{ matrix.build-mode }} + secrets: inherit - runs-on: ${{ contains(github.repository, 'nv-legate/legate.core') && 'linux-amd64-cpu16' || 'ubuntu-latest' }} + docs-pass: + if: always() + 
needs:
+      - build-and-test
+    runs-on: linux-amd64-cpu4
+    steps:
-      - name: Dump GitHub context
-        env:
-          GITHUB_CONTEXT: ${{ toJSON(github) }}
-        run: echo "$GITHUB_CONTEXT"
-      - name: Dump job context
-        env:
-          JOB_CONTEXT: ${{ toJSON(job) }}
-        run: echo "$JOB_CONTEXT"
-      - name: Dump steps context
-        env:
-          STEPS_CONTEXT: ${{ toJSON(steps) }}
-        run: echo "$STEPS_CONTEXT"
-      - name: Dump runner context
-        env:
-          RUNNER_CONTEXT: ${{ toJSON(runner) }}
-        run: echo "$RUNNER_CONTEXT"
-      - name: Dump strategy context
-        env:
-          STRATEGY_CONTEXT: ${{ toJSON(strategy) }}
-        run: echo "$STRATEGY_CONTEXT"
-      - name: Dump matrix context
-        env:
-          MATRIX_CONTEXT: ${{ toJSON(matrix) }}
-        run: echo "$MATRIX_CONTEXT"
-
-      ####################################
-      # Actual build process starts here #
-      ####################################
-
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - if: github.repository_owner == 'nv-legate'
-        name: Get AWS credentials for sccache bucket
-        uses: aws-actions/configure-aws-credentials@v2
-        with:
-          aws-region: us-east-2
-          role-duration-seconds: 28800 # 8 hours
-          role-to-assume: arn:aws:iam::279114543810:role/gha-oidc-nv-legate
-
-      - name: Build Documentation
-        run: |
-          set -xeuo pipefail
-          docker run \
-            -e AWS_REGION \
-            -e AWS_SESSION_TOKEN \
-            -e AWS_ACCESS_KEY_ID \
-            -e AWS_SECRET_ACCESS_KEY \
-            -e GITHUB_TOKEN \
-            -v "$(pwd):$(pwd)" \
-            -v "$ARTIFACTS_DIR:$(pwd)/.artifacts" \
-            --rm "condaforge/miniforge3:latest" \
-            /bin/bash -c "cd $(pwd) && ./continuous_integration/scripts/build-legate docs"
-
-      - name: Upload documentation
-        uses: actions/upload-artifact@v3
-        with:
-          name: ${{ env.ARTIFACT_NAME }}
-          path: ${{ env.ARTIFACTS_DIR }}
\ No newline at end of file
+      - name: Check job results
+        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
+        run: exit 1
diff --git a/.github/workflows/ci-gh-gasnet.yml b/.github/workflows/ci-gh-gasnet.yml
new file mode 100644
index 0000000000..a4b94a40f2
--- /dev/null
+++ b/.github/workflows/ci-gh-gasnet.yml
@@ -0,0 +1,95 @@
+---
+name: Build Gasnet Wrapper Package
+
+defaults:
+  run:
+    shell: bash --noprofile --norc -euo pipefail {0}
+on:
+  workflow_dispatch:
+
+jobs:
+  Build_GasNet_Wrapper:
+    name: Build Gasnet wrapper
+    strategy:
+      fail-fast: false
+    runs-on: linux-amd64-gpu-l4-latest-1
+    container:
+      options: -u root
+      image: condaforge/miniforge3:latest
+    env:
+      CONDA_ROOT: "/tmp/conda-croot/gasnet-wrapper"
+      CONDA_OUTPUT: "/tmp/gasnet/output"
+      ARTIFACT_SERVER: "https://urm.nvidia.com/artifactory"
+      ARTIFACT_REPOS: "sw-legate-conda-local"
+      TARGET_PLATFORM: "noarch"
+      PKG_DIR: "gex/noarch"
+
+    steps:
+      - name: Set environment variables
+        run: |
+          BUILD_DATE="$(date +%Y%m%d)"
+          {
+            echo "ARTIFACT_NAME=Gasnet-Wrapper-Artifact-${GITHUB_SHA}"
+            echo "ARTIFACT_PATH=${CONDA_OUTPUT}"
+            echo "BUILD_DATE=${BUILD_DATE}"
+          } >> "${GITHUB_ENV}"
+
+      - name: Display environment variables
+        run: |
+          uname -a
+          echo "CONDA_ROOT=${CONDA_ROOT}"
+          echo "CONDA_OUTPUT=${CONDA_OUTPUT}"
+          echo "ARTIFACT_NAME=${ARTIFACT_NAME}"
+          echo "ARTIFACT_PATH=${ARTIFACT_PATH}"
+          echo "BUILD_DATE=${BUILD_DATE}"
+
+      - name: Checkout ${{ github.event.repository.name }} (= this repo)
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1
+
+      - name: Cleanup/Create related folders
+        run: |
+          rm -rf "${CONDA_ROOT}" "${CONDA_OUTPUT}"
+          mkdir -p "${CONDA_ROOT}" "${CONDA_OUTPUT}"
+
+      - name: Run conda build
+        run: |
+          conda_build_args=();
+          conda_build_args+=(--croot "${CONDA_ROOT}");
+          conda_build_args+=(--output-folder 
"${CONDA_OUTPUT}"); + + mamba install conda-build + conda-build "${conda_build_args[@]}" conda/gasnet_wrapper + + - name: Display output files + run: | + ls -lAhR "${CONDA_OUTPUT}" + + - name: Upload gasnet wrapper artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ env.ARTIFACT_NAME }} + path: ${{ env.ARTIFACT_PATH }} + + - name: Install pre-requisites to upload package + run: | + echo "Install pre-requisites" + apt-get update + apt-get install -y curl jq + + - name: Upload Package to URM server + if: ${{ false }} + run: | + echo "Upload gasnet wrapper package" + find "${ARTIFACT_PATH}/${TARGET_PLATFORM}/." -name "realm-gex-wrapper*.tar.bz2" | while read -r f; do + fname="$(basename "${f}")" + packageFound=$(curl -usvc-legate-github:${{ secrets.URM_ARTIFACT_TOKEN }} -X POST -H "content-type: text/plain" "${ARTIFACT_SERVER}/api/search/aql" -d 'items.find({"name":"'"${fname}"'"})' | jq -r .results[].name) + + if [[ -z ${packageFound} ]]; then + echo "Uploading the package: ${fname}" + curl -usvc-legate-github:${{ secrets.URM_ARTIFACT_TOKEN }} -T "${f}" "${ARTIFACT_SERVER}/${ARTIFACT_REPOS}/${PKG_DIR}/${fname};buildDate=${BUILD_DATE};sha=${{ github.sha }}" + else + echo "The package: ${fname} already exists on the server..skipping upload" + fi + done diff --git a/.github/workflows/ci-gh-lint.yml b/.github/workflows/ci-gh-lint.yml new file mode 100644 index 0000000000..3bb03c26da --- /dev/null +++ b/.github/workflows/ci-gh-lint.yml @@ -0,0 +1,217 @@ +--- +name: Lint + +concurrency: + group: ci-lint-on-${{ github.event_name }}-from-${{ github.ref_name }} + cancel-in-progress: true + +on: + push: + branches: + - "pull-request/[0-9]+" + - "*branch-*" + - "main" + merge_group: + +defaults: + run: + shell: bash -el {0} + +jobs: + pre-commit: + runs-on: 'linux-amd64-cpu4' + strategy: + fail-fast: false + matrix: + version: ['3.10', '3.11', '3.12'] + if: ${{ github.repository_owner == 'nv-legate' }} + permissions: + contents: read # This is required for actions/checkout + + name: "pre-commit python-${{ matrix.version }}" + + steps: + - name: Checkout ${{ github.event.repository.name }} (= this repo) + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Generate requirements.txt + run: | + { + echo "shellcheck-py" + echo "cmakelang" + echo "rich" + } > requirements.txt + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.version }} + cache: 'pip' + + - name: Install dependencies + run: | + pip install -r requirements.txt + + - name: Set LEGATE_DIR + run: | + echo "LEGATE_DIR=${{ github.workspace }}" >> "${GITHUB_ENV}" + + - name: run pre-commit + uses: pre-commit/action@v3.0.1 + + test-configure: + env: + LEGATE_ARCH: arch-ci-test-lint + runs-on: 'linux-amd64-cpu4' + strategy: + fail-fast: false + matrix: + version: ['3.10', '3.11', '3.12'] + generator: [ninja, make] + if: ${{ github.repository_owner == 'nv-legate' }} + permissions: + contents: read # This is required for actions/checkout + + name: "test-configure, Python ${{ matrix.version }}, Generator ${{ matrix.generator }}" + + steps: + - name: Checkout ${{ github.event.repository.name }} (= this repo) + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Mambaforge + uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-variant: Miniforge3 + miniforge-version: latest + activate-environment: legate + use-mamba: true + + - name: Get Week Number + id: get-week-hash + run: | + WEEK="$(/bin/date -u '+%Y%W')" + echo "week=${WEEK}" >> "${GITHUB_OUTPUT}" + + - 
name: Cache Conda env
+        uses: actions/cache@v4
+        with:
+          path: ${{ env.CONDA }}/envs
+          key:
+            ci-gh-lint-test-configure--"${{ matrix.generator }}"--${{
+            runner.os }}--${{ runner.arch }}--${{ matrix.version }}--${{
+            steps.get-week-hash.outputs.week }}-${{ env.CACHE_NUMBER }}
+        env:
+          # Bump this value to force a reset of the conda env cache (e.g. when the
+          # package list installed below changes)
+          CACHE_NUMBER: 7
+        id: cache

+      - name: Update environment
+        run: |
+          mamba create -n legate rich cmake clangxx compiler-rt pytest pytest-mock "${{ matrix.generator }}"
+        if: steps.cache.outputs.cache-hit != 'true'

+      - name: List Conda packages and info
+        run: |
+          conda info
+          conda list

+      - name: Run tests
+        run: |
+          pytest ./config/aedifix/tests

+  tidy:
+    env:
+      LEGATE_ARCH: arch-ci-lint
+    runs-on: 'linux-amd64-cpu16'
+    strategy:
+      fail-fast: true
+    if: ${{ github.repository_owner == 'nv-legate' }}
+    permissions:
+      contents: read # This is required for actions/checkout

+    steps:
+      - name: List machine info
+        run: |
+          uname -a
+          lsb_release -a

+      - name: Checkout ${{ github.event.repository.name }} (= this repo)
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0

+      - name: Setup Mambaforge
+        uses: conda-incubator/setup-miniconda@v3
+        with:
+          miniforge-variant: Miniforge3
+          miniforge-version: latest
+          activate-environment: legate
+          use-mamba: true

+      - name: Get Week Number
+        id: get-week-hash
+        run: |
+          WEEK="$(/bin/date -u '+%Y%W')"
+          echo "week=${WEEK}" >> "${GITHUB_OUTPUT}"

+      - name: Cache Conda env
+        uses: actions/cache@v4
+        with:
+          path: ${{ env.CONDA }}/envs
+          key:
+            ci-gh-lint-tidy-${{ runner.os }}--${{ runner.arch }}--${{
+            steps.get-week-hash.outputs.week }}-${{ env.CACHE_NUMBER }}
+        env:
+          # Bump this value to force a reset of the conda env cache (e.g. when the
+          # package list installed below changes)
+          CACHE_NUMBER: 9
+        id: cache

+      - name: Update environment
+        run: |
+          mamba create -n legate cmake clang clangxx clang-tools elfutils hdf5 rich ninja
+        if: steps.cache.outputs.cache-hit != 'true'

+      - name: List Conda packages and info
+        run: |
+          conda info
+          conda list

+      - name: List clang-tidy version
+        run: |
+          clang-tidy --version

+      - name: Configure legate
+        # Ninja must be installed in the env above, otherwise CMake fails with:
+        # CMake Error: CMake was unable to find a build program corresponding to "Ninja".
+        # CMAKE_MAKE_PROGRAM is not set. You probably need to select a different build
+        # tool.
+        run: |
+          ./configure \
+            --with-tests \
+            --with-benchmarks \
+            --with-cc=clang \
+            --with-cxx=clang++ || { cat configure.log; exit 1; }
+          echo "LEGATE_DIR=${{ github.workspace }}" >> "${GITHUB_ENV}"

+      - name: Run clang-tidy
+        run: |
+          # "-- -k 0" tells the underlying build tool not to stop on the first error. We
+          # want CI to unearth all clang-tidy errors in one go, otherwise the dev needs
+          # to play whack-a-mole with each additional error.
+          LEGATE_CMAKE_ARGS="-- -k 0" make tidy -j 14

+  lint-pass:
+    if: always()
+    needs:
+      - pre-commit
+      - test-configure
+      - tidy
+    runs-on: linux-amd64-cpu4
+    steps:
+      - name: Check job results
+        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
+        run: exit 1
diff --git a/.github/workflows/ci-gh-mac.yml b/.github/workflows/ci-gh-mac.yml
new file mode 100644
index 0000000000..1b355840b3
--- /dev/null
+++ b/.github/workflows/ci-gh-mac.yml
@@ -0,0 +1,29 @@
+---
+name: Mac build and test
+
+concurrency:
+  group: mac-ci-build-and-test-on-${{ github.event_name }}-from-${{ github.ref_name }}
+  cancel-in-progress: true
+
+on:
+  # Disabled until the CI scripts are in working order again.
+  # schedule:
+  #   # Runs at 08:00 UTC every day
+  #   - cron: '0 8 * * *'
+  workflow_dispatch:
+
+jobs:
+  build-and-test:
+    name: Build and test (mac, cpu, release, ucx)
+    uses:
+      ./.github/workflows/gh-build-and-test.yml
+    with:
+      platform: mac
+      target-device: cpu
+      build-type: ci
+      build-mode: release
+      upload-enabled: false
+      network: ucx
+      refname: ${{ github.ref_name }}
+      default-branch: ${{ github.event.repository.default_branch }}
+    secrets: inherit
diff --git a/.github/workflows/ci-gh-mpi.yml b/.github/workflows/ci-gh-mpi.yml
new file mode 100644
index 0000000000..3892116593
--- /dev/null
+++ b/.github/workflows/ci-gh-mpi.yml
@@ -0,0 +1,95 @@
+---
+name: Build MPI Wrapper Package
+
+on:
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash --noprofile --norc -euo pipefail {0}
+jobs:
+  Build_MPI_Wrapper:
+    name: Build MPI wrapper
+    strategy:
+      fail-fast: false
+    runs-on: linux-amd64-gpu-l4-latest-1
+    container:
+      options: -u root
+      image: condaforge/miniforge3:latest
+    env:
+      CONDA_ROOT: "/tmp/conda-croot/mpi-wrapper"
+      CONDA_OUTPUT: "/tmp/mpi/output"
+      ARTIFACT_SERVER: "https://urm.nvidia.com/artifactory"
+      ARTIFACT_REPOS: "sw-legate-conda-local"
+      TARGET_PLATFORM: "noarch"
+      PKG_DIR: "gex/noarch"
+
+    steps:
+      - name: Set environment variables
+        run: |
+          BUILD_DATE="$(date +%Y%m%d)"
+          {
+            echo "ARTIFACT_NAME=MPI-Wrapper-Artifact-${GITHUB_SHA}"
+            echo "ARTIFACT_PATH=${CONDA_OUTPUT}"
+            echo "BUILD_DATE=${BUILD_DATE}"
+          } >> "${GITHUB_ENV}"

+      - name: Display environment variables
+        run: |
+          uname -a
+          echo "CONDA_ROOT=${CONDA_ROOT}"
+          echo "CONDA_OUTPUT=${CONDA_OUTPUT}"
+          echo "ARTIFACT_NAME=${ARTIFACT_NAME}"
+          echo "ARTIFACT_PATH=${ARTIFACT_PATH}"
+          echo "BUILD_DATE=${BUILD_DATE}"

+      - name: Checkout ${{ github.event.repository.name }} (= this repo)
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 1

+      - name: Cleanup/Create related folders
+        run: |
+          rm -rf "${CONDA_ROOT}" "${CONDA_OUTPUT}"
+          mkdir -p "${CONDA_ROOT}" "${CONDA_OUTPUT}"

+      - name: Run conda build
+        run: |
+          conda_build_args=()
+          conda_build_args+=(--croot "${CONDA_ROOT}")
+          conda_build_args+=(--output-folder "${CONDA_OUTPUT}")

+          mamba install conda-build
+          conda-build "${conda_build_args[@]}" conda/mpi_wrapper

+      - name: Display output files
+        run: |
+          ls -lAhR "${CONDA_OUTPUT}"

+      - name: Upload MPI wrapper artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ env.ARTIFACT_NAME }}
+          path: ${{ env.ARTIFACT_PATH }}

+      - name: Install pre-requisites to upload package
+        run: |
+          echo "Install pre-requisites"
+          apt-get update
+          apt-get install -y curl jq

+      - name: Upload Package to URM server
+        if: ${{ false }}
+        run: |
+          echo "Upload MPI wrapper package"
+          find "${ARTIFACT_PATH}/${TARGET_PLATFORM}/." 
-name "realm-gex-wrapper*.tar.bz2" | while read -r f; do + fname="$(basename "${f}")" + packageFound=$(curl -usvc-legate-github:${{ secrets.URM_ARTIFACT_TOKEN }} -X POST -H "content-type: text/plain" "${ARTIFACT_SERVER}/api/search/aql" -d 'items.find({"name":"'"${fname}"'"})' | jq -r .results[].name) + + if [[ -z ${packageFound} ]]; then + echo "Uploading the package: ${fname}" + curl -usvc-legate-github:${{ secrets.URM_ARTIFACT_TOKEN }} -T "${f}" "${ARTIFACT_SERVER}/${ARTIFACT_REPOS}/${PKG_DIR}/${fname};buildDate=${BUILD_DATE};sha=${{ github.sha }}" + else + echo "The package: ${fname} already exists on the server..skipping upload" + fi + done diff --git a/.github/workflows/ci-gh-nightly-release.yml b/.github/workflows/ci-gh-nightly-release.yml new file mode 100644 index 0000000000..9153348214 --- /dev/null +++ b/.github/workflows/ci-gh-nightly-release.yml @@ -0,0 +1,106 @@ +--- +name: Build conda Nightly release and legate profiler packages + +on: + workflow_dispatch: + schedule: + - cron: '0 22 * * *' # Nightly at 10:00 PM + +permissions: + id-token: write + contents: write + +jobs: + build-and-test: + strategy: + fail-fast: false + matrix: + platform: + - linux + - linux-aarch64 + target-device: + - gpu + - cpu + build-mode: + - release + - release-debug + network: + - ucx + - gex + python-version: + - "3.10" + - "3.11" + - "3.12" + upload-enabled: + - true + - false + uses: + ./.github/workflows/gh-build-and-test.yml + with: + platform: ${{ matrix.platform }} + target-device: ${{ matrix.target-device }} + build-mode: ${{ matrix.build-mode }} + build-type: nightly + upload-enabled: ${{ matrix.upload-enabled }} + upload-action: ${{ (matrix.build-mode == 'release' && 'upload-package-Anaconda' || 'upload-package-None') }} + python-version: ${{ matrix.python-version }} + network: ${{ matrix.network }} + refname: ${{ github.ref_name }} + default-branch: ${{ github.event.repository.default_branch }} + secrets: inherit + + build-legate-profiler: + strategy: + fail-fast: false + matrix: + platform: + - linux + - linux-aarch64 + target-device: + - cpu + upload-enabled: + - true + python-version: + - "3.12" + uses: + ./.github/workflows/gh-legate-profiler.yml + with: + platform: ${{ matrix.platform }} + build-type: profiler + python-version: ${{ matrix.python-version }} + target-device: ${{ matrix.target-device }} + upload-enabled: ${{ matrix.upload-enabled }} + network: "ucx" + refname: ${{ github.ref_name }} + default-branch: ${{ github.event.repository.default_branch }} + secrets: inherit + + build-nightly-docs: + name: Build Nightly documentation (${{ matrix.platform }}, ${{ matrix.target-device }}, ${{ matrix.build-mode }}, ucx enabled) + strategy: + fail-fast: false + matrix: + platform: + - linux + target-device: + - gpu + build-mode: + - release + uses: + ./.github/workflows/gh-build-docs.yml + with: + platform: ${{ matrix.platform }} + target-device: ${{ matrix.target-device }} + build-mode: ${{ matrix.build-mode }} + upload-docs-to-gh-pages: true + secrets: inherit + + push_code: + name: Nightly source release + uses: + nv-legate/legate-gh-ci/.github/workflows/gh-push-code.yml@nightly_push_to_external_repo + with: + runs-on: linux-amd64-cpu4 + source-repo: "${{ github.repository_owner }}/legate.internal" + dest-repo: "${{ github.repository_owner }}/legate" + secrets: inherit diff --git a/.github/workflows/ci-gh.yml b/.github/workflows/ci-gh.yml index 40ce4021ae..252a886841 100644 --- a/.github/workflows/ci-gh.yml +++ b/.github/workflows/ci-gh.yml @@ -1,7 +1,8 @@ -name: Build and test 
on GH +--- +name: Build and Test concurrency: - group: ci-build-and-test-on-${{ github.event_name }}-from-${{ github.ref_name }} + group: ${{ startsWith(github.ref_name, 'main') && format('unique-{0}', github.run_id) || format('ci-build-and-test-on-{0}-from-{1}', github.event_name, github.ref_name) }} cancel-in-progress: true on: @@ -9,27 +10,45 @@ on: branches: - "pull-request/[0-9]+" - "branch-*" + - "main" + merge_group: jobs: build-and-test: strategy: fail-fast: false matrix: - include: - - device: gpu - build-runner: ${{ contains(github.repository, 'nv-legate/legate.core') && 'linux-amd64-cpu32' || 'ubuntu-latest' }} - platform: linux - - - device: cpu - build-runner: ${{ contains(github.repository, 'nv-legate/legate.core') && 'linux-amd64-cpu32' || 'ubuntu-latest' }} - platform: linux - - - device: cpu - build-runner: macos-latest - platform: osx + platform: + - linux + - linux-aarch64 + target-device: + - gpu + - cpu + build-mode: + - debug + - debug-sanitizer + - release + - release-debug uses: ./.github/workflows/gh-build-and-test.yml with: - device: ${{ matrix.device }} - build-runner: ${{ matrix.build-runner }} + build-mode: ${{ matrix.build-mode }} + build-type: ci + network: ucx platform: ${{ matrix.platform }} + python-version: "3.10" + target-device: ${{ matrix.target-device }} + upload-enabled: false + refname: ${{ github.ref_name }} + default-branch: ${{ github.event.repository.default_branch }} + secrets: inherit + + tests-pass: + if: always() + needs: + - build-and-test + runs-on: linux-amd64-cpu4 + steps: + - name: Check job results + if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') + run: exit 1 diff --git a/.github/workflows/gh-build-and-test.yml b/.github/workflows/gh-build-and-test.yml index c706c208e0..001630410a 100644 --- a/.github/workflows/gh-build-and-test.yml +++ b/.github/workflows/gh-build-and-test.yml @@ -1,118 +1,196 @@ +--- on: workflow_call: inputs: - device: + build-mode: + required: true type: string + build-type: required: true - build-runner: type: string + network: required: true + type: string platform: + required: true + type: string + python-version: + required: false + type: string + target-device: + required: true + type: string + upload-enabled: + required: true + type: boolean + upload-action: + required: false + default: 'upload-package-None' + type: string + refname: + required: true type: string + default-branch: required: true + type: string +defaults: + run: + shell: bash --noprofile --norc -xeuo pipefail {0} jobs: build: - strategy: - fail-fast: false - matrix: - ucx-config: [ucx, no_ucx] - os-platform: [ "${{ inputs.platform }}" ] - exclude: - - os-platform: "osx" - ucx-config: ucx if: ${{ github.repository_owner == 'nv-legate' }} uses: - ./.github/workflows/gh-build.yml + nv-legate/legate-gh-ci/.github/workflows/gh-build.yml@v1.29 with: - build-target: ${{ inputs.device }} - repos-name: ${{ github.event.repository.name }} - runs-on: ${{ inputs.build-runner }} - sha: ${{ github.sha }} - build-type: ci - use-container: ${{ inputs.platform == 'linux' }} - ucx-config: ${{ matrix.ucx-config }} - ucx-string: ${{ (matrix.ucx-config == 'ucx' && '-ucx') || '' }} - - test: + build-has-tests: ${{ inputs.upload-enabled == false }} + client-repo: ${{ github.event.repository.name }} + target-device: ${{ inputs.target-device }} + runs-on: ${{ (inputs.platform == 'linux' && 'linux-amd64-cpu16') || (inputs.platform == 'linux-aarch64' && 'linux-arm64-cpu16') || (inputs.platform == 'mac' && 'macos-latest') }} + 
build-type: ${{ inputs.build-type }} + use-container: ${{ inputs.platform == 'linux' || inputs.platform == 'linux-aarch64'}} + platform: ${{ inputs.platform }} + dependencies-file: "" + legate-gh-ci-tag: "v1.29" + build-mode: ${{ inputs.build-mode }} + upload-enabled: ${{ inputs.upload-enabled }} + python-version: ${{ inputs.python-version }} + network: ${{ inputs.network }} + secrets: inherit + + setup-test: + name: Setup test + # Don't run GPU tests on linux-aarch64. We don't have enough test machines for this. + if: inputs.upload-enabled == false && (inputs.build-type == 'nightly' || inputs.target-device != 'gpu' || inputs.platform != 'linux-aarch64') needs: - build + runs-on: linux-amd64-cpu4 + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - id: set-matrix + run: | + MATRIX_JSON='{"include": [' + RUNNERS=( + 'linux-amd64-gpu-l4-latest-1:gpu:gpu:linux' + 'linux-amd64-2gpu:gpu:2gpu:linux' + 'linux-amd64-cpu8:cpu:cpu:linux' + 'linux-arm64-cpu16:cpu:cpu:linux-aarch64' + 'linux-aarch64-2gpu:gpu:2gpu:linux-aarch64' + 'macos-latest:cpu:cpu:mac' + ) + TEST_CONFIGS=( + 'Python Unit Tests:python' + 'C++ Unit Tests:cpp' + ) + + for RUNNER in "${RUNNERS[@]}"; do + IFS=':' read -ra RUNNER_INFO <<< "${RUNNER}" + RUNNER_NAME=${RUNNER_INFO[0]} + RUNNER_TYPE=${RUNNER_INFO[1]} + RUNNER_DEVICE=${RUNNER_INFO[2]} + RUNNER_PLATFORM=${RUNNER_INFO[3]} + + if [[ "${RUNNER_TYPE}" == "${{ inputs.target-device }}" && "${RUNNER_PLATFORM}" == "${{ inputs.platform }}" ]]; then + for TEST_CONFIG in "${TEST_CONFIGS[@]}"; do + IFS=':' read -ra CONFIG_INFO <<< "${TEST_CONFIG}" + CONFIG_NAME=${CONFIG_INFO[0]} + CONFIG_SCOPE=${CONFIG_INFO[1]} + + MATRIX_JSON+="{\"runner\": {\"name\": \"${RUNNER_NAME}\", \"type\": \"${RUNNER_TYPE}\", \"device\": \"${RUNNER_DEVICE}\", \"platform\": \"${RUNNER_PLATFORM}\"}, \"test-config\": {\"name\": \"${CONFIG_NAME}\", \"test-scope\": \"${CONFIG_SCOPE} ${RUNNER_DEVICE}\"}}," + done + fi + done + + MATRIX_JSON=${MATRIX_JSON%,} # Remove the trailing comma + MATRIX_JSON+=']}' + echo "matrix=${MATRIX_JSON}" >> "${GITHUB_OUTPUT}" + + test-within-container: + if: github.repository_owner == 'nv-legate' && (inputs.platform == 'linux' || inputs.platform == 'linux-aarch64') + needs: + - setup-test + + name: ${{ matrix.test-config.name }} (${{ inputs.platform }}, ${{ inputs.target-device }}, ${{ inputs.build-mode }}, ${{ inputs.network }}) + + strategy: + fail-fast: false + matrix: ${{fromJson(needs.setup-test.outputs.matrix)}} + + uses: + nv-legate/legate-gh-ci/.github/workflows/gh-test-within-container.yml@v1.29 + with: + build-has-tests: ${{ inputs.upload-enabled == false }} + client-repo: ${{ github.event.repository.name }} + build-type: ${{ inputs.build-type }} + name: "${{ matrix.test-config.name }} (${{ matrix.runner.device }})" + target-device: ${{ inputs.target-device }} + runs-on: ${{ matrix.runner.name }} + has-gpu: ${{ matrix.runner.type == 'gpu' }} + test-options: ${{ matrix.test-config.test-scope }} + platform: ${{ matrix.runner.platform }} + legate-gh-ci-tag: "v1.29" + build-mode: ${{ inputs.build-mode }} + upload-enabled: ${{ inputs.upload-enabled }} + python-version: ${{ inputs.python-version }} + network: ${{ inputs.network }} + secrets: inherit + + + test-without-container: + if: github.repository_owner == 'nv-legate' && (inputs.platform != 'linux' && inputs.platform != 'linux-aarch64') + needs: + - setup-test + + name: ${{ matrix.test-config.name }} (${{ inputs.platform }}, ${{ inputs.target-device }}, ${{ inputs.build-mode }}, ucx enabled) + strategy: 
fail-fast: false - matrix: - include: - - name: Pytest Unit Tests - test-scope: unit - runner: linux-amd64-gpu-v100-latest-1 - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'gpu' }} - ucx-config: no_ucx - - - name: Pytest Unit Tests - test-scope: unit - runner: linux-amd64-gpu-v100-latest-1 - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'gpu' }} - ucx-config: ucx - - - name: Pytest Unit Tests (OS X) - test-scope: unit - runner: macos-latest - enabled: ${{ inputs.platform == 'osx' }} - ucx-config: no_ucx - - - name: Pytest Unit Tests - test-scope: unit - runner: linux-amd64-2gpu - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'gpu' }} - ucx-config: no_ucx - - - name: Pytest Unit Tests - test-scope: unit - runner: linux-amd64-2gpu - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'gpu' }} - ucx-config: ucx - - - name: Pytest Unit Tests - test-scope: unit - runner: linux-amd64-cpu4 - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'cpu' }} - ucx-config: no_ucx - - - name: Pytest Unit Tests - test-scope: unit - runner: linux-amd64-cpu4 - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'cpu' }} - ucx-config: ucx - - - name: mypy - test-scope: mypy - runner: linux-amd64-cpu4 - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'cpu' }} - ucx-config: no_ucx - - - name: mypy - test-scope: mypy - runner: linux-amd64-cpu4 - enabled: ${{ inputs.platform == 'linux' && inputs.device == 'cpu' }} - ucx-config: ucx - - - name: mypy (OS X) - test-scope: mypy - runner: macos-latest - enabled: ${{ inputs.platform == 'osx' }} - ucx-config: no_ucx - - name: ${{ matrix.name }} - if: github.repository_owner == 'nv-legate' + matrix: ${{fromJson(needs.setup-test.outputs.matrix)}} + + uses: + nv-legate/legate-gh-ci/.github/workflows/gh-test-without-container.yml@v1.29 + with: + build-has-tests: ${{ inputs.upload-enabled == false }} + client-repo: ${{ github.event.repository.name }} + build-type: ${{ inputs.build-type }} + name: ${{ matrix.test-config.name }} + target-device: ${{ inputs.target-device }} + runs-on: ${{ matrix.runner.name }} + has-gpu: ${{ matrix.runner.type == 'gpu' }} + test-options: ${{ matrix.test-config.test-scope }} + platform: ${{ matrix.runner.platform }} + legate-gh-ci-tag: "v1.29" + build-mode: ${{ inputs.build-mode }} + upload-enabled: ${{ inputs.upload-enabled }} + python-version: ${{ inputs.python-version }} + network: ${{ inputs.network }} + secrets: inherit + + upload: + needs: test-within-container + # The use of always() below, ensures the step will run even on failure of the tests + # before if other conditionals are all true + if: ${{ always() && github.repository_owner == 'nv-legate' && inputs.build-type == 'nightly' && inputs.upload-enabled == false }} + name: Upload package to Server uses: - ./.github/workflows/gh-test.yml + nv-legate/legate-gh-ci/.github/workflows/gh-upload.yml@v1.30 with: - build-target: ${{ inputs.device }} - repos-name: ${{ github.event.repository.name }} - runs-on: ${{ matrix.runner }} - sha: ${{ github.sha }} - test-scope: ${{ matrix.test-scope }} - use-container: ${{ inputs.platform == 'linux' }} - enabled: ${{ matrix.enabled }} - ucx-config: ${{ matrix.ucx-config }} - ucx-string: ${{ (matrix.ucx-config == 'ucx' && '-ucx') || '' }} \ No newline at end of file + build-has-tests: false + client-repo: ${{ github.event.repository.name }} + build-type: ${{ inputs.build-type }} + name: Upload to Anaconda + target-device: ${{ inputs.target-device }} + platform: ${{ 
inputs.platform }} + legate-gh-ci-tag: "v1.30" + build-mode: ${{ inputs.build-mode }} + upload-enabled: true + network: ${{ inputs.network }} + upload-action: ${{ needs.test-within-container.result == 'failure' && 'upload-package-Anaconda' || 'upload-package-Anaconda-tested' }} + pkgSubString: "legate" + repos-Root: "legate" + python-version: ${{ inputs.python-version }} + refname: ${{ inputs.refname }} + default-branch: ${{ inputs.default-branch }} + secrets: inherit diff --git a/.github/workflows/gh-build-docs.yml b/.github/workflows/gh-build-docs.yml new file mode 100644 index 0000000000..e9ed18ec70 --- /dev/null +++ b/.github/workflows/gh-build-docs.yml @@ -0,0 +1,87 @@ +--- +on: + workflow_call: + inputs: + platform: + type: string + required: true + target-device: + type: string + required: true + build-mode: + type: string + required: true + upload-docs-to-gh-pages: + type: boolean + required: false + default: false + +jobs: + build-docs: + if: ${{ github.repository_owner == 'nv-legate' }} + uses: + nv-legate/legate-gh-ci/.github/workflows/gh-build.yml@v1.29 + with: + build-has-tests: false + client-repo: ${{ github.event.repository.name }} + target-device: ${{ inputs.target-device }} + runs-on: ${{ (inputs.platform == 'linux' && 'linux-amd64-cpu16') || (inputs.platform == 'mac' && 'macos-latest') }} + build-type: docs + use-container: ${{ inputs.platform == 'linux' }} + platform: ${{ inputs.platform }} + dependencies-file: "" + legate-gh-ci-tag: "v1.29" + build-mode: ${{ inputs.build-mode }} + upload-enabled: false + network: "ucx" + secrets: inherit + + upload-docs-to-gh-pages: + if: ${{ inputs.upload-docs-to-gh-pages && github.ref_name == 'main' }} + needs: build-docs + runs-on: ubuntu-latest + steps: + - name: Dump build-docs output variables + shell: bash --noprofile --norc -xeuo pipefail {0} + run: | + echo "${{ needs.build-docs.outputs.artifact-name }}" + echo "${{ needs.build-docs.outputs.artifacts-dir }}" + + - name: Download build artifacts + uses: actions/download-artifact@v4 + with: + name: ${{ needs.build-docs.outputs.artifact-name }} + path: ${{ needs.build-docs.outputs.artifacts-dir }} + + - name: Display structure of downloaded artifacts + shell: bash --noprofile --norc -xeuo pipefail {0} + run: | + pwd + ls -lahR ${{ needs.build-docs.outputs.artifacts-dir }} + + - name: Find index.html's parent folder + shell: bash --noprofile --norc -xeuo pipefail {0} + id: find_docs_dir + run: | + FILE_PATH="$( + find "${{ needs.build-docs.outputs.artifacts-dir }}" -name "index.html" -printf '%d %p\n' \ + | sort -nk1 \ + | cut -d' ' -f2- \ + | head -n 1 + )" + if [[ -z "${FILE_PATH}" ]]; then + echo "index.html not found" >&2 + exit 1 + fi + PARENT_DIR=$(dirname "${FILE_PATH}") + echo "docs_dir=${PARENT_DIR}" >> "${GITHUB_OUTPUT}" + + - name: Checkout + uses: actions/checkout@v4 + + - name: Deploy + uses: JamesIves/github-pages-deploy-action@v4 + with: + folder: ${{ steps.find_docs_dir.outputs.docs_dir }} + token: ${{ secrets.NV_LEGATE_INTER_REPOS_ACCESS }} + repository-name: "nv-legate/legate" diff --git a/.github/workflows/gh-build.yml b/.github/workflows/gh-build.yml deleted file mode 100644 index 6ce5e672ae..0000000000 --- a/.github/workflows/gh-build.yml +++ /dev/null @@ -1,100 +0,0 @@ -name: Build legate.core on GH - -on: - workflow_call: - inputs: - build-target: - required: true - type: string - description: One of CPU / GPU - repos-name: - required: true - type: string - runs-on: - required: true - type: string - sha: - required: true - type: string - description: A 
unique identifier for labeling the images / artifacts - build-type: - required: true - type: string - description: One of ci / release - use-container: - required: true - type: boolean - ucx-config: - required: true - type: string - ucx-string: - required: true - type: string - -env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - USE_CUDA: ${{ (inputs.build-target == 'cpu' && 'OFF') || 'ON' }} - ARTIFACTS_DIR: "${{ github.workspace }}/.artifacts" - ARTIFACT_NAME: "${{ inputs.repos-name }}-${{ inputs.build-target }}${{ inputs.ucx-string }}-${{ inputs.sha }}" - UCX_ENABLED: ${{ (inputs.ucx-config == 'ucx' && 'ON') || 'OFF' }} -jobs: - build: - name: build-${{ inputs.build-target }}${{ inputs.ucx-string }}-sub-workflow - - permissions: - id-token: write # This is required for configure-aws-credentials - contents: read # This is required for actions/checkout - - runs-on: ${{ inputs.runs-on }} - - defaults: - run: - shell: bash - - steps: - - name: Checkout ${{ inputs.repos-name }} (= this repo) - uses: actions/checkout@v3 - with: - fetch-depth: 0 - persist-credentials: false - - - if: github.repository_owner == 'nv-legate' - name: Get AWS credentials for sccache bucket - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-region: us-east-2 - role-duration-seconds: 28800 # 8 hours - role-to-assume: arn:aws:iam::279114543810:role/gha-oidc-nv-legate - - - if: ${{ inputs.use-container }} - name: Build legate.core (in container) - run: | - set -xeuo pipefail - - docker run \ - -e AWS_REGION \ - -e AWS_SESSION_TOKEN \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -e GITHUB_TOKEN \ - -e USE_CUDA \ - -e UCX_ENABLED \ - -v "$(pwd):$(pwd)" \ - -v "$ARTIFACTS_DIR:$(pwd)/.artifacts" \ - --rm "condaforge/miniforge3:latest" \ - /bin/bash -c "cd $(pwd) && continuous_integration/scripts/build-legate ${{ inputs.build-type}}" - - - if: ${{ !inputs.use-container }} - name: Build legate.core (without container) - run: | - set -xeuo pipefail - continuous_integration/scripts/build-legate ${{ inputs.build-type}} - - - name: Display structure of the artifacts folder - run: ls -aR ${{ env.ARTIFACTS_DIR }} - - - name: Upload build artifacts - uses: actions/upload-artifact@v3 - with: - name: ${{ env.ARTIFACT_NAME }} - path: ${{ env.ARTIFACTS_DIR }} diff --git a/.github/workflows/gh-legate-profiler.yml b/.github/workflows/gh-legate-profiler.yml new file mode 100644 index 0000000000..7ca71aba5f --- /dev/null +++ b/.github/workflows/gh-legate-profiler.yml @@ -0,0 +1,73 @@ +--- +on: + workflow_call: + inputs: + platform: + required: true + type: string + build-type: + type: string + required: true + python-version: + type: string + required: true + target-device: + type: string + required: true + upload-enabled: + type: boolean + required: true + network: + type: string + required: true + refname: + required: true + type: string + default-branch: + required: true + type: string + +jobs: + Build_Profiler: + if: ${{ github.repository_owner == 'nv-legate' }} + uses: + nv-legate/legate-gh-ci/.github/workflows/gh-build.yml@v1.27 + with: + client-repo: ${{ github.event.repository.name }} + target-device: ${{ inputs.target-device }} + runs-on: ${{ (inputs.platform == 'linux' && 'linux-amd64-cpu16') || (inputs.platform == 'linux-aarch64' && 'linux-arm64-cpu16') || (inputs.platform == 'mac' && 'macos-latest') }} + build-has-tests: ${{ inputs.upload-enabled == false }} + build-type: ${{ inputs.build-type }} + use-container: ${{ inputs.platform == 'linux' || inputs.platform == 'linux-aarch64'}} + platform: ${{ 
inputs.platform }} + dependencies-file: "" + legate-gh-ci-tag: "v1.27" + build-mode: "" + upload-enabled: ${{ inputs.upload-enabled }} + python-version: ${{ inputs.python-version }} + network: ${{ inputs.network }} + secrets: inherit + + Upload_Profiler: + needs: Build_Profiler + name: Upload Legate Profiler + uses: + nv-legate/legate-gh-ci/.github/workflows/gh-upload.yml@v1.27 + with: + build-mode: "" + build-type: ${{ inputs.build-type }} + client-repo: ${{ github.event.repository.name }} + build-has-tests: ${{ inputs.upload-enabled == false }} + legate-gh-ci-tag: "v1.27" + name: Upload package to Anaconda + network: ${{ inputs.network }} + pkgSubString: "legate-profiler" + platform: ${{ inputs.platform }} + python-version: ${{ inputs.python-version }} + repos-Root: "legate-profiler" + target-device: ${{ inputs.target-device }} + upload-action: "upload-package-Anaconda" + upload-enabled: ${{ inputs.upload-enabled }} + refname: ${{ inputs.refname }} + default-branch: ${{ inputs.default-branch }} + secrets: inherit diff --git a/.github/workflows/gh-test.yml b/.github/workflows/gh-test.yml deleted file mode 100644 index 125a68c7ca..0000000000 --- a/.github/workflows/gh-test.yml +++ /dev/null @@ -1,107 +0,0 @@ -name: Test legate.core on GH - -on: - workflow_call: - inputs: - build-target: - required: true - type: string - repos-name: - required: true - type: string - runs-on: - required: true - type: string - sha: - required: true - type: string - test-scope: - required: true - type: string - use-container: - required: true - type: boolean - enabled: - required: true - type: boolean - ucx-config: - required: true - type: string - ucx-string: - required: true - type: string - -env: - ARTIFACT_NAME: "${{ inputs.repos-name }}-${{ inputs.build-target }}${{ inputs.ucx-string }}-${{ inputs.sha }}" - ARTIFACTS_DIR: "${{ github.workspace }}/.artifacts" - -jobs: - test-in-container: - if: github.repository_owner == 'nv-legate' && inputs.use-container && inputs.enabled - name: test-${{ inputs.build-target }}${{ inputs.ucx-string }}-sub-workflow - runs-on: ${{ inputs.runs-on }} - - defaults: - run: - shell: bash - - container: - options: -u root - image: condaforge/miniforge3:latest - env: - PYTHONDONTWRITEBYTECODE: 1 - NVIDIA_VISIBLE_DEVICES: ${{ env.NVIDIA_VISIBLE_DEVICES }} - volumes: - - "${{ github.workspace }}/.artifacts:${{ github.workspace }}/.artifacts" - steps: - - if: inputs.build-target == 'gpu' - name: Run nvidia-smi to make sure GPU is working - run: nvidia-smi - - - name: Checkout repo - uses: actions/checkout@v3 - with: - fetch-depth: 0 - persist-credentials: false - - - name: Download build artifacts - uses: actions/download-artifact@v3 - with: - name: ${{ env.ARTIFACT_NAME }} - path: ${{ env.ARTIFACTS_DIR }} - - - name: Run ${{ inputs.repos-name }} test / analysis - run: | - set -xeuo pipefail - continuous_integration/scripts/run-test-or-analysis ${{ inputs.test-scope }} - - test: - if: github.repository_owner == 'nv-legate' && !inputs.use-container && inputs.enabled - name: test-${{ inputs.build-target }}-sub-workflow - runs-on: ${{ inputs.runs-on }} - - defaults: - run: - shell: bash - - steps: - - if: inputs.build-target == 'gpu' - name: Run nvidia-smi to make sure GPU is working - run: nvidia-smi - - - name: Checkout repo - uses: actions/checkout@v3 - with: - fetch-depth: 0 - persist-credentials: false - - - name: Download build artifacts - uses: actions/download-artifact@v3 - with: - name: ${{ env.ARTIFACT_NAME }} - path: ${{ env.ARTIFACTS_DIR }} - - - name: Run ${{ 
inputs.repos-name }} test / analysis - run: | - set -xeuo pipefail - continuous_integration/scripts/run-test-or-analysis ${{ inputs.test-scope }} diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 0000000000..2c429172be --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,38 @@ +name: pr + +on: + push: + branches: + - "pull-request/[0-9]+" + - "branch-*" + - "main" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash + +jobs: + wheels-build: + secrets: inherit + uses: ./.github/workflows/wheels-build.yml + with: + build-type: pull-request + wheels-test: + needs: wheels-build + secrets: inherit + uses: ./.github/workflows/wheels-test.yml + with: + build-type: pull-request + wheels-pass: + if: always() + needs: + - wheels-test + runs-on: linux-amd64-cpu4 + steps: + - name: Check job results + if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') + run: exit 1 diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml index 6f1f751d23..56ac0c34b5 100644 --- a/.github/workflows/require-labels.yml +++ b/.github/workflows/require-labels.yml @@ -1,14 +1,21 @@ +--- name: Pull Request Labels + +concurrency: + group: label-check-${{ github.event_name }}-from-${{ github.ref_name }} + cancel-in-progress: true + on: pull_request: types: [opened, labeled, unlabeled, synchronize] + merge_group: jobs: label: runs-on: ubuntu-latest steps: - name: Check Labels - uses: mheap/github-action-required-labels@v3 + uses: mheap/github-action-required-labels@v5 with: mode: exactly count: 1 - labels: "category:new-feature, category:improvement, category:bug-fix, category:task, category:documentation" \ No newline at end of file + labels: "category:new-feature, category:improvement, category:bug-fix, category:housekeeping, category:documentation" diff --git a/.github/workflows/wheels-build.yml b/.github/workflows/wheels-build.yml new file mode 100644 index 0000000000..5f98650b48 --- /dev/null +++ b/.github/workflows/wheels-build.yml @@ -0,0 +1,125 @@ +on: + workflow_call: + inputs: + build-type: + required: true + type: string + branch: + type: string + date: + type: string + sha: + type: string + repo: + type: string + node_type: + type: string + default: "cpu16" + cuda_ver: + type: string + default: "12.5.1" + linux_ver: + type: string + default: "rockylinux8" + script: + type: string + default: "continuous_integration/scripts/build_wheel_linux.bash" + matrix_filter: + type: string + default: "." 
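+      # Note (illustrative, not part of the original workflow): `matrix_filter` is a
+      # jq expression applied to the list of matrix entries before the compute-matrix
+      # job below wraps them in `{include: ...}`. For example, a hypothetical caller
+      # could restrict the default matrix to the amd64 rows by passing:
+      #
+      #   matrix_filter: 'map(select(.ARCH == "amd64"))'
+      #
+      # which yields {"include": [ ...amd64 entries only... ]} once the job pipes the
+      # matrix through `yq -n -o json 'env(MATRIX)' | jq -c '<matrix_filter> | ...'`.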
+ +defaults: + run: + shell: bash -eou pipefail {0} + +permissions: + actions: read + checks: none + contents: read + deployments: none + discussions: none + id-token: write + issues: none + packages: read + pages: none + pull-requests: read + repository-projects: none + security-events: none + statuses: none + +jobs: + compute-matrix: + runs-on: linux-amd64-cpu4 + outputs: + MATRIX: ${{ steps.compute-matrix.outputs.MATRIX }} + steps: + - name: Compute Build Matrix + id: compute-matrix + run: | + set -eo pipefail + + # please keep the matrices sorted in ascending order by the following: + # + # [ARCH, PY_VER, CUDA_VER, LINUX_VER] + # + export MATRIX=" + # amd64 + - { ARCH: 'amd64', PY_VER: '3.10', TARGET_DEV: 'gpu', BUILD_MODE: 'release' } + - { ARCH: 'amd64', PY_VER: '3.11', TARGET_DEV: 'gpu', BUILD_MODE: 'release' } + - { ARCH: 'amd64', PY_VER: '3.12', TARGET_DEV: 'gpu', BUILD_MODE: 'release' } + # arm64 + - { ARCH: 'arm64', PY_VER: '3.10', TARGET_DEV: 'gpu', BUILD_MODE: 'release' } + - { ARCH: 'arm64', PY_VER: '3.11', TARGET_DEV: 'gpu', BUILD_MODE: 'release' } + - { ARCH: 'arm64', PY_VER: '3.12', TARGET_DEV: 'gpu', BUILD_MODE: 'release' } + " + + MATRIX="$( + yq -n -o json 'env(MATRIX)' | \ + jq -c '${{ inputs.matrix_filter }} | if (. | length) > 0 then {include: .} else "Error: Empty matrix\n" | halt_error(1) end' + )" + + echo "MATRIX=${MATRIX}" | tee --append "${GITHUB_OUTPUT}" + build: + name: ${{ matrix.ARCH }}, py${{ matrix.PY_VER }}, ${{ matrix.TARGET_DEV }}, ${{ matrix.BUILD_MODE }} + needs: compute-matrix + timeout-minutes: 480 + strategy: + fail-fast: false + matrix: ${{ fromJSON(needs.compute-matrix.outputs.MATRIX) }} + runs-on: "linux-${{ matrix.ARCH }}-${{ inputs.node_type }}" + container: + image: rapidsai/ci-wheel:cuda${{ inputs.cuda_ver }}-${{ inputs.linux_ver }}-py${{ matrix.PY_VER }} + steps: + - name: Get the SHA + id: get-sha + run: | + sha="$(echo ${{github.sha}} | head -c 10)" + echo "sha=${sha}" >> "${GITHUB_OUTPUT}" + - if: github.repository_owner == 'nv-legate' + name: Get AWS credentials for sccache bucket + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: us-east-2 + role-duration-seconds: 28800 # 8 hours + role-to-assume: arn:aws:iam::279114543810:role/gha-oidc-nv-legate + - uses: actions/checkout@v4 + with: + repository: ${{ inputs.repo }} + ref: ${{ inputs.sha }} + fetch-depth: 0 + - name: Add default paths to the env + run: | + # The || true is to satisfy the linting check, seems less readable to me. 
+ echo "$(pwd)/continuous_integration/scripts/tools" >> "${GITHUB_PATH}" || true + - name: C++ wheel build + run: ${{ inputs.script }} + env: + STEP_NAME: "C++ build" + GH_TOKEN: ${{ github.token }} + - name: C++ wheel upload + env: + BUILD_NAME: ${{ matrix.ARCH }}-${{ matrix.TARGET_DEV }}-cuda${{ inputs.cuda_ver }}-py${{ matrix.PY_VER }} + uses: actions/upload-artifact@v4 + with: + name: legate-wheel-${{ env.BUILD_NAME }} + path: final-dist/*.whl diff --git a/.github/workflows/wheels-test.yml b/.github/workflows/wheels-test.yml new file mode 100644 index 0000000000..6f8b74d47d --- /dev/null +++ b/.github/workflows/wheels-test.yml @@ -0,0 +1,121 @@ +on: + workflow_call: + inputs: + build-type: + required: true + type: string + branch: + type: string + date: + type: string + sha: + type: string + repo: + type: string + node_type: + type: string + default: "cpu16" + cuda_ver: + type: string + default: "12.8.0" + script: + type: string + default: "continuous_integration/scripts/test_wheel_linux.bash" + matrix_filter: + type: string + default: "." + +defaults: + run: + shell: bash -eou pipefail {0} + +permissions: + actions: read + checks: none + contents: read + deployments: none + discussions: none + id-token: write + issues: none + packages: read + pages: none + pull-requests: read + repository-projects: none + security-events: none + statuses: none + +jobs: + compute-matrix: + runs-on: linux-amd64-cpu4 + outputs: + MATRIX: ${{ steps.compute-matrix.outputs.MATRIX }} + steps: + - name: Compute Build Matrix + id: compute-matrix + run: | + set -eo pipefail + + # please keep the matrices sorted in ascending order by the following: + # + # [ARCH, PY_VER, CUDA_VER, LINUX_VER] + # + export MATRIX=" + # amd64 + - { ARCH: 'amd64', PY_VER: '3.10', TARGET_DEV: 'gpu', GPU: 'l4', LINUX_VER: 'ubuntu22.04' } + - { ARCH: 'amd64', PY_VER: '3.11', TARGET_DEV: 'gpu', GPU: 'l4', LINUX_VER: 'ubuntu22.04' } + - { ARCH: 'amd64', PY_VER: '3.12', TARGET_DEV: 'gpu', GPU: 'l4', LINUX_VER: 'ubuntu24.04' } + # arm64 + #- { ARCH: 'arm64', PY_VER: '3.10', TARGET_DEV: 'cpu', BUILD_MODE: 'release' } + #- { ARCH: 'arm64', PY_VER: '3.10', TARGET_DEV: 'gpu', GPU: 'a100', LINUX_VER: 'ubuntu24.04' } + " + + MATRIX="$( + yq -n -o json 'env(MATRIX)' | \ + jq -c '${{ inputs.matrix_filter }} | if (. | length) > 0 then {include: .} else "Error: Empty matrix\n" | halt_error(1) end' + )" + + echo "MATRIX=${MATRIX}" | tee --append "${GITHUB_OUTPUT}" + build: + name: ${{ matrix.ARCH }}, py${{ matrix.PY_VER }}, ${{ matrix.LINUX_VER }}, ${{ matrix.GPU }} + needs: compute-matrix + timeout-minutes: 60 + strategy: + fail-fast: false + matrix: ${{ fromJSON(needs.compute-matrix.outputs.MATRIX) }} + runs-on: "linux-${{ matrix.ARCH }}-gpu-${{ matrix.GPU }}-latest-1" + container: + image: rapidsai/citestwheel:cuda${{ inputs.cuda_ver }}-${{ matrix.LINUX_VER }}-py${{ matrix.PY_VER }} + env: + NVIDIA_VISIBLE_DEVICES: ${{ env.NVIDIA_VISIBLE_DEVICES }} + steps: + - name: Get the SHA + id: get-sha + run: | + sha="$(echo ${{github.sha}} | head -c 10)" + echo "sha=${sha}" >> "${GITHUB_OUTPUT}" + - uses: actions/checkout@v4 + with: + repository: ${{ inputs.repo }} + ref: ${{ inputs.sha }} + fetch-depth: 0 + - name: Add default paths to the env + run: | + # The || true is to satisfy the linting check, seems less readable to me. 
+ echo "$(pwd)/continuous_integration/scripts/tools" >> "${GITHUB_PATH}" || true + - name: Run nvidia-smi to make sure GPU is working + run: nvidia-smi + - name: Setup proxy cache + uses: nv-gha-runners/setup-proxy-cache@main + continue-on-error: true + # Skip the cache on RDS Lab nodes + if: ${{ matrix.GPU != 'v100' && matrix.GPU != 'a100' }} + - name: Download wheels from the build job + uses: actions/download-artifact@v4 + env: + BUILD_NAME: ${{ matrix.ARCH }}-${{ matrix.TARGET_DEV }}-cuda12.5.1-py${{ matrix.PY_VER }} + with: + name: legate-wheel-${{ env.BUILD_NAME }} + path: final-dist + + - name: Run tests + run: ${{ inputs.script }} diff --git a/.gitignore b/.gitignore index a5c13fcb28..fe6d3872a7 100644 --- a/.gitignore +++ b/.gitignore @@ -22,21 +22,34 @@ *.dylib install_info.py /dist -/build +/build*/ /legion /install* -/_skbuild +_skbuild/ config.mk -/docs/legate/core/build -/docs/legate/core/source/api/generated +/docs/legate/build +docs/legate/source/api/python/generated/ +docs/legate/source/generated/ *.egg-info .cache .coverage .vscode _cmake_test_compile !cmake/versions.json -legate.core.code-workspace +legate.code-workspace *.prof +examples/cpp/build +tests/cpp/build .legate-test-last-failed out/ .artifacts/ +venv/ +!cmake/presets/*.json +!CMakePresets.json +arch-* +configure.log +reconfigure-* +*-arch +*_venv +_version.py +scripts/get_legate_arch\.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a0657c48da..7d63131994 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,32 +1,186 @@ +--- repos: - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 - hooks: - - id: isort - - repo: https://github.com/psf/black - rev: 23.9.1 - hooks: - - id: black - - repo: https://github.com/PyCQA/flake8 - rev: 6.1.0 - hooks: - - id: flake8 - - repo: https://github.com/pre-commit/mirrors-clang-format - rev: 'v16.0.6' - hooks: - - id: clang-format - files: \.(cu|cuh|h|cc|inl)$ - types_or: [] - - repo: https://github.com/pre-commit/mirrors-mypy - rev: 'v1.5.1' - hooks: - - id: mypy - pass_filenames: false - args: ['legate', 'tests'] - additional_dependencies: [numpy,pytest,pytest_mock] + - repo: https://github.com/adrienverge/yamllint + rev: v1.37.0 + hooks: + - id: yamllint + types: [yaml] + args: ['-c', './scripts/pre-commit/yamllint.yml'] + exclude: 'meta\.yaml$' + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-json # checks that all json files have proper syntax + - id: check-toml # checks that all toml files have proper syntax + - id: debug-statements # check for debug statements + exclude: '^(config/aedifix/main.py|config/aedifix/templates/.*)$' # intentional + - id: end-of-file-fixer # check all files end in a newline + # handled by clang-format + exclude_types: [c, c++, cuda] + - id: pretty-format-json + args: ['--autofix', '--indent=4'] + - id: trailing-whitespace # remove trailing whitespace + # don't mess up diff files + exclude: '^src/cmake/patches/.*\.diff$' + # handled by clang-format + exclude_types: [c, c++, cuda] + # These hooks perform formatting or other related changes, and therefore should run + # before clang-format and clang-tidy do. This way, if they do make changes, we can do + # them and the formatting changes in 1 pass instead of multiple. 
+ - repo: local + hooks: + - id: angle-includes + name: angle-includes + description: "Transform '' includes to <>" + entry: ./scripts/pre-commit/angle_includes.py + language: script + types_or: [c++, c, cuda] + stages: [pre-commit] + - id: copyright + name: copyright + description: 'Check that year is correct for copyright notices' + entry: ./scripts/pre-commit/copyright.py + language: python + exclude: '.*\.(png|pdf)|^(share/legate/realm_ucp_bootstrap/.*)$' + types_or: [file, text] + - id: legate-assert + name: legate-assert + description: 'Find uses of assert() that should be using LegateAssert()' + entry: ./scripts/pre-commit/legate_assert.py + language: python + 'types_or': [c++, c, cuda] + stages: [pre-commit] + exclude: '^(src/cpp/legate/utilities/assert\.h|src/cpp/legate/utilities/abort.h)$' + - id: c-begone + name: c-begone + description: 'Convert C standard library functions and types to C++ equivalent' + entry: ./scripts/pre-commit/c_begone.py + language: python + 'types_or': [c++, c, cuda] + stages: [pre-commit] + exclude: '^(src/cpp/legate/utilities/abort.h|share/legate/mpi_wrapper/.*|share/legate/realm_ucp_bootstrap/.*)$' + - id: traced-throw + name: traced-throw + description: 'Convert throw statements to throwing legate TracedException' + entry: ./scripts/pre-commit/traced_throw.py + language: python + 'types_or': [c++, c, cuda] + stages: [pre-commit] + exclude: '^tests/cpp/.*$' + - repo: https://github.com/codespell-project/codespell + rev: v2.4.1 + hooks: + - id: codespell + additional_dependencies: + # Technically not necessary, but since Legate supports Python 3.10, it's + # possible that the user doesn't have 3.11 locally. + - tomli + args: ["--toml=./pyproject.toml", "--write-changes"] + - repo: https://github.com/pre-commit/mirrors-clang-format + rev: 'v20.1.0' + hooks: + - id: clang-format + files: \.(cu|cuh|h|hpp|cc|inl)$ + types_or: [] + exclude: '^(share/legate/realm_ucp_bootstrap/.*)$' + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.11.4 + hooks: + - id: ruff-format + - id: ruff + args: ["--config=./pyproject.toml", "--fix"] + - repo: https://github.com/pre-commit/mirrors-mypy + rev: 'v1.15.0' + hooks: + - id: mypy + pass_filenames: false + args: ['src', 'tests', 'config', 'test.py', 'scripts/release'] + additional_dependencies: [numpy, pytest, pytest_mock, types-psutil, rich, "dask[distributed]"] + - repo: https://github.com/MarcoGorelli/cython-lint + rev: v0.16.6 + hooks: + - id: cython-lint + - id: double-quote-cython-strings + - repo: https://github.com/netromdk/vermin + rev: 'v1.6.0' + hooks: + - id: vermin + args: ['--config-file', './setup.cfg'] + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: '3.0.0' + hooks: + - id: shellcheck + args: ['--rcfile', './scripts/pre-commit/shellcheckrc'] + - repo: https://github.com/rhysd/actionlint + rev: v1.7.7 + hooks: + - id: actionlint + types: [yaml] + files: ^\.github/workflows/ + args: [-config-file=scripts/pre-commit/actionlint.yml] + entry: env SHELLCHECK_OPTS="--enable=all --severity=style" actionlint + - repo: local + hooks: + - id: legate-defined + name: legate-defined + description: 'Find uses of ifdef LEGATE_ that should be using LegateDefined()' + entry: ./scripts/pre-commit/legate_defined.sh + language: script + 'types_or': [c++, c, cuda] + stages: [pre-commit] + exclude: '^(src/cpp/legate/experimental/stl/detail/config\.hpp|src/cpp/legate/utilities/compiler\.h|share/legate/mpi_wrapper/.*|src/cpp/legate/utilities/env.h)$' + - id: no-default + name: no-default + description: 
"Ban the use of default: in switch statements" + entry: ./scripts/pre-commit/no_default.py + language: python + 'types_or': [c++, c, cuda] + stages: [pre-commit] + exclude: '^tests/cpp/.*$' + additional_dependencies: + - rich + - id: cmake-format + name: cmake-format + entry: ./scripts/pre-commit/cmake-format.bash cmake-format + language: python + types: [cmake] + exclude: '^(share/legate/realm_ucp_bootstrap/CMakeLists.txt|share/legate/realm_ucp_bootstrap/cmake/Config.cmake.in|continuous_integration/scripts/ucc-cmake-config/.*.cmake)$' + # Note that pre-commit autoupdate does not update the versions + # of dependencies, so we'll have to update this manually. + additional_dependencies: + - cmakelang==0.6.13 + verbose: true + - id: cmake-lint + name: cmake-lint + entry: ./scripts/pre-commit/cmake-format.bash cmake-lint + language: python + types: [cmake] + # Note that pre-commit autoupdate does not update the versions + # of dependencies, so we'll have to update this manually. + additional_dependencies: + - cmakelang==0.6.13 + verbose: true + - id: dej2lint + name: dej2lint + description: 'Run yamllint on meta.yaml files, removing Jinja2 directives' + entry: python scripts/pre-commit/dej2lint.py + language: python + files: meta\.yaml$ + args: ['-c', './scripts/pre-commit/yamllint.yml'] + additional_dependencies: ['yamllint'] + - id: check_cmake_format + name: check_cmake_format + description: 'Check that cmake-format.json is properly filled out with function signatures' + entry: ./scripts/pre-commit/check_cmake_format.py + args: ['--input-file', './scripts/pre-commit/cmake-format-legate.json'] + language: python + types: [cmake] + require_serial: true + additional_dependencies: + - cmakelang ci: - autoupdate_schedule: quarterly + autoupdate_schedule: quarterly default_language_version: - python: python3 + python: python3 diff --git a/BUILD.md b/BUILD.md deleted file mode 100644 index 6a380fe664..0000000000 --- a/BUILD.md +++ /dev/null @@ -1,449 +0,0 @@ - - -# Basic build - -If you are building on a cluster, first check if there are specialized scripts -available for your cluster at -[nv-legate/quickstart](https://github.com/nv-legate/quickstart). Even if your -specific cluster is not covered, you may be able to adapt an existing workflow. - -## Getting dependencies through conda - -The primary method of retrieving dependencies for Legate Core and downstream -libraries is through [conda](https://docs.conda.io/en/latest/). You will need -an installation of conda to follow the instructions below. We suggest using -the [mamba](https://github.com/mamba-org/mamba) implementation of the conda -package manager. - -Please use the `scripts/generate-conda-envs.py` script to create a conda -environment file listing all the packages that are required to build, run and -test Legate Core and all downstream libraries. For example: - -```shell -$ ./scripts/generate-conda-envs.py --python 3.10 --ctk 12.2.2 --os linux --ucx ---- generating: environment-test-linux-py310-cuda-12.2.2-ucx.yaml -``` - -Run this script with `-h` to see all available configuration options for the -generated environment file (e.g. all the supported Python versions). See the -[Dependencies](#dependency-listing) section for more details. 
- -Once you have this environment file, you can install the required packages by -creating a new conda environment: - -```shell -mamba env create -n legate -f .yaml -``` - -or by updating an existing environment: - -```shell -mamba env update -f .yaml -``` - -## Building through install.py - -The Legate Core repository comes with a helper `install.py` script in the -top-level directory, that will build the C++ parts of the library and install -the C++ and Python components under the currently active Python environment. - -To add GPU support, use the `--cuda` flag: - -```shell -./install.py --cuda -``` - -You can specify the CUDA toolkit directory and the CUDA architecture you want to -target using the `--with-cuda` and `--arch` flags, e.g.: - -```shell -./install.py --cuda --with-cuda /usr/local/cuda/ --arch ampere -``` - -By default the script relies on CMake's auto-detection for these settings. -CMake will first search the currently active Python/conda environment -for dependencies, then any common system-wide installation directories (e.g. -`/usr/lib`). If a dependency cannot be found but is publicly available in source -form (e.g. OpenBLAS), cmake will fetch and build it automatically. You can -override this search by providing an install location for any dependency -explicitly, using a `--with-` flag, e.g. `--with-nccl` and -`--with-openblas`. - -For multi-node execution Legate can use [GASNet](https://gasnet.lbl.gov/) (use -`--network gasnet1` or `--network gasnetex`, see [below](#gasnet-optional) for -more details) or [UCX](https://openucx.org) (use `--network ucx`, see -[below](#ucx-optional) for more details). - -Compiling with networking support requires MPI. - -For example this would be an installation for a -[DGX SuperPOD](https://www.nvidia.com/en-us/data-center/dgx-superpod/): - -```shell -./install.py --network gasnet1 --conduit ibv --cuda --arch ampere -``` - -Alternatively, here is an install line for the -[Piz-Daint](https://www.cscs.ch/computers/decommissioned/piz-daint-piz-dora/) -supercomputer: - -```shell -./install.py --network gasnet1 --conduit aries --cuda --arch pascal -``` - -To see all available configuration options, run with the `--help` flag: - -```shell -./install.py --help -``` - -# Advanced topics - -## Support matrix - -The following table lists Legate's minimum supported versions of major dependencies. - -"Full support" means that the corresponding versions (and all later ones) are -being tested with some regularity, and are expected to work. Please report any -incompatibility you find against a fully-supported version by opening a bug. - -"Best-effort support" means that the corresponding versions are not actively -tested, but Legate should be compatible with them. We will not actively work to -fix any incompatibilities discovered under these versions, but we accept -contributions that fix such incompatibilities. - -| Dependency | Full support (min version) | Best-effort support (min version) | -| ---------------- | ------------------------------- | ------------------------------------ | -| CPU architecture | x86-64 (Haswell), aarch64 | ppc64le, older x86-64, Apple Silicon | -| OS | RHEL 8, Ubuntu 20.04, MacOS 12 | other Linux | -| C++ compiler | gcc 8, clang 7, nvc++ 19.1 | any compiler with C++17 support | -| GPU architecture | Volta | Pascal | -| CUDA toolkit | 11.4 | 10.0 | -| Python | 3.9 | | -| NumPy | 1.22 | | - -## Dependency listing - -In this section we comment further on our major dependencies. 
Please consult an -environment file created by `generate-conda-envs.py` for a full listing of -dependencies, e.g. building and testing tools, and for exact version -requirements. - -### Operating system - -Legate has been tested on Linux and MacOS, although only a few flavors of Linux -such as Ubuntu have been thoroughly tested. There is currently no support for -Windows. - -Specify your OS when creating a conda environment file through the `--os` flag -of `generate-conda-envs.py`. - -### Python - -In terms of Python compatibility, Legate *roughly* follows the timeline outlined -in [NEP 29](https://numpy.org/neps/nep-0029-deprecation_policy.html). - -Specify your desired Python version when creating a conda environment file -through the `--python` flag of `generate-conda-envs.py`. - -### C++ compiler - -We suggest that you avoid using the compiler packages available on conda-forge. -These compilers are configured with the specific goal of building -redistributable conda packages (e.g. they explicitly avoid linking to system -directories), which tends to cause issues for development builds. Instead prefer -the compilers available from your distribution's package manager (e.g. apt/yum) -or your HPC vendor. - -If you want to pull the compilers from conda, use an environment file created -by `generate-conda-envs.py` using the `--compilers` flag. An appropriate -compiler for the target OS will be chosen automatically. - -### CUDA (optional) - -Only necessary if you wish to run with Nvidia GPUs. - -Some CUDA components necessary for building, e.g. the `nvcc` compiler and driver -stubs, are not distributed through conda. These must instead be installed using -[system-level packages](https://developer.nvidia.com/cuda-downloads). If these -are not installed under a standard system location, you will need to inform -`install.py` of their location using `--with-cuda`. - -If you intend to pull any CUDA libraries from conda (see below), conda will need -to install an environment-local copy of the CUDA toolkit, even if you have it -installed system-wide. To avoid versioning conflicts it is safest to match the -version of CUDA installed system-wide, by specifying it to -`generate-conda-envs.py` through the `--ctk` flag. - -Legate is tested and guaranteed to be compatible with Volta and later GPU -architectures. You can use Legate with Pascal GPUs as well, but there could -be issues due to lack of independent thread scheduling. Please report any such -issues on GitHub. - -### CUDA libraries (optional) - -Only necessary if you wish to run with Nvidia GPUs. - -The following additional CUDA libraries are required, for use by legate.core or -downstream libraries. Unless noted otherwise, these are included in the conda -environment file. - -- `cublas` -- `cufft` -- `curand` (can optionally be used for its host fallback implementations even - when building without CUDA support) -- `cusolver` -- `cutensor` -- `nccl` -- `nvml` -- `nvtx` -- `thrust` (pulled from github) - -If you wish to provide alternative installations for these, then you can remove -them from the environment file (or invoke `generate-conda-envs.py` with `--ctk -none`, which will skip them all), and pass the corresponding `--with-` flag -to `install.py` (or let the build process attempt to locate them automatically). - -### OpenBLAS - -Used by cuNumeric for implementing linear algebra routines on CPUs. - -This library is automatically pulled from conda. 
If you wish to provide an -alternative installation, then you can manually remove `openblas` from the -generated environment file and pass `--with-openblas` to cuNumeric's -`install.py`. - -Note that if you want to build OpenBLAS from source you will need to get a -Fortran compiler, e.g. by pulling `fortran-compiler` from conda-forge. - -If you wish to compile Legate with OpenMP support, then you need a build of -OpenBLAS configured with the following options: - -- `USE_THREAD=1` -- `USE_OPENMP=1` -- `NUM_PARALLEL=32` (or at least as many as the NUMA domains on the target - machine) -- The `NUM_PARALLEL` flag defines how many instances of OpenBLAS's - calculation API can run in parallel. Legate will typically instantiate a - separate OpenMP group per NUMA domain, and each group can launch independent - BLAS work. If `NUM_PARALLEL` is not high enough, some of this parallel work - will be serialized. - -### TBLIS - -Used by cuNumeric for implementing tensor contraction routines on CPUs. - -This library will be automatically downloaded and built during cuNumeric -installation. If you wish to provide an alternative installation, pass -`--with-tblis` to cuNumeric's `install.py`. - -cuNumeric requires a build of TBLIS configured as follows: - -``` ---with-label-type=int32_t --with-length-type=int64_t --with-stride-type=int64_t -``` - -and additionally `--enable-thread-model=openmp` if cuNumeric is compiled -with OpenMP support. - -### Numactl (optional) - -Required to support CPU and memory binding in the Legate launcher. - -Not available on conda; typically available through the system-level package -manager. - -### MPI (optional) - -Only necessary if you wish to run on multiple nodes. - -We suggest that you avoid using the generic build of OpenMPI available on -conda-forge. Instead prefer an MPI installation provided by your HPC vendor, or -from system-wide distribution channels like apt/yum and -[MOFED](https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/), -since these will likely be more compatible with (and tuned for) your particular -system. - -If you want to use the OpenMPI distributed on conda-forge, use an environment -file created by `generate-conda-envs.py` using the `--openmpi` flag. - -Legate requires a build of MPI that supports `MPI_THREAD_MULTIPLE`. - -### RDMA/networking libraries (e.g. Infiniband, RoCE, Slingshot) (optional) - -Only necessary if you wish to run on multiple nodes, using the corresponding -networking hardware. - -Not available on conda; typically available through MOFED or the system-level -package manager. - -Depending on your hardware, you may need to use a particular Realm -networking backend, e.g. as of October 2023 HPE Slingshot is only -compatible with GASNet. - -### GASNet (optional) - -Only necessary if you wish to run on multiple nodes, using the GASNet1 or -GASNetEx Realm networking backend. - -This library will be automatically downloaded and built during Legate -installation. If you wish to provide an alternative installation, pass -`--with-gasnet` to `install.py`. - -When using GASNet, you also need to specify the interconnect network of the -target machine using the `--conduit` flag. - -### UCX (optional) - -Only necessary if you wish to run on multiple nodes, using the UCX Realm -networking backend. - -You can use the version of UCX available on conda-forge by using an environment -file created by `generate-conda-envs.py` using the `--ucx` flag. 
Note that this -build of UCX might not include support for the particular networking hardware on -your machine (or may not be optimally tuned for such). In that case you may want -to use an environment file generated with `--no-ucx` (default), get UCX from -another source (e.g. MOFED, the system-level package manager, or compiled -manually from [source](https://github.com/openucx/ucx)), and pass the location -of your UCX installation to `install.py` (if necessary) using `--with-ucx`. - -Legate requires a build of UCX configured with `--enable-mt`. - -## Alternative sources for dependencies - -If you do not wish to use conda for some (or all) of the dependencies, you can -remove the corresponding entries from the environment file before passing it to -conda. See [the `install.py` section](#building-through-installpy) for -instructions on how to provide alternative locations for these dependencies to -the build process. - -Note that this is likely to result in conflicts between conda-provided and -system-provided libraries. - -Conda distributes its own version of certain common libraries (in particular the -C++ standard library), which are also typically available system-wide. Any -system package you include will typically link to the system version, while -conda packages link to the conda version. Often these two different versions, -although incompatible, carry the same version number (`SONAME`), and are -therefore indistinguishable to the dynamic linker. Then, the first component to -specify a link location for this library will cause it to be loaded from there, -and any subsequent link requests for the same library, even if suggesting a -different link location, will get served using the previously linked version. - -This can cause link failures at runtime, e.g. when a system-level library -happens to be the first to load GLIBC, causing any conda library that comes -after to trip GLIBC's internal version checks, since the conda library expects -to find symbols with more recent version numbers than what is available on the -system-wide GLIBC: - -``` -/lib/x86_64-linux-gnu/libstdc++.so.6: version `GLIBCXX_3.4.30' not found (required by /opt/conda/envs/legate/lib/libarrow.so) -``` - -You can usually work around this issue by putting the conda library directory -first in the dynamic library resolution path: - -```shell -LD_LIBRARY_PATH="$CONDA_PREFIX/lib:$LD_LIBRARY_PATH" -``` - -This way you can make sure that the (typically more recent) conda version of any -common library will be preferred over the system-wide one, no matter which -component requests it first. - -## Building through pip - -Legate Core is not yet registered in a standard pip repository. However, users -can still use the pip installer to build and install Legate Core. The following -command will trigger a single-node, CPU-only build of Legate Core, then install -it into the currently active Python environment: - -```shell -$ pip install . -``` - -or - -```shell -$ python3 -m pip install . -``` - -Legate relies on CMake to select its toolchain and build flags. Users can set -the environment variables `CXX` or `CXXFLAGS` prior to building to override the -CMake defaults. - -Alternatively, CMake and build tool arguments can be passed via the -`CMAKE_ARGS`/`SKBUILD_CONFIGURE_OPTIONS` and `SKBUILD_BUILD_OPTIONS` -[environment variables](https://scikit-build.readthedocs.io/en/latest/usage.html#environment-variable-configuration): - -```shell -$ CMAKE_ARGS="${CMAKE_ARGS:-} -D Legion_USE_CUDA:BOOL=ON" \ - pip install . 
-``` - -An alternative syntax using `setup.py` with `scikit-build` is - -```shell -$ python setup.py install -- -DLegion_USE_CUDA:BOOL=ON -``` - -## Building through pip & cmake - -pip uses [scikit-build](https://scikit-build.readthedocs.io/en/latest/) -in `setup.py` to drive the build and installation. A `pip install` will trigger three general actions: - -1. CMake build and installation of C++ libraries -2. CMake generation of configuration files and build-dependent Python files -3. pip installation of Python files - -The CMake build can be configured independently of `pip`, allowing incremental C++ builds directly through CMake. -This simplifies rebuilding the C++ shared libraries either via command-line or via IDE. -After building the C++ libraries, the `pip install` can be done in "editable" mode using the `-e` flag. -This configures the Python site packages to import the Python source tree directly. -The Python source can then be edited and used directly for testing without requiring another `pip install`. - -There are several examples in the `scripts` folder. We walk through the steps in -`build-separately-no-install.sh` here. - -First, the CMake build needs to be configured: - -```shell -$ cmake -S . -B build -GNinja -D Legion_USE_CUDA=ON -``` - -Once configured, we can build the C++ libraries: - -```shell -$ cmake --build build -``` - -This will invoke Ninja (or make) to execute the build. -Once the C++ libraries are available, we can do an editable (development) pip installation. - -```shell -$ SKBUILD_BUILD_OPTIONS="-D FIND_LEGATE_CORE_CPP=ON -D legate_core_ROOT=$(pwd)/build" \ - python3 -m pip install \ - --root / --no-deps --no-build-isolation - --editable . -``` - -The Python source tree and CMake build tree are now available with the environment Python -for running Legate programs. The diagram below illustrates the -complete workflow for building both Legate core and a downstream package, -[cuNumeric](https://github.com/nv-legate/cunumeric) - -drawing diff --git a/BUILD.rst b/BUILD.rst new file mode 100644 index 0000000000..c6d3ea637c --- /dev/null +++ b/BUILD.rst @@ -0,0 +1,372 @@ +.. + SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 + + +.. _legate_source_build: + +=========================== +Building Legate from Source +=========================== + +Basic build +=========== + +Influential environment variables +--------------------------------- + +Legate uses two environment variables ``${LEGATE_DIR}`` and ``${LEGATE_ARCH}`` to "orient" +itself on your system during the build phase. + +.. note:: + + The definition and use of these variables is only applicable during the local build + phase. **Once Legate is installed, these variables are ignored.** + + They are used to determine the active build if you wish to have multiple concurrent + configurations/builds of Legate. + + **If you only ever have a single configuration of Legate, you can ignore these variables.** + + +- ``${LEGATE_DIR}``: This variable should point to the root directory of the Legate source + tree, i.e. the directory containing e.g. ``configure``, ``pyproject.toml``, and + ``.clang-format``. +- ``${LEGATE_ARCH}``: This variable should be the (un-prefixed) name of the current build + directory inside ``${LEGATE_DIR}``, such that ``${LEGATE_DIR}/${LEGATE_ARCH}`` points to + the current "active" build directory. + + The actual value of ``${LEGATE_ARCH}`` is meaningless. It can be + e.g. ``foo-bar-baz``. 
The only important thing is that it does not conflict with the
+  name of another directory under ``${LEGATE_DIR}``.
+
+  If you are unsure what to set for this variable, ignore it and run ``configure`` first
+  -- it will choose an appropriate value for you and instruct you on how to set it.
+
+
+If you have multiple configurations of Legate built, you can set these variables (usually
+just ``${LEGATE_ARCH}``) to quickly switch between builds:
+
+.. code-block:: sh
+
+   # Run the tests using the "foo-bar" build of the library.
+   $ LEGATE_ARCH=foo-bar ./test.py
+   # Now run the tests using the "baz-bop" build of the library, all without needing to
+   # recompile.
+   $ LEGATE_ARCH=baz-bop ./test.py
+
+
+.. note::
+
+   Unless otherwise stated, all relative paths mentioned from here on out are relative to
+   ``${LEGATE_DIR}``.
+
+
+Building from source
+--------------------
+
+To build and install the basic C++ runtime:
+
+.. code-block:: sh
+
+   $ ./configure
+   $ make install
+
+
+Build and install C++ runtime and Python bindings:
+
+.. code-block:: sh
+
+   $ ./configure --with-python
+   $ pip install .
+
+
+Technically, when installing Python bindings, ``configure`` is optional. It is possible
+to configure, build, and install Legate with Python bindings using just:
+
+.. code-block:: sh
+
+   $ pip install .
+
+
+While this workflow is supported (in the sense that it is functional), very little -- if
+any -- effort is made to make it ergonomic. **The user is strongly encouraged to run
+configure first**.
+
+In particular, it requires the following from the user:
+
+#. Defining all CMake options manually through the ``CMAKE_ARGS`` environment variable.
+#. Defining all scikit-build options, including any that might be implicitly set via
+   ``configure``, manually via appropriate environment variables.
+#. Ensuring that no prior installation of Legate, Legion, or any of its dependencies exists
+   in the environment which might otherwise influence the CMake configuration.
+
+   For example, due to how CMake picks up dependencies, a prior (stale) installation of
+   Legion to a shared ``conda`` environment may be prioritized over downloading it from
+   scratch. ``configure`` automatically detects this (and sets the appropriate CMake
+   variables to guard against it) but a bare ``pip install`` will not do so.
+#. Other potential quality-of-life improvements made by ``configure``.
+
+Build and install the basic C++ runtime with CUDA and HDF5 support, while disabling ZLIB,
+and explicitly specifying a pre-built UCX directory. Specifying the UCX directory implies
+enabling UCX support. Additionally, we install the library to a custom prefix:
+
+.. code-block:: sh
+
+   $ ./configure \
+       --with-cuda \
+       --with-hdf5 \
+       --with-zlib=0 \
+       --with-ucx-dir='/path/to/ucx'
+   $ make install PREFIX=/path/to/prefix
+
+
+A full list of options available during ``configure`` can be found by running:
+
+.. code-block:: sh
+
+   $ ./configure --help
+
+
+For a list of example configurations, see the configure scripts under
+``config/examples``. These contain configuration scripts for a wide variety of
+machines. For example, to configure a debug build on a `DGX SuperPOD
+<https://www.nvidia.com/en-us/data-center/dgx-superpod/>`_ you may use
+``config/examples/arch-dgx-superpod-debug.py``.
+
+For multi-node execution, Legate can use `UCX <https://openucx.org>`_ (use ``--with-ucx``)
+or `GASNet <https://gasnet.lbl.gov/>`_ (use ``--with-gasnet``); see the discussion on
+:ref:`dependencies <dependency_listing>` for more details.
+
+Compiling with networking support requires MPI.
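+
+As a point of reference, a multi-node configuration might look like the following. This
+is a hedged sketch rather than a recipe: it only combines flags documented in this guide
+(``--with-cuda``, ``--with-gasnet``, ``--gasnet-conduit``), and the ``ibv`` conduit name
+is an assumption to be replaced with the conduit matching your interconnect:
+
+.. code-block:: sh
+
+   # Hypothetical multi-node build: CUDA plus GASNet over an InfiniBand (ibv) conduit.
+   # MPI must be available in the environment for the networking support to compile.
+   $ ./configure --with-cuda --with-gasnet --gasnet-conduit ibv
+   $ make install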
+
+Dependencies
+============
+
+For many of its dependencies, ``configure`` will download and install them transparently
+as part of the build. However, for some (e.g. CUDA) this is not possible. In this case, the
+user must use some other package manager or module system to load the necessary
+dependencies.
+
+The primary method of retrieving dependencies for Legate and downstream libraries is
+through `conda <https://docs.conda.io/en/latest/>`_. You will need an installation of conda
+to follow the instructions below. We suggest using the
+`miniforge <https://github.com/conda-forge/miniforge>`_ distribution of conda.
+
+Please use the ``scripts/generate-conda-envs.py`` script to create a conda environment
+file listing all the packages that are required to build, run and test Legate (and
+optionally some downstream libraries, e.g. cuPyNumeric). For example:
+
+.. code-block:: sh
+
+   $ ./scripts/generate-conda-envs.py --ctk 12.2.2 --ucx
+   --- generating: environment-test-linux-cuda-12.2.2-ucx.yaml
+
+
+Run this script with ``--help`` to see all available configuration options for the
+generated environment file. See the :ref:`dependencies <dependency_listing>` section for more
+details.
+
+Once you have this environment file, you can install the required packages by creating a
+new conda environment:
+
+.. code-block:: sh
+
+   $ conda env create -n legate -f /path/to/env/file.yaml
+
+
+or by updating an existing environment:
+
+.. code-block:: sh
+
+   $ conda env update -f /path/to/env/file.yaml
+
+
+You will want to "activate" this environment every time before (re-)building Legate, to
+make sure it is always installed in the same directory (consider doing this in your shell
+startup script):
+
+.. code-block:: sh
+
+   $ conda activate legate
+
+
+Advanced build topics
+=====================
+
+.. _dependency_listing:
+
+Dependency listing
+------------------
+
+In this section we comment further on our major dependencies. Please consult an
+environment file created by ``scripts/generate-conda-envs.py`` for a full listing of
+dependencies, e.g. building and testing tools, and for exact version requirements.
+
+Operating system
+----------------
+
+Legate has been tested on Linux and macOS, although only a few flavors of Linux such as
+Ubuntu have been thoroughly tested. There is currently no support for Windows.
+
+Python
+------
+
+In terms of Python compatibility, Legate *roughly* follows the timeline outlined in `NEP
+29 <https://numpy.org/neps/nep-0029-deprecation_policy.html>`_.
+
+C++ compiler
+------------
+
+We suggest that you avoid using the compiler packages available on conda-forge. These
+compilers are configured with the specific goal of building redistributable conda packages
+(e.g. they explicitly avoid linking to system directories), which tends to cause issues
+for development builds. Instead prefer the compilers available from your distribution's
+package manager (e.g. apt/yum) or your HPC vendor.
+
+If you want to pull the compilers from conda, use an environment file created by
+``scripts/generate-conda-envs.py`` using the ``--compilers`` flag. An appropriate compiler
+for the target OS will be chosen automatically.
+
+CUDA (optional)
+---------------
+
+Only necessary if you wish to run with NVIDIA GPUs.
+
+If CUDA is not installed under a standard system location, you will need to inform
+``configure`` of its location using ``--with-cuda-dir`` (note: you don't need to pass
+``--with-cuda`` when passing ``--with-cuda-dir``; CUDA support is implied when
+specifying the root directory).
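+
+As a concrete sketch (the toolkit path below is an assumption; substitute the actual
+location of your CUDA install):
+
+.. code-block:: sh
+
+   # Point configure at a CUDA toolkit outside the default search locations.
+   # Passing --with-cuda-dir already implies --with-cuda.
+   $ ./configure --with-cuda-dir=/usr/local/cuda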
+
+If you intend to pull any CUDA libraries from conda (see below), conda will need to
+install an environment-local copy of the CUDA toolkit, even if you have it installed
+system-wide. To avoid versioning conflicts it is safest to match the version of CUDA
+installed system-wide, by specifying it to ``scripts/generate-conda-envs.py`` through the
+``--ctk`` flag.
+
+CUDA libraries (optional)
+-------------------------
+
+Only necessary if you wish to run with NVIDIA GPUs.
+
+The following additional CUDA libraries are required, for use by legate or downstream
+libraries. Unless noted otherwise, these are included in the conda environment file.
+
+- ``nccl``
+- ``nvml``
+- ``nvtx``
+- ``CCCL`` (pulled from GitHub)
+
+If you wish to provide alternative installations for these, then you can remove them from
+the environment file (or invoke ``scripts/generate-conda-envs.py`` with ``--ctk none``,
+which will skip them all), and pass the corresponding ``--with-<dep>`` flag to
+``configure`` (or let the build process attempt to locate them automatically).
+
+
+Numactl (optional)
+------------------
+
+Required to support CPU and memory binding in the Legate launcher.
+
+Not available on conda; typically available through the system-level package manager.
+
+MPI (optional)
+--------------
+
+Only necessary if you wish to run on multiple nodes.
+
+We suggest that you avoid using the generic build of OpenMPI available on
+conda-forge. Instead prefer an MPI installation provided by your HPC vendor, or from
+system-wide distribution channels like apt/yum and `MOFED
+<https://network.nvidia.com/products/infiniband-drivers/linux/mlnx_ofed/>`_, since these
+will likely be more compatible with (and tuned for) your particular system.
+
+If you want to use the OpenMPI distributed on conda-forge, use an environment file created
+by ``scripts/generate-conda-envs.py`` using the ``--openmpi`` flag.
+
+Legate requires a build of MPI that supports ``MPI_THREAD_MULTIPLE``.
+
+RDMA/networking libraries (e.g. Infiniband, RoCE, Slingshot) (optional)
+------------------------------------------------------------------------
+
+Only necessary if you wish to run on multiple nodes, using the corresponding networking
+hardware.
+
+Not available on conda; typically available through MOFED or the system-level package
+manager.
+
+Depending on your hardware, you may need to use a particular Realm networking backend,
+e.g. as of October 2023 HPE Slingshot is only compatible with GASNet.
+
+GASNet (optional)
+-----------------
+
+Only necessary if you wish to run on multiple nodes, using the GASNet1 or GASNetEx Realm
+networking backend.
+
+This library will be automatically downloaded and built during Legate installation. If you
+wish to provide an alternative installation, pass ``--with-gasnet`` to ``configure``.
+
+When using GASNet, you also need to specify the interconnect network of the target machine
+using the ``--gasnet-conduit`` flag.
+
+UCX (optional)
+--------------
+
+Only necessary if you wish to run on multiple nodes, using the UCX Realm networking
+backend.
+
+You can use the version of UCX available on conda-forge by using an environment file
+created by ``scripts/generate-conda-envs.py`` using the ``--ucx`` flag. Note that this
+build of UCX might not include support for the particular networking hardware on your
+machine (or may not be optimally tuned for such). In that case you may want to use an
+environment file generated with ``--no-ucx`` (default), get UCX from another source
+(e.g. MOFED, the system-level package manager, or compiled manually from `source
+<https://github.com/openucx/ucx>`_), and pass the location of your UCX installation to
+``configure`` (if necessary) using ``--with-ucx-dir``.
+
+Legate requires a build of UCX configured with ``--enable-mt``.
+
+Alternative sources for dependencies
+------------------------------------
+
+If you do not wish to use conda for some (or all) of the dependencies, you can remove the
+corresponding entries from the environment file before passing it to conda.
+
+Note that this is likely to result in conflicts between conda-provided and system-provided
+libraries.
+
+Conda distributes its own version of certain common libraries (in particular the C++
+standard library), which are also typically available system-wide. Any system package you
+include will typically link to the system version, while conda packages link to the conda
+version. Often these two different versions, although incompatible, carry the same version
+number (``SONAME``), and are therefore indistinguishable to the dynamic linker. Then, the
+first component to specify a link location for this library will cause it to be loaded
+from there, and any subsequent link requests for the same library, even if suggesting a
+different link location, will get served using the previously linked version.
+
+This can cause link failures at runtime, e.g. when a system-level library happens to be
+the first to load GLIBC, causing any conda library that comes after to trip GLIBC's
+internal version checks, since the conda library expects to find symbols with more recent
+version numbers than what is available on the system-wide GLIBC:
+
+.. code-block:: sh
+
+   ...
+   /lib/x86_64-linux-gnu/libstdc++.so.6: version GLIBCXX_3.4.30 not found (required by /opt/conda/envs/legate/lib/libfoo.so)
+
+
+You can usually work around this issue by putting the conda library directory first in the
+dynamic library resolution path:
+
+.. code-block:: sh
+
+   # On Linux
+   $ export LD_LIBRARY_PATH="${CONDA_PREFIX}/lib:${LD_LIBRARY_PATH}"
+   # On macOS
+   $ export DYLD_LIBRARY_PATH="${CONDA_PREFIX}/lib:${DYLD_LIBRARY_PATH}"
+
+
+This way you can make sure that the (typically more recent) conda version of any common
+library will be preferred over the system-wide one, no matter which component requests it
+first.
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index afee8f23be..0000000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-#=============================================================================
-# Copyright 2022 NVIDIA Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#============================================================================= - -cmake_minimum_required(VERSION 3.22.1 FATAL_ERROR) - -if(POLICY CMP0074) - # find_package() uses _ROOT variables - # https://cmake.org/cmake/help/latest/policy/CMP0074.html#policy:CMP0074 - cmake_policy(SET CMP0074 NEW) - set(CMAKE_POLICY_DEFAULT_CMP0074 NEW) -endif() - -if(POLICY CMP0060) - # Link libraries by full path even in implicit directories - # https://cmake.org/cmake/help/latest/policy/CMP0060.html#policy:CMP0060 - cmake_policy(SET CMP0060 NEW) - set(CMAKE_POLICY_DEFAULT_CMP0060 NEW) -endif() - -if(POLICY CMP0077) - # option() honors normal variables - # https://cmake.org/cmake/help/latest/policy/CMP0077.html - cmake_policy(SET CMP0077 NEW) - set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) -endif() - -if(POLICY CMP0096) - # The project() command preserves leading zeros in version components - # https://cmake.org/cmake/help/latest/policy/CMP0096.html - cmake_policy(SET CMP0096 NEW) - set(CMAKE_POLICY_DEFAULT_CMP0096 NEW) -endif() - -if(POLICY CMP0126) - # make set(CACHE) command not remove normal variable of the same name from the current scope - # https://cmake.org/cmake/help/latest/policy/CMP0126.html - cmake_policy(SET CMP0126 NEW) - set(CMAKE_POLICY_DEFAULT_CMP0126 NEW) -endif() - -if(POLICY CMP0135) - # make the timestamps of ExternalProject_ADD match the download time - # https://cmake.org/cmake/help/latest/policy/CMP0135.html - cmake_policy(SET CMP0135 NEW) - set(CMAKE_POLICY_DEFAULT_CMP0135 NEW) -endif() - -############################################################################## -# - Download and initialize RAPIDS CMake helpers ----------------------------- - -if(NOT EXISTS ${CMAKE_BINARY_DIR}/RAPIDS.cmake) - file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.08/RAPIDS.cmake - ${CMAKE_BINARY_DIR}/RAPIDS.cmake) -endif() -include(${CMAKE_BINARY_DIR}/RAPIDS.cmake) -include(rapids-cmake) -include(rapids-cpm) -include(rapids-cuda) -include(rapids-export) -include(rapids-find) - -set(legate_core_version 24.01.00) - -# For now we want the optimization flags to match on both normal make and cmake -# builds so we override the cmake defaults here for release, this changes -# -O3 to -O2 and removes -DNDEBUG -set(CMAKE_CXX_FLAGS_DEBUG "-O0 -g") -set(CMAKE_CUDA_FLAGS_DEBUG "-O0 -g") -set(CMAKE_CXX_FLAGS_RELEASE "-O2") -set(CMAKE_CUDA_FLAGS_RELEASE "-O2") -set(CMAKE_CXX_FLAGS_MINSIZEREL "-Os") -set(CMAKE_CUDA_FLAGS_MINSIZEREL "-Os") -set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g") -set(CMAKE_CUDA_FLAGS_RELWITHDEBINFO "-O2 -g") - -if(NOT SKBUILD) - project(legate_core VERSION ${legate_core_version} LANGUAGES C CXX) - include(${CMAKE_CURRENT_SOURCE_DIR}/legate_core_cpp.cmake) -else() - project( - legate_core_python - VERSION ${legate_core_version} - LANGUAGES # TODO: Building Python extension modules via the python_extension_module requires the C - # language to be enabled here. The test project that is built in scikit-build to verify - # various linking options for the python library is hardcoded to build with C, so until - # that is fixed we need to keep C. 
- C CXX) - include(${CMAKE_CURRENT_SOURCE_DIR}/legate_core_python.cmake) -endif() - -if(CMAKE_GENERATOR STREQUAL "Ninja") - function(add_touch_legate_core_ninja_build_target) - set(_suf ) - if(SKBUILD) - set(_suf "_python") - endif() - add_custom_target("touch_legate_core${_suf}_ninja_build" ALL - COMMAND ${CMAKE_COMMAND} -E touch_nocreate "${CMAKE_CURRENT_BINARY_DIR}/build.ninja" - COMMENT "touch build.ninja so ninja doesn't re-run CMake on rebuild" - VERBATIM - ) - foreach(_dep IN ITEMS legion_core legion_core_python - Legion LegionRuntime - Realm RealmRuntime - Regent) - if(TARGET ${_dep}) - add_dependencies("touch_legate_core${_suf}_ninja_build" ${_dep}) - endif() - endforeach() - endfunction() - add_touch_legate_core_ninja_build_target() -endif() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 3da71fca14..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributing to Legate Core - -Legate Core is an open-source project released under the [Apache license, version 2.0](https://www.apache.org/licenses/LICENSE-2.0). We welcome any and all contributions, and we hope that you can help us develop a strong community. - -## How to begin - -Most of the time, the best thing is to begin by [opening an issue](https://github.com/nv-legate/Legate Core/issues). This gives us a chance to discuss the contribution and to define the problem or feature that it addresses. Often, opening of the issue first may help prevent you from doing unnecessary work or to enhance and further develop your idea. - -Once you are ready to start development, we ask you to work on a [fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo) of our repository. The next step is to create a (pull request)[https://help.github.com/en/articles/about-pull-requests]. Feel free to open the pull request as soon as you begin your development (just mark it [as a draft](https://github.blog/2019-02-14-introducing-draft-pull-requests/)) or when you are ready to have your contribution merged. - -## The Legalese: Developer Certificate of Origin - -Legate Core is released under the [Apache license, version 2.0](https://www.apache.org/licenses/LICENSE-2.0) and is free to use, modify, and redistribute. To ensure that the license can be exercised without encumbrance, we ask you that you only contribute your own work or work to which you have the intellectual rights. To that end, we employ the Developer's Certificate of Origin (DCO), which is the lightweight mechanism for you to certify that you are legally able to make your contribution. Here is the full text of the certificate (also available at [DeveloperCertificate.org](https://developercertificate.org/)): - -```` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -```` - -### How Do I Sign the DCO? - -Fortunately, it does not take much work to sign the DCO. The only thing that you have to do is to mark all your commits with a `Signed-off-by` line that looks like that: - -```` -Signed-off-by: Your Name -```` - -Please use your real name and a valid email address at which you can be reached. For legal reasons, we will not be able to accept contributions that use pseudonyms in the signature. You can simply add this line at the end of all your commits manually, or you can use the `-s` or the `--signoff` options provided by Git to automatically tack on the signature. - -## Review Process - -We are really grateful that you are thinking of contributing to Legate Core. We will make every effort to review your contributions as soon as possible. - -As we suggested at the beginning of this document, it will be really helpful to start with an issue unless your proposed change is really trivial. An issue will help to save work in the review process (e.g., maybe somebody is already working on exactly the same thing you want to work on). After you open your pull request (PR), there usually will be a community feedback that often will require further changes to your contribution (the usual open-source process). Usually, this will conclude in the PR being merged by a maintainer, but on rare occasions a PR may be rejected. This may happen, for example, if the PR appears abandoned (no response to the community feedback) or if the PR does not seem to be approaching community acceptance in a reasonable time frame. In any case, an explanation will always be given why a PR is closed. Even if a PR is closed for some reason, it may always be reopened if the situation evolves (feel free to comment on closed PRs to discuss reopening them). - -## Code Formatting Requirements - -Legate Core has a set of coding standards that are expected from all the code merged into the project. The coding standards are defined by the set of tools we use to format our code. We use the [pre-commit](https://pre-commit.com/) framework to run our formatting tools. The easiest way to meet the coding standards is to simply use the pre-commit framework to run all the checks for you. Please visit the [pre-commit project page](https://pre-commit.com/) for pre-commit installation and usage instructions. 
Once pre-commit is installed in the cuNumeric repo, all the checks and formatting will be run on every commit, but one can also run the checks explicitly as detailed in pre-commit documentation.
-
-
-
-We hope that the automation of our formatting checks will make it easy to comply with our coding standards. If you encounter problems with code formatting, however, please let us know in a comment on your PR, and we will do our best to help.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000000..26eea7c097
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,129 @@
+Contributing to Legate
+======================
+
+Legate is an open-source project released under the `Apache license, version 2.0
+<https://www.apache.org/licenses/LICENSE-2.0>`_. We welcome any and all
+contributions, and we hope that you can help us develop a strong community.
+
+How to begin
+------------
+
+Most of the time, the best thing is to begin by `opening an issue
+<https://github.com/nv-legate/legate/issues>`_. This gives us a chance to discuss
+the contribution and to define the problem or feature that it addresses. Opening
+an issue first can also save you unnecessary work, and may help you refine and
+further develop your idea.
+
+Once you are ready to start development, we ask you to work on a `fork
+<https://docs.github.com/en/get-started/quickstart/fork-a-repo>`_ of our
+repository. The next step is to create a `pull request
+<https://help.github.com/en/articles/about-pull-requests>`_. Feel free to open the
+pull request as soon as you begin your development (just mark it as a `draft
+<https://github.blog/2019-02-14-introducing-draft-pull-requests/>`_) or when you
+are ready to have your contribution merged.
+
+The Legalese: Developer Certificate of Origin
+---------------------------------------------
+
+Legate is released under the `Apache license, version 2.0`_ and is free to use,
+modify, and redistribute. To ensure that the license can be exercised without
+encumbrance, we ask that you contribute only your own work, or work to which you
+hold the intellectual rights. To that end, we employ the Developer's Certificate
+of Origin (DCO), a lightweight mechanism for you to certify that you are legally
+able to make your contribution. Here is the full text of the certificate (also
+available at `DeveloperCertificate.org <https://developercertificate.org/>`_):
+
+.. code-block::
+
+    Developer Certificate of Origin
+    Version 1.1
+
+    Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+
+    Everyone is permitted to copy and distribute verbatim copies of this
+    license document, but changing it is not allowed.
+
+
+    Developer's Certificate of Origin 1.1
+
+    By making a contribution to this project, I certify that:
+
+    (a) The contribution was created in whole or in part by me and I
+        have the right to submit it under the open source license
+        indicated in the file; or
+
+    (b) The contribution is based upon previous work that, to the best
+        of my knowledge, is covered under an appropriate open source
+        license and I have the right under that license to submit that
+        work with modifications, whether created in whole or in part
+        by me, under the same open source license (unless I am
+        permitted to submit under a different license), as indicated
+        in the file; or
+
+    (c) The contribution was provided directly to me by some other
+        person who certified (a), (b) or (c) and I have not modified
+        it.
+
+    (d) I understand and agree that this project and the contribution
+        are public and that a record of the contribution (including all
+        personal information I submit with it, including my sign-off) is
+        maintained indefinitely and may be redistributed consistent with
+        this project or the open source license(s) involved.
+
+How Do I Sign the DCO?
+......................
+
+Fortunately, it does not take much work to sign the DCO. All you have to do is
+sign off all your commits. To sign off on a commit, simply use the
+``--signoff`` (or ``-s``) option when committing your changes:
+
+``$ git commit -s -m "Add cool feature."``
+
+This will append the following to your commit message:
+
+``Signed-off-by: Your Name <your.name@example.com>``
+
+Please use your real name and a valid email address at which you can be reached.
+For legal reasons, we will not be able to accept contributions that use
+pseudonyms in the signature.
+
+Review Process
+--------------
+
+We are really grateful that you are thinking of contributing to Legate. We will
+make every effort to review your contributions as soon as possible.
+
+As we suggested at the beginning of this document, it is really helpful to start
+with an issue unless your proposed change is truly trivial. An issue helps save
+work in the review process (e.g., maybe somebody is already working on exactly
+the same thing you want to work on). After you open your pull request (PR),
+there will usually be community feedback, which often requires further changes
+to your contribution (the usual open-source process). Usually, this will
+conclude with the PR being merged by a maintainer, but on rare occasions a PR
+may be rejected. This may happen, for example, if the PR appears abandoned (no
+response to the community feedback) or if the PR does not seem to be approaching
+community acceptance in a reasonable time frame. In any case, an explanation
+will always be given for why a PR was closed. Even if a PR is closed, it can
+always be reopened if the situation evolves (feel free to comment on closed PRs
+to discuss reopening them).
+
+Code Formatting Requirements
+----------------------------
+
+Legate has a set of coding standards that are expected from all the code merged
+into the project. The coding standards are defined by the set of tools we use to
+format our code. We use the `pre-commit <https://pre-commit.com/>`_ framework to
+run our formatting tools. The easiest way to meet the coding standards is to
+simply use the pre-commit framework to run all the checks for you. Please visit
+the `pre-commit`_ project page for installation and usage instructions. Once
+pre-commit is installed in the cloned repo, all the checks and formatting will be
+run on every commit, but the checks can also be run explicitly, as detailed in
+the pre-commit documentation.
+
+We hope that the automation of our formatting checks will make it easy to comply
+with our coding standards. If you encounter problems with code formatting,
+however, please let us know in a comment on your PR, and we will do our best to
+help.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 9f4e99bebd..0000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,6 +0,0 @@
-include versioneer.py
-include legate/_version.py
-include legate/py.typed
-
-# include interface files
-recursive-include legate *.pyi
\ No newline at end of file
diff --git a/README.md b/README.md
index 2fd410354c..6cf79af404 100644
--- a/README.md
+++ b/README.md
@@ -1,490 +1,36 @@
-# Legate
-
-The Legate project endeavors to democratize computing by making it possible
-for all programmers to leverage the power of large clusters of CPUs and GPUs
-by running the same code that runs on a desktop or a laptop at scale.
-Using this technology, computational and data scientists can develop and test -programs on moderately sized data sets on local machines and then immediately -scale up to larger data sets deployed on many nodes in the cloud or on a -supercomputer without any code modifications. - -The Legate project is built upon two foundational principles: - -1. For end users, such as computational and data scientists, the programming - model must be identical to programming a single sequential CPU on their - laptop or desktop. All concerns relating to parallelism, data - distribution, and synchronization must be implicit. The cloud or a - supercomputer should appear as nothing more than a super-powerful CPU core. -2. Software must be compositional and not just interoperable (i.e. functionally - correct). Libraries developed in the Legate ecosystem must be able to exchange - partitioned and distributed data without requiring "shuffles" or unnecessary - blocking synchronization. Computations from different libraries should be - able to use arbitrary data and still be reordered across abstraction boundaries - to hide communication and synchronization latencies where the original sequential - semantics of the program allow. This is essential for achieving speed-of-light - performance on large scale machines. - -The Legate project is still in its nascent stages of development, but much of -the fundamental architecture is in place. We encourage development and contributions -to existing Legate libraries, as well as the development of new Legate libraries. -Pull requests are welcomed. - -If you have questions, please contact us at legate(at)nvidia.com. - -- [Legate](#legate) - - [Why Legate?](#why-legate) - - [What is the Legate Core?](#what-is-the-legate-core) - - [How Does Legate Work?](#how-does-legate-work) - - [How Do I Install Legate?](#how-do-i-install-legate) - - [How Do I Use Legate?](#how-do-i-use-legate) - - [Distributed Launch](#distributed-launch) - - [Debugging and Profiling](#debugging-and-profiling) - - [Running Legate programs with Jupyter Notebook](#running-legate-programs-with-jupyter-notebook) - - [Installation of the Legate IPython Kernel](#installation-of-the-legate-ipython-kernel) - - [Running with Jupyter Notebook](#running-with-jupyter-notebook) - - [Configuring the Jupyter Notebook](#configuring-the-jupyter-notebook) - - [Magic Command](#magic-command) - - [Other FAQs](#other-faqs) - - [Contributing](#contributing) - - [Documentation](#documentation) - - [Next Steps](#next-steps) - -## Why Legate? - -Computational problems today continue to grow both in their complexity as well -as the scale of the data that they consume and generate. This is true both in -traditional HPC domains as well as enterprise data analytics cases. Consequently, -more and more users truly need the power of large clusters of both CPUs and -GPUs to address their computational problems. Not everyone has the time or -resources required to learn and deploy the advanced programming models and tools -needed to target this class of hardware today. Legate aims to bridge this gap -so that any programmer can run code on any scale machine without needing to be -an expert in parallel programming and distributed systems, thereby allowing -developers to bring the problem-solving power of large machines to bear on -more kinds of challenging problems than ever before. - -## What is the Legate Core? - -The Legate Core is our version of [Apache Arrow](https://arrow.apache.org/). 
Apache -Arrow has significantly improved composability of software libraries by making it -possible for different libraries to share in-memory buffers of data without -unnecessary copying. However, it falls short when it comes to meeting two -of our primary requirements for Legate: - -1. Arrow only provides an API for describing a physical representation - of data as a single memory allocation. There is no interface for describing - cases where data has been partitioned and then capturing the logical - relationships of those partitioned subsets of data. -2. Arrow is mute on the subject of synchronization. Accelerators such as GPUs - achieve significantly higher performance when computations are performed - asynchronously with respect to other components of the system. When data is - passed between libraries today, accelerators must be pessimistically - synchronized to ensure that data dependences are satisfied across abstraction - boundaries. This might result in tolerable overheads for single GPU systems, - but can result in catastrophically poor performance when hundreds of GPUs are involved. - -The Legate Core provides an API very similar to Arrow's interface with several -important distinctions that provide stronger guarantees about data coherence and -synchronization to aid library developers when building Legate libraries. These -guarantees are the crux of how libraries in the Legate ecosystem are able to -provide excellent composability. - -The Legate Core API imports several important concepts from Arrow such that -users that are familiar with Arrow already will find it unsurprising. We use -the same type system representation as Arrow so libraries that have already -adopted it do not need to learn or adapt to a new type system. We also reuse -the concept of an [Array](https://arrow.apache.org/docs/cpp/api/array.html) -from Arrow. The `LegateArray` class supports many of the same methods as -the Arrow Array interface (we'll continue to add methods to improve -compatibility). The main difference is that instead of obtaining -[Buffer](https://arrow.apache.org/docs/cpp/api/memory.html#buffers) -objects from arrays to describe allocations of data that back the array, the -Legate Core API introduces a new primitive called a `LegateStore` which -provides a new interface for reasoning about partitioned and distributed -data in asynchronous execution environments. - -Any implementation of a `LegateStore` must maintain the following guarantees -to clients of the Legate Core API (i.e. Legate library developers): - -1. The coherence of data contained in a `LegateStore` must be implicitly - managed by the implementation of the Legate Core API. This means that - no matter where data is requested to perform a computation in a machine, - the most recent modifications to that data in program order must be - reflected. It should never be clients responsibility to maintain this - coherence. -2. It should be possible to create arbitrary views onto `LegateStore` objects - such that library developers can precisely describe the working sets of - their computations. Modifications to views must be reflected onto all - aliasing views data. This property must be maintained by the Legate Core - API implementation such that it is never the concern of clients. -3. Dependence management between uses of the `LegateStore` objects and their - views is the responsibility of Legate Core API regardless of what - (asynchronous) computations are performed on `LegateStore` objects or their - views. 
This dependence analysis must be both sound and precise. It is - illegal to over-approximate dependences. This dependence analysis must also - be performed globally in scope. Any use of the `LegateStore` on any - processor/node in the system must abide by the original sequential - semantics of the program - -Note that we do not specify exactly what the abstractions are that are needed -for implementing `LegateStore` objects. Our goal is not prescribe what these -abstractions are as they may be implementation dependent. Our only requirements -are that they have these properties to ensure that incentives are aligned in -such a way for Legate libraries to achieve a high degree of composability -at any scale of machine. Indeed, these requirements shift many of the burdens -that make implementing distributed and accelerated libraries hard off of the -library developers and onto the implementation of the Legate Core API. This -is by design as it allows the costs to be amortized across all libraries in -the ecosystem and ensures that Legate library developers are more productive. - -## How Does Legate Work? - -Our implementation of the Legate Core API is built on top of the -[Legion](https://legion.stanford.edu/) programming model and runtime system. -Legion was originally designed for large HPC applications that target -supercomputers and consequently applications written in the Legion programming -model tend to both perform and scale well on large clusters of both CPUs and -GPUs. Legion programs are also easy to port to new machines as they inherently -decouple the machine-independent specification of computations from decisions -about how that application is mapped to the target machine. Due to this -abstract nature, many programmers find writing Legion programs challenging. -By implementing the Legate Core API on top of Legion, we've made it easier -to use Legion such that developers can still get access to the benefits of -Legion without needing to learn all of the lowest-level interfaces. - -The [Legion programming model](https://legion.stanford.edu/pdfs/sc2012.pdf) -greatly aids in implementing the Legate Core API. Data types from libraries, -such as arrays in cuNumeric are mapped down onto `LegateStore` objects -that wrap Legion data types such as logical regions or futures. -In the case of regions, Legate application libraries rely heavily on -Legion's [support for partitioning of logical regions into arbitrary -subregion views](https://legion.stanford.edu/pdfs/oopsla2013.pdf). -Each library has its own heuristics for computing such partitions that -take into consideration the computations that will access the data, the -ideal sizes of data to be consumed by different processor kinds, and -the available number of processors. Legion automatically manages the coherence -of subregion views regardless of the scale of the machine. - -Computations in Legate application libraries are described by Legion tasks. -Tasks describe their data usage in terms of `LegateStore` objects, thereby -allowing Legion to infer where dependences exist. Legion uses distributed -bounding volume hierarchies, similar to a high performance ray-tracer, -to soundly and precisely perform dependence analysis on logical regions -and insert the necessary synchronization between tasks to maintain the -original sequential semantics of a Legate program. - -Each Legate application library also comes with its own custom Legion -mapper that uses heuristics to determine the best choice of mapping for -tasks (e.g. 
are they best run on a CPU or a GPU). All -Legate tasks are currently implemented in native C or CUDA in order to -achieve excellent performance on the target processor kind, but Legion -has bindings in other languages such as Python, Fortran, and Lua for -users that would prefer to use them. Importantly, by using Legion, -Legate is able to control the placement of data in order to leave it -in-place in fast memories like GPU framebuffers across tasks. - -When running on large clusters, Legate leverages a novel technology provided -by Legion called "[control replication](https://research.nvidia.com/sites/default/files/pubs/2021-02_Scaling-Implicit-Parallelism//ppopp.pdf)" to avoid the sequential bottleneck -of having one node farm out work to all the nodes in the cluster. With -control replication, Legate will actually replicate the Legate program and -run it across all the nodes of the machine at the same time. These copies -of the program all cooperate logically to appear to execute as one -program. When communication is necessary between -different computations, the Legion runtime's program analysis will automatically -detect it and insert the necessary data movement and synchronization -across nodes (or GPU framebuffers). This is the transformation that allows -sequential programs to run efficiently at scale across large clusters -as though they are running on a single processor. - -## How Do I Install Legate? - -Legate Core is available [on conda](https://anaconda.org/legate/legate-core). -Create a new environment containing Legate Core: - -``` -mamba create -n myenv -c nvidia -c conda-forge -c legate legate-core -``` - -or install it into an existing environment: - -``` -mamba install -c nvidia -c conda-forge -c legate legate-core -``` - -Only linux-64 packages are available at the moment. - -The default package contains GPU support, and is compatible with CUDA >= 12.0 -(CUDA driver version >= r520), and Volta or later GPU architectures. There are -also CPU-only packages available, and will be automatically selected when -installing on a machine without GPUs. You can force installation of a CPU-only -package by requesting it as follows: +[![Build conda Nightly release package](https://github.com/nv-legate/legate.internal/actions/workflows/ci-gh-nightly-release.yml/badge.svg?event=schedule)](https://github.com/nv-legate/legate.internal/actions/workflows/ci-gh-nightly-release.yml) -``` -mamba ... legate-core=*=*_cpu -``` - -See [BUILD.md](BUILD.md) for instructions on building Legate Core from source. - -## How Do I Use Legate? - -After installing the Legate Core library, the next step is to install a Legate -application library such as cuNumeric. The installation process for a -Legate application library will require you to provide a pointer to the location -of your Legate Core library installation as this will be used to configure the -installation of the Legate application library. After you finish installing any -Legate application libraries, you can then simply replace their `import` statements -with the equivalent ones from any Legate application libraries you have installed. -For example, you can change this: -```python -import numpy as np -``` -to this: -```python -import cunumeric as np -``` -After this, you can use the `legate` driver script in the `bin` directory of -your installation to run any Python program. 
- -You can also use the standard Python interpreter, but in that case configuration -options can only be passed through the environment (see below), and some options -are not available (check the output of legate --help for more details). - -For example, to run your script in the default configuration (4 CPUs cores and -4 GB of memory) just run: -``` -$ legate my_python_program.py [other args] -``` -The `legate` script also allows you to control the amount of resources that -Legate consumes when running on the machine. The `--cpus` and `--gpus` flags -are used to specify how many CPU and GPU processors should be used on a node. -The `--sysmem` flag can be used to specify how many MBs of DRAM Legate is allowed -to use per node, while the `--fbmem` flag controls how many MBs of framebuffer -memory Legate is allowed to use per GPU. For example, when running on a DGX -station, you might run your application as follows: -``` -$ legate --cpus 16 --gpus 4 --sysmem 100000 --fbmem 15000 my_python_program.py -``` -This will make 16 CPU processors and all 4 GPUs available for use by Legate. -It will also allow Legate to consume up to 100 GB of DRAM memory and 15 GB of -framebuffer memory per GPU for a total of 60 GB of GPU framebuffer memory. Note -that you probably will not be able to make all the resources of the machine -available for Legate as some will be used by the system or Legate itself for -meta-work. Currently if you try to exceed these resources during execution then -Legate will inform you that it had insufficient resources to complete the job -given its current mapping heuristics. If you believe the job should fit within -the assigned resources please let us know so we can improve our mapping heuristics. -There are many other flags available for use in the `legate` driver script that you -can use to communicate how Legate should view the available machine resources. -You can see a list of them by running: -``` -$ legate --help -``` -In addition to running Legate programs, you can also use Legate in an interactive -mode by simply not passing any `*py` files on the command line. You can still -request resources just as you would though with a normal file. Legate will -still use all the resources available to it, including doing multi-node execution. -``` -$ legate --cpus 16 --gpus 4 --sysmem 100000 --fbmem 15000 -Welcome to Legion Python interactive console ->>> -``` -Note that Legate does not currently support multi-tenancy cases where different -users are attempting to use the same hardware concurrently. - -As a convenience, several command-line options can have their default values set -via environment variables. These environment variables, their corresponding command- -line options, and their default values are as follows. - -| CLI Option | Env. Variable | Default Value | -|--------------------------|----------------------------------|---------------| -| --omps | LEGATE\_OMP\_PROCS | 0 | -| --ompthreads | LEGATE\_OMP\_THREADS | 4 | -| --utility | LEGATE\_UTILITY\_CORES | 2 | -| --sysmem | LEGATE\_SYSMEM | 4000 | -| --numamem | LEGATE\_NUMAMEM | 0 | -| --fbmem | LEGATE\_FBMEM | 4000 | -| --zcmem | LEGATE\_ZCMEM | 32 | -| --regmem | LEGATE\_REGMEM | 0 | -| --eager-alloc-percentage | LEGATE\_EAGER\_ALLOC\_PERCENTAGE | 50 | - -### Distributed Launch - -If Legate is compiled with networking support (see the -[installation section](#how-do-i-install-legate)), -it can be run in parallel by using the `--nodes` option followed by the number of nodes -to be used. 
Whenever the `--nodes` option is used, Legate will be launched -using `mpirun`, even with `--nodes 1`. Without the `--nodes` option, no launcher will -be used. Legate currently supports `mpirun`, `srun`, and `jsrun` as launchers and we -are open to adding additional launcher kinds. You can select the -target kind of launcher with `--launcher`. - -### Debugging and Profiling - -Legate also comes with several tools that you can use to better understand -your program both from a correctness and a performance standpoint. For -correctness, Legate has facilities for constructing both dataflow -and event graphs for the actual run of an application. These graphs require -that you have an installation of [GraphViz](https://www.graphviz.org/) -available on your machine. To generate a dataflow graph for your Legate -program simply pass the `--dataflow` flag to the `legate.py` script and after -your run is complete we will generate a `dataflow_legate.pdf` file containing -the dataflow graph of your program. To generate a corresponding event graph -you simply need to pass the `--event` flag to the `legate.py` script to generate -a `event_graph_legate.pdf` file. These files can grow to be fairly large for non-trivial -programs so we encourage you to keep your programs small when using these -visualizations or invest in a [robust PDF viewer](https://get.adobe.com/reader/). - -For profiling all you need to do is pass the `--profile` flag to Legate and -afterwards you will have a `legate_prof` directory containing a web page that -can be viewed in any web browser that displays a timeline of your program's -execution. You simply need to load the `index.html` page from a browser. You -may have to enable local JavaScript execution if you are viewing the page from -your local machine (depending on your browser). - -We recommend that you do not mix debugging and profiling in the same run as -some of the logging for the debugging features requires significant file I/O -that can adversely effect the performance of the application. - -## Running Legate programs with Jupyter Notebook - -Same as normal Python programs, Legate programs can be run -using Jupyter Notebook. Currently we support single node execution with -multiple CPUs and GPUs, and plan to support multi-node execution in the future. -We leverage Legion's Jupyter support, so you may want to refer to the -[relevant section in Legion's README](https://github.com/StanfordLegion/legion/blob/master/jupyter_notebook/README.md). -To simplify the installation, we provide a script specifically for Legate libraries. - -### Installation of the Legate IPython Kernel - -Please install Legate, then run the following command to install a default -Jupyter kernel: -``` -legate-jupyter -``` -If installation is successful, you will see some output like the following: -``` -Jupyter kernel spec Legate_SM_GPU (Legate_SM_GPU) has been installed -``` -`Legate_SM_GPU` is the default kernel name. - -### Running with Jupyter Notebook - -You will need to start a Jupyter server, then you can use a Jupyter notebook -from any browser. Please refer to the following two sections from the README of -the [Legion Jupyter Notebook extension](https://github.com/StanfordLegion/legion/tree/master/jupyter_notebook) - -* Start the Jupyter Notebook server -* Use the Jupyter Notebook in the browser - -### Configuring the Jupyter Notebook - -The Legate Jupyter kernel is configured according to the command line arguments -provided at install time. 
Standard `legate` options for Core, Memory, and -Mult-node configuration may be provided, as well as a name for the kernel: -``` -legate-jupyter --name legate_cpus_2 --cpus 2 -``` -Other configuration options can be seen by using the `--help` command line option. - -### Magic Command - -We provide a Jupyter magic command to display the IPython kernel configuration. -``` -%load_ext legate.jupyter -%legate_info -``` -results in output: -``` -Kernel 'Legate_SM_GPU' configured for 1 node(s) - -Cores: - CPUs to use per rank : 4 - GPUs to use per rank : 0 - OpenMP groups to use per rank : 0 - Threads per OpenMP group : 4 - Utility processors per rank : 2 - -Memory: - DRAM memory per rank (in MBs) : 4000 - DRAM memory per NUMA domain per rank (in MBs) : 0 - Framebuffer memory per GPU (in MBs) : 4000 - Zero-copy memory per rank (in MBs) : 32 - Registered CPU-side pinned memory per rank (in MBs) : 0 -``` - -## Other FAQs - -* *Does Legate only work on NVIDIA hardware?* - No, Legate will run on any processor supported by Legion (e.g. x86, ARM, and - PowerPC CPUs), and any network supported by GASNet or UCX (e.g. Infiniband, - Cray, Omnipath, and (ROC-)Ethernet based interconnects). - -* *What languages does the Legate Core API have bindings for?* - Currently the Legate Core bindings are only available in Python. Watch - this space for new language bindings soon or make a pull request to - contribute your own. Legion has a C API which should make it easy to - develop bindings in any language with a foreign function interface. - -* *Do I have to build drop-in replacement libraries?* - No! While we've chosen to provide drop-in replacement libraries for - popular Python libraries to illustrate the benefits of Legate, you - are both welcomed and encouraged to develop your own libraries on top - of Legate. We promise that they will compose well with other existing - Legate libraries. - -* *What other libraries are you planning to release for the Legate ecosystem?* - We're still working on that. If you have thoughts about what is important - please let us know so that we can get a feel for where best to put our time. +# Legate -* *Can I use Legate with other Legion libraries?* - Yes! If you're willing to extract the Legion primitives from the `LegateStore` - objects you should be able to pass them into other Legion libraries such as - [FlexFlow](https://flexflow.ai/). +The Legate project makes it easier for programmers to leverage the +power of large clusters of CPUs and GPUs. Using Legate, programs can be +developed and tested on moderately sized data sets on local machines and +then immediately scaled up to larger data sets deployed on many nodes in +the cloud or on a supercomputer, *without any code modifications*. -* *Does Legate interoperate with X?* - Yes, probably, but we don't recommend it. Our motivation for building - Legate is to provide the bare minimum subset of functionality that - we believe is essential for building truly composable software that can still - run at scale. No other systems out there met our needs. Providing - interoperability with those other systems will destroy the very essence - of what Legate is and significantly dilute its benefits. All that being - said, Legion does provide some means of doing stop-the-world exchanges - with other runtime system running concurrently in the same processes. 
-  If you are interested in pursuing this approach please open an issue
-  on the [Legion github issue tracker](https://github.com/StanfordLegion/legion/issues)
-  as it will be almost entirely orthogonal to how you use Legate.
+For more information about Legate's goals, architecture, and how it works,
+see the [Legate overview](https://docs.nvidia.com/legate/latest/overview.html).

-## Contributing
+## Installation

-See the discussion of contributing in [CONTRIBUTING.md](CONTRIBUTING.md).
+Legate is available from [conda](https://docs.conda.io/projects/conda/en/latest/index.html)
+on the [legate channel](https://anaconda.org/legate/legate).
+See https://docs.nvidia.com/legate/latest/installation.html for details about different
+install configurations.

 ## Documentation

-A complete list of available features can is found in the [Legate Core
-documentation](https://nv-legate.github.io/legate.core).
+A complete list of available features and APIs can be found in the [Legate
+documentation](https://docs.nvidia.com/legate/latest/).
+
+## Contact

-## Next Steps
+For technical questions about Legate and Legate-based tools, please visit the
+[community discussion forum](https://github.com/nv-legate/discussion).

-We recommend starting by experimenting with at least one Legate application
-library to test out performance and see how Legate works. If you are interested
-in building your own Legate application library, we recommend that you
-investigate our [Legate Hello World application library](https://github.com/nv-legate/legate.core/tree/HEAD/examples/hello) that
-provides a small example of how to get started developing your own drop-in
-replacement library on top of Legion using the Legate Core library.
+If you have other questions, please contact us at legate(at)nvidia.com.
diff --git a/benchmarks/cpp/CMakeLists.txt b/benchmarks/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..94f6b0a195
--- /dev/null
+++ b/benchmarks/cpp/CMakeLists.txt
@@ -0,0 +1,46 @@
+#=============================================================================
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#=============================================================================
+
+list(APPEND CMAKE_MESSAGE_CONTEXT "cpp")
+
+include("${LEGATE_CMAKE_DIR}/Modules/debug_symbols.cmake")
+include("${LEGATE_CMAKE_DIR}/Modules/clang_tidy.cmake")
+
+legate_find_or_configure(PACKAGE google_benchmark)
+
+function(legate_configure_benchmark)
+  set(options)
+  set(one_value TARGET)
+  set(multi_value SOURCES)
+  cmake_parse_arguments(_LEGATE_BM "${options}" "${one_value}" "${multi_value}" ${ARGN})
+
+  add_executable(${_LEGATE_BM_TARGET} ${_LEGATE_BM_SOURCES})
+
+  set_target_properties(${_LEGATE_BM_TARGET}
+                        PROPERTIES RUNTIME_OUTPUT_DIRECTORY
+                                   "$"
+                                   BUILD_RPATH_USE_ORIGIN TRUE
+                                   INSTALL_RPATH_USE_LINK_PATH TRUE
+                                   LEGATE_INTERNAL_TARGET TRUE)
+
+  set_property(TARGET ${_LEGATE_BM_TARGET} APPEND
+               PROPERTY INSTALL_RPATH
+                        "${legate_PLATFORM_RPATH_ORIGIN}/../${CMAKE_INSTALL_LIBDIR}")
+
+  target_link_libraries(${_LEGATE_BM_TARGET} PRIVATE legate::legate benchmark::benchmark)
+
+  if(Legion_USE_CUDA)
+    target_link_libraries(${_LEGATE_BM_TARGET} PRIVATE NCCL::NCCL)
+  endif()
+
+  legate_install_debug_symbols(TARGET ${_LEGATE_BM_TARGET}
+                               INSTALL_DIR "${CMAKE_INSTALL_BINDIR}")
+
+  foreach(src IN LISTS _LEGATE_BM_SOURCES)
+    legate_add_tidy_target(SOURCE "${src}")
+  endforeach()
+endfunction()
+
+legate_configure_benchmark(TARGET inline_launch SOURCES inline_launch.cc)
diff --git a/benchmarks/cpp/inline_launch.cc b/benchmarks/cpp/inline_launch.cc
new file mode 100644
index 0000000000..1c188ba59a
--- /dev/null
+++ b/benchmarks/cpp/inline_launch.cc
@@ -0,0 +1,141 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights
+ * reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <legate.h>
+
+#include <benchmark/benchmark.h>
+#include <cstdint>
+#include <string_view>
+#include <vector>
+
+namespace {
+
+constexpr std::string_view LIBNAME = "bench";
+
+class EmptyTask : public legate::LegateTask<EmptyTask> {
+ public:
+  static inline const auto TASK_CONFIG =  // NOLINT(cert-err58-cpp)
+    legate::TaskConfig{legate::LocalTaskID{0}};
+
+  static void cpu_variant(legate::TaskContext) {}
+};
+
+// Pre-creates state.range(0) stores per direction so that repeated launches
+// can reuse them instead of creating fresh stores on every iteration.
+class TaskLaunchFixture : public benchmark::Fixture {
+ public:
+  static constexpr std::int32_t NUM_INPUTS_OUTPUTS = 10;
+
+  void SetUp(benchmark::State& state) override
+  {
+    for (auto* dest : {&saved_inputs_, &saved_outputs_}) {
+      dest->reserve(static_cast<std::size_t>(state.range(0)));
+      for (std::int64_t i = 0; i < state.range(0); ++i) {
+        dest->emplace_back(make_store_());
+      }
+    }
+  }
+
+  void TearDown(benchmark::State&) override
+  {
+    saved_inputs_.clear();
+    saved_outputs_.clear();
+  }
+
+  [[nodiscard]] legate::LogicalStore add_input_store(std::size_t i)
+  {
+    return reuse_or_make_store_(i, saved_inputs_);
+  }
+
+  [[nodiscard]] legate::LogicalStore add_output_store(std::size_t i)
+  {
+    return reuse_or_make_store_(i, saved_outputs_);
+  }
+
+ private:
+  [[nodiscard]] legate::LogicalStore reuse_or_make_store_(
+    std::size_t i, const std::vector<legate::LogicalStore>& cache)
+  {
+    if (i < cache.size()) {
+      return cache[i];
+    }
+
+    return make_store_();
+  }
+
+  [[nodiscard]] legate::LogicalStore make_store_() const
+  {
+    const auto runtime = legate::Runtime::get_runtime();
+    auto store = runtime->create_store(shape_, type_);
+
+    LEGATE_CHECK(type_.code() == legate::Type::Code::INT32);
+    runtime->issue_fill(store, legate::Scalar{std::int32_t{0}});
+    return store;
+  }
+
+  [[nodiscard]] static legate::Shape make_shape_()
+  {
+    auto extents = legate::tuple<std::uint64_t>{};
+
+    extents.reserve(LEGATE_MAX_DIM);
+    for (std::uint64_t i = 1; i <= LEGATE_MAX_DIM; ++i) {
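+      // Builds extents {1, 2, ..., LEGATE_MAX_DIM}, i.e. a small
+      // LEGATE_MAX_DIM-dimensional shape with a distinct extent per dimension.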
+      extents.append_inplace(i);
+    }
+    return legate::Shape{extents};
+  }
+
+  legate::Shape shape_{make_shape_()};
+  legate::Type type_{legate::int32()};
+  std::vector<legate::LogicalStore> saved_inputs_{};
+  std::vector<legate::LogicalStore> saved_outputs_{};
+};
+
+void benchmark_body(TaskLaunchFixture& fixt, benchmark::State& state)
+{
+  auto runtime = legate::Runtime::get_runtime();
+  auto lib = runtime->find_library(LIBNAME);
+
+  for (auto _ : state) {  // NOLINT(clang-analyzer-deadcode.DeadStores)
+    // Task construction (and store reuse) happens outside the timed region;
+    // only the submit and the blocking execution fence are measured.
+    state.PauseTiming();
+    auto task = runtime->create_task(lib, EmptyTask::TASK_CONFIG.task_id());
+    for (std::size_t i = 0; i < TaskLaunchFixture::NUM_INPUTS_OUTPUTS; ++i) {
+      task.add_input(fixt.add_input_store(i));
+    }
+    for (std::size_t i = 0; i < TaskLaunchFixture::NUM_INPUTS_OUTPUTS; ++i) {
+      task.add_output(fixt.add_output_store(i));
+    }
+    state.ResumeTiming();
+
+    runtime->submit(std::move(task));
+    runtime->issue_execution_fence(true);
+  }
+}
+
+BENCHMARK_DEFINE_F(TaskLaunchFixture, InlineTaskLaunch)(benchmark::State& state)
+{
+  benchmark_body(*this, state);
+}
+
+// NOLINTBEGIN(cert-err58-cpp)
+BENCHMARK_REGISTER_F(TaskLaunchFixture, InlineTaskLaunch)
+  ->Unit(benchmark::kMicrosecond)
+  // Determines the number of reused inputs and outputs
+  ->DenseRange(/* begin */ 0, /* end */ TaskLaunchFixture::NUM_INPUTS_OUTPUTS, /* step */ 2);
+// NOLINTEND(cert-err58-cpp)
+
+}  // namespace
+
+int main(int argc, char** argv)
+{
+  legate::start();
+  EmptyTask::register_variants(legate::Runtime::get_runtime()->find_or_create_library(LIBNAME));
+
+  ::benchmark::Initialize(&argc, argv);
+  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) {
+    return 1;
+  }
+  ::benchmark::RunSpecifiedBenchmarks();
+  ::benchmark::Shutdown();
+  return legate::finish();
+}
diff --git a/bind.sh b/bind.sh
deleted file mode 100755
index 8e00a7d5ae..0000000000
--- a/bind.sh
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/bin/bash
-
-# Copyright 2021 NVIDIA Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# - -set -euo pipefail - -help() { - cat 1>&2 <&2 - help - ;; - esac -done - -case "$launcher" in - mpirun) - local_rank="${OMPI_COMM_WORLD_LOCAL_RANK:-unknown}" - global_rank="${OMPI_COMM_WORLD_RANK:-unknown}" - ;; - jsrun ) - local_rank="${OMPI_COMM_WORLD_LOCAL_RANK:-unknown}" - global_rank="${OMPI_COMM_WORLD_RANK:-unknown}" - ;; - srun ) - local_rank="${SLURM_LOCALID:-unknown}" - global_rank="${SLURM_PROCID:-unknown}" - ;; - auto ) - local_rank="${OMPI_COMM_WORLD_LOCAL_RANK:-${MV2_COMM_WORLD_LOCAL_RANK:-${SLURM_LOCALID:-unknown}}}" - global_rank="${OMPI_COMM_WORLD_RANK:-${PMI_RANK:-${MV2_COMM_WORLD_RANK:-${SLURM_PROCID:-unknown}}}}" - ;; - local ) - local_rank="0" - global_rank="0" - ;; - *) - echo "Unexpected launcher value: $launcher" 1>&2 - help - ;; -esac - -if [[ "$local_rank" == "unknown" ]]; then - echo "Error: Could not determine node-local rank" 1>&2 - exit 1 -fi - -if [[ "$global_rank" == "unknown" ]]; then - echo "Error: Could not determine global rank" 1>&2 - exit 1 -fi - -export LEGATE_LOCAL_RANK="$local_rank" -export LEGATE_GLOBAL_RANK="$global_rank" - -if [ -n "${cpus+x}" ]; then - cpus=(${cpus//\// }) - if [[ "$local_rank" -ge "${#cpus[@]}" ]]; then - echo "Error: Incomplete CPU binding specification" 1>&2 - exit 1 - fi -fi - -if [ -n "${gpus+x}" ]; then - gpus=(${gpus//\// }) - if [[ "$local_rank" -ge "${#gpus[@]}" ]]; then - echo "Error: Incomplete GPU binding specification" 1>&2 - exit 1 - fi - export CUDA_VISIBLE_DEVICES="${gpus[$local_rank]}" -fi - -if [ -n "${mems+x}" ]; then - mems=(${mems//\// }) - if [[ "$local_rank" -ge "${#mems[@]}" ]]; then - echo "Error: Incomplete MEM binding specification" 1>&2 - exit 1 - fi -fi - -if [ -n "${nics+x}" ]; then - nics=(${nics//\// }) - if [[ "$local_rank" -ge "${#nics[@]}" ]]; then - echo "Error: Incomplete NIC binding specification" 1>&2 - exit 1 - fi - - # set all potentially relevant variables (hopefully they are ignored if we - # are not using the corresponding network) - nic="${nics[$local_rank]}" - nic_array=(${nic//,/ }) - export UCX_NET_DEVICES="${nic//,/:1,}":1 - export GASNET_NUM_QPS="${#nic_array[@]}" - export GASNET_IBV_PORTS="${nic//,/+}" - - # NCCL is supposed to detect the topology and use the right NIC automatically. - # NCCL env vars must be set the same way for all ranks on the same node, so - # the best we can do here is to constrain NCCL to the full set of NICs that - # the user specified. - # Note the added "=", to do exact instead of prefix match. 
- export NCCL_IB_HCA="=$(IFS=, ; echo "${nics[*]}")" -fi - -# numactl is only needed if cpu or memory pinning was requested -if [[ -n "${cpus+x}" || -n "${mems+x}" ]]; then - if command -v numactl &> /dev/null; then - if [[ -n "${cpus+x}" ]]; then - set -- --physcpubind "${cpus[$local_rank]}" "$@" - fi - if [[ -n "${mems+x}" ]]; then - set -- --membind "${mems[$local_rank]}" "$@" - fi - set -- numactl "$@" - else - echo "Warning: numactl is not available, cannot bind to cores or memories" 1>&2 - fi -fi - -# arguments may contain the substring %%LEGATE_GLOBAL_RANK%% which needs to be -# be replaced with the actual computed rank for downstream processes to use -updated=() -for arg in "$@"; do - updated+=("${arg/\%\%LEGATE_GLOBAL_RANK\%\%/$LEGATE_GLOBAL_RANK}") -done - -set -- "${updated[@]}" - -if [ "$debug" == "1" ]; then - echo -n "bind.sh:" - for TOK in "$@"; do printf " %q" "$TOK"; done - echo -fi - -exec "$@" diff --git a/cmake/Modules/cpm_helpers.cmake b/cmake/Modules/cpm_helpers.cmake deleted file mode 100644 index 9fc28633d8..0000000000 --- a/cmake/Modules/cpm_helpers.cmake +++ /dev/null @@ -1,57 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -function(get_cpm_git_args _out_var) - - set(oneValueArgs TAG BRANCH REPOSITORY) - cmake_parse_arguments(GIT "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - set(repo_tag "") - set(gh_tag_prefix "") - # Default to specifying `GIT_REPOSITORY` and `GIT_TAG` - set(cpm_git_args GIT_REPOSITORY ${GIT_REPOSITORY}) - - if(GIT_BRANCH) - set(gh_tag_prefix "heads") - set(repo_tag "${GIT_BRANCH}") - list(APPEND cpm_git_args GIT_TAG ${GIT_BRANCH}) - elseif(GIT_TAG) - set(gh_tag_prefix "tags") - set(repo_tag "${GIT_TAG}") - list(APPEND cpm_git_args GIT_TAG ${GIT_TAG}) - endif() - - # Remove `.git` suffix from repo URL - if(GIT_REPOSITORY MATCHES "^(.*)(\.git)$") - set(GIT_REPOSITORY "${CMAKE_MATCH_1}") - endif() - if(GIT_REPOSITORY MATCHES "github\.com") - # If retrieving from github use `.zip` URL to download faster - set(cpm_git_args URL "${GIT_REPOSITORY}/archive/refs/${gh_tag_prefix}/${repo_tag}.zip") - elseif(GIT_REPOSITORY MATCHES "gitlab\.com") - # GitLab archive URIs replace slashes with dashes - string(REPLACE "/" "-" archive_tag "${repo_tag}") - string(LENGTH "${GIT_REPOSITORY}" repo_name_len) - string(FIND "${GIT_REPOSITORY}" "/" repo_name_idx REVERSE) - math(EXPR repo_name_len "${repo_name_len} - ${repo_name_idx}") - string(SUBSTRING "${GIT_REPOSITORY}" ${repo_name_idx} ${repo_name_len} repo_name) - # If retrieving from gitlab use `.zip` URL to download faster - set(cpm_git_args URL "${GIT_REPOSITORY}/-/archive/${repo_tag}/${repo_name}-${archive_tag}.zip") - endif() - - set(${_out_var} ${cpm_git_args} PARENT_SCOPE) - -endfunction() diff --git a/cmake/Modules/cuda_arch_helpers.cmake b/cmake/Modules/cuda_arch_helpers.cmake deleted file mode 100644 index 97d0ee8402..0000000000 --- a/cmake/Modules/cuda_arch_helpers.cmake +++ /dev/null @@ -1,97 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -function(set_cuda_arch_from_names) - set(cuda_archs "") - # translate legacy arch names into numbers - if(Legion_CUDA_ARCH MATCHES "fermi") - list(APPEND cuda_archs 20) - endif() - if(Legion_CUDA_ARCH MATCHES "kepler") - list(APPEND cuda_archs 30) - endif() - if(Legion_CUDA_ARCH MATCHES "k20") - list(APPEND cuda_archs 35) - endif() - if(Legion_CUDA_ARCH MATCHES "k80") - list(APPEND cuda_archs 37) - endif() - if(Legion_CUDA_ARCH MATCHES "maxwell") - list(APPEND cuda_archs 52) - endif() - if(Legion_CUDA_ARCH MATCHES "pascal") - list(APPEND cuda_archs 60) - endif() - if(Legion_CUDA_ARCH MATCHES "volta") - list(APPEND cuda_archs 70) - endif() - if(Legion_CUDA_ARCH MATCHES "turing") - list(APPEND cuda_archs 75) - endif() - if(Legion_CUDA_ARCH MATCHES "ampere") - list(APPEND cuda_archs 80) - endif() - if(Legion_CUDA_ARCH MATCHES "hopper") - list(APPEND cuda_archs 90) - endif() - - if(cuda_archs) - list(LENGTH cuda_archs num_archs) - if(num_archs GREATER 1) - # A CMake architecture list entry of "80" means to build both compute and sm. - # What we want is for the newest arch only to build that way, while the rest - # build only for sm. - list(POP_BACK cuda_archs latest_arch) - list(TRANSFORM cuda_archs APPEND "-real") - list(APPEND cuda_archs ${latest_arch}) - else() - list(TRANSFORM cuda_archs APPEND "-real") - endif() - set(Legion_CUDA_ARCH ${cuda_archs} PARENT_SCOPE) - endif() -endfunction() - -function(add_cuda_architecture_defines) - set(options ) - set(oneValueArgs DEFS) - set(multiValueArgs ARCHS) - cmake_parse_arguments(cuda "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - message(VERBOSE "legate.core: CUDA_ARCHITECTURES=${cuda_ARCHS}") - - set(_defs ${${cuda_DEFS}}) - - macro(add_def_if_arch_enabled arch def) - if("${arch}" IN_LIST cuda_ARCHS OR - ("${arch}-real" IN_LIST cuda_ARCHS) OR - ("${arch}-virtual" IN_LIST cuda_ARCHS)) - list(APPEND _defs ${def}) - endif() - endmacro() - - add_def_if_arch_enabled("20" "FERMI_ARCH") - add_def_if_arch_enabled("30" "KEPLER_ARCH") - add_def_if_arch_enabled("35" "K20_ARCH") - add_def_if_arch_enabled("37" "K80_ARCH") - add_def_if_arch_enabled("52" "MAXWELL_ARCH") - add_def_if_arch_enabled("60" "PASCAL_ARCH") - add_def_if_arch_enabled("70" "VOLTA_ARCH") - add_def_if_arch_enabled("75" "TURING_ARCH") - add_def_if_arch_enabled("80" "AMPERE_ARCH") - add_def_if_arch_enabled("90" "HOPPER_ARCH") - - set(${cuda_DEFS} ${_defs} PARENT_SCOPE) -endfunction() diff --git a/cmake/Modules/legate_core_options.cmake b/cmake/Modules/legate_core_options.cmake deleted file mode 100644 index 175b965b27..0000000000 --- a/cmake/Modules/legate_core_options.cmake +++ /dev/null @@ -1,126 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -option(BUILD_SHARED_LIBS "Build legate.core shared libraries" ON) - -function(set_or_default var_name var_env) - list(LENGTH ARGN num_extra_args) - if(num_extra_args GREATER 0) - list(GET ARGN 0 var_default) - endif() - if(DEFINED ${var_name}) - message(VERBOSE "legate.core: ${var_name}=${${var_name}}") - elseif(DEFINED ENV{${var_env}}) - set(${var_name} $ENV{${var_env}} PARENT_SCOPE) - message(VERBOSE "legate.core: ${var_name}=$ENV{${var_env}} (from envvar '${var_env}')") - elseif(DEFINED var_default) - set(${var_name} ${var_default} PARENT_SCOPE) - message(VERBOSE "legate.core: ${var_name}=${var_default} (from default value)") - else() - message(VERBOSE "legate.core: not setting ${var_name}") - endif() -endfunction() - -# Initialize these vars from the CLI, then fallback to an envvar or a default value. -set_or_default(Legion_SPY USE_SPY OFF) -set_or_default(Legion_USE_LLVM USE_LLVM OFF) -set_or_default(Legion_USE_CUDA USE_CUDA OFF) -set_or_default(Legion_USE_HDF5 USE_HDF OFF) -set_or_default(Legion_NETWORKS NETWORKS "") -set_or_default(Legion_USE_OpenMP USE_OPENMP OFF) -set_or_default(Legion_BOUNDS_CHECKS CHECK_BOUNDS OFF) - -option(Legion_SPY "Enable detailed logging for Legion Spy" OFF) -option(Legion_USE_LLVM "Use LLVM JIT operations" OFF) -option(Legion_USE_HDF5 "Enable support for HDF5" OFF) -option(Legion_USE_CUDA "Enable Legion support for the CUDA runtime" OFF) -option(Legion_NETWORKS "Networking backends to use (semicolon-separated)" "") -option(Legion_USE_OpenMP "Use OpenMP" OFF) -option(Legion_USE_Python "Use Python" OFF) -option(Legion_BOUNDS_CHECKS "Enable bounds checking in Legion accessors" OFF) - -if("${Legion_NETWORKS}" MATCHES ".*gasnet(1|ex).*") - set_or_default(GASNet_ROOT_DIR GASNET) - set_or_default(GASNet_CONDUIT CONDUIT "mpi") - - if(NOT GASNet_ROOT_DIR) - option(Legion_EMBED_GASNet "Embed a custom GASNet build into Legion" ON) - endif() -endif() - -set_or_default(Legion_MAX_DIM LEGION_MAX_DIM 4) - -# Check the max dimensions -if((Legion_MAX_DIM LESS 1) OR (Legion_MAX_DIM GREATER 9)) - message(FATAL_ERROR "The maximum number of Legate dimensions must be between 1 and 9 inclusive") -endif() - -set_or_default(Legion_MAX_FIELDS LEGION_MAX_FIELDS 256) - -# Check that max fields is between 32 and 4096 and is a power of 2 -if(NOT Legion_MAX_FIELDS MATCHES "^(32|64|128|256|512|1024|2048|4096)$") - message(FATAL_ERROR "The maximum number of Legate fields must be a power of 2 between 32 and 4096 inclusive") -endif() - -# We never want local fields -set(Legion_DEFAULT_LOCAL_FIELDS 0) - -option(legate_core_STATIC_CUDA_RUNTIME "Statically link the cuda runtime library" OFF) -option(legate_core_EXCLUDE_LEGION_FROM_ALL "Exclude Legion targets from legate.core's 'all' target" OFF) -option(legate_core_COLLECTIVE "Use of collective instances" ON) -option(legate_core_BUILD_DOCS "Build doxygen docs" OFF) - - -set_or_default(NCCL_DIR NCCL_PATH) -set_or_default(Thrust_DIR THRUST_PATH) -set_or_default(CUDA_TOOLKIT_ROOT_DIR CUDA) -set_or_default(Legion_CUDA_ARCH GPU_ARCH all-major) -set_or_default(Legion_HIJACK_CUDART USE_CUDART_HIJACK OFF) - -include(CMakeDependentOption) -cmake_dependent_option(Legion_HIJACK_CUDART - "Allow Legion to hijack and rewrite application calls into the CUDA runtime" - ON - "Legion_USE_CUDA;Legion_HIJACK_CUDART" - OFF) -# This needs to be added as an option to force values to be visible in Legion build -option(Legion_HIJACK_CUDART "Replace default CUDA runtime with the Realm 
version" OFF) - -if(Legion_HIJACK_CUDART) - message(WARNING [=[ -##################################################################### -Warning: Realm's CUDA runtime hijack is incompatible with NCCL. -Please note that your code will crash catastrophically as soon as it -calls into NCCL either directly or through some other Legate library. -##################################################################### -]=]) -endif() - -if(BUILD_SHARED_LIBS) - if(Legion_HIJACK_CUDART) - # Statically link CUDA if HIJACK_CUDART is set - set(Legion_CUDA_DYNAMIC_LOAD OFF) - set(CUDA_USE_STATIC_CUDA_RUNTIME ON) - elseif(NOT DEFINED Legion_CUDA_DYNAMIC_LOAD) - # If HIJACK_CUDART isn't set and BUILD_SHARED_LIBS is true, default Legion_CUDA_DYNAMIC_LOAD to true - set(Legion_CUDA_DYNAMIC_LOAD ON) - set(CUDA_USE_STATIC_CUDA_RUNTIME OFF) - endif() -elseif(NOT DEFINED Legion_CUDA_DYNAMIC_LOAD) - # If BUILD_SHARED_LIBS is false, default Legion_CUDA_DYNAMIC_LOAD to false also - set(Legion_CUDA_DYNAMIC_LOAD OFF) - set(CUDA_USE_STATIC_CUDA_RUNTIME ON) -endif() diff --git a/cmake/Modules/set_cpu_arch_flags.cmake b/cmake/Modules/set_cpu_arch_flags.cmake deleted file mode 100644 index ff3e35ca39..0000000000 --- a/cmake/Modules/set_cpu_arch_flags.cmake +++ /dev/null @@ -1,84 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -#------------------------------------------------------------------------------# -# Architecture -#------------------------------------------------------------------------------# -if(BUILD_MARCH AND BUILD_MCPU) - message(FATAL_ERROR "BUILD_MARCH and BUILD_MCPU are incompatible") -endif() - -function(set_cpu_arch_flags out_var) - # Try -march first. On platforms that don't support it, GCC will issue a hard - # error, so we'll know not to use it. Default is "native", but explicitly - # setting BUILD_MARCH="" disables use of the flag - if(BUILD_MARCH) - set(INTERNAL_BUILD_MARCH ${BUILD_MARCH}) - elseif(NOT DEFINED BUILD_MARCH) - set(INTERNAL_BUILD_MARCH "native") - endif() - - set(flags "") - - include(CheckCXXCompilerFlag) - if(INTERNAL_BUILD_MARCH) - check_cxx_compiler_flag("-march=${INTERNAL_BUILD_MARCH}" COMPILER_SUPPORTS_MARCH) - if(COMPILER_SUPPORTS_MARCH) - list(APPEND flags "-march=${INTERNAL_BUILD_MARCH}") - elseif(BUILD_MARCH) - message(FATAL_ERROR "The flag -march=${INTERNAL_BUILD_MARCH} is not supported by the compiler") - else() - unset(INTERNAL_BUILD_MARCH) - endif() - endif() - - # Try -mcpu. We do this second because it is deprecated on x86, but - # GCC won't issue a hard error, so we can't tell if it worked or not. 
- if (NOT INTERNAL_BUILD_MARCH AND NOT DEFINED BUILD_MARCH) - if(BUILD_MCPU) - set(INTERNAL_BUILD_MCPU ${BUILD_MCPU}) - else() - set(INTERNAL_BUILD_MCPU "native") - endif() - - check_cxx_compiler_flag("-mcpu=${INTERNAL_BUILD_MCPU}" COMPILER_SUPPORTS_MCPU) - if(COMPILER_SUPPORTS_MCPU) - list(APPEND flags "-mcpu=${INTERNAL_BUILD_MCPU}") - elseif(BUILD_MCPU) - message(FATAL_ERROR "The flag -mcpu=${INTERNAL_BUILD_MCPU} is not supported by the compiler") - else() - unset(INTERNAL_BUILD_MCPU) - endif() - endif() - - # Add flags for Power architectures - check_cxx_compiler_flag("-maltivec -Werror" COMPILER_SUPPORTS_MALTIVEC) - if(COMPILER_SUPPORTS_MALTIVEC) - list(APPEND flags "-maltivec") - endif() - check_cxx_compiler_flag("-mabi=altivec -Werror" COMPILER_SUPPORTS_MABI_ALTIVEC) - if(COMPILER_SUPPORTS_MABI_ALTIVEC) - list(APPEND flags "-mabi=altivec") - endif() - check_cxx_compiler_flag("-mvsx -Werror" COMPILER_SUPPORTS_MVSX) - if(COMPILER_SUPPORTS_MVSX) - list(APPEND flags "-mvsx") - endif() - - set(${out_var} "${flags}" PARENT_SCOPE) -endfunction() - -set_cpu_arch_flags(arch_flags) diff --git a/cmake/generate_install_info_py.cmake b/cmake/generate_install_info_py.cmake deleted file mode 100644 index 408500ac91..0000000000 --- a/cmake/generate_install_info_py.cmake +++ /dev/null @@ -1,31 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -execute_process( - COMMAND ${CMAKE_C_COMPILER} - -E -DLEGATE_USE_PYTHON_CFFI - -I "${CMAKE_CURRENT_LIST_DIR}/../src/core" - -P "${CMAKE_CURRENT_LIST_DIR}/../src/core/legate_c.h" - ECHO_ERROR_VARIABLE - OUTPUT_VARIABLE header - COMMAND_ERROR_IS_FATAL ANY -) - -set(libpath "") -configure_file( - "${CMAKE_CURRENT_LIST_DIR}/../legate/install_info.py.in" - "${CMAKE_CURRENT_LIST_DIR}/../legate/install_info.py" -@ONLY) diff --git a/cmake/legate_helper_functions.cmake b/cmake/legate_helper_functions.cmake deleted file mode 100644 index de2216c37f..0000000000 --- a/cmake/legate_helper_functions.cmake +++ /dev/null @@ -1,468 +0,0 @@ -#============================================================================= -# Copyright 2023 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -macro(legate_include_rapids) - if (NOT _LEGATE_HAS_RAPIDS) - if(NOT EXISTS ${CMAKE_BINARY_DIR}/LEGATE_RAPIDS.cmake) - file(DOWNLOAD https://raw.githubusercontent.com/rapidsai/rapids-cmake/branch-23.08/RAPIDS.cmake - ${CMAKE_BINARY_DIR}/LEGATE_RAPIDS.cmake) - endif() - include(${CMAKE_BINARY_DIR}/LEGATE_RAPIDS.cmake) - include(rapids-cmake) - include(rapids-cpm) - include(rapids-cuda) - include(rapids-export) - include(rapids-find) - set(_LEGATE_HAS_RAPIDS ON) - endif() -endmacro() - -function(legate_default_cpp_install target) - set(options) - set(one_value_args EXPORT) - set(multi_value_args) - cmake_parse_arguments( - LEGATE_OPT - "${options}" - "${one_value_args}" - "${multi_value_args}" - ${ARGN} - ) - - if (NOT LEGATE_OPT_EXPORT) - message(FATAL_ERROR "Need EXPORT name for legate_default_install") - endif() - - legate_include_rapids() - - rapids_cmake_install_lib_dir(lib_dir) - - install(TARGETS ${target} - DESTINATION ${lib_dir} - EXPORT ${LEGATE_OPT_EXPORT}) - - set(final_code_block - "set(${target}_BUILD_LIBDIR ${CMAKE_BINARY_DIR}/legate_${target})" - ) - - rapids_export( - INSTALL ${target} - EXPORT_SET ${LEGATE_OPT_EXPORT} - GLOBAL_TARGETS ${target} - NAMESPACE legate:: - LANGUAGES ${ENABLED_LANGUAGES} - ) - - # build export targets - rapids_export( - BUILD ${target} - EXPORT_SET ${LEGATE_OPT_EXPORT} - GLOBAL_TARGETS ${target} - NAMESPACE legate:: - FINAL_CODE_BLOCK final_code_block - LANGUAGES ${ENABLED_LANGUAGES} - ) -endfunction() - -function(legate_add_cffi header) - if (NOT DEFINED CMAKE_C_COMPILER) - message(FATAL_ERROR "Must enable C language to build Legate projects") - endif() - - set(options) - set(one_value_args TARGET PY_PATH) - set(multi_value_args) - cmake_parse_arguments( - LEGATE_OPT - "${options}" - "${one_value_args}" - "${multi_value_args}" - ${ARGN} - ) - - # determine full Python path - if (NOT DEFINED LEGATE_OPT_PY_PATH) - set(py_path "${CMAKE_CURRENT_SOURCE_DIR}/${LEGATE_OPT_TARGET}") - elseif(IS_ABSOLUTE LEGATE_OPT_PY_PATH) - set(py_path "${LEGATE_OPT_PY_PATH}") - else() - set(py_path "${CMAKE_CURRENT_SOURCE_DIR}/${LEGATE_OPT_PY_PATH}") - endif() - - # abbreviate for the function below - set(target ${LEGATE_OPT_TARGET}) - set(install_info_in -[=[ -from pathlib import Path - -def get_libpath(): - import os, sys, platform - join = os.path.join - exists = os.path.exists - dirname = os.path.dirname - cn_path = dirname(dirname(__file__)) - so_ext = { - "": "", - "Java": ".jar", - "Linux": ".so", - "Darwin": ".dylib", - "Windows": ".dll" - }[platform.system()] - - def find_lib(libdir): - target = f"lib@target@{so_ext}*" - search_path = Path(libdir) - matches = [m for m in search_path.rglob(target)] - if matches: - return matches[0].parent - return None - - return ( - find_lib("@libdir@") or - find_lib(join(dirname(dirname(dirname(cn_path))), "lib")) or - find_lib(join(dirname(dirname(sys.executable)), "lib")) or - "" - ) - -libpath: str = get_libpath() - -header: str = """ - @header@ - void @target@_perform_registration(); -""" -]=]) - set(install_info_py_in ${CMAKE_BINARY_DIR}/legate_${target}/install_info.py.in) - set(install_info_py ${py_path}/install_info.py) - file(WRITE ${install_info_py_in} "${install_info_in}") - - set(generate_script_content - [=[ - execute_process( - COMMAND ${CMAKE_C_COMPILER} - -E - -P @header@ - ECHO_ERROR_VARIABLE - OUTPUT_VARIABLE header - COMMAND_ERROR_IS_FATAL ANY - ) - configure_file( - @install_info_py_in@ - @install_info_py@ - @ONLY) - ]=]) - - 
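(As an aside, it may help to see what the `install_info.py.in` template above produces. The following is a hypothetical rendering of the generated module for a target named `hello`, with a made-up build directory standing in for `@libdir@` and the preprocessed header body elided; neither name appears in this diff.)

```python
# Hypothetical rendering of install_info.py for a target named "hello";
# the libdir path below stands in for whatever @libdir@ expands to.
from pathlib import Path


def get_libpath():
    import os, sys, platform
    join = os.path.join
    dirname = os.path.dirname
    cn_path = dirname(dirname(__file__))
    so_ext = {
        "": "",
        "Java": ".jar",  # platform.system() reports "Java" on Jython
        "Linux": ".so",
        "Darwin": ".dylib",
        "Windows": ".dll",
    }[platform.system()]

    def find_lib(libdir):
        # Match libhello.so, libhello.so.1, ... and return the directory.
        matches = list(Path(libdir).rglob(f"libhello{so_ext}*"))
        return str(matches[0].parent) if matches else None

    return (
        find_lib("/path/to/build/legate_hello")  # <- substituted @libdir@
        or find_lib(join(dirname(dirname(dirname(cn_path))), "lib"))
        or find_lib(join(dirname(dirname(sys.executable)), "lib"))
        or ""
    )


libpath: str = get_libpath()

# @header@ becomes the preprocessed contents of the library's C header,
# so a cffi layer can parse the declarations at import time.
header: str = """
    void hello_perform_registration();
"""
```

The three `find_lib` fallbacks appear to cover, in order, the original build tree, a relocated package layout, and the active prefix's `lib` directory.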
set(generate_script ${CMAKE_CURRENT_BINARY_DIR}/gen_install_info.cmake) - file(CONFIGURE - OUTPUT ${generate_script} - CONTENT "${generate_script_content}" - @ONLY - ) - - if (DEFINED ${target}_BUILD_LIBDIR) - # this must have been imported from an existing editable build - set(libdir ${${target}_BUILD_LIBDIR}) - else() - # libraries are built in a common spot - set(libdir ${CMAKE_BINARY_DIR}/legate_${target}) - endif() - add_custom_target("${target}_generate_install_info_py" ALL - COMMAND ${CMAKE_COMMAND} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -Dtarget=${target} - -Dlibdir=${libdir} - -P ${generate_script} - OUTPUT ${install_info_py} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMENT "Generating install_info.py" - DEPENDS ${header} - ) -endfunction() - -function(legate_default_python_install target) - set(options) - set(one_value_args EXPORT) - set(multi_value_args) - cmake_parse_arguments( - LEGATE_OPT - "${options}" - "${one_value_args}" - "${multi_value_args}" - ${ARGN} - ) - - if (NOT LEGATE_OPT_EXPORT) - message(FATAL_ERROR "Need EXPORT name for legate_default_python_install") - endif() - - if (SKBUILD) - add_library(${target}_python INTERFACE) - add_library(legate::${target}_python ALIAS ${target}_python) - target_link_libraries(${target}_python INTERFACE legate::core legate::${target}) - - install(TARGETS ${target}_python - DESTINATION ${lib_dir} - EXPORT ${LEGATE_OPT_EXPORT}) - - legate_include_rapids() - rapids_export( - INSTALL ${target}_python - EXPORT_SET ${LEGATE_OPT_EXPORT} - GLOBAL_TARGETS ${target}_python - NAMESPACE legate:: - ) - endif() -endfunction() - -function(legate_add_cpp_subdirectory dir) - set(options) - set(one_value_args EXPORT TARGET) - set(multi_value_args) - cmake_parse_arguments( - LEGATE_OPT - "${options}" - "${one_value_args}" - "${multi_value_args}" - ${ARGN} - ) - - if (NOT LEGATE_OPT_EXPORT) - message(FATAL_ERROR "Need EXPORT name for legate_default_install") - endif() - - if (NOT LEGATE_OPT_TARGET) - message(FATAL_ERROR "Need TARGET name for Legate package") - endif() - # abbreviate for the function - set(target ${LEGATE_OPT_TARGET}) - - legate_include_rapids() - - rapids_find_package(legate_core CONFIG - GLOBAL_TARGETS legate::core - BUILD_EXPORT_SET ${LEGATE_OPT_EXPORT} - INSTALL_EXPORT_SET ${LEGATE_OPT_EXPORT}) - - if (SKBUILD) - if (NOT DEFINED ${target}_ROOT) - set(${target}_ROOT ${CMAKE_SOURCE_DIR}/build) - endif() - rapids_find_package(${target} CONFIG - GLOBAL_TARGETS legate::${target} - BUILD_EXPORT_SET ${LEGATE_OPT_EXPORT} - INSTALL_EXPORT_SET ${LEGATE_OPT_EXPORT}) - if (NOT ${target}_FOUND) - add_subdirectory(${dir} ${CMAKE_BINARY_DIR}/legate_${target}) - legate_default_cpp_install(${target} EXPORT ${LEGATE_OPT_EXPORT}) - else() - # Make sure the libdir is visible to other functions - set(${target}_BUILD_LIBDIR "${${target}_BUILD_LIBDIR}" PARENT_SCOPE) - endif() - else() - add_subdirectory(${dir} ${CMAKE_BINARY_DIR}/legate_${target}) - legate_default_cpp_install(${target} EXPORT ${LEGATE_OPT_EXPORT}) - endif() - -endfunction() - -function(legate_cpp_library_template target output_sources_variable) - set(file_template -[=[ -/* Copyright 2023 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#pragma once
-
-#include "legate.h"
-
-namespace @target@ {
-
-struct Registry {
-  static legate::TaskRegistrar& get_registrar();
-};
-
-template <typename T, int ID>
-struct Task : public legate::LegateTask<T> {
-  using Registrar = Registry;
-  static constexpr int TASK_ID = ID;
-};
-
-}
-]=])
-  string(CONFIGURE "${file_template}" file_content @ONLY)
-  file(WRITE ${CMAKE_CURRENT_SOURCE_DIR}/legate_library.h "${file_content}")
-
-  set(file_template
-[=[
-/* Copyright 2023 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "legate_library.h"
-
-namespace @target@ {
-
-static const char* const library_name = "@target@";
-
-Legion::Logger log_@target@(library_name);
-
-/*static*/ legate::TaskRegistrar& Registry::get_registrar()
-{
-  static legate::TaskRegistrar registrar;
-  return registrar;
-}
-
-void registration_callback()
-{
-  auto context = legate::Runtime::get_runtime()->create_library(library_name);
-
-  Registry::get_registrar().register_all_tasks(context);
-}
-
-}  // namespace @target@
-
-extern "C" {
-
-void @target@_perform_registration(void)
-{
-  // Tell the runtime about our registration callback so we hook it
-  // in before the runtime starts and make it global so that we know
-  // that this callback is invoked everywhere across all nodes
-  legate::Core::perform_registration<@target@::registration_callback>();
-}
-
-}
-]=])
-  string(CONFIGURE "${file_template}" file_content @ONLY)
-  file(WRITE ${CMAKE_CURRENT_SOURCE_DIR}/legate_library.cc "${file_content}")
-
-  set(${output_sources_variable}
-    legate_library.h
-    legate_library.cc
-    PARENT_SCOPE
-  )
-endfunction()
-
-function(legate_python_library_template py_path)
-set(options)
-set(one_value_args TARGET PY_IMPORT_PATH)
-set(multi_value_args)
-cmake_parse_arguments(
-  LEGATE_OPT
-  "${options}"
-  "${one_value_args}"
-  "${multi_value_args}"
-  ${ARGN}
-)
-
-if (DEFINED LEGATE_OPT_TARGET)
-  set(target "${LEGATE_OPT_TARGET}")
-else()
-  string(REPLACE "/" "_" target "${py_path}")
-endif()
-
-if (DEFINED LEGATE_OPT_PY_IMPORT_PATH)
-  set(py_import_path "${LEGATE_OPT_PY_IMPORT_PATH}")
-else()
-  string(REPLACE "/" "." py_import_path "${py_path}")
-endif()
-
-set(fn_library "${CMAKE_CURRENT_SOURCE_DIR}/${py_path}/library.py")
-
-set(file_template
-[=[
-# Copyright 2023 NVIDIA Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from legate.core import ( - Library, - get_legate_runtime, -) -import os -from typing import Any - -class UserLibrary(Library): - def __init__(self, name: str) -> None: - self.name = name - self.shared_object: Any = None - - @property - def cffi(self) -> Any: - return self.shared_object - - def get_name(self) -> str: - return self.name - - def get_shared_library(self) -> str: - from @py_import_path@.install_info import libpath - return os.path.join(libpath, f"lib@target@{self.get_library_extension()}") - - def get_c_header(self) -> str: - from @py_import_path@.install_info import header - - return header - - def get_registration_callback(self) -> str: - return "@target@_perform_registration" - - def initialize(self, shared_object: Any) -> None: - self.shared_object = shared_object - - def destroy(self) -> None: - pass - -user_lib = UserLibrary("@target@") -user_context = get_legate_runtime().register_library(user_lib) -]=]) - string(CONFIGURE "${file_template}" file_content @ONLY) - file(WRITE "${fn_library}" "${file_content}") -endfunction() diff --git a/cmake/thirdparty/get_legion.cmake b/cmake/thirdparty/get_legion.cmake deleted file mode 100644 index 1b6c136a54..0000000000 --- a/cmake/thirdparty/get_legion.cmake +++ /dev/null @@ -1,180 +0,0 @@ -#============================================================================= -# Copyright 2022-2023 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -include_guard(GLOBAL) - -function(find_or_configure_legion) - set(oneValueArgs VERSION REPOSITORY BRANCH EXCLUDE_FROM_ALL) - cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - include("${rapids-cmake-dir}/export/detail/parse_version.cmake") - rapids_export_parse_version(${PKG_VERSION} Legion PKG_VERSION) - - string(REGEX REPLACE "^0([0-9]+)?$" "\\1" Legion_major_version "${Legion_major_version}") - string(REGEX REPLACE "^0([0-9]+)?$" "\\1" Legion_minor_version "${Legion_minor_version}") - string(REGEX REPLACE "^0([0-9]+)?$" "\\1" Legion_patch_version "${Legion_patch_version}") - - include("${rapids-cmake-dir}/cpm/detail/package_details.cmake") - rapids_cpm_package_details(Legion version git_repo git_branch shallow exclude_from_all) - - set(version "${Legion_major_version}.${Legion_minor_version}.${Legion_patch_version}") - set(exclude_from_all ${PKG_EXCLUDE_FROM_ALL}) - if(PKG_BRANCH) - set(git_branch "${PKG_BRANCH}") - endif() - if(PKG_REPOSITORY) - set(git_repo "${PKG_REPOSITORY}") - endif() - - set(FIND_PKG_ARGS - GLOBAL_TARGETS Legion::Realm - Legion::Regent - Legion::Legion - Legion::RealmRuntime - Legion::LegionRuntime - BUILD_EXPORT_SET legate-core-exports - INSTALL_EXPORT_SET legate-core-exports) - - if((NOT CPM_Legion_SOURCE) AND (NOT CPM_DOWNLOAD_Legion)) - # First try to find Legion via find_package() - # so the `Legion_USE_*` variables are visible - # Use QUIET find by default. - set(_find_mode QUIET) - # If Legion_DIR/Legion_ROOT are defined as something other than empty or NOTFOUND - # use a REQUIRED find so that the build does not silently download Legion. - if(Legion_DIR OR Legion_ROOT) - set(_find_mode REQUIRED) - endif() - rapids_find_package(Legion ${version} EXACT CONFIG ${_find_mode} ${FIND_PKG_ARGS}) - endif() - - if(Legion_FOUND) - message(STATUS "CPM: using local package Legion@${version}") - else() - - include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/cpm_helpers.cmake) - get_cpm_git_args(legion_cpm_git_args REPOSITORY ${git_repo} BRANCH ${git_branch}) - if(NOT DEFINED Legion_PYTHON_EXTRA_INSTALL_ARGS) - set(Legion_PYTHON_EXTRA_INSTALL_ARGS "--root / --prefix \"\${CMAKE_INSTALL_PREFIX}\"") - endif() - - # Support comma and semicolon delimited lists - string(REPLACE "," " " Legion_PYTHON_EXTRA_INSTALL_ARGS "${Legion_PYTHON_EXTRA_INSTALL_ARGS}") - string(REPLACE ";" " " Legion_PYTHON_EXTRA_INSTALL_ARGS "${Legion_PYTHON_EXTRA_INSTALL_ARGS}") - - set(_legion_cuda_options "") - - # Set CMAKE_CXX_STANDARD and CMAKE_CUDA_STANDARD for Legion builds. Legion's FindCUDA.cmake - # use causes CUDA object compilation to fail if `-std=` flag is present in `CXXFLAGS` but - # missing in `CUDA_NVCC_FLAGS`. - set(_cxx_std "${CMAKE_CXX_STANDARD}") - if(NOT _cxx_std) - set(_cxx_std 17) - endif() - - if(Legion_USE_CUDA) - set(_cuda_std "${CMAKE_CUDA_STANDARD}") - if(NOT _cuda_std) - set(_cuda_std ${_cxx_std}) - endif() - - list(APPEND _legion_cuda_options "CMAKE_CUDA_STANDARD ${_cuda_std}") - - if(legate_core_STATIC_CUDA_RUNTIME) - list(APPEND _legion_cuda_options "CMAKE_CUDA_RUNTIME_LIBRARY STATIC") - else() - list(APPEND _legion_cuda_options "CMAKE_CUDA_RUNTIME_LIBRARY SHARED") - endif() - endif() - - # Because legion sets these as cache variables, we need to force set this as a cache variable here - # to ensure that Legion doesn't override this in the CMakeCache.txt and create an unexpected state. 
- # This only applies to set() but does not apply to option() variables. - # See discussion of FetchContent subtleties: - # Only use these FORCE calls if using a Legion subbuild. - # https://discourse.cmake.org/t/fetchcontent-cache-variables/1538/8 - set(Legion_MAX_DIM ${Legion_MAX_DIM} CACHE STRING "The max number of dimensions for Legion" FORCE) - set(Legion_MAX_FIELDS ${Legion_MAX_FIELDS} CACHE STRING "The max number of fields for Legion" FORCE) - set(Legion_DEFAULT_LOCAL_FIELDS ${Legion_DEFAULT_LOCAL_FIELDS} CACHE STRING "Number of local fields for Legion" FORCE) - - message(VERBOSE "legate.core: Legion version: ${version}") - message(VERBOSE "legate.core: Legion git_repo: ${git_repo}") - message(VERBOSE "legate.core: Legion git_branch: ${git_branch}") - message(VERBOSE "legate.core: Legion exclude_from_all: ${exclude_from_all}") - - if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - set(Legion_BACKTRACE_USE_LIBDW ON) - else() - set(Legion_BACKTRACE_USE_LIBDW OFF) - endif() - - set(Legion_BUILD_RUST_PROFILER OFF CACHE BOOL "Whether to build the Legion profiler" FORCE) - - rapids_cpm_find(Legion ${version} ${FIND_PKG_ARGS} - CPM_ARGS - ${legion_cpm_git_args} - FIND_PACKAGE_ARGUMENTS EXACT - EXCLUDE_FROM_ALL ${exclude_from_all} - OPTIONS ${_legion_cuda_options} - "CMAKE_CXX_STANDARD ${_cxx_std}" - "Legion_VERSION ${version}" - "Legion_BUILD_BINDINGS ON" - "Legion_REDOP_HALF ON" - "Legion_REDOP_COMPLEX ON" - "Legion_UCX_DYNAMIC_LOAD ON" - ) - endif() - - set(Legion_USE_CUDA ${Legion_USE_CUDA} PARENT_SCOPE) - set(Legion_USE_OpenMP ${Legion_USE_OpenMP} PARENT_SCOPE) - set(Legion_USE_Python ${Legion_USE_Python} PARENT_SCOPE) - set(Legion_CUDA_ARCH ${Legion_CUDA_ARCH} PARENT_SCOPE) - set(Legion_BOUNDS_CHECKS ${Legion_BOUNDS_CHECKS} PARENT_SCOPE) - set(Legion_NETWORKS ${Legion_NETWORKS} PARENT_SCOPE) - - message(VERBOSE "Legion_USE_CUDA=${Legion_USE_CUDA}") - message(VERBOSE "Legion_USE_OpenMP=${Legion_USE_OpenMP}") - message(VERBOSE "Legion_USE_Python=${Legion_USE_Python}") - message(VERBOSE "Legion_CUDA_ARCH=${Legion_CUDA_ARCH}") - message(VERBOSE "Legion_BOUNDS_CHECKS=${Legion_BOUNDS_CHECKS}") - message(VERBOSE "Legion_NETWORKS=${Legion_NETWORKS}") - -endfunction() - -foreach(_var IN ITEMS "legate_core_LEGION_VERSION" - "legate_core_LEGION_BRANCH" - "legate_core_LEGION_REPOSITORY" - "legate_core_EXCLUDE_LEGION_FROM_ALL") - if(DEFINED ${_var}) - # Create a legate_core_LEGION_BRANCH variable in the current scope either from the existing - # current-scope variable, or the cache variable. - set(${_var} "${${_var}}") - # Remove legate_core_LEGION_BRANCH from the CMakeCache.txt. This ensures reconfiguring the same - # build dir without passing `-Dlegate_core_LEGION_BRANCH=` reverts to the value in versions.json - # instead of reusing the previous `-Dlegate_core_LEGION_BRANCH=` value. 
- unset(${_var} CACHE) - endif() -endforeach() - -if(NOT DEFINED legate_core_LEGION_VERSION) - set(legate_core_LEGION_VERSION "${legate_core_VERSION}") -endif() - -find_or_configure_legion(VERSION ${legate_core_LEGION_VERSION} - REPOSITORY ${legate_core_LEGION_REPOSITORY} - BRANCH ${legate_core_LEGION_BRANCH} - EXCLUDE_FROM_ALL ${legate_core_EXCLUDE_LEGION_FROM_ALL} -) diff --git a/cmake/thirdparty/get_nccl.cmake b/cmake/thirdparty/get_nccl.cmake deleted file mode 100644 index 1aee52b6f5..0000000000 --- a/cmake/thirdparty/get_nccl.cmake +++ /dev/null @@ -1,34 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -function(find_or_configure_nccl) - - if(TARGET NCCL::NCCL) - return() - endif() - - rapids_find_generate_module(NCCL - HEADER_NAMES nccl.h - LIBRARY_NAMES nccl - ) - - # Currently NCCL has no CMake build-system so we require - # it built and installed on the machine already - rapids_find_package(NCCL REQUIRED) - -endfunction() - -find_or_configure_nccl() diff --git a/cmake/thirdparty/get_thrust.cmake b/cmake/thirdparty/get_thrust.cmake deleted file mode 100644 index 84784a1cef..0000000000 --- a/cmake/thirdparty/get_thrust.cmake +++ /dev/null @@ -1,26 +0,0 @@ -#============================================================================= -# Copyright 2022 NVIDIA Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#============================================================================= - -# Use CPM to find or clone thrust -function(find_or_configure_thrust) - include(${rapids-cmake-dir}/cpm/thrust.cmake) - - rapids_cpm_thrust(NAMESPACE legate - BUILD_EXPORT_SET legate-core-exports - INSTALL_EXPORT_SET legate-core-exports) -endfunction() - -find_or_configure_thrust() diff --git a/cmake/versions.json b/cmake/versions.json deleted file mode 100644 index e3f9a11419..0000000000 --- a/cmake/versions.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "packages" : { - "Thrust" : { - "version" : "1.17.0.0", - "git_url" : "https://github.com/NVIDIA/thrust.git", - "git_tag" : "1.17.0" - }, - "Legion": { - "git_url" : "https://gitlab.com/StanfordLegion/legion.git", - "git_tag" : "0cbee456b9ee80e494262f663bd4838666bdd0be" - } - } -} diff --git a/conda/conda-build/build.sh b/conda/conda-build/build.sh index f02bd15716..b5b7ba211e 100644 --- a/conda/conda-build/build.sh +++ b/conda/conda-build/build.sh @@ -1,71 +1,200 @@ -#!/bin/bash +#!/usr/bin/env bash echo -e "\n\n--------------------- CONDA/CONDA-BUILD/BUILD.SH -----------------------\n" set -xeo pipefail -# Rewrite conda's -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY to -# -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH -CMAKE_ARGS="$(echo "$CMAKE_ARGS" | $SED -r "s@_INCLUDE=ONLY@_INCLUDE=BOTH@g")" - -# Add our options to conda's CMAKE_ARGS -CMAKE_ARGS+=" ---log-level=VERBOSE --DBUILD_MARCH=x86-64 --DLegion_USE_OpenMP=ON --DLegion_USE_Python=ON --DLegion_Python_Version=$($PYTHON --version 2>&1 | cut -d' ' -f2 | cut -d'.' -f3 --complement)" - -# We rely on an environment variable to determine if we need to build cpu-only bits -if [ -z "$CPU_ONLY" ]; then - CMAKE_ARGS+=" --DLegion_USE_CUDA=ON --DLegion_CUDA_ARCH:LIST=60-real;70-real;75-real;80-real;90 -" +# LICENSE, README.md, conda/, and configure are guaranteed to always be at the root +# directory. If we can't find them, then probably we are not in the root directory. +if [[ ! -f LICENSE ]] || [[ ! -f README.md ]] || [[ ! -d conda ]] || [[ ! -f configure ]]; then + echo "Must run this script from the root directory" + exit 1 fi -# We rely on an environment variable to determine if we need to make a debug build. -if [ -n "$DEBUG_BUILD" ]; then - CMAKE_ARGS+=" --DCMAKE_BUILD_TYPE=Debug -" +# If run through CI, BUILD_MARCH is set externally. If it is not set, try to set it. +ARCH=$(uname -m) +if [[ -z "${BUILD_MARCH}" ]]; then + if [[ "${ARCH}" = "aarch64" ]]; then + # Use the gcc march value used by aarch64 Ubuntu. + BUILD_MARCH=armv8-a + else + # Use uname -m otherwise + BUILD_MARCH=$(uname -m | tr '_' '-') + fi fi -# Do not compile with NDEBUG until Legion handles it without warnings -# Note: -UNDEBUG undefines any NDEBUG that may be present on the C compiler commandline. -# See: https://stackoverflow.com/questions/1978155/how-to-undefine-a-define-at-commandline-using-gcc -export CFLAGS="-UNDEBUG" -export CXXFLAGS="-UNDEBUG" -export CPPFLAGS="-UNDEBUG" -export CUDAFLAGS="-UNDEBUG" - -export CMAKE_GENERATOR=Ninja -export CUDAHOSTCXX=${CXX} -export OPENSSL_DIR="$CONDA_PREFIX" - -echo "Build starting on $(date)" - -cmake -S . -B build ${CMAKE_ARGS} -cmake --build build -j$CPU_COUNT -cmake --install build --prefix "$PREFIX" - -CMAKE_ARGS=" --DFIND_LEGATE_CORE_CPP=ON --Dlegate_core_ROOT=$PREFIX -" - -SKBUILD_BUILD_OPTIONS=-j$CPU_COUNT \ -$PYTHON -m pip install \ - --root / \ - --no-deps \ - --prefix "$PREFIX" \ - --no-build-isolation \ - --cache-dir "$PIP_CACHE_DIR" \ - --disable-pip-version-check \ - . 
-vv - -echo "Build ending on $(date)" - -# Legion leaves an egg-info file which will confuse conda trying to pick up the information -# Remove it so the legate-core is the only egg-info file added -rm -rf $SP_DIR/legion*egg-info +. continuous_integration/scripts/tools/pretty_printing.bash + +if [[ "${LEGATE_CI:-0}" == '0' ]]; then + # not running in CI, define a dummy version of this function + function run_command() + { + { set +x; } 2>/dev/null; + + shift # ignore group name argument + local command=("$@") + + "${command[@]}" + } +else + export LEGATE_CI_GROUP=0 + export PYTHONUNBUFFERED=1 +fi + +function preamble() +{ + set -xeo pipefail + # Rewrite conda's -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY to + # -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=BOTH + CMAKE_ARGS="${CMAKE_ARGS//_INCLUDE=ONLY/_INCLUDE=BOTH}" + # Conda sets these to -I/some/path/in/cuda/toolkit, but this breaks all sorts of stuff + # in CMake. Since we use CCCL as a third-party dependency, we add its headers with + # -isystem, but since conda adds additional headers with -I, those take precedence over + # -isystem, meaning they are effectively shadowed. + # + # So thanks, conda, once again, for doing something "helpful"! + unset NVCC_APPEND_FLAGS + unset NVCC_PREPEND_FLAGS + + configure_args=() + configure_args+=(--legion-max-dim=6) + if [[ "${USE_OPENMP:-OFF}" == 'OFF' ]]; then + configure_args+=(--with-openmp=0) + else + configure_args+=(--with-openmp) + fi + + if [[ "${UPLOAD_ENABLED:-0}" == '0' ]]; then + configure_args+=(--with-tests) + configure_args+=(--with-benchmarks) + fi + + # We rely on an environment variable to determine if we need to build cpu-only bits + if [[ "${CPU_ONLY:-0}" == '0' ]]; then + configure_args+=(--with-cuda) + configure_args+=(--with-cal) + else + configure_args+=(--with-cuda=0) + fi + + configure_args+=(--build-type="${LEGATE_BUILD_MODE}") + +# shellcheck disable=SC2154 +case "${LEGATE_NETWORK}" in + "ucx") + configure_args+=(--with-ucx) + ;; + "gex") + configure_args+=(--with-gasnet) + configure_args+=(--) + configure_args+=(-DLegion_USE_GASNETEX_WRAPPER=ON) + ;; + *) + echo "${LEGATE_NETWORK} is not a valid choice for the network interface" + exit 1 + ;; +esac + + # ${CXX} is set by conda compiler package. Disable shellcheck warning. + # shellcheck disable=SC2154 + export CUDAHOSTCXX="${CXX}" + # ${PREFIX} is set by conda build. Ignore shellcheck warning. + # shellcheck disable=SC2154 + export OPENSSL_DIR="${PREFIX}" + # shellcheck disable=SC2154 + LEGATE_DIR="$(${PYTHON} ./scripts/get_legate_dir.py)" + export LEGATE_DIR + export LEGATE_ARCH='arch-conda' + + # In classic conda fashion, it sets a bunch of environment variables for you but as + # usual this just ends up creating more headaches. We don't want FORTIFY_SOURCE because + # GCC and clang error with: + # + # /tmp/conda-croot/legate/_build_env/x86_64-conda-linux-gnu/sysroot/usr/include/features.h:330:4: + # error: #warning _FORTIFY_SOURCE requires compiling with optimization (-O) + # [-Werror=cpp] + # 330 | # warning _FORTIFY_SOURCE requires compiling with optimization (-O) + # | ^~~~~~~ + # + # Thanks conda, such a great help! + if [[ ${LEGATE_BUILD_MODE} == *debug* ]]; then + CPPFLAGS="${CPPFLAGS//-D_FORTIFY_SOURCE=[0-9]/}" + export CPPFLAGS + DEBUG_CPPFLAGS="${DEBUG_CPPFLAGS//-D_FORTIFY_SOURCE=[0-9]/}" + export DEBUG_CPPFLAGS + CFLAGS="${CFLAGS//-D_FORTIFY_SOURCE=[0-9]/}" + export CFLAGS + fi +} + +function configure_legate() +{ + set -xou pipefail + set +e + + # ${CC} is set by the conda compiler package. Disable shellcheck. 
+ # shellcheck disable=SC2154 + ./configure \ + --LEGATE_ARCH="${LEGATE_ARCH}" \ + --with-python \ + --with-cc="${CC}" \ + --with-cxx="${CXX}" \ + --build-march="${BUILD_MARCH}" \ + --cmake-generator="Ninja" \ + "${configure_args[@]}" + + ret=$? + set -e + if [[ "${ret}" != '0' ]]; then + cat configure.log + return "${ret}" + fi + if [[ "${LEGATE_BUILD_MODE:-}" != '' ]]; then + found="$(grep -c -e "--build-type=${LEGATE_BUILD_MODE}" configure.log || true)" + if [[ "${found}" == '0' ]]; then + echo "FAILED TO PROPERLY SET BUILD TYPE:" + echo "- expected to find --build-type=${LEGATE_BUILD_MODE} in configure.log" + return 1 + fi + fi + return 0 +} + +function pip_install_legate() +{ + set -xeo pipefail + # CPU_COUNT and PIP_CACHE_DIR are set by the build. Disable shellcheck. + # shellcheck disable=SC2154 + export CMAKE_BUILD_PARALLEL_LEVEL="${CPU_COUNT}" + # shellcheck disable=SC2154 + local cache_dir="${PIP_CACHE_DIR}" + "${PYTHON}" -m pip install \ + --root / \ + --no-deps \ + --prefix "${PREFIX}" \ + --no-build-isolation \ + --cache-dir "${cache_dir}" \ + --disable-pip-version-check \ + . \ + -vv + + # Legion leaves an egg-info file which will confuse conda trying to pick up the information + # Remove it so the legate is the only egg-info file added + # SP_DIR is set by conda build. Disable shellcheck. + # shellcheck disable=SC2154 + rm -rf "${SP_DIR}"/legion*egg-info + + # If building gex, for now remove legate MPI wrapper. This should be handled more completely + # with a configure option in the future. + if [[ ${LEGATE_NETWORK} == "gex" ]]; then + find "${PREFIX}" -name "*legate*mpi*.so*" -exec rm {} \; + fi +} + +build_start=$(date) +echo "Build starting on ${build_start}" +run_command 'Preamble' preamble +run_command 'Configure Legate' configure_legate +run_command 'pip install Legate' pip_install_legate +build_end=$(date) +echo "Build ending on ${build_end}" diff --git a/conda/conda-build/conda_build_config.yaml b/conda/conda-build/conda_build_config.yaml index 71925b6784..125158f2ba 100644 --- a/conda/conda-build/conda_build_config.yaml +++ b/conda/conda-build/conda_build_config.yaml @@ -1,22 +1,33 @@ -debug_build: - - true - - false +--- +build_mode_str: + - debug + - debug-sanitizer + - release + - release-debug -gpu_enabled: +upload_enabled: - true - false -ucx_configured: +network: + - ucx + - gex + +gpu_enabled: - true - false python: - - 3.9 - 3.10 - 3.11 numpy_version: - - ">=1.22" + # Not 2.1.0 which segfaults on asarray() sometimes, see + # https://github.com/numpy/numpy/pull/27249 + - ">=1.22,!=2.1.0" cmake_version: - - ">=3.20.1,!=3.23.0" + - ">=3.26.4" + +cuda_compiler: + - cuda-nvcc diff --git a/conda/conda-build/meta.yaml b/conda/conda-build/meta.yaml index c381527858..50c052a3dc 100644 --- a/conda/conda-build/meta.yaml +++ b/conda/conda-build/meta.yaml @@ -1,4 +1,5 @@ -{% set name = "legate-core" %} +--- +{% set name = "legate" %} {% if gpu_enabled == "true" %} {% set gpu_enabled_bool = true %} {% elif gpu_enabled == "false" %} @@ -7,21 +8,13 @@ {# We need to have a default value for the initial pass over the recipe #} {% set gpu_enabled_bool = false %} {% endif %} -{% if debug_build == "true" %} - {% set debug_build_bool = true %} -{% elif debug_build == "false" %} - {% set debug_build_bool = false %} +{% if upload_enabled == "true" %} + {% set upload_enabled_bool = true %} +{% elif upload_enabled == "false" %} + {% set upload_enabled_bool = false %} {% else %} {# We need to have a default value for the initial pass over the recipe #} - {% set debug_build_bool = 
false %} -{% endif %} -{% if ucx_configured == "true" %} - {% set ucx_configured_bool = true %} -{% elif ucx_configured == "false" %} - {% set ucx_configured_bool = false %} -{% else %} - {# We need to have a default value for the initial pass over the recipe #} - {% set ucx_configured_bool = false %} + {% set upload_enabled_bool = false %} {% endif %} {% set default_env_var = '' %} {% if build_number is defined %} @@ -50,22 +43,30 @@ {% endif %} {% if not gpu_enabled_bool %} -{% set cpu_tag='_cpu' %} +{% set cpu_gpu_tag='_cpu' %} {% else %} -{% set cpu_tag='' %} +{% set cpu_gpu_tag='_gpu' %} {% endif %} -{% if debug_build_bool %} -{% set debug_tag='_debug' %} -{% else %} +{% if build_mode_str == 'release' %} {% set debug_tag='' %} +{% else %} +{% set sanitized_build_mode_str = build_mode_str | replace('-', '_') %} +{% set debug_tag='_' + sanitized_build_mode_str %} {% endif %} -{% if ucx_configured_bool %} -{% set ucx_tag='_ucx' %} -{% else %} -{% set ucx_tag='' %} +{% set sanitizer_build_bool = false %} # Default to false +{% if 'sanitizer' in build_mode_str %} + {% set sanitizer_build_bool = true %} +{% endif %} + +{% set upload_tag='' if upload_enabled_bool else '_with_tests' %} + +{% if network is not defined %} +# network must be defined. Let's fail if it is not. +invalid_yaml: {{ network }} {% endif %} +{% set network_tag='_'+network %} package: name: {{ name|lower }} @@ -81,45 +82,69 @@ source: {% endif %} build: - skip: true # [not (linux or osx)] + skip: true # [not (linux or osx)] number: {{ build_number }} missing_dso_whitelist: - - '*libcuda.so*' - - string: "cuda{{ cuda_major }}_py{{ py_version }}{{ git_describe_hash }}_{{ PKG_BUILDNUM }}{{ ucx_tag }}{{ cpu_tag }}{{ debug_tag }}" + - '*libcuda.so*' + - '*libgtest.so*' + string: "cuda{{ cuda_major }}_py{{ py_version }}{{ network_tag }}{{ cpu_gpu_tag }}{{ debug_tag }}{{ upload_tag }}{{ git_describe_hash }}_{{ PKG_BUILDNUM }}" script_env: - SCCACHE_BUCKET - SCCACHE_REGION - SCCACHE_IDLE_TIMEOUT - SCCACHE_S3_KEY_PREFIX - - SCCACHE_S3_KEY_PREFIX + - SCCACHE_S3_USE_SSL + - SCCACHE_S3_NO_CREDENTIALS - AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN - CMAKE_C_COMPILER_LAUNCHER - - CMAKE_CXX_COMPILER_LAUNCHER - CMAKE_CUDA_COMPILER_LAUNCHER - - SED + - CMAKE_CXX_COMPILER_LAUNCHER + - USE_OPENMP + - ARTIFACTS_DIR + - LEGATE_CI + - BUILD_MARCH + - LEGATE_BUILD_MODE={{ build_mode_str }} + - LEGATE_NETWORK={{ network }} +{% if upload_enabled_bool %} + - UPLOAD_ENABLED=1 +{% endif %} {% if not gpu_enabled_bool %} - CPU_ONLY=1 + # The CPU-only packages having more track_features than the GPU builds helps + # the solver to prefer the GPU builds when both are viable candidates. 
+  # ref: https://docs.conda.io/projects/conda-build/en/latest/resources/define-metadata.html#track-features
  track_features:
    - cpu_only
{% endif %}
-{% if debug_build_bool %}
-  - DEBUG_BUILD=1
-{% endif %}
+  # These expressions force any downstream packages to
+  # 1) Lock to the exact version (but not variant) that was used to compile it
+  # 2) Force recompilation if this package is updated in order to use the new package
  run_exports:
+    strong:
{% if not gpu_enabled_bool %}
-    - {{ pin_subpackage(name, min_pin="x.x.x", max_pin="x.x.x") }} =*_cpu
+      - {{ name|lower }} ={{ version }} =*_cpu*
{% else %}
-    - {{ pin_subpackage(name, min_pin="x.x.x", max_pin="x.x.x") }}
+      - {{ name|lower }} ={{ version }} =*_gpu*
{% endif %}
-{% if gpu_enabled_bool %}
-# prevent nccl from pulling in cudatoolkit
-  ignore_run_exports:
-    - cudatoolkit
  ignore_run_exports_from:
    - cuda-nvcc
+    - rich
+{% if network == 'gex' %}
+    - openmpi
+{% endif %}
+
+# Note clang must go first! gxx is also in clangxx!!!!!
+{% if "clangxx" in compiler("cxx") %}
+  {% set compiler_version = 16 %}
+  {% set sanitizer_lib = "compiler-rt" %}
+  {% set openmp_lib = "llvm-openmp" %}
+{% elif "gxx" in compiler("cxx") %}
+  {% set compiler_version = 11.2 %}
+  {% set sanitizer_lib = "libsanitizer" %}
+  {% set openmp_lib = "_openmp_mutex" %}
{% endif %}

requirements:
@@ -129,71 +154,89 @@ requirements:
  build:
    - rust
    - ninja
    - cmake {{ cmake_version }}
-    - {{ compiler('c') }} =11.2 # [linux]
-    - {{ compiler('cxx') }} =11.2 # [linux]
+    - {{ compiler('c') }} ={{ compiler_version }} # [linux]
+    - {{ compiler('cxx') }} ={{ compiler_version }} # [linux]
    - {{ compiler('c') }} # [osx]
    - {{ compiler('cxx') }} # [osx]
+    - doxygen
+{% if gpu_enabled_bool %}
+    - cuda-version ={{ cuda_version }}
+    # these are all constrained by cuda-version
+    - {{ compiler('cuda') }}
+    # cupti is only in the build section for headers, not in the host section
+    - cuda-cupti-dev
+    # cuFile and NVML are not detected by our build system if we don't list them here
+    # because of the include directory structure.
+    - libcufile-dev
+    - cuda-nvml-dev
+{% endif %}
  # Libraries and header files (C/C++).
host: - zlib - python - - cython - - llvm-openmp - - scikit-build - - elfutils # [linux] - - libdwarf # [linux] + - cython >=3.0.1 + - scikit-build-core + - setuptools_scm + - rich + - hdf5 +{% if sanitizer_build_bool %} + - {{ sanitizer_lib }} ={{ compiler_version }} +{% endif %} {% if gpu_enabled_bool %} + - cuda-version ={{ cuda_version }} + # these are all constrained by cuda-version - nccl - - cuda-nvcc ={{ cuda_version }} - - cuda-nvtx ={{ cuda_version }} - - cuda-cccl ={{ cuda_version }} - - cuda-cudart ={{ cuda_version }} - - cuda-cudart-static ={{ cuda_version }} - - cuda-nvml-dev ={{ cuda_version }} - - cuda-driver-dev ={{ cuda_version }} - - cuda-cudart-dev ={{ cuda_version }} - - libcublas-dev - - libcufft-dev - - libcurand-dev - - libcusolver-dev -{% endif %} -{% if ucx_configured_bool %} - - ucx >=1.14 - - openmpi + - cuda-cudart-dev + - cuda-nvtx-dev + - cuda-nvml-dev + - cuda-driver-dev + - libcufile-dev + - libcal-dev +{% endif %} +{% if network == 'ucx' %} + - ucx >=1.17,<1.18 + - ucc {% endif %} + - openmpi <5 # Runtime python dependencies run: - - cffi - - llvm-openmp - numpy {{ numpy_version }} - - typing_extensions - - elfutils # [linux] - - libdwarf # [linux] -{% if gpu_enabled_bool %} - - cuda-cudart >={{ cuda_version }},<{{ cuda_major+1 }} - - cuda-version >={{ cuda_version }},<{{ cuda_major+1 }} - - nccl -{% endif %} -{% if ucx_configured_bool %} - - ucx >=1.14 - - openmpi + # FIXME(wonchanl): Kerchunk needs to be updated for Zarr v3 + - zarr <3 + - fsspec + - kerchunk + - zstd + - pynvml + - {{ openmp_lib }} +{% if sanitizer_build_bool %} + - {{ sanitizer_lib }} ={{ compiler_version }} {% endif %} +{% if gpu_enabled_bool %} + # Pin to all minor versions of CUDA newer than the one built against, within the same major version. + # cuda-version constrains the CUDA runtime version and ensures a compatible driver is available + - {{ pin_compatible('cuda-version', min_pin='x.x', max_pin='x') }} + # including __cuda here in 'run' ensures that 'conda install' always + # prefers the CPU-only packages in environments without CUDA + # + # The floor on __cuda makes this stricter than the default constraint that comes through + # depending on 'cuda-version' alone. + - __cuda >={{ cuda_version }} +{% endif %} + - rich run_constrained: - __glibc >=2.17 # [linux] - - python != 3.9.7 -{% if gpu_enabled_bool %} - - __cuda -{% endif %} + - python >= 3.10 + - dask[distributed] test: imports: - legate about: - home: https://github.com/nv-legate/legate.core + home: https://github.com/nv-legate/legate license: Apache-2.0 license_file: LICENSE summary: 'Scalable Computational Code' @@ -202,8 +245,8 @@ about: making it possible for all programmers to leverage the power of large clusters of CPUs and GPUs by running the same code that runs on a desktop or a laptop at scale. 
- doc_url: https://github.com/nv-legate/legate.core - dev_url: https://github.com/nv-legate/legate.core + doc_url: https://github.com/nv-legate/legate + dev_url: https://github.com/nv-legate/legate extra: recipe-maintainers: diff --git a/conda/gasnet_wrapper/activate.sh b/conda/gasnet_wrapper/activate.sh new file mode 100755 index 0000000000..c85f42a4c4 --- /dev/null +++ b/conda/gasnet_wrapper/activate.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/GASNET_WRAPPER/ACTIVATE.SH -----------------------\n" + +# shellcheck disable=SC2154 +wrapper_file=$(find "${CONDA_PREFIX}/gex-wrapper" \( -name "librealm_gex_wrapper.so" -o -name "librealm_gex_wrapper.dylib" \) -print -quit) +export REALM_GASNETEX_WRAPPER="${wrapper_file}" +# WAR for: +# https://gasnet-bugs.lbl.gov/bugzilla/show_bug.cgi?id=4638 +export GASNET_OFI_SPAWNER=mpi +export FI_CXI_RDZV_THRESHOLD=256 + +echo "REALM_GASNETEX_WRAPPER=${REALM_GASNETEX_WRAPPER}" +echo "GASNET_OFI_SPAWNER=${GASNET_OFI_SPAWNER}" +echo "FI_CXI_RDZV_THRESHOLD=${FI_CXI_RDZV_THRESHOLD}" diff --git a/conda/gasnet_wrapper/build-gex-wrapper.sh b/conda/gasnet_wrapper/build-gex-wrapper.sh new file mode 100755 index 0000000000..3f55a2cb1e --- /dev/null +++ b/conda/gasnet_wrapper/build-gex-wrapper.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +export SCRIPT_DIR="${CONDA_PREFIX}/gex-wrapper" + +# Initialize variables with default values +conduit="ofi" # Default conduit +system_config="slingshot11" # Default system configuration + +# Help function to display usage +gex_wrapper_help() { + echo "Usage: build-gex-wrapper [-h] [-c conduit] [-s system_config]" + echo "Build the Realm GASNet-EX wrapper in your conda environment." + echo + echo "Options:" + echo " -h Display this help and exit" + echo " -c CONDUIT Specify the GASNet conduit to use (default '${conduit}')" + echo " -s SYSTEM_CONFIG Specify the system or machine-specific configuration (default '${system_config}')" + echo +} + +# Parse command-line options +while getopts ":hc:s:" opt; do + case ${opt} in + h) + gex_wrapper_help + exit 0 + ;; + c) + conduit="${OPTARG}" + ;; + s) + system_config="${OPTARG}" + ;; + \?) + echo "Invalid option: -${OPTARG}" >&2 + gex_wrapper_help + exit 1 + ;; + :) + echo "Option -${OPTARG} requires an argument." >&2 + gex_wrapper_help + exit 1 + ;; + *) + echo "Invalid option: -${OPTARG}" >&2 + exit 1 + ;; + esac +done + +# Check if CONDA_PREFIX is set +if [[ -z "${CONDA_PREFIX}" ]]; then + echo "Please activate the environment in which to build the wrapper:" + echo "\$ conda activate " + echo "" + echo "Then re-run this script:" + echo "\$ ${SHELL} ${SCRIPT_DIR}/conda/gasnet_wrapper/build-gex-wrapper.sh" + exit 1 +fi + +echo "Building GASNet-EX wrapper:" +echo " Installation directory: ${CONDA_PREFIX}/lib" +echo " Conduit: ${conduit}" +echo " System configuration: ${system_config}" + +# Proceed with the build process +cd "${SCRIPT_DIR}" || { echo "Failed to navigate to gex-wrapper directory"; exit 1; } +mkdir -p src/build +cd src/build || { echo "Failed to navigate to build directory"; exit 1; } +cmake -DLEGION_SOURCE_DIR="${SCRIPT_DIR}" -DCMAKE_INSTALL_PREFIX="${SCRIPT_DIR}" -DGASNet_CONDUIT="${conduit}" -DGASNet_SYSTEM="${system_config}" .. +cmake --build . +cmake --install . +cd .. 
+rm -rf build + +echo +echo "Reactivate the conda environment to set the necessary environment variables:" +echo "" +echo "\$ conda deactivate" +# shellcheck disable=SC2154 +echo "\$ conda activate ${CONDA_DEFAULT_ENV}" diff --git a/conda/gasnet_wrapper/build.sh b/conda/gasnet_wrapper/build.sh new file mode 100755 index 0000000000..355aed559c --- /dev/null +++ b/conda/gasnet_wrapper/build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/GASNET_WRAPPER/BUILD.SH -----------------------\n" + +set -eo pipefail + +# shellcheck disable=SC2154 +mkdir "${PREFIX}/gex-wrapper" +# shellcheck disable=SC2154 +mkdir "${PREFIX}/gex-wrapper/src" +# shellcheck disable=SC2154 +cp -rv "${SRC_DIR}/cmake" "${PREFIX}/gex-wrapper" +# shellcheck disable=SC2154 +cp -rv "${RECIPE_DIR}/build-gex-wrapper.sh" "${PREFIX}/gex-wrapper" +cp -rv "${SRC_DIR}"/runtime/realm/gasnetex/gasnetex_wrapper/* "${PREFIX}/gex-wrapper/src" + +# Copy the [de]activate scripts to ${PREFIX}/etc/conda/[de]activate.d. +# This will allow them to be run on environment activation. +for CHANGE in "activate" "deactivate" +do + mkdir -p "${PREFIX}/etc/conda/${CHANGE}.d" + # shellcheck disable=SC2154 + cp "${RECIPE_DIR}/${CHANGE}.sh" "${PREFIX}/etc/conda/${CHANGE}.d/${PKG_NAME}_${CHANGE}.sh" +done diff --git a/conda/gasnet_wrapper/conda_build_config.yaml b/conda/gasnet_wrapper/conda_build_config.yaml new file mode 100644 index 0000000000..177ff4bbdd --- /dev/null +++ b/conda/gasnet_wrapper/conda_build_config.yaml @@ -0,0 +1,3 @@ +--- +version: + - 0.0.1 diff --git a/conda/gasnet_wrapper/deactivate.sh b/conda/gasnet_wrapper/deactivate.sh new file mode 100755 index 0000000000..0f88fe1666 --- /dev/null +++ b/conda/gasnet_wrapper/deactivate.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/GASNET_WRAPPER/DEACTIVATE.SH -----------------------\n" + +unset REALM_GASNETEX_WRAPPER +unset GASNET_OFI_SPAWNER +unset FI_CXI_RDZV_THRESHOLD diff --git a/conda/gasnet_wrapper/meta.yaml b/conda/gasnet_wrapper/meta.yaml new file mode 100644 index 0000000000..7a4fe7af87 --- /dev/null +++ b/conda/gasnet_wrapper/meta.yaml @@ -0,0 +1,25 @@ +--- +{% set name = "realm-gex-wrapper" %} +{% set major_version = (version|string).split(".")[0] %} +{% set minor_version = (version|string).split(".")[1] %} +{% set patch_version = (version|string).split(".")[2] %} + +package: + name: {{ name|lower }} + version: {{ version }} + +source: + git_url: https://gitlab.com/StanfordLegion/legion.git + +build: + include_recipe: false + number: 14 + skip: true # [not linux] + noarch: generic + script_env: + - PKG_NAME={{ name }} + +extra: + recipe-maintainers: + - m3vaz + - marcinz diff --git a/conda/gasnet_wrapper/post-link.sh b/conda/gasnet_wrapper/post-link.sh new file mode 100755 index 0000000000..002cee6a72 --- /dev/null +++ b/conda/gasnet_wrapper/post-link.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +cat << EOF >> "${PREFIX}/.messages.txt" + +To finish configuring the Realm GASNet-EX wrapper, activate your environment and run ${CONDA_PREFIX}/gex-wrapper/build-gex-wrapper.sh + +EOF diff --git a/conda/gasnet_wrapper/pre-unlink.sh b/conda/gasnet_wrapper/pre-unlink.sh new file mode 100755 index 0000000000..4394c69082 --- /dev/null +++ b/conda/gasnet_wrapper/pre-unlink.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/GASNET_WRAPPER/PREUNLINK.SH -----------------------\n" + +set -eo pipefail +echo "Remove the built artifacts" +# shellcheck disable=SC2154 +rm -rf 
"${CONDA_PREFIX}/gex-wrapper/lib*" diff --git a/conda/legate_profiler/build.sh b/conda/legate_profiler/build.sh new file mode 100755 index 0000000000..a1570179de --- /dev/null +++ b/conda/legate_profiler/build.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +echo -e "\n\n--------------------- CONDA/LEGATE-PROFILER/BUILD.SH -----------------------\n" + +set -xeo pipefail + +# Build the legate profiler +echo "Building legate-profiler..." +# shellcheck disable=SC2154 +GIT_COMMIT=$(git -C "${SRC_DIR}" rev-parse HEAD) +echo "Legion checked-out with commit: ${GIT_COMMIT}" + +# Navigate to the legate-profiler directory and build it +# shellcheck disable=SC2154 +LIBCLANG_PATH=${BUILD_PREFIX}/lib cargo install --path "${SRC_DIR}"/tools/legion_prof_rs --all-features --root "${PREFIX}" +echo "Done" diff --git a/conda/legate_profiler/conda_build_config.yaml b/conda/legate_profiler/conda_build_config.yaml new file mode 100644 index 0000000000..3453c28326 --- /dev/null +++ b/conda/legate_profiler/conda_build_config.yaml @@ -0,0 +1,7 @@ +--- +LEGION_GIT_URL: + - https://gitlab.com/StanfordLegion/legion.git +LEGION_GIT_SHALLOW: + - false +rust_min_version: + - "1.84" diff --git a/conda/legate_profiler/dummy_legate/build.sh b/conda/legate_profiler/dummy_legate/build.sh new file mode 100755 index 0000000000..5f66d21e1a --- /dev/null +++ b/conda/legate_profiler/dummy_legate/build.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +echo -e "\n\n--------------------- CONDA/LEGATE-PROFILER/DUMMY-LEGATE/BUILD.SH -----------------------\n" + +set -ex +echo "Dummy Legate build completed successfully." diff --git a/conda/legate_profiler/dummy_legate/meta.yaml b/conda/legate_profiler/dummy_legate/meta.yaml new file mode 100644 index 0000000000..eaa5e4f6c6 --- /dev/null +++ b/conda/legate_profiler/dummy_legate/meta.yaml @@ -0,0 +1,37 @@ +--- +{% set name = "legate" %} + +{% set default_env_var = '' %} +{% set legate_version = '' %} +{% if LEGATE_GIT_DESCRIBE_TAG is defined %} + {% if 'dev' in environ.get('LEGATE_GIT_DESCRIBE_TAG', default_env_var) %} + {% set legate_version = (environ.get('LEGATE_GIT_DESCRIBE_TAG') ~ environ.get('LEGATE_GIT_DESCRIBE_NUMBER', default_env_var)).lstrip('v') %} + {% else %} + {% set legate_version = environ.get('LEGATE_GIT_DESCRIBE_TAG', default_env_var).lstrip('v') %} + {% endif %} +{% else %} + {% set legate_version = '25.05.00' %} +{% endif %} + +{% set major_version = (legate_version|string).split(".")[0] %} +{% set minor_version = (legate_version|string).split(".")[1] %} +{% set patch_version = (legate_version|string).split(".")[2] %} + +{% set git_describe_hash = environ.get('GIT_DESCRIBE_HASH', '0') %} + +package: + name: {{ name|lower }} + version: {{ legate_version }} + +build: + number: 0 + skip: true # [not linux] + string: {{ git_describe_hash }} + +requirements: + build: [] + run: [] + +extra: + recipe-maintainers: + - m3vaz diff --git a/conda/legate_profiler/meta.yaml b/conda/legate_profiler/meta.yaml new file mode 100644 index 0000000000..1c113fc5fd --- /dev/null +++ b/conda/legate_profiler/meta.yaml @@ -0,0 +1,55 @@ +--- +{% set name = "legate-profiler" %} + +{% set default_env_var = '' %} +{% set legate_version = '' %} +{% if LEGATE_GIT_DESCRIBE_TAG is defined %} + {% if 'dev' in environ.get('LEGATE_GIT_DESCRIBE_TAG', default_env_var) %} + {% set legate_version = (environ.get('LEGATE_GIT_DESCRIBE_TAG') ~ environ.get('LEGATE_GIT_DESCRIBE_NUMBER', default_env_var)).lstrip('v') %} + {% else %} + {% set legate_version = environ.get('LEGATE_GIT_DESCRIBE_TAG', default_env_var).lstrip('v') 
%} + {% endif %} +{% else %} + {% set legate_version = '25.05.00' %} +{% endif %} + +{% set major_version = (legate_version|string).split(".")[0] %} +{% set minor_version = (legate_version|string).split(".")[1] %} +{% set patch_version = (legate_version|string).split(".")[2] %} + +{% set git_rev = environ.get('LEGION_GIT_REV', 'master') %} +{% set git_url = environ.get('LEGION_GIT_URL', 'https://gitlab.com/StanfordLegion/legion.git') %} +{% set git_shallow = environ.get('LEGION_GIT_SHALLOW', true) %} +{% set git_describe_hash = environ.get('LEGATE_GIT_DESCRIBE_HASH', '0') %} + +package: + name: {{ name|lower }} + version: {{ legate_version }} + +source: + git_url: {{ git_url }} + git_rev: {{ git_rev }} + git_shallow: {{ git_shallow }} + +build: + include_recipe: false + number: 0 + skip: true # [not linux] + string: {{ git_describe_hash }} + +requirements: + build: + - rust >={{ rust_min_version }} + - {{ compiler('c') }} + - libclang + - clang + + host: + - openssl + + run: + - legate ={{ legate_version }} + +extra: + recipe-maintainers: + - m3vaz diff --git a/conda/mpi_wrapper/activate.sh b/conda/mpi_wrapper/activate.sh new file mode 100755 index 0000000000..3b16d3580a --- /dev/null +++ b/conda/mpi_wrapper/activate.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/MPI_WRAPPER/ACTIVATE.SH -----------------------\n" + +# shellcheck disable=SC2154 +wrapper_file=$(find "${CONDA_PREFIX}/mpi-wrapper" -regex ".*/liblegate_mpi_wrapper\.\(so\|dylib\)" -print -quit) +export LEGATE_MPI_WRAPPER="${wrapper_file}" +echo "LEGATE_MPI_WRAPPER=${LEGATE_MPI_WRAPPER}" diff --git a/conda/mpi_wrapper/build-mpi-wrapper.sh b/conda/mpi_wrapper/build-mpi-wrapper.sh new file mode 100755 index 0000000000..e9b7df8886 --- /dev/null +++ b/conda/mpi_wrapper/build-mpi-wrapper.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +export SCRIPT_DIR="${CONDA_PREFIX}/mpi-wrapper" + +# Initialize variables with default values +compiler="CC" # Default compiler + +# Help function to display usage +mpi_wrapper_help() { + echo "Usage: build-mpi-wrapper [-h] [-c COMPILER]" + echo "Build the Legate MPI wrapper in your conda environment." + echo "" + echo "Options:" + echo " -h Display this help and exit" + echo " -c COMPILER Specify the compiler to use (default '${compiler}')" + echo "" +} + +# Parse command-line options +while getopts ":hc:s:" opt; do + case ${opt} in + h) + mpi_wrapper_help + exit 0 + ;; + c) + compiler="${OPTARG}" + ;; + \?) + echo "Invalid option: -${OPTARG}" >&2 + mpi_wrapper_help + exit 1 + ;; + :) + echo "Option -${OPTARG} requires an argument." 
>&2 + mpi_wrapper_help + exit 1 + ;; + *) + echo "Invalid option: -${OPTARG}" >&2 + exit 1 + ;; + esac +done + +# Check if CONDA_PREFIX is set +if [[ -z "${CONDA_PREFIX}" ]]; then + echo "Please activate the environment in which to build the wrapper:" + echo "\$ conda activate " + echo "" + echo "Then re-run this script:" + echo "\$ ${SHELL} ${SCRIPT_DIR}/conda/mpi-wrapper/build-mpi-wrapper.sh" + exit 1 +fi + +echo "Building Legate MPI wrapper:" +echo " Installation directory: ${CONDA_PREFIX}/lib" +echo " Compiler: ${compiler}" + +# Proceed with the build process +cd "${SCRIPT_DIR}" || { echo "Failed to navigate to mpi-wrapper directory"; exit 1; } +CXX="${compiler}" PREFIX="${SCRIPT_DIR}" ./install.bash + +echo "" +echo "Reactivate the conda environment to set the necessary environment variables:" +echo "" +echo "\$ conda deactivate" +# shellcheck disable=SC2154 +echo "\$ conda activate ${CONDA_DEFAULT_ENV}" diff --git a/conda/mpi_wrapper/build.sh b/conda/mpi_wrapper/build.sh new file mode 100755 index 0000000000..922e5967a4 --- /dev/null +++ b/conda/mpi_wrapper/build.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/MPI_WRAPPER/BUILD.SH -----------------------\n" + +set -eou pipefail + +# shellcheck disable=SC2154 +cp -rv "${SRC_DIR}/share/legate/mpi_wrapper" "${PREFIX}/mpi-wrapper" +# shellcheck disable=SC2154 +cp -rv "${RECIPE_DIR}/build-mpi-wrapper.sh" "${PREFIX}/mpi-wrapper" + +# Copy the [de]activate scripts to ${PREFIX}/etc/conda/[de]activate.d. +# This will allow them to be run on environment activation. +for CHANGE in "activate" "deactivate" +do + mkdir -p "${PREFIX}/etc/conda/${CHANGE}.d" + # shellcheck disable=SC2154 + cp "${RECIPE_DIR}/${CHANGE}.sh" "${PREFIX}/etc/conda/${CHANGE}.d/${PKG_NAME}_${CHANGE}.sh" +done diff --git a/conda/mpi_wrapper/conda_build_config.yaml b/conda/mpi_wrapper/conda_build_config.yaml new file mode 100644 index 0000000000..98f439dd1b --- /dev/null +++ b/conda/mpi_wrapper/conda_build_config.yaml @@ -0,0 +1,3 @@ +--- +version: + - 1.0 diff --git a/conda/mpi_wrapper/deactivate.sh b/conda/mpi_wrapper/deactivate.sh new file mode 100755 index 0000000000..b3ab570d45 --- /dev/null +++ b/conda/mpi_wrapper/deactivate.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +echo -e "\n\n--------------------- CONDA/MPI_WRAPPER/DEACTIVATE.SH -----------------------\n" + +unset LEGATE_MPI_WRAPPER diff --git a/conda/mpi_wrapper/meta.yaml b/conda/mpi_wrapper/meta.yaml new file mode 100644 index 0000000000..684f87c60d --- /dev/null +++ b/conda/mpi_wrapper/meta.yaml @@ -0,0 +1,25 @@ +--- +{% set name = "legate-mpi-wrapper" %} +{% set major_version = (version|string).split(".")[0] %} +{% set minor_version = (version|string).split(".")[1] %} +{% set patch_version = (version|string).split(".")[2] %} + +package: + name: {{ name|lower }} + version: {{ version }} + +source: + git_url: ../../ + +build: + include_recipe: false + number: 17 + skip: true # [not linux] + noarch: generic + script_env: + - PKG_NAME={{ name }} + +extra: + recipe-maintainers: + - m3vaz + - marcinz diff --git a/conda/mpi_wrapper/post-link.sh b/conda/mpi_wrapper/post-link.sh new file mode 100755 index 0000000000..cb1c7ac869 --- /dev/null +++ b/conda/mpi_wrapper/post-link.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +cat << EOF >> "${PREFIX}/.messages.txt" + +To finish configuring the Legate MPI wrapper, activate your environment and run ${CONDA_PREFIX}/mpi-wrapper/build-mpi-wrapper.sh + +EOF diff --git a/conda/mpi_wrapper/pre-unlink.sh 
b/conda/mpi_wrapper/pre-unlink.sh
new file mode 100755
index 0000000000..6fcae5b0f4
--- /dev/null
+++ b/conda/mpi_wrapper/pre-unlink.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+echo -e "\n\n--------------------- CONDA/MPI_WRAPPER/PREUNLINK.SH -----------------------\n"
+
+set -eo pipefail
+echo "Remove the built artifacts"
+# shellcheck disable=SC2154
+MPI_WRAPPER_DIR="${CONDA_PREFIX}/mpi-wrapper"
+rm -rf "${MPI_WRAPPER_DIR}/include" "${MPI_WRAPPER_DIR}/lib*"
diff --git a/config/__init__.py b/config/__init__.py
new file mode 100644
index 0000000000..0c66b09dad
--- /dev/null
+++ b/config/__init__.py
@@ -0,0 +1,4 @@
+# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
diff --git a/config/aedifix/__init__.py b/config/aedifix/__init__.py
new file mode 100644
index 0000000000..cf62e855fa
--- /dev/null
+++ b/config/aedifix/__init__.py
@@ -0,0 +1,40 @@
+# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from . import cmake, package, util
+from .cmake import (
+    CMAKE_VARIABLE,
+    CMakeBool,
+    CMakeExecutable,
+    CMakeInt,
+    CMakeList,
+    CMakePath,
+    CMaker,
+    CMakeString,
+)
+from .main import basic_configure
+from .manager import ConfigurationManager
+from .package import MainPackage, Package
+from .util.argument_parser import ArgSpec, ConfigArgument
+
+__all__ = (
+    "CMAKE_VARIABLE",
+    "ArgSpec",
+    "CMakeBool",
+    "CMakeExecutable",
+    "CMakeInt",
+    "CMakeList",
+    "CMakePath",
+    "CMakeString",
+    "CMaker",
+    "ConfigArgument",
+    "ConfigurationManager",
+    "MainPackage",
+    "Package",
+    "basic_configure",
+    "cmake",
+    "package",
+    "util",
+)
diff --git a/config/aedifix/base.py b/config/aedifix/base.py
new file mode 100644
index 0000000000..697d79f532
--- /dev/null
+++ b/config/aedifix/base.py
@@ -0,0 +1,160 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, ParamSpec, TypeVar
+
+from .logger import Logger
+
+if TYPE_CHECKING:
+    from argparse import Namespace
+    from collections.abc import Callable, Sequence
+    from pathlib import Path
+    from subprocess import CompletedProcess
+
+    from .logger import AlignMethod
+    from .manager import ConfigurationManager
+
+_P = ParamSpec("_P")
+_T = TypeVar("_T")
+
+
+class Configurable:
+    __slots__ = ("_manager",)
+
+    def __init__(self, manager: ConfigurationManager) -> None:
+        r"""Construct a Configurable.
+
+        Parameters
+        ----------
+        manager : ConfigurationManager
+            The global configuration manager which manages this configurable.
+        """
+        self._manager = manager
+
+    @property
+    def manager(self) -> ConfigurationManager:
+        r"""Get the configuration manager.
+
+        Returns
+        -------
+        manager : ConfigurationManager
+            The configuration manager that manages this configurable.
+ """ + return self._manager + + @property + def cl_args(self) -> Namespace: + r"""See `ConfigurationManager.cl_args`.""" + return self.manager.cl_args + + @property + def project_name(self) -> str: + r"""See `ConfigurationManager.project_name`.""" + return self.manager.project_name + + @property + def project_name_upper(self) -> str: + r"""See `ConfigurationManager.project_name`.""" + return self.manager.project_name_upper + + @property + def project_arch(self) -> str: + r"""See `ConfigurationManager.project_arch`.""" + return self.manager.project_arch + + @property + def project_arch_name(self) -> str: + r"""See `ConfigurationManager.project_arch_name`.""" + return self.manager.project_arch_name + + @property + def project_dir(self) -> Path: + r"""See `ConfigurationManager.project_dir`.""" + return self.manager.project_dir + + @property + def project_src_dir(self) -> Path: + r"""See `ConfigurationManager.project_src_dir`.""" + return self.manager.project_src_dir + + @property + def project_dir_name(self) -> str: + r"""See `ConfigurationManager.project_dir_name`.""" + return self.manager.project_dir_name + + @property + def project_arch_dir(self) -> Path: + r"""See `ConfigurationManager.project_arch_dir`.""" + return self.manager.project_arch_dir + + @property + def project_cmake_dir(self) -> Path: + r"""See `ConfigurationManager.project_cmake_dir`.""" + return self.manager.project_cmake_dir + + @Logger.log_passthrough + def log( + self, + msg: str | list[str] | tuple[str, ...], + *, + tee: bool = False, + caller_context: bool = True, + keep: bool = False, + ) -> None: + r"""See `ConfigurationManager.log`.""" + return self.manager.log( + msg, tee=tee, caller_context=caller_context, keep=keep + ) + + @Logger.log_passthrough + def log_divider(self, *, tee: bool = False, keep: bool = False) -> None: + r"""See `ConfigurationManager.log_divider`.""" + return self.manager.log_divider(tee=tee, keep=keep) + + @Logger.log_passthrough + def log_boxed( + self, + message: str, + *, + title: str = "", + title_style: str = "", + align: AlignMethod = "center", + ) -> None: + r"""See `ConfigurationManager.log_boxed`.""" + return self.manager.log_boxed( + message, title=title, title_style=title_style, align=align + ) + + @Logger.log_passthrough + def log_warning(self, message: str, *, title: str = "WARNING") -> None: + r"""See `ConfigurationManager.log_warning`.""" + return self.manager.log_warning(message, title=title) + + @Logger.log_passthrough + def log_execute_func( + self, fn: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs + ) -> _T: + r"""See `ConfigurationManager.log_execute_func`.""" + return self.manager.log_execute_func(fn, *args, **kwargs) + + @Logger.log_passthrough + def log_execute_command( + self, command: Sequence[_T], *, live: bool = False + ) -> CompletedProcess[str]: + r"""See `ConfigurationManager.log_execute_command`.""" + return self.manager.log_execute_command(command, live=live) + + def setup(self) -> None: + r"""Setup a `Configurable` for later configuration. By default, + does nothing. + """ + + def configure(self) -> None: + r"""Configure a `Configurable`, setting any options. By default, does + nothing. + """ + + def finalize(self) -> None: + r"""Finalize a `Configurable`. 
By default, does nothing.""" diff --git a/config/aedifix/cmake/__init__.py b/config/aedifix/cmake/__init__.py new file mode 100644 index 0000000000..5704267355 --- /dev/null +++ b/config/aedifix/cmake/__init__.py @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from .cmake_flags import ( + CMAKE_VARIABLE, + CMakeBool, + CMakeExecutable, + CMakeInt, + CMakeList, + CMakePath, + CMakeString, +) +from .cmaker import CMaker + +__all__ = ( + "CMAKE_VARIABLE", + "CMakeBool", + "CMakeExecutable", + "CMakeInt", + "CMakeList", + "CMakePath", + "CMakeString", + "CMaker", +) diff --git a/config/aedifix/cmake/cmake_flags.py b/config/aedifix/cmake/cmake_flags.py new file mode 100644 index 0000000000..181cece112 --- /dev/null +++ b/config/aedifix/cmake/cmake_flags.py @@ -0,0 +1,409 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import shutil +from abc import ABC, abstractmethod +from pathlib import Path +from shlex import quote as shlex_quote +from types import GeneratorType +from typing import TYPE_CHECKING, Any, Final, TypeVar + +if TYPE_CHECKING: + from collections.abc import Sequence + + +_T = TypeVar("_T") + + +class CMakeFlagBase(ABC): + __slots__ = "_name", "_prefix", "_type", "_value" + + _name: Final[str] + _prefix: Final[str] + _type: Final[str] + + def __init__( + self, + name: str, + value: _T | None = None, + prefix: str = "-D", + type_str: str = "STRING", + ) -> None: + r"""Construct a CMakeFlagBase. + + Parameters + ---------- + name : str + The name of the CMake variable. + value : Any, optional + The initial value for the variable. + prefix : str, '-D' + The command line prefix for the variable. + type_str : str, optional + The type string of the cmake variable + """ + super().__init__() + # Init these first in case derived classes want to inspect them in + # _sanitize_value() + self._name = name + self._prefix = prefix + self._type = type_str + + if value is not None: + value = self._sanitize_value(value) + + self._value = value + + @property + def name(self) -> str: + r"""Get the name of the CMake variable. + + Returns + ------- + name : str + The name of the variable, e.g. 'CMAKE_C_FLAGS'. + """ + return self._name + + @property + def prefix(self) -> str: + r"""Get the prefix of the CMake variable. + + Returns + ------- + prefix : str + The prefix of the variable, e.g. '-D'. + """ + return self._prefix + + @property + def type(self) -> str: + r"""Get the CMake type string of the variable. + + Returns + ------- + type : str + The CMake type, e.g. 'BOOL'. + """ + return self._type + + @property + def value(self) -> Any: + r"""Get the value of the CMake variable. + + Returns + ------- + value : Any + The value of the variable, e.g. 'ON'. + """ + return self._value + + @value.setter + def value(self, val: _T) -> None: + self._value = self._sanitize_value(val) + + @abstractmethod + def _sanitize_value(self, val: _T) -> Any: + r"""The callback hook for value setter, which must be overridden by + derived classes. + + Parameters + ---------- + val : Any + The value to assign to `self._value`. + + Returns + ------- + val : SomeType + The sanitized value. + + Raises + ------ + TypeError + If the input type could not be properly sanitized. + ValueError + If the input value could not be properly sanitized. 
+ + Notes + ----- + Derived classes must return a concrete value from this function. Any + unhandled types *must* raise a TypeError, and any failure to sanitize + handled types *must* raise a ValueError. + """ + raise NotImplementedError + + def canonicalize(self) -> CMakeFlagBase | None: + r"""Canonicalize the CMake variable. + + Returns + ------- + canonical : CMakeFlagBase | None + The canonical form of the CMake variable, or None if the variable + is not canonicalizable. + """ + valid, val = self._canonicalize_cb() + if valid: + # type(self) is critical, we want to construct the most derived + # type here. + return type(self)(self.name, val, self.prefix) + return None + + def _canonicalize_cb(self) -> tuple[bool, Any | None]: + r"""Callback to construct the canonical value. May be overridden by + derived classes. + + Returns + ------- + valid : bool + True if `val` is a valid, canonical value for this CMake variable, + False otherwise. + val : Any + The canonical value of the variable, e.g. 'ON' or ['-O2', '-g3']. + """ + val = self.value + return (val is not None), val + + def to_command_line(self, *, quote: bool = False) -> str: + r"""Create a command line friendly representation of the CMake + variable. + + Parameters + ---------- + quote : bool, False + True if the value should be quoted (if necessary), False otherwise. + + Returns + ------- + value : str + The command line form of the variable and its values. + + Raises + ------ + ValueError + If the variable is empty. + + Notes + ----- + If the variable is to be passed to `subprocess`, then `quote` should + almost certainly be false, since commands taken in list form are + automatically treated as quoted already. + """ + val = self.value + if val is None: + msg = ( + f'Cannot convert "{self.name}" to command-line, ' + "have empty value" + ) + raise ValueError(msg) + if quote: + val = shlex_quote(str(val)) + return f"{self.prefix}{self.name}:{self.type}={val}" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, type(self)): + return NotImplemented + return ( + (self.name == other.name) + and (self.prefix == other.prefix) + and (self.type == other.type) + and (self.value == other.value) + ) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(" + f"name={self.name}, " + f"prefix={self.prefix}, " + f"type={self.type}, " + f"value={self.value}" + ")" + ) + + +class CMakeList(CMakeFlagBase): + def __init__( + self, name: str, value: Sequence[Any] | None = None, prefix: str = "-D" + ) -> None: + super().__init__(name=name, value=value, prefix=prefix) + + @staticmethod + def _sanitize_value(value: Any) -> list[str]: + if isinstance(value, (list, tuple, GeneratorType)): + return list(value) + if isinstance(value, str): + return value.split(";") + raise TypeError(type(value)) + + def _canonicalize_cb(self) -> tuple[bool, list[str] | None]: + if (val := self.value) is not None: + val = [v for v in (str(x).strip() for x in val) if v] + return bool(val), val + + def to_command_line(self, *, quote: bool = False) -> str: + if (val := self.value) is None: + val = [] + val = " ".join(map(str, val)) + if quote: + val = shlex_quote(val) + return f"{self.prefix}{self.name}:{self.type}={val}" + + +class CMakeBool(CMakeFlagBase): + def __init__( + self, + name: str, + value: bool | int | str | None = None, + prefix: str = "-D", + ) -> None: + super().__init__( + name=name, value=value, prefix=prefix, type_str="BOOL" + ) + + @staticmethod + def _sanitize_value(value: Any) -> bool: + if isinstance(value,
bool): + return value + + if isinstance(value, int): + if value not in {0, 1}: + msg = f"value: {value} not in [0, 1]" + raise ValueError(msg) + return bool(value) + + if isinstance(value, str): + match value.strip().casefold(): + case "off" | "false" | "no" | "f" | "0" | "": + return False + case "on" | "true" | "yes" | "t" | "1": + return True + case _: + m = f"Invalid boolean value {value}" + raise ValueError(m) + + raise TypeError(type(value)) + + def to_command_line(self, *, quote: bool = False) -> str: + val = self.value + if val is None: + msg = ( + f'Cannot convert "{self.name}" to command-line, ' + "have empty value" + ) + raise ValueError(msg) + cmake_val = "ON" if val else "OFF" + if quote: + cmake_val = shlex_quote(cmake_val) + return f"{self.prefix}{self.name}:{self.type}={cmake_val}" + + +class CMakeInt(CMakeFlagBase): + def __init__( + self, + name: str, + value: int | bool | str | float | None = None, # noqa: PYI041 + prefix: str = "-D", + ) -> None: + super().__init__(name=name, value=value, prefix=prefix) + + @staticmethod + def _sanitize_value(value: Any) -> int: + if isinstance(value, (bool, str, float, int)): + return int(value) + raise TypeError(type(value)) + + +class CMakeString(CMakeFlagBase): + def __init__( + self, name: str, value: str | None = None, prefix: str = "-D" + ) -> None: + super().__init__(name=name, value=value, prefix=prefix) + + @staticmethod + def _sanitize_value(value: Any) -> str: + if isinstance(value, str): + return value + raise TypeError(type(value)) + + +class CMakePath(CMakeFlagBase): + def __init__( + self, name: str, value: str | Path | None = None, prefix: str = "-D" + ) -> None: + super().__init__(name=name, value=value, prefix=prefix) + if self.value is not None: + self.__update_type(self.value) + + def __update_type(self, value: Path) -> None: + if value.exists(): + # We are re-assigning a Final value here, but this class cannot + # work without doing so. 
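+ # As an illustration (paths hypothetical): a value of /usr/include + # would yield type PATH, while /usr/include/stdio.h would yield + # FILEPATH.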
+ self._type = ( # type: ignore[misc] + "PATH" if value.is_dir() else "FILEPATH" + ) + + def _sanitize_value(self, value: Any) -> Path | None: + if not isinstance(value, (str, Path)): + raise TypeError(type(value)) + + if isinstance(value, str) and "notfound" in value.casefold(): + return None + + value = Path(value).resolve() + self.__update_type(value) + return value + + +class CMakeExecutable(CMakeFlagBase): + def __init__( + self, name: str, value: str | Path | None = None, prefix: str = "-D" + ) -> None: + super().__init__( + name=name, value=value, prefix=prefix, type_str="FILEPATH" + ) + + @staticmethod + def _sanitize_value(value: Any) -> Path | None: + if not isinstance(value, (str, Path)): + raise TypeError(type(value)) + + if isinstance(value, str) and "notfound" in value.casefold(): + return None + + if not isinstance(value, Path): + value = Path(value) + if value.exists(): + if value.is_dir(): + msg = f"Got a directory as an executable: {value}" + raise ValueError(msg) + elif valtmp := shutil.which(value): + value = Path(valtmp) + return value + + +class _CMakeVar(str): + __slots__ = ("__cmake_type", "__cmake_type_args", "__cmake_type_kwargs") + + def _set_cmake_type( + self, + ty: type[CMakeFlagBase], + args: tuple[Any, ...], + kwargs: dict[str, Any], + ) -> None: + self.__cmake_type = ty + self.__cmake_type_args = args + self.__cmake_type_kwargs = kwargs + + def __config_cmake_type__(self) -> CMakeFlagBase: + return self.__cmake_type( + self, *self.__cmake_type_args, **self.__cmake_type_kwargs + ) + + +def CMAKE_VARIABLE( + name: str, ty: type[CMakeFlagBase], *args: Any, **kwargs: Any +) -> _CMakeVar: + ret = _CMakeVar(name) + ret._set_cmake_type(ty, args, kwargs) # noqa: SLF001 + return ret diff --git a/config/aedifix/cmake/cmaker.py b/config/aedifix/cmake/cmaker.py new file mode 100644 index 0000000000..1069d80cc4 --- /dev/null +++ b/config/aedifix/cmake/cmaker.py @@ -0,0 +1,341 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import json +import shlex +from typing import TYPE_CHECKING, Any, TypedDict, TypeVar + +from ..util.exception import CMakeConfigureError, WrongOrderError +from .cmake_flags import CMakeList + +if TYPE_CHECKING: + from collections.abc import Sequence + from pathlib import Path + + from ..manager import ConfigurationManager + from ..package.main_package import DebugConfigureValue + from .cmake_flags import CMakeFlagBase + + _T = TypeVar("_T") + _CMakeFlagT = TypeVar("_CMakeFlagT", bound=CMakeFlagBase) + + +class CMakeCommandSpec(TypedDict): + CMAKE_EXECUTABLE: str + CMAKE_GENERATOR: str + SOURCE_DIR: str + BUILD_DIR: str + CMAKE_COMMANDS: list[str] + + +class CMaker: + __slots__ = ("_args",) + + def __init__(self) -> None: + r"""Construct a CMaker.""" + self._args: dict[str, CMakeFlagBase] = {} + + def register_variable( + self, manager: ConfigurationManager, var: _CMakeFlagT + ) -> None: + r"""Register a CMake variable. + + Parameters + ---------- + manager : ConfigurationManager + The manager responsible for the current configuration. + var : CMakeFlagT + The variable to register. + + Raises + ------ + ValueError + If `var` was previously registered with a different type. 
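+ + Examples + -------- + A minimal sketch (the variable name is hypothetical): + + >>> cmaker = CMaker() + >>> cmaker.register_variable( + ... manager, CMakeBool("EXAMPLE_ENABLE_FOO", value=True) + ... )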
+ """ + kind = type(var) + name = var.name + manager.log(f"Trying to register {name} as kind {kind}") + if name not in self._args: + self._args[name] = var + manager.log(f"Successfully registered {name} as kind {kind}") + return + + prev_reg = self._args[name] + if not isinstance(prev_reg, kind): + msg = ( + f"Variable {name} already registered as kind " + f"{type(prev_reg)}, cannot overwrite it!" + ) + raise ValueError(msg) # noqa: TRY004 + manager.log(f"{name} already registered as kind {kind}") + + def _ensure_registered(self, name: str) -> None: + if name not in self._args: + msg = f"No variable with name {name!r} has been registered" + raise WrongOrderError(msg) + + def set_value( + self, manager: ConfigurationManager, name: str, value: _T + ) -> None: + r"""Set a CMake variable's value. + + Parameters + ---------- + manager : ConfigurationManager + The manager responsible for the current configuration. + name : str + The name of the CMake variable. + value : T + The value to set the variable to. + + Raises + ------ + WrongOrderError + If no variable with name `name` has been registered. + """ + self._ensure_registered(name) + manager.log( + f"Setting value {name} to {value} (current: {self._args[name]})" + ) + self._args[name].value = value + + def get_value(self, manager: ConfigurationManager, name: str) -> Any: + r"""Get a CMake variable's value. + + Parameters + ---------- + manager : ConfigurationManager + The manager responsible for the current configuration. + name : str + The name of the CMake variable. + + Returns + ------- + value : Any + The value of the CMake variable. + + Raises + ------ + WrongOrderError + If no variable with name `name` has been registered. + """ + self._ensure_registered(name) + value = self._args[name].value + manager.log(f"Value for {name}: {value}") + return value + + def append_value( + self, manager: ConfigurationManager, name: str, values: Sequence[_T] + ) -> None: + r"""Append a value to a CMake list. + + Parameters + ---------- + manager : ConfigurationManager + The manager responsible for the current configuration. + name : str + The name of the CMake variable. + values : Sequence[str] + The values to append to the list. + + Raises + ------ + WrongOrderError + If no variable with name `name` has been registered. + TypeError + If the CMake variable is not a list-type. + """ + self._ensure_registered(name) + manager.log(f"Appending values {values} to {name}") + if not values: + manager.log("No values to append, bailing") + return + + cmake_var = self._args[name] + if not isinstance(cmake_var, CMakeList): + msg = f"Cannot append to {type(cmake_var)}" + raise TypeError(msg) + cur_values = cmake_var.value + # Need "was_none" since the getter/setter for cmake_var may perform a + # copy on assignment, so cmake_var.value = cur_values = [] wouldn't + # work, since cmake_var.value would not contain the same list object as + # cur_values. 
+ if was_none := (cur_values is None): + cur_values = [] + else: + assert isinstance(cur_values, list) + + manager.log(f"Current values for {name}: {cur_values}") + cur_values.extend(values) + manager.log(f"New values for {name}: {cur_values}") + if was_none: + cmake_var.value = cur_values + + def _canonical_args(self) -> dict[str, CMakeFlagBase]: + ret = {} + for key, value in self._args.items(): + canonical = value.canonicalize() + if canonical is None: + continue + ret[key] = canonical + return ret + + @staticmethod + def _dump_cmake_command_spec( + manager: ConfigurationManager, + cmd_spec: CMakeCommandSpec, + cmd_file: Path, + ) -> None: + if cmd_file.exists(): + manager.log(f"Command file {cmd_file} already exists, loading it") + with cmd_file.open() as fd: + old_cmds = json.load(fd)["CMAKE_COMMANDS"] + + new_cmds = cmd_spec["CMAKE_COMMANDS"] + # De-duplicate keys. Note this won't catch everything, + # i.e. --foo=1 --foo=2 won't get deduplicated, but at least the + # ordering is kept. + new_cmds[:] = list(dict.fromkeys(old_cmds + new_cmds)) + manager.log(f"Merged and de-duplicated cmake commands: {new_cmds}") + + manager.log(f"Saving configure command to {cmd_file}") + with cmd_file.open("w") as fd: + json.dump(cmd_spec, fd, sort_keys=True, indent=4) + + def _load_cmake_export_conf(self, manager: ConfigurationManager) -> None: + conf_path = manager.project_export_config_path + if not conf_path.is_file(): + m = f"CMake project failed to emit {conf_path}" + raise CMakeConfigureError(m) + + config = json.loads(manager.project_export_config_path.read_text()) + for key, value in config.items(): + if value: + self.set_value(manager, key, value) + else: + manager.log( + f"Ignoring cmake value {key} (falsey value: {value})" + ) + + def finalize( + self, + manager: ConfigurationManager, + source_dir: Path, + build_dir: Path, + extra_argv: list[str] | None = None, + ) -> None: + r"""Execute the CMake configuration. + + Parameters + ---------- + manager : ConfigurationManager + The manager responsible for the current configuration. + source_dir : Path + The full path to the source directory of the project. Usually + synonymous with the project directory. + build_dir : Path + The full path to the build directory in which to invoke cmake. + extra_argv : list[str], optional + Additional verbatim commands to pass to CMake. + + Raises + ------ + CMakeConfigureError + If the CMake configuration fails. + """ + if extra_argv is None: + extra_argv = [] + + assert source_dir.exists(), "Source directory doesn't exist" + manager.log(f"Using source dir: {source_dir}") + manager.log(f"Using build dir: {build_dir}") + manager.log(f"Using extra commands: {extra_argv}") + if not build_dir.exists(): + build_dir.mkdir(parents=True) + args = self._canonical_args() + cmake_exe = args.pop("CMAKE_COMMAND").value + generator = args.pop("CMAKE_GENERATOR") + # These commands should not go in the cmake_command.txt since they + # pertain only to this precise invocation.
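+ # With purely illustrative values, the resulting base command looks + # something like: + #   cmake -S /path/to/project -B /path/to/arch/cmake_build -G Ninja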
+ cmake_base_command = [ + cmake_exe, + "-S", + source_dir, + "-B", + build_dir, + generator.prefix, + generator.value, + ] + + def create_cmake_commands(*, quote: bool) -> list[str]: + # These are the commands that should go in the cmake_command.txt + # since they are general for any invocation + ret = ["--log-context", "--log-level=DEBUG"] + debug_value: DebugConfigureValue = ( + manager.cl_args.debug_configure.value + ) + ret.extend(debug_value.to_flags()) + ret.extend( + ( + "-DAEDIFIX:BOOL=ON", + f"-D{manager.project_arch_name}:STRING" + f"='{manager.project_arch}'", + f"-D{manager.project_dir_name}:PATH=" + f"'{manager.project_dir}'", + f"-D{manager.project_name_upper}_CONFIGURE_OPTIONS:STRING=" + f"{shlex.join(manager._orig_argv)}", # noqa: SLF001 + ) + ) + export_vars = ";".join(arg.name for arg in self._args.values()) + + ret.extend( + ( + f"-DAEDIFIX_EXPORT_VARIABLES:STRING='{export_vars}'", + "-DAEDIFIX_EXPORT_CONFIG_PATH:FILEPATH=" + f"'{manager.project_export_config_path}'", + ) + ) + + ret.extend( + value.to_command_line(quote=quote) for value in args.values() + ) + + # mypy is confused? We massage extra_argv into a list above + assert isinstance(extra_argv, list) + ret.extend(extra_argv) + + return ret + + cmake_extra_command = create_cmake_commands(quote=False) + cmake_command = list( + map(str, cmake_base_command + cmake_extra_command) + ) + manager.log("Built CMake arguments:") + manager.log("- " + "\n- ".join(cmake_command)) + + cmd_spec: CMakeCommandSpec = { + "CMAKE_EXECUTABLE": str(cmake_exe), + "CMAKE_GENERATOR": generator.value, + "SOURCE_DIR": str(source_dir), + "BUILD_DIR": str(build_dir), + "CMAKE_COMMANDS": create_cmake_commands(quote=True), + } + + self._dump_cmake_command_spec( + manager, cmd_spec, build_dir / "aedifix_cmake_command_spec.json" + ) + + manager.log_boxed( + "This may take a few minutes", + title=f"Configuring {manager.project_name}", + ) + try: + manager.log_execute_command(cmake_command, live=True) + except Exception as e: + msg = f"CMake failed to configure {manager.project_name}" + raise CMakeConfigureError(msg) from e + + manager.log_divider(tee=True) + self._load_cmake_export_conf(manager) diff --git a/config/aedifix/config.py b/config/aedifix/config.py new file mode 100644 index 0000000000..2833fba0d1 --- /dev/null +++ b/config/aedifix/config.py @@ -0,0 +1,180 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING, Final + +from .base import Configurable +from .util.exception import UnsatisfiableConfigurationError +from .util.utility import cmake_configure_file + +if TYPE_CHECKING: + from pathlib import Path + + from .manager import ConfigurationManager + + +class ConfigFile(Configurable): + r"""A helper class to manage a set of post-configuration config variables. + These are written to disk after the configuration is complete so that other + downstream tools may inspect or use them. + + Similar to a CMakeCache.txt. + """ + + __slots__ = ( + "_cmake_configure_file", + "_config_file_template", + "_default_subst", + ) + + def __init__( + self, manager: ConfigurationManager, config_file_template: Path + ) -> None: + r"""Construct a Config. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this Config.
+ config_file_template : Path + The template file to read. + """ + super().__init__(manager=manager) + self._config_file_template = config_file_template.resolve() + self._default_subst = {"PYTHON_EXECUTABLE": sys.executable} + + @property + def template_file(self) -> Path: + r"""Return the path to the template file. + + Returns + ------- + template_file : Path + The path to the template file, e.g. /path/to/gmakevariables.in + """ + return self._config_file_template + + @property + def project_variables_file(self) -> Path: + r"""Return the project variables file. + + Returns + ------- + variables_file : Path + The full path to the project variables file. + + Notes + ----- + The file is not guaranteed to exist, or be up to date. Usually it is + created/refreshed during finalization of this object. + """ + return self.project_arch_dir / "gmakevariables" + + def _read_entire_cmake_cache(self, cmake_cache: Path) -> dict[str, str]: + r"""Read a CMakeCache.txt and collect all of the cached variables + and their values. + + Parameters + ---------- + cmake_cache : Path + The path to the CMakeCache.txt. + + Returns + ------- + dict[str, str] + A mapping from cache variable names to their values. + """ + + def keep_line(line: str) -> bool: + line = line.strip() + if not line: + return False + return not line.startswith(("//", "#")) + + cmake_variable_re: Final = re.compile( + r"(?P<name>[A-Za-z_0-9\-]+):(?P<type>[A-Z]+)\s*=\s*(?P<value>.*)" + ) + with cmake_cache.open() as fd: + line_gen = ( + cmake_variable_re.match(line.lstrip()) + for line in filter(keep_line, fd) + ) + return {m.group("name"): m.group("value") for m in line_gen if m} + + def _make_aedifix_substitutions(self, text: str) -> dict[str, str]: + r"""Read the template file and find any aedifix-specific variable + substitutions. Return a mapping from each requested substitution to + its resolved value. + + Parameters + ---------- + text : str + The text of the config file to parse. + + Returns + ------- + dict[str, str] + The mapping of AEDIFIX variable names to their substituted values. + + Raises + ------ + UnsatisfiableConfigurationError + If the substitution could not be made. + """ + + def make_subst(var: str) -> str | Path: + try: + return getattr(self.manager, var.casefold()) + except AttributeError: + pass + + try: + return self._default_subst[var] + except KeyError: + pass + + msg = f"Unknown project variable: {var!r}" + raise UnsatisfiableConfigurationError(msg) + + ret = {} + aedifix_vars = set(re.findall(r"@AEDIFIX_([^\s]+?)@", text)) + for var in aedifix_vars: + value = str(make_subst(var)) + match value.casefold(): + case "on" | "yes" | "true": + value = "1" + case "off" | "no" | "false": + value = "0" + case _: + pass + ret[f"AEDIFIX_{var}"] = value + return ret + + def finalize(self) -> None: + r"""Generate and dump project variables into the project variables + file. + + Raises + ------ + UnsatisfiableConfigurationError + If the user config file contains an unknown AEDIFIX substitution.
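+ + Notes + ----- + As a sketch of the substitution flow (names hypothetical): a template + line such as ``CC = @AEDIFIX_CC@`` is resolved by looking up + ``manager.cc`` (falling back to defaults such as PYTHON_EXECUTABLE), + boolean-looking values are normalized to "1"/"0", and the result is + merged with the CMakeCache.txt values before being written to the + gmakevariables file.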
+ """ + project_file = self.project_variables_file + template_file = self._config_file_template + self.log(f"Using project file: {project_file}") + self.log(f"Using template file: {template_file}") + + cache_vars = self.log_execute_func( + self._read_entire_cmake_cache, + self.project_cmake_dir / "CMakeCache.txt", + ) + aedifix_vars = self.log_execute_func( + self._make_aedifix_substitutions, template_file.read_text() + ) + defs = cache_vars | aedifix_vars + cmake_configure_file(self, template_file, project_file, defs) + self.log(f"Wrote to project file: {project_file}") diff --git a/config/aedifix/logger.py b/config/aedifix/logger.py new file mode 100644 index 0000000000..a6f565c1ad --- /dev/null +++ b/config/aedifix/logger.py @@ -0,0 +1,512 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.B +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import sys +import shutil +import logging +import textwrap +from collections import deque +from pathlib import Path +from typing import TYPE_CHECKING, Any, ClassVar, TypeVar + +# This must be the ONLY place that rich is imported to ensure that this error +# message is seen when running configure on a system where it is not yet +# installed. +try: + import rich # noqa: F401 +except ModuleNotFoundError as mnfe: + msg = "Please run 'python3 -m pip install rich' to continue" + raise RuntimeError(msg) from mnfe + +import contextlib + +from rich.align import Align, AlignMethod +from rich.console import Console, RenderableType +from rich.live import Live +from rich.panel import Panel +from rich.rule import Rule +from rich.table import Table +from typing_extensions import Self + +if TYPE_CHECKING: + from collections.abc import Sequence + +_T = TypeVar("_T") + + +class Logger: + __slots__ = ( + "_console", + "_file_logger", + "_live", + "_live_raii", + "_row_data", + "_table", + ) + __unique_id: ClassVar = 0 + + def __init__(self, path: Path, max_live_lines: int = 40) -> None: + r"""Construct a Logger. + + Parameters + ---------- + path : Path + The path at which to create the on-disk log. + max_live_lines : 40 + The maximum number of live output lines to keep. + """ + + def make_name(base_name: str) -> str: + env_var = "__AEDIFIX_TESTING_DO_NOT_USE_OR_YOU_WILL_BE_FIRED__" + if os.environ.get(env_var, "") == "1": + from random import random + + assert "pytest" in sys.modules, ( + "Attempting to randomize the logger names outside of " + f"testing! The variable is called '{env_var}' for a " + "reason!" 
+ ) + base_name += f"_{int(random() * 100_000)}_{Logger.__unique_id}" + Logger.__unique_id += 1 + return base_name + + self._file_logger = self._create_logger( + make_name("file_configure"), + logging.FileHandler, + path.resolve(), + mode="w", + delay=True, + ) + + self._row_data: deque[tuple[RenderableType, bool]] = deque( + maxlen=max_live_lines + ) + self._console = Console() + self._table = self._make_table(self._row_data) + self._live = Live( + self._table, console=self.console, auto_refresh=False + ) + self._live_raii: Live | None = None + + orig_hook = sys.breakpointhook + + def bphook(*args: Any, **kwargs: Any) -> Any: + self._live.stop() + return orig_hook(*args, **kwargs) + + sys.breakpointhook = bphook + + @staticmethod + def _make_table(row_data: deque[tuple[RenderableType, bool]]) -> Table: + table = Table.grid(expand=True) + table.highlight = True + for data, _ in row_data: + table.add_row(data) + return table + + def __enter__(self) -> Self: + self._live_raii = self._live.__enter__() + return self + + def __exit__(self, *args: object) -> None: + self.flush() + self._live.__exit__(*args) # type: ignore[arg-type] + self._live_raii = None + + @property + def console(self) -> Console: + r"""Get the current active Console. + + Returns + ------- + Console + The current active console. + """ + return self._console + + @property + def file_path(self) -> Path: + r"""Retrieve the path to the file handler log file. + + Returns + ------- + file_path : Path + The path to the file handler log file, e.g. + '/path/to/configure.log'. + """ + handlers = self._file_logger.handlers + assert len(handlers) == 1, f"Multiple file handlers: {handlers}" + assert isinstance(handlers[0], logging.FileHandler) + return Path(handlers[0].baseFilename) + + @staticmethod + def log_passthrough(func: _T) -> _T: + r"""A decorator to signify that `func` should never appear in the log + context. + + Parameters + ---------- + func : T + The function. + + Returns + ------- + func : T + `func` unchanged. + + Notes + ----- + The logger usually prints the name of the calling function as the + prefix of the logged message. It does this by walking up the call-stack + until it finds an appropriate name to print. This decorator marks the + decorated functions as ignored in this stack walk, i.e. the logger + skips that function and keeps walking up the stack. + + This decorator is useful for defining "pass-through" functions (hence + the name), whose only job is to accept some arguments and forward them + on to the next one. Such functions are effectively syntactic sugar, and + hence should not count as the origin of the logged message. Consider + for example: + + def foo(): + manager.log("hello") + + @Logger.log_passthrough + def bar(): + manager.log("there") + + def baz(): + bar() + + >>> foo() + .foo: 'hello' + >>> baz() + .baz: 'there' + + Note how bar() (the true originator of the logging call) is ignored, + and baz is printed instead.
+ """ + func.__config_log_ignore___ = True # type: ignore[attr-defined] + return func + + @staticmethod + def _create_logger( + name: str, HandlerType: type, *args: Any, **kwargs: Any + ) -> logging.Logger: + logger = logging.getLogger(name) + logger.setLevel(logging.INFO) + handler = HandlerType(*args, **kwargs) + handler.setLevel(logger.level) + formatter = logging.Formatter("%(message)s") + handler.setFormatter(formatter) + logger.addHandler(handler) + return logger + + def build_multiline_message( + self, + sup_title: str, + text: str, + divider_char: str = "-", + length: int | None = None, + prefix: str = " " * 2, + **kwargs: Any, + ) -> str: + r"""Construct a properly formatted multiline message. + + Parameters + ---------- + sup_tile : str + The super title for the message, e.g. 'WARNING'. + text : str + The body of the message. + divider_char : str, "-" + The char to form the divider between `sup_title` and `text`, if + any. Must be a single character. + length : int, optional + The maximum width of the message. + prefix : str, ' ' + The prefix to add to the beginning of each wrapped line of the + resultant message. + **kwargs : Any + Additional keyword arguments to `textwrap.wrap()`. + + Returns + ------- + message : str + The formatted multiline message. + """ + if length is None: + length = self.console.width - 1 + + kwargs.setdefault("break_on_hyphens", False) + kwargs.setdefault("break_long_words", False) + kwargs.setdefault("width", length - 2) + kwargs.setdefault("initial_indent", prefix) + kwargs.setdefault("subsequent_indent", prefix) + kwargs.setdefault("drop_whitespace", False) + + def center_line(line: str) -> str: + return line.center(length).rstrip() + + def add_dummy_space(line: str) -> str: + # The extra " " returned is so that newlines ("\n\n") in text are + # respected. splitlines() returns an empty string ("") for them: + # + # >>> x = "foo\n\nbar".splitlines() + # ["foo", "", "bar"] + # + # (which is what we want), but textwrap.wrap() will simply discard + # these empty strings: + # + # >>> textwrap.wrap(x) + # [] + # + # when instead we want it to return [""]. If we return " " (and set + # drop_whitespace=False), then textwrap.wrap() returns [" "] as + # expected. + # + # The extra whitespace is then taken care of in line.rstrip() later + # in the list comprehension (which leaves the prefix intact). 
+ return line if line else " " + + wrapped = [ + line.rstrip() + for para in text.splitlines() + for line in textwrap.wrap( + add_dummy_space(textwrap.dedent(para)), **kwargs + ) + ] + if len(wrapped) == 1: + # center-justify single lines, and remove the bogus prefix + wrapped[0] = center_line(wrapped[0].lstrip()) + if divider_char and sup_title: + # add the divider if we are making a message like + # + # ===================== + # BIG SCARY TITLE + # --------------------- <- divider_char is '-' + # foo bar + assert len(divider_char) == 1 + wrapped.insert(0, divider_char * length) + if sup_title: + # add the super title if we are making a message like + # + # ===================== + # BIG SCARY TITLE <- sup_title is 'BIG SCARY TITLE' + # --------------------- + # foo bar + # add the banner + wrapped.insert(0, center_line(str(sup_title))) + return "\n".join(wrapped) + + def flush(self) -> None: + r"""Flush any pending log writes to disk or screen.""" + for handler in self._file_logger.handlers: + with contextlib.suppress(AttributeError): + handler.flush() + self._live.refresh() + + def _append_live_message( + self, mess: RenderableType, *, keep: bool + ) -> None: + r"""Append a new message to the live data stream. + + Parameters + ---------- + mess : RenderableType + The message to append. + keep : bool + True if the message should persist, False otherwise. If the + message persists, then appending new data (which might otherwise + bump the message off the queue) will instead bump the next + available message off. + + """ + row_data = self._row_data + assert row_data.maxlen is not None # mypy + if len(row_data) >= (row_data.maxlen - 1): + for idx, (_, data_keep) in enumerate(row_data): + if not data_keep: + del row_data[idx] + break + else: + msg = ( + "Could not prune row data, every entry was marked as " + "persistent" + ) + raise ValueError(msg) + row_data.append((mess, keep)) + + def log_screen( + self, + mess: ( + RenderableType | list[RenderableType] | tuple[RenderableType, ...] + ), + *, + keep: bool = False, + ) -> None: + r"""Log a message to the screen. + + Parameters + ---------- + mess : RenderableType | + list[RenderableType] | + tuple[RenderableType, ...] + The message(s) to print to screen. + keep : bool, False + Whether to keep the message permanently in live output. + """ + if not self._live.is_started: + with self: + self.log_screen(mess, keep=keep) + return + + def do_log(message: RenderableType, *, keep: bool) -> None: + self._append_live_message(message, keep=keep) + self._table = self._make_table(self._row_data) + self._live.update(self._table, refresh=True) + + match mess: + case list() | tuple(): + for m in mess: + do_log(m, keep=keep) + case _: + do_log(mess, keep=keep) + + def log_file(self, message: str | Sequence[str]) -> None: + r"""Log a message to the log file. + + Parameters + ---------- + message : str | Sequence[str] + The message, or sequence of lines to log to file. 
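+ + Notes + ----- + A sequence of lines is joined with newlines before being written, so + passing ["a", "b"] logs the same text as "a\nb".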
+ """ + if not isinstance(message, str): + message = "\n".join(message) + self._file_logger.log(self._file_logger.level, message) + + def _log_boxed_file(self, message: str, title: str) -> None: + file_msg = self.build_multiline_message(title, message) + self.log_divider(tee=False) + self.log_file(file_msg) + self.log_divider(tee=False) + + def _log_boxed_screen( + self, message: str, title: str, style: str, align: AlignMethod + ) -> None: + def fixup_title(title: str, style: str) -> str: + if not title: + return title + + if style: + if not style.startswith("["): + style = "[" + style + if not style.endswith("]"): + style += "]" + return f"[bold]{style}{title}[/]" + + title = fixup_title(title, style) + rich_txt = self.console.render_str(message) + screen_message = Panel( + Align(rich_txt, align=align), style=style, title=title + ) + self.log_screen(screen_message, keep=True) + + def log_boxed( + self, + message: str, + *, + title: str = "", + title_style: str = "", + align: AlignMethod = "center", + ) -> None: + r"""Log a message surrounded by a box. + + Parameters + ---------- + message : str + The message to log. + title : str, '' + An optional title for the box. + title_style : str, '' + Optional additional styling for the title. + align : AlignMethod, 'center' + How to align the text. + """ + self._log_boxed_file(message, title) + self._log_boxed_screen(message, title, title_style, align) + + def log_warning(self, message: str, *, title: str = "WARNING") -> None: + r"""Log a warning to the log. + + Parameters + ---------- + message : str + The message to print. + title : str, 'WARNING' + The title to use for the box. + """ + self.log_boxed( + message, + title=f"***** {title.strip()} *****", + title_style="bold yellow", + ) + + def log_error(self, message: str, *, title: str = "WARNING") -> None: + r"""Log a warning to the log. + + Parameters + ---------- + message : str + The message to print. + title : str, 'WARNING' + The title to use for the box. + """ + self.log_boxed( + message, + title=f"***** {title.strip()} *****", + title_style="bold red", + ) + + def log_divider(self, *, tee: bool = False, keep: bool = True) -> None: + r"""Append a dividing line to the logs. + + Parameters + ---------- + tee : bool, False + If True, output is printed to screen in addition to being appended + to the on-disk log file. If False, output is only written to disk. + keep : bool, True + If ``tee`` is True, whether to persist the message in terminal + output. + """ + self.log_file("=" * (self.console.width - 1)) + if tee: + self.log_screen(Rule(), keep=keep) + + def copy_log(self, dest: Path) -> Path: + r"""Copy the file log to another location. + + Parameters + ---------- + dest : Path + The destination to copy the log to. + + Returns + ------- + dest : Path + The destination path. + """ + dest = dest.resolve() + src = self.file_path + if src == dest: + self.log_file( + f"Destination log path ({dest}) same as source, not copying!" + ) + return dest + self.log_file(f"Copying file log from {src} to {dest}") + self.flush() + return Path(shutil.copy2(src, dest)) diff --git a/config/aedifix/main.py b/config/aedifix/main.py new file mode 100644 index 0000000000..eca792beab --- /dev/null +++ b/config/aedifix/main.py @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +import traceback +from argparse import ArgumentError +from contextlib import suppress +from typing import TYPE_CHECKING, Final + +from .manager import ConfigurationManager +from .package.main_package import ON_ERROR_DEBUGGER_FLAG +from .util.exception import ( + CMakeConfigureError, + UnsatisfiableConfigurationError, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + from pathlib import Path + + from .package.main_package import MainPackage + + +SUCCESS: Final = 0 +FAILURE: Final = 1 + + +def _handle_generic_error( + config: ConfigurationManager, message: str, title: str +) -> None: + try: + config.log_divider() + config.log_error(message, title=title) + config.log_divider() + except Exception as e: + print( # noqa: T201 + "Error printing error message from exception or " + "printing the traceback:", + str(e), + flush=True, + ) + print(title, flush=True) # noqa: T201 + print(message, flush=True) # noqa: T201 + + +def _handle_exception( + config: ConfigurationManager, title: str, excn_obj: Exception +) -> None: + trace = "".join(traceback.format_exception(excn_obj, chain=True)) + excn_str = str(excn_obj) + if not excn_str: + excn_str = "[No Error Message Provided]" + + log_path: str | Path + try: + log_path = config._logger.file_path # noqa: SLF001 + except Exception: + log_path = "configure.log" + + excn_str += f", please see {log_path} for additional details." + try: + config.log(trace) + _handle_generic_error(config, excn_str, title) + except Exception as e: + print( # noqa: T201 + "Error printing error message from exception or " + "printing the traceback:", + str(e), + flush=True, + ) + print(trace, flush=True) # noqa: T201 + + +def _basic_configure_impl( + argv: Sequence[str], MainPackageType: type[MainPackage] +) -> int: + try: + import ipdb as py_db # type: ignore[import, unused-ignore] # noqa: T100 + except ModuleNotFoundError: + import pdb as py_db # noqa: T100 + + post_mortem = any(ON_ERROR_DEBUGGER_FLAG in arg for arg in argv) + excn: Exception | None = None + # If the following throws, then something is seriously beansed. Better to + # eschew pretty-printing and just allow the entire exception to be printed. + config = ConfigurationManager(argv, MainPackageType) + try: + try: + config.main() + except: + if post_mortem: + py_db.post_mortem() + raise + except UnsatisfiableConfigurationError as e: + title = "Configuration is not satisfiable" + excn = e + except CMakeConfigureError as e: + title = "CMake configuration failed" + excn = e + except KeyboardInterrupt: + _handle_generic_error( + config, + message="Configuration was aborted by the user (received SIGINT)", + title="Configuration Aborted", + ) + return FAILURE + except ArgumentError as e: + title = "Invalid Option" + excn = e + except Exception as e: + title = "CONFIGURATION CRASH" + excn = e + + if excn is not None: + _handle_exception(config, title, excn) + return FAILURE + return SUCCESS + + +def basic_configure( + argv: Sequence[str], MainPackageType: type[MainPackage] +) -> int: + r"""Run a basic configuration. + + Parameters + ---------- + argv : Sequence[str] + The command line arguments to configure with. + MainPackageType : type[MainPackage] + The type of the main package for which to configure. + + Returns + ------- + ret : int + The return code to return to the calling shell. On success, returns + `SUCCESS`, on failure, returns `FAILURE`. 
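+ + Examples + -------- + A typical configure entry point might look like this, where + ``MyMainPackage`` is a hypothetical `MainPackage` subclass: + + >>> import sys + >>> from aedifix import basic_configure + >>> sys.exit(basic_configure(sys.argv[1:], MyMainPackage))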
+ """ + try: + return _basic_configure_impl(argv, MainPackageType) + finally: + # Flush both streams on end. This is needed because if there is an + # error in CI, the internal buffering won't properly flush the error + # message and we get garbled output. + with suppress(Exception): + sys.stdout.flush() + with suppress(Exception): + sys.stderr.flush() diff --git a/config/aedifix/manager.py b/config/aedifix/manager.py new file mode 100644 index 0000000000..1950aa9692 --- /dev/null +++ b/config/aedifix/manager.py @@ -0,0 +1,1017 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import sys +import time +import shutil +import inspect +import platform +import textwrap +from argparse import ( + SUPPRESS as ARGPARSE_SUPPRESS, + ArgumentDefaultsHelpFormatter, + ArgumentParser, + Namespace, + RawDescriptionHelpFormatter, +) +from collections import defaultdict +from graphlib import TopologicalSorter +from pathlib import Path +from typing import TYPE_CHECKING, Any, Final, ParamSpec, TypeVar + +from .cmake.cmaker import CMaker +from .config import ConfigFile +from .logger import Logger +from .package import packages +from .reconfigure import Reconfigure +from .util.argument_parser import ConfigArgument +from .util.callables import classify_callable, get_calling_function +from .util.cl_arg import CLArg +from .util.exception import ( + CommandError, + UnsatisfiableConfigurationError, + WrongOrderError, +) +from .util.utility import ( + ValueProvenance, + dest_to_flag, + partition_argv, + subprocess_capture_output_live, +) + +if TYPE_CHECKING: + from collections.abc import Callable, Iterator, Sequence + from subprocess import CompletedProcess + + from .cmake.cmake_flags import CMakeFlagBase + from .logger import AlignMethod + from .package.main_package import MainPackage + from .package.package import Package + +_P = ParamSpec("_P") +_T = TypeVar("_T") + + +class ConfigurationManager: + r"""The god-object for a particular configuration. Holds and manages all + related objects for a run. + """ + + __slots__ = ( + "_aedifix_root_dir", + "_argv", + "_cl_args", + "_cmaker", + "_config", + "_ephemeral_args", + "_extra_argv", + "_logger", + "_main_package", + "_module_map", + "_modules", + "_orig_argv", + "_reconfigure", + "_topo_sorter", + ) + + def __init__( + self, argv: Sequence[str], MainModuleType: type[MainPackage] + ) -> None: + r"""Construct a `ConfigurationManager`. + + Parameters + ---------- + argv : Sequence[str] + The command-line arguments to parse from + MainModuleType : type[MainPackage] + The type of the main module for which this manager must produce + a configuration. 
+ """ + os.environ["AEDIFIX"] = "1" + # points to the directory containing the "aedifix" install + self._aedifix_root_dir: Final = Path(__file__).resolve().parent.parent + main_package = MainModuleType.from_argv(self, argv) + self._cl_args: Namespace | None = None + self._orig_argv = tuple(argv) + main_argv, extra_argv = partition_argv(self._orig_argv) + self._argv = tuple(main_argv) + self._extra_argv = extra_argv + self._main_package = main_package + self._modules: list[Package] = [main_package] + self._logger = Logger(self.project_dir / "configure.log") + self._cmaker = CMaker() + self._config = ConfigFile( + manager=self, + config_file_template=main_package.project_configure_file_template, + ) + self._reconfigure = Reconfigure(manager=self) + self._ephemeral_args: set[str] = set() + + # Private methods + def _setup_log(self) -> None: + r"""Output just the ~bear necessities~.""" + self.log_boxed( + f"Configuring {self.project_name} to compile on your system" + ) + self.log_divider() + self.log( + "Starting configure run at " + f"{time.strftime('%a, %d %b %Y %H:%M:%S %z')}", + caller_context=False, + ) + self.log( + f"Configure Options: {' '.join(self.argv)}", caller_context=False + ) + self.log(f"Working directory: {Path.cwd()}", caller_context=False) + self.log( + f"Machine platform:\n{platform.uname()}", caller_context=False + ) + self.log(f"Python version:\n{sys.version}", caller_context=False) + self.log_divider() + self.log( + "Environment Variables:\n" + + "\n".join( + f"{key} = {val}" for key, val in sorted(os.environ.items()) + ) + ) + self.log_divider() + + def _log_git_info(self) -> None: + r"""Log information about the current commit and branch of the + repository. + """ + git_exe = shutil.which("git") + if git_exe is None: + self.log( + "'git' command not found, likely not a development repository" + ) + return + + try: + branch = self.log_execute_command( + [git_exe, "branch", "--show-current"] + ).stdout.strip() + except CommandError as ce: + NOT_A_GIT_REPO_ERROR = 128 + if ce.return_code == NOT_A_GIT_REPO_ERROR: + self.log( + "git branch --show-current returned exit code " + f"{NOT_A_GIT_REPO_ERROR}. Current directory is not a git " + "repository." + ) + # Silently gobble this error, it's mostly just informational + return + + if not branch: + # Per git branch --help: 'In detached HEAD state, nothing is + # printed.' + branch = "" + + try: + commit = self.log_execute_command( + [git_exe, "rev-parse", "HEAD"] + ).stdout.strip() + except CommandError: + return + + self.log(f"Git branch: {branch}") + self.log(f"Git commit: {commit}") + + def _parse_args(self, argv: Sequence[str]) -> Namespace: + r"""Parse arguments as specified in arg list. + + Parameters + ---------- + argv: Sequence[str] + The command line arguments to parse. + + Returns + ------- + args : Namespace + The parsed arguments. + + Notes + ----- + Each argument in the returned object lists both the value, and whether + the argument was explicitly set by the user. This allows detecting + whether the value is as a result of a default, or whether the user + specifically set the value. + """ + + class CustomFormatter( + ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter + ): + pass + + descr = f"""\ + Configure {self.project_name}. + + On success, a directory {self.project_dir}/{{project arch name}} will + be created, containing the configured build. + + Options listed below are handled directly by configure. Any options + following a '--' are passed verbatim to CMake. 
For example: + + $ ./configure --with-cxx clang++ -- -DCMAKE_C_COMPILER='gcc' + + will set the C++ compiler to 'clang++' and the C compiler to 'gcc'. + However, such manual intervention is rarely needed, and serves only + as an escape hatch for as-of-yet unsupported arguments. + """ + descr = textwrap.dedent(descr) + + parser = ArgumentParser( + usage="%(prog)s [options...] [-- raw cmake options...]", + description=descr, + formatter_class=CustomFormatter, + # This may lead to confusing errors. E.g. + # + # ./configure --cuda --something-else + # + # would result in "argument --cuda-arch: expected one argument" + # because --cuda would be treated as an abbreviation of + # --cuda-arch. This would be confusing to the user because clearly + # they never passed "--cuda-arch" as a flag. + allow_abbrev=False, + # We want to catch this as an exception so we can properly log it. + exit_on_error=False, + ) + + for conf_obj in self._modules: + self.log_execute_func(conf_obj.add_options, parser) + + # Parse the arguments normally; this will populate the values from + # the defaults + if "-h" in argv or "--help" in argv: + self.log("", tee=True, caller_context=False) # to undo scrolling + + full_args = parser.parse_args(argv) + # Create a dummy parser which will determine whether a particular value + # was passed on the command line. This is done by re-adding all of the + # arguments in the parsed args, but making the default + # value "suppressed". + # + # If an argument's default is suppressed, then if that argument does + # NOT appear in the command line, then it is NOT added to the resultant + # namespace. Thus any argument that exists in both the full args and + # the suppressed args implies that argument was set by the user. + suppress_parser = ArgumentParser( + argument_default=ARGPARSE_SUPPRESS, add_help=False + ) + for action in parser._actions: # noqa: SLF001 + suppress_parser.add_argument( + *action.option_strings, dest=action.dest, nargs="*" + ) + cli_args, _ = suppress_parser.parse_known_args(argv) + + args = Namespace() + for name, value in vars(full_args).items(): + setattr( + args, + name, + CLArg(name=name, value=value, cl_set=hasattr(cli_args, name)), + ) + return args + + def _setup_environ(self) -> None: + r"""Set up the environment. Among other things, properly injects + the values of PROJECT_ARCH and PROJECT_DIR if unset. + """ + arch_name = self.project_arch_name + arch_value = self.project_arch + environ = os.environ + match self._main_package.arch_value_provenance: + case ValueProvenance.COMMAND_LINE: + if arch_name in environ: + if (env_val := environ[arch_name]) != arch_value: + self.log_warning( + f"Ignoring environment variable " + f'{arch_name}="{env_val}". Using command-line ' + f'value "{arch_value}" instead.'
+ ) + else: + self.log( + f'Using {arch_name} from command-line: "{arch_value}"' + ) + case ValueProvenance.ENVIRONMENT: + assert arch_name in environ, ( + f"Arch provenance was environment, but {arch_name} not " + "found in os.environ" + ) + self.log( + f'{arch_name} found in environment: "{environ[arch_name]}"' + ) + case ValueProvenance.GENERATED: + self.log(f"{arch_name} was generated") + + self.log( + f"Setting environment value for {arch_name}, new value: " + f"{arch_value}" + ) + environ[arch_name] = arch_value + dir_name = self.project_dir_name + dir_value = self.project_dir + self.log( + f"Setting environment value for {dir_name}, new value: {dir_value}" + ) + environ[dir_name] = str(dir_value) + + def _setup_arch_dir(self) -> None: + r"""Ensure the creation and validity of the project arch directory. + + Raises + ------ + RuntimeError + If the arch directory exists but is not a directory. + """ + arch_dir = self.project_arch_dir + proj_name = self.project_name + with_clean = self.cl_args.with_clean + with_clean_val = with_clean.value + if with_clean_val: + self.log_warning( + f"{dest_to_flag(with_clean.name)} specified, deleting " + f"contents of {arch_dir}!" + ) + + if arch_dir.exists(): + self.log(f"{proj_name} arch exists: {arch_dir}") + if not arch_dir.is_dir(): + msg = ( + f"{proj_name} arch directory " + f"{arch_dir} already exists, but is not a " + "directory. Please move or delete this file " + "before re-running configure!" + ) + raise RuntimeError(msg) + if not with_clean_val: + reconfigure_file = self._reconfigure.reconfigure_file + if Path(sys.argv[0]).resolve() == reconfigure_file: + # The user is following our advice below and reconfiguring, + # so it's OK if the arch already exists. + self.log("User is reconfiguring, so no need to error out") + return + + cmake_cache = self.project_cmake_dir / "CMakeCache.txt" + if not cmake_cache.exists(): + # The cmake cache file doesn't exist, this would indicate + # that this is a new configuration. There is no need to + # error out because there are no prior effects that cmake + # could see. + self.log( + "CMake cache does not exist, so no need to error out " + "because for all intents and purposes, this is a " + "brand new configuration for CMake" + ) + return + + force = self.cl_args.force + if force.value: + self.log( + "User is forcing configuration, ignoring existing " + "arch dir" + ) + return + + msg = ( + f"{proj_name} arch directory {arch_dir} already exists and" + " would be overwritten by this configure command. If you:" + "\n" + "\n" + " 1. Meant to update an existing configuration, use " + f'{reconfigure_file.name} in place of "configure".' + "\n" + " 2. Meant to create a new configuration, re-run the " + "current configure command with " + f"--{self.project_arch_name}='some-other-name'." + "\n" + f" 3. Meant to redo the current arch ({arch_dir.name!r}) " + "from scratch, re-run configure with " + f"{dest_to_flag(with_clean.name)} option." + "\n" + " 4. Know what you are doing, and just want configure to" + " do as it is told, re-run the current configure command " + f"with {dest_to_flag(force.name)}" + "\n\n" + ) + raise UnsatisfiableConfigurationError(msg) + + self.log("Deleting arch directory, then recreating it") + proj_dir = self.project_dir + if arch_dir == proj_dir or arch_dir in proj_dir.parents: + # yes, this happened to me :( + msg = ( + f"Arch dir {arch_dir} is either a sub-path of or is the " + f"same as the project dir ({proj_dir}).
Deleting the arch " + "dir would be catastrophic; this is probably a mistake!" + ) + raise RuntimeError(msg) + + self.log_execute_func(self._reconfigure.backup_reconfigure_script) + shutil.rmtree(arch_dir) + self.log(f"{proj_name} arch does not exist, creating {arch_dir}") + arch_dir.mkdir(parents=True) + self.log(f"Successfully set up arch directory: {arch_dir}") + + def _setup_dependencies(self) -> None: + r"""Set up the package dependency tree. + + Notes + ----- + This function is the only place where packages may declare + dependencies. After this function returns, self._modules is sorted in + topological order based on the requirements dictated by the packages. + """ + self._module_map = { + type(conf_obj): idx for idx, conf_obj in enumerate(self._modules) + } + assert len(self._module_map) == len(self._modules), ( + "Duplicate modules!" + ) + + # pre-populate the TopologicalSorter so that modules which are never + # "required" are properly encoded with no dependencies + self._topo_sorter = TopologicalSorter( + {conf_obj: {} for conf_obj in self._modules} + ) + + for conf_obj in self._modules: + self.log_execute_func(conf_obj.declare_dependencies) + + # need to regen self._modules, but also reorder the module map since + # modules may have changed order + self._modules = [] + for idx, conf_obj in enumerate(self._topo_sorter.static_order()): + self._modules.append(conf_obj) + self._module_map[type(conf_obj)] = idx + + del self._topo_sorter + + def _get_package(self, req_package: type[Package]) -> Package: + try: + ret_idx = self._module_map[req_package] + except KeyError as ke: + raise ModuleNotFoundError(req_package) from ke + + return self._modules[ret_idx] # should never fail + + def _emit_summary(self) -> None: + def gen_summary() -> Iterator[str]: + summary = defaultdict(list) + summary[self._main_package.name].append( + self.log_execute_func(self._main_package.summarize_main) + ) + for conf_obj in self._modules: + if ret := self.log_execute_func(conf_obj.summarize): + summary[conf_obj.name].append(ret) + + for val_list in summary.values(): + yield from val_list + + summary = "\n".join(gen_summary()) + self.log_boxed(summary, title="Configuration Summary", align="left") + install_mess = [ + "Please set the following:", + "", + f"export {self.project_arch_name}='{self.project_arch}'", + f"export {self.project_dir_name}='{self.project_dir}'", + "", + "Then build libraries:", + "$ make", + ] + + from .package.packages.python import Python + + if self._get_package(Python).state.enabled(): + install_mess.extend( + ("And install Python bindings:", "$ pip install .") + ) + self.log_boxed( + "\n".join(install_mess), + title="Configuration Complete", + align="left", + ) + + # Member variable access + @property + def argv(self) -> tuple[str, ...]: + r"""Get the unparsed command-line arguments. + + Returns + ------- + args : tuple[str, ...] + The unparsed command-line arguments. + """ + return self._argv + + @property + def cl_args(self) -> Namespace: + r"""Get the parsed command-line arguments. + + Returns + ------- + args : Namespace + The parsed command-line arguments. + + Raises + ------ + WrongOrderError + If the attribute is retrieved before + `ConfigurationManager.setup()` is called. + """ + if self._cl_args is None: + msg = "Must call setup() first" + raise WrongOrderError(msg) + return self._cl_args + + @property + def project_name(self) -> str: + r"""Get the name of the current main project. + + Returns + ------- + name : str + The name of the current main project, e.g. "Legate". + """ + return self._main_package.name + + @property + def project_name_upper(self) -> str: + r"""Get the name of the current main project in all caps, + suitable for use as a variable. + + Returns + ------- + name : str + The name of the current main project, e.g. "LEGATE". + """ + return self.project_name.replace(" ", "_").upper() + + @property + def project_arch(self) -> str: + r"""Get the current main project arch. + + Returns + ------- + arch : str + The arch name of the current main project, e.g. "arch-darwin-debug". + """ + return self._main_package.arch_value + + @property + def project_arch_name(self) -> str: + r"""Get the current main project arch flag name. + + Returns + ------- + flag_name : str + The name of the current main project arch flag, + e.g. "LEGATE_ARCH". + """ + return self._main_package.arch_name + + @property + def project_dir(self) -> Path: + r"""Get the current main project root directory. + + Returns + ------- + dir : Path + The full path to the current project root directory, e.g. + `/path/to/legate`. + """ + return self._main_package.project_dir_value + + @property + def project_src_dir(self) -> Path: + r"""Get the current main project source directory. + + Returns + ------- + dir : Path + The full path to the current project source directory, e.g. + `/path/to/legate/src`. + """ + return self._main_package.project_src_dir + + @property + def project_dir_name(self) -> str: + r"""Get the name of the current main project root directory. + + Returns + ------- + dir_name : str + The name of the current project root directory, + e.g. "LEGATE_DIR". + """ + return self._main_package.project_dir_name + + @property + def project_arch_dir(self) -> Path: + r"""Get the current main project arch directory. + + Returns + ------- + arch_dir : Path + The full path to the current project arch directory, + e.g. `/path/to/legate/arch-darwin-debug`. + """ + return self.project_dir / self.project_arch + + @property + def project_cmake_dir(self) -> Path: + r"""Get the project's current cmake directory. + + Returns + ------- + cmake_dir : Path + The full path to the current project cmake directory, + e.g. `/path/to/legate/arch-darwin-debug/cmake_build`. + """ + return self.project_arch_dir / "cmake_build" + + @property + def project_export_config_path(self) -> Path: + r"""Get the project's export config file path. + + Returns + ------- + export_path : Path + The full path to the export config file containing all of the + exported variables to be read back by aedifix once the cmake + build completes, e.g. 
+ `/path/to/legate/arch-foo/cmake_build/aedifix_export_config.json` + """ + return self.project_cmake_dir / "aedifix_export_config.json" + + @staticmethod + def _sanitize_name(var: str | ConfigArgument) -> str: + name: str + if isinstance(var, ConfigArgument): + if var.cmake_var is None: + msg = f"CMake Variable for {var.name} is unset: {var}" + raise ValueError(msg) + name = var.cmake_var + else: + name = var + return name + + # CMake variables + @Logger.log_passthrough + def register_cmake_variable(self, var: CMakeFlagBase) -> None: + self._cmaker.register_variable(self, var) + + @Logger.log_passthrough + def set_cmake_variable( + self, name: str | ConfigArgument, value: Any + ) -> None: + self._cmaker.set_value(self, self._sanitize_name(name), value) + + @Logger.log_passthrough + def get_cmake_variable(self, name: str | ConfigArgument) -> Any: + return self._cmaker.get_value(self, self._sanitize_name(name)) + + @Logger.log_passthrough + def append_cmake_variable( + self, name: str | ConfigArgument, flags: Sequence[str] + ) -> Any: + return self._cmaker.append_value( + self, self._sanitize_name(name), flags + ) + + # Logging + def log( + self, + msg: str | list[str] | tuple[str, ...], + *, + tee: bool = False, + caller_context: bool = True, + keep: bool = False, + ) -> None: + r"""Append a message to the log. + + Parameters + ---------- + msg : str | list[str] | tuple[str, ...] + The message(s) to append to the log. + tee : bool, False + If True, output is printed to screen in addition to being appended + to the on-disk log file. If False, output is only written to disk. + caller_context : bool, True + Whether to prepend the name of the function which called this + function to `msg`. + keep : bool, False + Whether to make the message persist in live output. + """ + verbose_mess = msg + if caller_context: + try: + caller = get_calling_function() + except ValueError: + pass + else: + caller_name, _, _ = classify_callable( + caller, fully_qualify=False + ) + match msg: + case str(): + verbose_mess = f"{caller_name}(): {msg}" + case list() | tuple(): + verbose_mess = [ + f"{caller_name}(): {sub}" for sub in msg + ] + case _: + raise TypeError(msg) + + self._logger.log_file(verbose_mess) + if tee: + # See https://github.com/python/mypy/issues/18121 for why this + # type-check is ignored + self._logger.log_screen(msg, keep=keep) # type: ignore[arg-type]
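# Editor's note: an illustrative sketch (not part of this patch) of how a
# caller's name can be recovered for the caller_context feature of log()
# above; the real helpers (get_calling_function / classify_callable) live
# elsewhere in aedifix and may differ in detail.
import inspect

def _caller_name() -> str:
    # frame 0 is _caller_name, frame 1 is log, frame 2 is log's caller
    return inspect.stack()[2].function

def log(msg: str) -> None:
    print(f"{_caller_name()}(): {msg}")

def configure() -> None:
    log("starting")  # prints "configure(): starting"

configure()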
+ + def log_divider(self, *, tee: bool = False, keep: bool = True) -> None: + r"""Append a dividing line to the logs. + + Parameters + ---------- + tee : bool, False + If True, output is printed to screen in addition to being appended + to the on-disk log file. If False, output is only written to disk. + keep : bool, True + If ``tee`` is True, whether to persist the message in terminal + output. + """ + self._logger.log_divider(tee=tee, keep=keep) + + def log_boxed( + self, + message: str, + *, + title: str = "", + title_style: str = "", + align: AlignMethod = "center", + ) -> None: + r"""Log a message surrounded by a box. + + Parameters + ---------- + message : str + The message to log. + title : str, '' + An optional title for the box. + title_style : str, '' + Optional additional styling for the title. + align : AlignMethod, 'center' + How to align the text. + """ + self._logger.log_boxed( + message, title=title, title_style=title_style, align=align + ) + + def log_warning(self, message: str, *, title: str = "WARNING") -> None: + r"""Log a warning to the log. + + Parameters + ---------- + message : str + The message to print. + title : str, 'WARNING' + The title to use for the box. + """ + self._logger.log_warning(message, title=title) + + def log_error(self, message: str, *, title: str = "ERROR") -> None: + r"""Log an error to the log. + + Parameters + ---------- + message : str + The message to print. + title : str, 'ERROR' + The title to use for the box. + """ + self._logger.log_error(message, title=title) + + def log_execute_command( + self, command: Sequence[_T], *, live: bool = False + ) -> CompletedProcess[str]: + r"""Execute a system command and return the output. + + Parameters + ---------- + command : Sequence[T] + The command list to execute. + live : bool, False + Whether to output the live output to screen as well (it is always + updated continuously to the log file). + + Returns + ------- + ret : CompletedProcess + The completed process object. + + Raises + ------ + RuntimeError + If the command returns a non-zero error code. + """ + from rich.markup import escape + + def callback(stdout: str, stderr: str) -> None: + if stdout := stdout.strip(): + if live: + stdout = escape(stdout) + lines = tuple(map(str.rstrip, stdout.splitlines())) + self.log(lines, caller_context=False, tee=True) + else: + self.log(stdout, caller_context=False) + if stderr := stderr.strip(): + self.log(f"STDERR:\n{stderr}", caller_context=False) + + self.log(f"Executing command: {' '.join(map(str, command))}") + try: + return subprocess_capture_output_live( + command, callback=callback, check=True + ) + except CommandError as ce: + self.log(ce.summary) + raise + except Exception as e: + self.log(str(e)) + raise
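# Editor's note: a rough, self-contained sketch (not part of this patch) of
# what a live-streaming helper like subprocess_capture_output_live() could
# look like; the real implementation lives elsewhere in aedifix and may
# differ. The callback receives (stdout_chunk, stderr_chunk), mirroring the
# callback signature used above.
import subprocess

def run_live(cmd: list[str], callback) -> subprocess.CompletedProcess:
    # Stream stdout line by line, handing each chunk to the callback as it
    # arrives, while also accumulating everything for the returned result.
    with subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    ) as proc:
        out: list[str] = []
        assert proc.stdout is not None
        for line in proc.stdout:
            callback(line, "")
            out.append(line)
        err = proc.stderr.read() if proc.stderr else ""
        if err:
            callback("", err)
    return subprocess.CompletedProcess(cmd, proc.returncode, "".join(out), err)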
+ + def log_execute_func( + self, fn: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs + ) -> _T: + r"""Execute a function and log its execution to screen and log. + + Parameters + ---------- + fn : Callable[P, T] + The callable object to execute. + *args : P.args + The positional arguments to `fn`. + **kwargs : P.kwargs + The keyword arguments to `fn`. + + Returns + ------- + ret : T + The return value of `fn`. + """ + + def pruned_path(src_path: Path) -> Path: + # Given + # + # src_path = '/path/to/project/foo/bar/baz/module.py' + # + # we want to extract 'foo/bar/baz/module.py' since that makes for + # prettier printing below + for parent in (self._aedifix_root_dir, self.project_dir): + if src_path.is_relative_to(parent): + return src_path.relative_to(parent) + return src_path + + qual_name, src_path, lineno = classify_callable(fn) + qual_path = pruned_path(src_path) + self.log_divider() + self.log( + f"RUNNING: {qual_name}() ({qual_path}:{lineno})", + tee=True, + caller_context=False, + ) + if docstr := inspect.getdoc(fn): + self.log(f" {docstr}\n", caller_context=False) + else: + # for a newline + self.log("\n", caller_context=False) + return fn(*args, **kwargs) + + # Meat and potatoes + def require(self, package: Package, req_package: type[Package]) -> Package: + r"""Indicate to the manager that `package` requires `req_package` to + run before itself, and return a handle to the requested package. + + Parameters + ---------- + package : Package + The package that is requesting the dependency. + req_package : type[Package] + The class of the required package. + + Returns + ------- + package : Package + The indicated package. + + Raises + ------ + RuntimeError + If this routine is called outside of + `ConfigurationManager.setup_dependencies()`. + ModuleNotFoundError + If the requested module cannot be located. + """ + self.log(f"Module {package} requesting requirement: {req_package}") + ret = self._get_package(req_package) + try: + topo_sorter = self._topo_sorter + except AttributeError as ae: + msg = ( + "Trying to require a module outside of setup_dependencies(); " + "this is not allowed" + ) + raise RuntimeError(msg) from ae + + topo_sorter.add(package, ret) + return ret + + def add_ephemeral_arg(self, arg: str) -> None: + r"""Register an ephemeral command-line argument. + + Parameters + ---------- + arg : str + The command-line argument to add. + + Notes + ----- + Ephemeral arguments are a set of "one-shot" arguments, which + should not reappear on a reconfiguration run. + """ + self._ephemeral_args.add(arg) + + def setup(self) -> None: + r"""Set up the `ConfigurationManager` and parse any command-line + arguments. + + Notes + ----- + This routine will also ensure the creation of the arch directory. + """ + self._setup_log() + self.log_execute_func(self._log_git_info) + self._modules.extend( + self.log_execute_func(packages.load_packages, self) + ) + + # Sort the modules alphabetically for the parsing of arguments, but + # keep the main package on top. + self._modules.remove(self._main_package) + self._modules.sort(key=lambda x: x.name.casefold()) + self._modules.insert(0, self._main_package) + + self._cl_args = self.log_execute_func(self._parse_args, self.argv) + + # do this after parsing args because args might have --help (in which + # case we do not want to clobber the arch directory) + self.log_execute_func(self._setup_environ) + self.log_execute_func(self._setup_arch_dir) + # This call re-shuffles the modules + self.log_execute_func(self._setup_dependencies) + self.log_execute_func(self._config.setup) + self.log_execute_func(self._reconfigure.setup) + + for conf in self._modules: + self.log_execute_func(conf.setup) + + def configure(self) -> None: + r"""Configure all collected modules.""" + self.log_execute_func(self._config.configure) + self.log_execute_func(self._reconfigure.configure) + for conf in self._modules: + self.log_execute_func(conf.configure) + + def finalize(self) -> None: + r"""Finalize the configuration and instantiate the CMake configure.""" + for conf_obj in self._modules: + self.log_execute_func(conf_obj.finalize) + + self.log_execute_func( + self._cmaker.finalize, + self, + self.project_src_dir, + self.project_cmake_dir, + extra_argv=self._extra_argv, + ) + + self.log_execute_func(self._config.finalize) + self.log_execute_func( + self._reconfigure.finalize, + main_package_type=type(self._main_package), + ephemeral_args=self._ephemeral_args, + extra_argv=self._extra_argv, + ) + self.log_execute_func(self._main_package.post_finalize) + self.log_execute_func(self._emit_summary) + self._logger.copy_log(self.project_arch_dir / "configure.log") + + def main(self) -> None: + r"""Perform the main loop of the configuration.""" + with self._logger: + self.setup() + self.configure() + self.finalize() diff --git a/config/aedifix/package/__init__.py b/config/aedifix/package/__init__.py new file mode 100644 index 0000000000..9d535ea9ac --- /dev/null +++ b/config/aedifix/package/__init__.py @@ -0,0 +1,10 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from . 
import packages +from .main_package import MainPackage +from .package import Package + +__all__ = ("MainPackage", "Package", "packages") diff --git a/config/aedifix/package/main_package.py b/config/aedifix/package/main_package.py new file mode 100644 index 0000000000..03126dab8f --- /dev/null +++ b/config/aedifix/package/main_package.py @@ -0,0 +1,832 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import shlex +import shutil +import platform +import multiprocessing as mp +from abc import ABC, abstractmethod +from argparse import ArgumentParser +from enum import Enum, IntEnum +from pathlib import Path +from typing import TYPE_CHECKING, Final, Literal + +from ..cmake.cmake_flags import ( + CMAKE_VARIABLE, + CMakeBool, + CMakeExecutable, + CMakeInt, + CMakeList, + CMakePath, + CMakeString, +) +from ..util.argument_parser import ArgSpec, ConfigArgument +from ..util.utility import ( + CMAKE_TEMPLATES_DIR, + ValueProvenance, + cmake_configure_file, + flag_to_dest, +) +from .package import Package + +if TYPE_CHECKING: + from collections.abc import Sequence + + from ..manager import ConfigurationManager + +_DEFAULT_BUILD_TYPE: Final = os.environ.get( + "CMAKE_BUILD_TYPE", "release" +).casefold() +_CMAKE_BUILD_TYPE_MAP: Final = { + "debug": "Debug", + "release": "Release", + "release-debug": "RelWithDebInfo", + # An alias for release-debug + "relwithdebinfo": "RelWithDebInfo", + # This still maps to Debug because we don't want to invent a new build type + # for it. Specifically, we want the main package to be both debug and + # sanitized, but have all other packages be regular debug builds. It is up + # to the main package to properly set sanitizer flags for itself based on + # the --build-type command line argument. 
+ "debug-sanitizer": "Debug", +} + + +def _make_default_flags() -> dict[str, dict[str, list[str]]]: + def to_cuda_flags(flags: list[str]) -> list[str]: + return [f"--compiler-options={f}" for f in map(shlex.quote, flags)] + + def make_subdict( + c_flags: list[str], + cxx_flags: list[str] | None = None, + cuda_flags: list[str] | None = None, + ) -> dict[str, list[str]]: + if cxx_flags is None: + cxx_flags = c_flags[:] + if cuda_flags is None: + cuda_flags = to_cuda_flags(cxx_flags) + + return { + "CFLAGS": c_flags, + "CXXFLAGS": cxx_flags, + "CUDAFLAGS": cuda_flags, + } + + debug_c_flags = ["-O0", "-g", "-g3"] + debug_cuda_flags = ["-g", *to_cuda_flags(debug_c_flags)] + + release_c_flags = ["-O3"] + + reldeb_c_flags = debug_c_flags + release_c_flags + reldeb_cuda_flags = ["-g", *to_cuda_flags(reldeb_c_flags)] + + return { + "Debug": make_subdict( + c_flags=debug_c_flags, cuda_flags=debug_cuda_flags + ), + "Release": make_subdict(c_flags=release_c_flags), + "RelWithDebInfo": make_subdict( + c_flags=reldeb_c_flags, cuda_flags=reldeb_cuda_flags + ), + } + + +_DEFAULT_FLAGS: Final = _make_default_flags() +assert set(_CMAKE_BUILD_TYPE_MAP.values()) == set(_DEFAULT_FLAGS.keys()) + + +class LibraryLinkage(str, Enum): + SHARED = "shared" + STATIC = "static" + + def __str__(self) -> str: + return self.name.casefold() + + +def _guess_c_compiler() -> str | None: + for env_guess in ("CC", "CMAKE_C_COMPILER"): + if guess := os.environ.get(env_guess): + return guess + if guess := shutil.which("cc"): + return guess + return None + + +def _guess_cxx_compiler() -> str | None: + for env_guess in ("CXX", "CMAKE_CXX_COMPILER"): + if guess := os.environ.get(env_guess): + return guess + for ccguess in ("c++", "C++", "CC", "CXX", "cxx"): + if guess := shutil.which(ccguess): + return guess + return None + + +WITH_CLEAN_FLAG: Final = "--with-clean" +FORCE_FLAG: Final = "--force" +ON_ERROR_DEBUGGER_FLAG: Final = "--on-error-debugger" +DEBUG_CONFIGURE_FLAG: Final = "--debug-configure" + + +def _detect_num_cpus() -> int: + if env_val := os.environ.get("CMAKE_BUILD_PARALLEL_LEVEL", ""): + return int(env_val) + return max(mp.cpu_count() - 1, 1) + + +DebugConfigureFlag = Literal["", "--debug-find", "--trace", "--trace-expand"] + + +class DebugConfigureValue(IntEnum): + NONE = 0 + DEBUG_FIND = 1 + TRACE = 2 + TRACE_EXPAND = 3 + + @classmethod + def from_string(cls, str_val: str) -> DebugConfigureValue: + return cls(int(str_val)) + + @classmethod + def help_str(cls) -> str: + possible_values = "\n".join(f"- {v}: {v.to_flag()!r}" for v in cls) + return f"Possible values:\n{possible_values}" + + def to_flag(self) -> DebugConfigureFlag: + r"""Retrieve the corresponding CMake flag for the debug value. + + Returns + ------- + DebugConfigureFlag + The CMake flag corresponding to the debug value. + + Raises + ------ + ValueError + If the value of the current object is out of range. + """ + match self: + case self.NONE: + return "" + case self.DEBUG_FIND: + return "--debug-find" + case self.TRACE: + return "--trace" + case self.TRACE_EXPAND: + return "--trace-expand" + case _: + msg = f"Enum value out of bounds: {self}" + raise ValueError(msg) + + def to_flags(self) -> list[DebugConfigureFlag]: + r"""Build a list of CMake flags corresponding to the current value. + + Returns + ------- + list[DebugConfigureFlag] + The CMake flags. 
+ """ + raw_flags = (f.to_flag() for f in type(self) if self >= f) + return [f for f in raw_flags if f] # Need to weed out NONE + + +class MainPackage(Package, ABC): + DEBUG_CONFIGURE: Final = ConfigArgument( + name=DEBUG_CONFIGURE_FLAG, + spec=ArgSpec( + dest=flag_to_dest(DEBUG_CONFIGURE_FLAG), + type=DebugConfigureValue.from_string, + default=DebugConfigureValue.NONE, + const=DebugConfigureValue.DEBUG_FIND, + nargs="?", + help=( + "Enable additional debugging flags to help debug configure. " + 'A higher value means more debug info. High levels "stack" ' + "on top of lower levels. So if level '1' adds --foo, then '2' " + "adds --foo --bar, and so on. Must be >= 0. " + + DebugConfigureValue.help_str() + ), + ), + ephemeral=True, + ) + ON_ERROR_DEBUGGER: Final = ConfigArgument( + name=ON_ERROR_DEBUGGER_FLAG, + spec=ArgSpec( + dest=flag_to_dest(ON_ERROR_DEBUGGER_FLAG), + type=bool, + help=( + "Start a post-mortem debugger if a Python exception was raised" + ), + ), + ephemeral=True, + ) + WITH_CLEAN: Final = ConfigArgument( + name=WITH_CLEAN_FLAG, + spec=ArgSpec( + dest=flag_to_dest(WITH_CLEAN_FLAG), + type=bool, + help="Discard all existing configuration and start fresh", + ), + ephemeral=True, + ) + FORCE: Final = ConfigArgument( + name=FORCE_FLAG, + spec=ArgSpec( + dest=flag_to_dest(FORCE_FLAG), + type=bool, + help=( + "Tell configure that you know what you are doing and force " + "it to proceed, even if configure believes that doing so " + "would be erroneous" + ), + ), + ephemeral=True, + ) + CMAKE_BUILD_PARALLEL_LEVEL: Final = ConfigArgument( + name="--num-threads", + spec=ArgSpec( + dest="num_threads", + type=int, + nargs="?", + default=_detect_num_cpus(), + help="Number of threads with which to compile", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_BUILD_PARALLEL_LEVEL", CMakeInt), + ) + CMAKE_BUILD_TYPE: Final = ConfigArgument( + name="--build-type", + spec=ArgSpec( + dest="build_type", + choices=tuple(_CMAKE_BUILD_TYPE_MAP.keys()), + default=_DEFAULT_BUILD_TYPE, + help="Set the default build type", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_BUILD_TYPE", CMakeString), + ) + BUILD_SHARED_LIBS: Final = ConfigArgument( + name="--library-linkage", + spec=ArgSpec( + dest="library_linkage", + choices=(LibraryLinkage.SHARED, LibraryLinkage.STATIC), + default=LibraryLinkage.SHARED, + help="Set the default linkage strategy for built libraries", + ), + cmake_var=CMAKE_VARIABLE("BUILD_SHARED_LIBS", CMakeBool), + ) + CMAKE_C_COMPILER: Final = ConfigArgument( + name="--with-cc", + spec=ArgSpec( + dest="CC", + type=Path, + default=_guess_c_compiler(), + help="Specify C compiler", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_C_COMPILER", CMakeExecutable), + ) + CMAKE_CXX_COMPILER: Final = ConfigArgument( + name="--with-cxx", + spec=ArgSpec( + dest="CXX", + type=Path, + default=_guess_cxx_compiler(), + help="Specify C++ compiler", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_CXX_COMPILER", CMakeExecutable), + ) + CMAKE_C_FLAGS: Final = ConfigArgument( + name="--CFLAGS", + spec=ArgSpec(dest="CFLAGS", nargs=1, help="C compiler flags"), + cmake_var=CMAKE_VARIABLE("CMAKE_C_FLAGS", CMakeList), + ) + CMAKE_CXX_FLAGS: Final = ConfigArgument( + name="--CXXFLAGS", + spec=ArgSpec(dest="CXXFLAGS", nargs=1, help="C++ compiler flags"), + cmake_var=CMAKE_VARIABLE("CMAKE_CXX_FLAGS", CMakeList), + ) + CMAKE_EXPORT_COMPILE_COMMANDS: Final = CMAKE_VARIABLE( + "CMAKE_EXPORT_COMPILE_COMMANDS", CMakeBool + ) + CMAKE_COLOR_DIAGNOSTICS: Final = CMAKE_VARIABLE( + "CMAKE_COLOR_DIAGNOSTICS", CMakeBool + ) + CMAKE_COLOR_MAKEFILE: Final = 
CMAKE_VARIABLE( + "CMAKE_COLOR_MAKEFILE", CMakeBool + ) + CMAKE_INSTALL_PREFIX: Final = ConfigArgument( + name="--prefix", + spec=ArgSpec( + dest="prefix", + type=Path, + help=( + "Default installation prefix. Defaults to /usr/local on Unix." + ), + ), + cmake_var=CMAKE_VARIABLE("CMAKE_INSTALL_PREFIX", CMakePath), + ) + CMAKE_MAKE_PROGRAM: Final = CMAKE_VARIABLE( + "CMAKE_MAKE_PROGRAM", CMakeExecutable + ) + + __package_ignore_attrs__ = ( + "DEBUG_CONFIGURE", + "ON_ERROR_DEBUGGER", + "WITH_CLEAN", + "FORCE", + "CMAKE_BUILD_PARALLEL_LEVEL", + "CMAKE_BUILD_TYPE", + "BUILD_SHARED_LIBS", + "CMAKE_C_COMPILER", + "CMAKE_CXX_COMPILER", + "CMAKE_C_FLAGS", + "CMAKE_CXX_FLAGS", + "CMAKE_EXPORT_COMPILE_COMMANDS", + "CMAKE_COLOR_DIAGNOSTICS", + "CMAKE_COLOR_MAKEFILE", + "CMAKE_INSTALL_PREFIX", + ) + + __slots__ = ( + "_arch_name", + "_arch_value", + "_arch_value_provenance", + "_default_arch_file_path", + "_proj_config_file_template", + "_proj_dir_name", + "_proj_dir_value", + "_proj_src_dir", + ) + + def __init__( # noqa: PLR0913 + self, + manager: ConfigurationManager, + name: str, + argv: Sequence[str], + arch_name: str, + project_dir_name: str, + project_dir_value: Path, + project_config_file_template: Path, + project_src_dir: Path | None = None, + default_arch_file_path: Path | None = None, + dependencies: tuple[type[Package], ...] = (), + ) -> None: + r"""Construct the MainPackage. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager that will manage the main package. + name : str + The name of the main package, e.g. 'Legate'. + argv : Sequence[str] + The command line options. + arch_name : str + The name of the arch value, e.g. 'LEGATE_ARCH'. + project_dir_name : str + The name of the project dir variable, e.g. 'LEGATE_DIR'. + project_dir_value : Path + The value of the project dir, e.g. /path/to/legate. + project_config_file_template : Path + A path to a configure file template to fill out and place under + PROJECT_DIR/PROJECT_ARCH on successful configure. + project_src_dir : Path, optional + The path to the project's source directory for CMake. If not + provided, ``project_dir_value`` is used instead. + default_arch_file_path : Path, optional + The location to place a file containing the default PROJECT_ARCH + value. If not provided, or None, no file is emitted. + dependencies : tuple[type[Package], ...], () + Additional package types the main package depends on. + + Raises + ------ + AssertionError + If the project arch name starts with a '-', ends with a '_', + does not end with 'ARCH', or is not all caps. + ValueError + If the project arch value is set (either from command line or + environment variable) but empty. + """ + super().__init__( + manager=manager, + name=name, + always_enabled=True, + dependencies=dependencies, + ) + assert not arch_name.startswith("-") + assert not arch_name.endswith("_") + assert arch_name.isupper() + assert arch_name.endswith("ARCH") + if not project_config_file_template.exists(): + msg = ( + f"Project configure file: {project_config_file_template} does " + "not exist" + ) + raise ValueError(msg) + if not project_config_file_template.is_file(): + msg = ( + f"Project configure file: {project_config_file_template} is " + "not a file" + ) + raise ValueError(msg) + self._arch_name = arch_name + self._arch_value, self._arch_value_provenance = ( + self.preparse_arch_value(argv) + ) + if not self.arch_value: + msg = ( + f"WARNING: {self.arch_name} is set, but empty (set via " + f"{self.arch_value_provenance})! This is extremely dangerous, " + f"and WILL cause many options (e.g. {self.WITH_CLEAN.name}) " + "to misbehave! 
Please set this to a non-empty value before " "continuing." + ) + raise ValueError(msg) + self._proj_dir_name = project_dir_name + self._proj_dir_value = project_dir_value.resolve(strict=True) + self._proj_config_file_template = ( + project_config_file_template.resolve() + ) + if project_src_dir is None: + project_src_dir = self._proj_dir_value + self._proj_src_dir = project_src_dir.resolve(strict=True) + self._default_arch_file_path = default_arch_file_path + + @classmethod + @abstractmethod + def from_argv( + cls, manager: ConfigurationManager, argv: Sequence[str] + ) -> MainPackage: + raise NotImplementedError + + @property + def arch_name(self) -> str: + r"""Return the arch name of the main package. + + Returns + ------- + arch_name : str + The arch name of the main package, e.g. 'LEGATE_ARCH'. + """ + return self._arch_name + + @property + def arch_value(self) -> str: + r"""Return the arch value of the main package. + + Returns + ------- + arch_value : str + The arch value of the main package, e.g. 'arch-darwin-debug'. + """ + return self._arch_value + + @property + def arch_value_provenance(self) -> ValueProvenance: + r"""Get the provenance of the arch value. + + Returns + ------- + provenance : ValueProvenance + The provenance of the arch value. + """ + return self._arch_value_provenance + + @property + def project_dir_name(self) -> str: + r"""Get the project dir name of the main package. + + Returns + ------- + proj_dir_name : str + The name of the project dir variable, e.g. 'LEGATE_DIR'. + """ + return self._proj_dir_name + + @property + def project_dir_value(self) -> Path: + r"""Get the project dir value of the main package. + + Returns + ------- + proj_dir_value : Path + The value of the project dir variable, + e.g. /path/to/legate. + """ + return self._proj_dir_value + + @property + def project_configure_file_template(self) -> Path: + r"""Get the path to the project configure file template. + + Returns + ------- + proj_config_file_template : Path + The path to the template file, e.g. + /path/to/config/legate_internal/gmakevariables.in + """ + return self._proj_config_file_template + + @property + def project_src_dir(self) -> Path: + r"""Get the source directory of the main package. + + Returns + ------- + proj_dir_value : Path + The project source dir, e.g. /path/to/legate/src. + """ + return self._proj_src_dir + + @staticmethod + def _preparse_value( + argv: Sequence[str], + opt_name: str, + *, + bool_opt: bool = False, + environ_name: str | None = None, + ) -> tuple[str | None, ValueProvenance]: + r"""Parse out a value from the command line and environment. + + Parameters + ---------- + argv : Sequence[str] + The command line to parse. + opt_name : str + The name of the command line option to extract. + bool_opt : bool, False + True if `opt_name` refers to a boolean option, False otherwise. + environ_name : str, optional + The name of the environment variable to parse (if any). + + Returns + ------- + value : str | None + The value found, or None if not found. + provenance : ValueProvenance + Where the value came from. + + Raises + ------ + AssertionError + If `opt_name` does not start with a '-'. 
+ """ + assert opt_name.startswith("-"), ( + f"Option name '{opt_name}' must start with '-'" + ) + dest_name = flag_to_dest(opt_name) + parser = ArgumentParser(add_help=False) + if bool_opt: + parser.add_argument( + opt_name, + nargs="?", + const=True, + default=None, + type=ConfigArgument._str_to_bool, # noqa: SLF001 + dest=dest_name, + ) + else: + parser.add_argument(opt_name, required=False, dest=dest_name) + args, _ = parser.parse_known_args(argv) + + if (val := getattr(args, dest_name)) is not None: + return val, ValueProvenance.COMMAND_LINE + + if environ_name is not None and ( + (val := os.environ.get(environ_name, None)) is not None + ): + return val, ValueProvenance.ENVIRONMENT + return None, ValueProvenance.GENERATED # not found + + def preparse_arch_value( + self, argv: Sequence[str] + ) -> tuple[str, ValueProvenance]: + r"""Pre-parse (or generate) the project ARCH value based on argv. + + Parameters + ---------- + argv : Sequence[str] + The command-line arguments to search. + + Returns + ------- + arch : str + The value of the found or generated ARCH. + provenance : ValueProvenance + The provenance of the arch value, detailing where the value was + found. + """ + arch, provenance = self._preparse_value( + argv, f"--{self.arch_name}", environ_name=self.arch_name + ) + if arch is not None: + # found something + return arch, provenance + + gen_arch = ["arch", platform.system().casefold()] + have_py, _ = self._preparse_value(argv, "--with-python", bool_opt=True) + if have_py: + gen_arch.append("py") + have_cuda, _ = self._preparse_value(argv, "--with-cuda", bool_opt=True) + if have_cuda: + gen_arch.append("cuda") + build_type, _ = self._preparse_value( + argv, + self.CMAKE_BUILD_TYPE.name, + environ_name=str(self.CMAKE_BUILD_TYPE.cmake_var), + ) + if build_type is None: + build_type = _DEFAULT_BUILD_TYPE + else: + build_type = build_type.casefold() + gen_arch.append(build_type) + return "-".join(gen_arch), ValueProvenance.GENERATED
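# Editor's note: an illustrative, runnable sketch (not part of this patch) of
# the pre-parsing pattern above: fish a single flag out of argv with a
# throwaway parser before the real parser exists, falling back to the
# environment. All names here are illustrative.
import os
from argparse import ArgumentParser

def preparse(argv: list[str], flag: str, env_name: str) -> str | None:
    mini = ArgumentParser(add_help=False)
    mini.add_argument(flag, dest="value")
    args, _ = mini.parse_known_args(argv)  # unknown flags are simply skipped
    if args.value is not None:
        return args.value
    return os.environ.get(env_name)

# --with-cuda is unknown to the mini parser and is ignored
print(
    preparse(
        ["--LEGATE_ARCH", "arch-test", "--with-cuda"],
        "--LEGATE_ARCH",
        "LEGATE_ARCH",
    )
)  # prints "arch-test"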
+ + def add_options(self, parser: ArgumentParser) -> None: + r"""Add options for the main package. + + Parameters + ---------- + parser : ArgumentParser + The argument parser to add options to. + """ + # first do the base options + base_group = self.create_argument_group(parser, title="Base Options") + base_group.add_argument( + f"--{self.arch_name}", + help=( + f"{self.manager.project_name} build directory. Can be any " + "arbitrary string, so long as the name is unique inside " + f"{self.project_dir_value}. If not passed, a suitable value " + "is generated automatically based on influential configure " + "arguments." + ), + default=self.manager.project_arch, + ) + self.log_execute_func( + self.add_package_options, base_group, ignored_only=True + ) + # then do the options for the derived main package + package_group = self.create_argument_group(parser, title=self.name) + self.log_execute_func(self.add_package_options, package_group) + + def configure_core_package_variables(self) -> None: + r"""Configure the core main package cmake variables.""" + self.manager.set_cmake_variable( + self.CMAKE_EXPORT_COMPILE_COMMANDS, True + ) + self.manager.set_cmake_variable(self.CMAKE_COLOR_DIAGNOSTICS, True) + self.manager.set_cmake_variable(self.CMAKE_COLOR_MAKEFILE, True) + self.manager.set_cmake_variable( + self.CMAKE_BUILD_PARALLEL_LEVEL, self.cl_args.num_threads.value + ) + match self.cl_args.library_linkage.value: + case LibraryLinkage.SHARED: + self.manager.set_cmake_variable(self.BUILD_SHARED_LIBS, True) + case LibraryLinkage.STATIC: + self.manager.set_cmake_variable(self.BUILD_SHARED_LIBS, False) + self.set_flag_if_set(self.CMAKE_INSTALL_PREFIX, self.cl_args.prefix) + + def configure_c(self) -> None: + r"""Configure C compiler variables.""" + self.set_flag_if_user_set(self.CMAKE_C_COMPILER, self.cl_args.CC) + self._configure_language_flags(self.CMAKE_C_FLAGS, self.cl_args.CFLAGS) + + def configure_cxx(self) -> None: + r"""Configure C++ compiler variables.""" + self.set_flag_if_user_set(self.CMAKE_CXX_COMPILER, self.cl_args.CXX) + self._configure_language_flags( + self.CMAKE_CXX_FLAGS, self.cl_args.CXXFLAGS + ) + + def setup(self) -> None: + r"""Set up the Main Package.""" + # We do this here because the compilers need to know what the build + # type is to set reasonable defaults in their configure(). Because we + # cannot guarantee that the main package is configured first (in fact, + # in almost all cases it isn't), we must do this here. + self.manager.set_cmake_variable( + self.CMAKE_BUILD_TYPE, + _CMAKE_BUILD_TYPE_MAP[self.cl_args.build_type.value], + ) + super().setup() + + def configure(self) -> None: + r"""Configure the Main Package.""" + super().configure() + self.log_execute_func(self.configure_core_package_variables) + self.log_execute_func(self.configure_c) + self.log_execute_func(self.configure_cxx) + + def finalize_default_arch_file(self) -> None: + r"""Emit a file containing this configuration's PROJECT_ARCH so + that the user doesn't have to have it defined in the environment. + """ + path = self._default_arch_file_path + if path is None: + self.log("Default arch file path is None, not emitting file") + return + + from datetime import date + + defs = { + "YEAR": date.today().year, + "FILE": __file__, + "PROJECT_NAME": self.project_name.casefold(), + "PROJECT_ARCH_VALUE": self.arch_value, + } + cmake_configure_file( + self, + CMAKE_TEMPLATES_DIR / "get_project_arch.py.in", + path.resolve(), + defs, + ) + + def post_finalize(self) -> None: + r"""Execute finalization for the main package only after a + successful configure run. + """ + self.log_execute_func(self.finalize_default_arch_file) + + def summarize_main(self) -> str: + r"""Provide the main summary for the Main Package. + + Returns + ------- + summary : str + The summary. + + Notes + ----- + This is different from `Package.summarize()`. It should be appended + to the package summary in addition to the former. 
+ """ + ret = [ + self.create_package_summary( + [ + ( + f"{self.manager.project_name} Dir", + self.manager.project_dir, + ), + ( + f"{self.manager.project_name} Arch", + self.manager.project_arch, + ), + ( + "Build Generator", + self.manager.get_cmake_variable( + self.CMAKE_MAKE_PROGRAM + ), + ), + ( + "Build type", + self.manager.get_cmake_variable(self.CMAKE_BUILD_TYPE), + ), + ( + "Num Build Threads", + self.manager.get_cmake_variable( + self.CMAKE_BUILD_PARALLEL_LEVEL + ), + ), + ( + "Install prefix", + self.manager.get_cmake_variable( + self.CMAKE_INSTALL_PREFIX + ), + ), + ], + title="Core Project", + ) + ] + + def summarize_compiler( + name: str, compiler_var: ConfigArgument, flags_var: ConfigArgument + ) -> str: + cc = self.manager.get_cmake_variable(compiler_var) + if cc: + version = self.log_execute_command([cc, "--version"]).stdout + else: + version = "(unknown)" + + ccflags = self.manager.get_cmake_variable(flags_var) + match ccflags: + case list() | tuple(): + ccflags_str = " ".join(ccflags) + case str(): + ccflags_str = ccflags + case None: + ccflags_str = "" + case _: + raise TypeError(type(ccflags)) + + return self.create_package_summary( + [ + ("Executable", cc), + ("Version", version), + (f"Global {name} Flags", ccflags_str), + ], + title=f"{name} Compiler", + ) + + ret.append( + summarize_compiler("C", self.CMAKE_C_COMPILER, self.CMAKE_C_FLAGS) + ) + ret.append( + summarize_compiler( + "C++", self.CMAKE_CXX_COMPILER, self.CMAKE_CXX_FLAGS + ) + ) + return "\n".join(ret) diff --git a/config/aedifix/package/package.py b/config/aedifix/package/package.py new file mode 100644 index 0000000000..1c6f783129 --- /dev/null +++ b/config/aedifix/package/package.py @@ -0,0 +1,462 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import shlex +import textwrap +from argparse import ArgumentParser, Namespace, _ArgumentGroup as ArgumentGroup +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, TypeVar + +from ..base import Configurable +from ..cmake.cmake_flags import _CMakeVar +from ..util.argument_parser import ConfigArgument, ExclusiveArgumentGroup +from ..util.exception import WrongOrderError + +if TYPE_CHECKING: + from collections.abc import Sequence + + from ..manager import ConfigurationManager + from ..util.cl_arg import CLArg + + +_T = TypeVar("_T") + + +class Dependencies(Namespace): + def __getattr__(self, value: str) -> Package: + return super().__getattr__(value) + + +class Package(Configurable): + __slots__ = "_always_enabled", "_dep_types", "_deps", "_name", "_state" + + @dataclass(slots=True, frozen=True) + class EnableState: + value: bool + explicit: bool = False + + def enabled(self) -> bool: + return self.value + + def disabled(self) -> bool: + return not self.enabled() + + def explicitly_enabled(self) -> bool: + return self.enabled() and self.explicit + + def explicitly_disabled(self) -> bool: + return self.disabled() and self.explicit + + def implicitly_enabled(self) -> bool: + return self.enabled() and (not self.explicit) + + def implicitly_disabled(self) -> bool: + return self.disabled() and (not self.explicit) + + def __init__( + self, + manager: ConfigurationManager, + name: str, + *, + always_enabled: bool = False, + dependencies: tuple[type[Package], ...] = (), + ) -> None: + r"""Construct a Package. 
+ + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this package. + name : str + The name of the package, e.g. 'CUDA'. + always_enabled : bool, False + Whether this package should be considered unconditionally enabled. + dependencies : tuple[type[Package], ...], () + The package types this package depends on. + """ + from .main_package import MainPackage + + super().__init__(manager=manager) + + if isinstance(self, MainPackage): + always_enabled = True + + self._name = name + self._state = Package.EnableState(value=always_enabled) + self._always_enabled = always_enabled + self._dep_types = dependencies + self._deps: Dependencies | None = None + + @property + def name(self) -> str: + r"""Get the name of the package. + + Returns + ------- + name : str + The name of the package, e.g. 'Legion'. + """ + return self._name + + @property + def state(self) -> EnableState: + r"""Get whether the package is enabled or disabled. + + Returns + ------- + state : EnableState + The enable state; truthy if the package is enabled (i.e. found, + requested by user, or implied), falsey otherwise. + """ + if self._always_enabled: + assert self._state.enabled(), ( + f"{self.name} is always enabled but state is Falsey: " + f"{self._state}" + ) + return self._state + + @property + def deps(self) -> Dependencies: + r"""Get the package dependencies. + + Returns + ------- + deps : Dependencies + The package dependencies. + """ + if self._deps is None: + msg = "Must declare dependencies before accessing them" + raise WrongOrderError(msg) + return self._deps + + def add_options(self, parser: ArgumentParser) -> None: + r"""Add options for a package. + + Parameters + ---------- + parser : ArgumentParser + The argument parser to which to add options. + + Notes + ----- + Must not be overridden by packages unless they wish to supply no + options at all. + """ + group = self.create_argument_group(parser) + self.log_execute_func(self.add_package_options, group) + + def add_package_options( # noqa: C901 + self, parser: ArgumentGroup, *, ignored_only: bool = False + ) -> None: + r"""Callback to add options for each package. + + Parameters + ---------- + parser : ArgumentGroup + The argument group to which to add options. + + Notes + ----- + By default, this registers any ConfigArgument and CMake-variable + attributes defined on the package class. + """ + + def handle_cmake_var(attr: _CMakeVar) -> None: + cmake_ty = attr.__config_cmake_type__() + self.log( + f'Registering CMake variable "{attr}" for {self!r}: {cmake_ty}' + ) + # have found a special attribute + self.manager.register_cmake_variable(cmake_ty) + + def handle_config_arg( + arg: ConfigArgument, parser: ArgumentGroup + ) -> None: + self.log(f"Adding {arg.name} to parser: {arg}") + arg.add_to_argparser(parser) + if arg.ephemeral: + self.manager.add_ephemeral_arg(arg.name) + if arg.cmake_var is not None: + handle_cmake_var(arg.cmake_var) + + # TODO(jfaibussowit) + # This is extremely hacky, but basically, for the main package we want + # to add 2 argument sections: Base Options and + # Options. + # + # So we make 2 argument groups and end up calling this function twice, + # which results in argparse errors about duplicate argument definitions + # (because the derived class ends up re-registering the arguments of + # the parent class). + # + # So our solution is as follows: we have a magic attribute + # __package_ignore_attrs__, which contains the names of all the + # attributes which should be ignored. + # + # So when the main package calls this function, it calls it with + # ignored_only = True, and only registers its special attributes. 
The + # second time around we call it without that, and register all the + # others. + attr_names = dir(self) + ignores: set[str] = getattr(self, "__package_ignore_attrs__", set()) + if ignored_only: + attr_names = [attr for attr in attr_names if attr in ignores] + else: + attr_names = [attr for attr in attr_names if attr not in ignores] + + for attr_name in attr_names: + if attr_name.startswith("__"): + continue + + try: + attr = getattr(self, attr_name) + except Exception as e: + self.log( + f"Skipping attribute {attr_name!r} due to raised " + f"exception: {e}" + ) + continue + + # attr is e.g. MainPackage.CMAKE_BUILD_TYPE + match attr: + case _CMakeVar(): + self.log( + f"Attribute {attr_name!r}: detected cmake variable" + ) + handle_cmake_var(attr) + case ExclusiveArgumentGroup(required=required, group=group): + self.log( + f"Attribute {attr_name!r}: detected exclusive " + "argument group" + ) + mut_group = parser.add_mutually_exclusive_group( + required=required + ) + for sub_attr in group.values(): + handle_config_arg(sub_attr, mut_group) + case ConfigArgument(): + self.log( + f"Attribute {attr_name!r}: detected config argument" + ) + handle_config_arg(attr, parser) + + def create_argument_group( + self, parser: ArgumentParser, title: str | None = None + ) -> ArgumentGroup: + if title is None: + title = self.name + return parser.add_argument_group(title=title) + + def set_flag(self, name: str | ConfigArgument, value: CLArg[_T]) -> None: + self.manager.set_cmake_variable(name=name, value=value.value) + + def set_flag_if_set( + self, name: str | ConfigArgument, value: CLArg[_T] + ) -> None: + if value.value is not None: + self.set_flag(name=name, value=value) + + def set_flag_if_user_set( + self, name: str | ConfigArgument, value: CLArg[_T] + ) -> None: + if value.cl_set: + self.set_flag(name=name, value=value) + + def append_flags_if_user_set( + self, name: str | ConfigArgument, value: CLArg[Sequence[str]] + ) -> None: + if value.cl_set: + self.append_flags_if_set(name, value) + + def append_flags_if_set( + self, name: str | ConfigArgument, value: CLArg[Sequence[str]] + ) -> None: + flags = value.value + if flags is None: + return + assert isinstance(flags, (list, tuple)) + flg_list = [] + for f in flags: + flg_list.extend(shlex.split(f)) + if not flg_list: + return + self.manager.append_cmake_variable(name=name, flags=flg_list) + + # TODO(jfaibussowit) + # HACK HACK HACK: this is only here because the CUDA package also needs to + # see it.. + def _configure_language_flags( + self, + name: ConfigArgument, + cl_arg: CLArg[Sequence[str]], + default_flags: dict[str, list[str]] | None = None, + ) -> None: + if default_flags is None: + from .main_package import _DEFAULT_FLAGS + + default_flags = _DEFAULT_FLAGS[ + self.manager.get_cmake_variable("CMAKE_BUILD_TYPE") + ] + + if cl_arg.cl_set: + flags = cl_arg.value + assert flags is not None + else: + flags = default_flags[cl_arg.name] + self.manager.append_cmake_variable(name=name, flags=flags) + + def declare_dependencies(self) -> None: + r"""Set up and declare dependencies for packages. By default, + declares no dependencies. + """ + deps = { + dep_ty.__name__: self.manager.require(self, dep_ty) + for dep_ty in self._dep_types + } + self._deps = Dependencies(**deps) + + def _determine_package_enabled(self) -> EnableState: + r"""Try to determine if a package is enabled or not. + + Returns + ------- + enabled : EnableState + Whether the package is enabled. 
+ + def _determine_package_enabled(self) -> EnableState: + r"""Try to determine if a package is enabled or not. + + Returns + ------- + enabled : EnableState + Whether the package is enabled. + """ + if self._always_enabled: + self.log(f"{self.name}: always enabled") + return Package.EnableState(value=True, explicit=False) + + config_args = [] + primary_attr = None + for attr_name in dir(self): + try: + attr = getattr(self, attr_name) + except Exception: # noqa: S112 + continue + + if not isinstance(attr, ConfigArgument): + continue + + if not attr.enables_package: + # Don't care about attributes that don't play a role in + # enabling the package + continue + + config_args.append(attr) + if attr.primary: + assert primary_attr is None, ( + "Multiple primary ConfigArguments; previously " + f"found {attr}" + ) + primary_attr = attr + + assert primary_attr is not None, ( + f"Never found primary config argument for {self.name}" + ) + + # The primary attribute, if set, should ultimately control whether the + # package is enabled or disabled, so we check it first + config_args.insert(0, primary_attr) + for arg in config_args: + cl_arg = getattr(self.cl_args, arg.spec.dest) + if (val := cl_arg.value) is not None: + return Package.EnableState( + value=bool(val), explicit=cl_arg.cl_set + ) + + return Package.EnableState(value=False, explicit=False) + + def configure(self) -> None: + r"""Configure a Package.""" + super().configure() + self._state = self._determine_package_enabled() + if self.state.enabled(): + self.log(f"Package {self.name}: enabled") + else: + self.log( + f"Package {self.name}: disabled due to all indicators being " + "falsey" + ) + + def summarize(self) -> str: + r"""Return a summary of this `Configurable`. By default, returns + an empty summary. + + Returns + ------- + summary : str + The summary. + """ + return "" + + def create_package_summary( + self, extra_lines: Sequence[tuple[str, Any]], title: str | None = None + ) -> str: + r"""Create a package summary. + + Parameters + ---------- + extra_lines : Sequence[tuple[str, Any]] + Extra lines to add to the package summary. + title : str, optional + Title to use for the summary, defaults to package name if not + given. + + Returns + ------- + summary : str + The formatted package summary string. + + Notes + ----- + Each entry in `extra_lines` must be a pair of values, the line + heading, and its contents. The line heading must not contain a ':'. + For example, it may be: + + >>> + extra_lines = [ + ("Foo", "a foo"), + ("Bar", "a bar") + ] + + which results in + + >>> + Foo: a foo + Bar: a bar + """ + if not extra_lines: + return "" + + if title is None: + title = self.name + + max_len = max(map(len, (name for name, _ in extra_lines))) + 1 + + def fixup_extra_lines( + lines: Sequence[tuple[str, Any]], + ) -> list[tuple[str, str]]: + # We want to align any overflow with the start of the text, so + # + # foo: some text + # bar: some .... + # very long text + # ^^^^^^^^^^^^^^ aligned to "some" + # + indent = " " * (max_len + len(": ")) + ret = [] + for name, value in lines: + str_v = str(value).strip() + if "\n" in str_v: + str_v = textwrap.indent(str_v, indent).lstrip() + ret.append((name, str_v)) + return ret + + extra_lines = fixup_extra_lines(extra_lines) + return "\n".join( + [f"{title}:"] + + [ + f" {str(name) + ':':<{max_len}} {value}" + for name, value in extra_lines + ] + ) diff --git a/config/aedifix/package/packages/__init__.py b/config/aedifix/package/packages/__init__.py new file mode 100644 index 0000000000..d220107ef7 --- /dev/null +++ b/config/aedifix/package/packages/__init__.py @@ -0,0 +1,46 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import importlib +from pathlib import Path +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + from ..package import Package + + +def load_packages(manager: ConfigurationManager) -> list[Package]: + r"""Load all package modules in the packages directory, and return the + constructed packages. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager with which to construct the packages. + + Returns + ------- + packages : list[Package] + The list of loaded packages. + """ + assert __package__, "Cannot auto-load packages without relative imports!" + packages = [] + cur_dir = Path(__file__).parent + manager.log(f"Using package directory: {cur_dir}") + for module_file in cur_dir.iterdir(): + manager.log(f"Attempting to load package: {module_file}") + if module_file.is_dir() or module_file.name.startswith("_"): + # skip __init__.py, __pycache__ and any other directories + manager.log( + f"Skipping loading package: {module_file} is not a package!" + ) + continue + module = importlib.import_module(f".{module_file.stem}", __package__) + manager.log(f"Loaded package: {module}") + conf_obj = module.create_package(manager) + manager.log(f"Adding package: {conf_obj}") + packages.append(conf_obj) + return packages diff --git a/config/aedifix/package/packages/cal.py b/config/aedifix/package/packages/cal.py new file mode 100644 index 0000000000..44246ae721 --- /dev/null +++ b/config/aedifix/package/packages/cal.py @@ -0,0 +1,55 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakePath +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class CAL(Package): + With_CAL: Final = ConfigArgument( + name="--with-cal", + spec=ArgSpec( + dest="with_cal", type=bool, help="Build with CAL support." + ), + enables_package=True, + primary=True, + ) + CAL_DIR: Final = ConfigArgument( + name="--with-cal-dir", + spec=ArgSpec( + dest="cal_dir", + type=Path, + help="Path to CAL installation directory.", + ), + cmake_var=CMAKE_VARIABLE("CAL_DIR", CMakePath), + enables_package=True, + ) + + def __init__(self, manager: ConfigurationManager) -> None: + r"""Construct a CAL Package. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this package. + """ + super().__init__(manager=manager, name="CAL") + + def configure(self) -> None: + r"""Configure CAL.""" + super().configure() + if self.state.enabled(): + self.set_flag_if_user_set(self.CAL_DIR, self.cl_args.cal_dir) + + +def create_package(manager: ConfigurationManager) -> CAL: + return CAL(manager) diff --git a/config/aedifix/package/packages/cmake.py b/config/aedifix/package/packages/cmake.py new file mode 100644 index 0000000000..1a9ab9528f --- /dev/null +++ b/config/aedifix/package/packages/cmake.py @@ -0,0 +1,137 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import shutil +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakePath, CMakeString +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +_cmake_exe = shutil.which("cmake") + + +def _determine_default_generator() -> str | None: + if ret := os.environ.get("CMAKE_GENERATOR"): + return ret + if shutil.which("ninja"): + return "Ninja" + if ( + shutil.which("make") + or shutil.which("gmake") + or shutil.which("gnumake") + ): + return "Unix Makefiles" + return None + + +_default_gen = _determine_default_generator() + + +class CMake(Package): + CMAKE_COMMAND: Final = ConfigArgument( + name="--cmake-executable", + spec=ArgSpec( + dest="cmake_executable", + metavar="EXE", + required=_cmake_exe is None, + default=_cmake_exe, + help="Path to CMake executable (if not on PATH).", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_COMMAND", CMakePath, prefix=""), + ) + CMAKE_GENERATOR: Final = ConfigArgument( + name="--cmake-generator", + spec=ArgSpec( + dest="cmake_generator", + default=_default_gen, + required=_default_gen is None, + choices=["Ninja", "Unix Makefiles", None], + help="The CMake build generator", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_GENERATOR", CMakeString, prefix="-G"), + ) + + def __init__(self, manager: ConfigurationManager) -> None: + r"""Construct a CMake Package. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this package. + """ + super().__init__(manager=manager, name="CMake", always_enabled=True) + + def configure_cmake_version(self) -> None: + r"""Determine the version of the cmake executable. + + Raises + ------ + RuntimeError + If the cmake version is not all numeric. + """ + cmake_exe = self.manager.get_cmake_variable(self.CMAKE_COMMAND) + version = ( + self.log_execute_command([cmake_exe, "--version"]) + .stdout.splitlines()[0] # "cmake version XX.YY.ZZ" + .split()[2] # ["cmake", "version", "XX.YY.ZZ"] + ) + # In case we have, e.g. 3.27.4-gdfbe7aa-dirty + version = version.split("-")[0] + if not all(num.isdigit() for num in version.split(".")): + msg = ( + f"Unknown CMake version {version!r}, could not parse. " + 'Expected .split(".") to be all numeric.' + ) + raise RuntimeError(msg) + + self.log(f"CMake executable version: {version}") + self.version = version + + def configure_core_cmake_variables(self) -> None: + r"""Configure the core cmake variables.""" + self.manager.set_cmake_variable( + self.CMAKE_COMMAND, self.cl_args.cmake_executable.value + ) + self.manager.set_cmake_variable( + self.CMAKE_GENERATOR, self.cl_args.cmake_generator.value + ) + + def configure(self) -> None: + r"""Configure CMake.""" + super().configure() + self.log_execute_func(self.configure_core_cmake_variables) + self.log_execute_func(self.configure_cmake_version) + + def summarize(self) -> str: + r"""Summarize CMake. + + Returns + ------- + summary : str + A summary of configured CMake. 
+ """ + lines = [ + ( + "Executable", + self.manager.get_cmake_variable(self.CMAKE_COMMAND), + ), + ("Version", self.version), + ( + "Generator", + self.manager.get_cmake_variable(self.CMAKE_GENERATOR), + ), + ] + + return self.create_package_summary(lines) + + +def create_package(manager: ConfigurationManager) -> CMake: + return CMake(manager) diff --git a/config/aedifix/package/packages/cuda.py b/config/aedifix/package/packages/cuda.py new file mode 100644 index 0000000000..cb0ab2c377 --- /dev/null +++ b/config/aedifix/package/packages/cuda.py @@ -0,0 +1,195 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import shutil +from argparse import Action, ArgumentParser, Namespace +from pathlib import Path +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakeExecutable, CMakeList, CMakePath +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package + +if TYPE_CHECKING: + from collections.abc import Sequence + + from ...manager import ConfigurationManager + + +def _guess_cuda_compiler() -> str | None: + for env_guess in ("CUDAC", "CMAKE_CUDA_COMPILER"): + if guess := os.environ.get(env_guess): + return guess + for ccguess in ("nvcc", "cudac"): + if guess := shutil.which(ccguess): + return guess + return None + + +class CudaArchAction(Action): + @staticmethod + def map_cuda_arch_names(in_arch: str) -> list[str]: + arch_map = { + "pascal": "60", + "volta": "70", + "turing": "75", + "ampere": "80", + "ada": "89", + "hopper": "90", + "blackwell": "100", + # TODO(jfaibussowit): rubin? + } + arch = [] + for sub_arch in in_arch.split(","): + # support Turing, TURING, and, if the user is feeling spicy, tUrInG + sub_arch_lo = sub_arch.strip().casefold() + if not sub_arch_lo: + # in_arch = "something,,something_else" + continue + arch.append(arch_map.get(sub_arch_lo, sub_arch_lo)) + return arch + + def __call__( + self, + parser: ArgumentParser, # noqa: ARG002 + namespace: Namespace, + values: str | Sequence[str] | None, + option_string: str | None = None, # noqa: ARG002 + ) -> None: + if isinstance(values, (list, tuple)): + str_values = ",".join(values) + elif isinstance(values, str): + str_values = values + elif values is None: + str_values = getattr(namespace, self.dest) + else: + raise TypeError(type(values)) + + cuda_arch = self.map_cuda_arch_names(str_values) + setattr(namespace, self.dest, cuda_arch) + + +class CUDA(Package): + With_CUDA: Final = ConfigArgument( + name="--with-cuda", + spec=ArgSpec( + dest="with_cuda", + type=bool, + default=shutil.which("nvcc") is not None, + help="Build with CUDA support.", + ), + enables_package=True, + primary=True, + ) + CUDAToolkit_ROOT: Final = ConfigArgument( + name="--with-cuda-dir", + spec=ArgSpec( + dest="cuda_dir", + type=Path, + default=os.environ.get("CUDA_HOME"), + required=False, + help="Path to CUDA installation directory.", + ), + cmake_var=CMAKE_VARIABLE("CUDAToolkit_ROOT", CMakePath), + enables_package=True, + ) + CMAKE_CUDA_COMPILER: Final = ConfigArgument( + name="--with-cudac", + spec=ArgSpec( + dest="CUDAC", + type=Path, + default=_guess_cuda_compiler(), + help="Specify CUDA compiler", + ), + cmake_var=CMAKE_VARIABLE("CMAKE_CUDA_COMPILER", CMakeExecutable), + enables_package=True, + ) + CMAKE_CUDA_FLAGS: Final = ConfigArgument( + name="--CUDAFLAGS", + spec=ArgSpec(dest="CUDAFLAGS", nargs=1, help="CUDA compiler flags"), + 
cmake_var=CMAKE_VARIABLE("CMAKE_CUDA_FLAGS", CMakeList), + ) + CMAKE_CUDA_ARCHITECTURES: Final = ConfigArgument( + name="--cuda-arch", + spec=ArgSpec( + dest="cuda_arch", + required=False, + default=["all-major"], + action=CudaArchAction, + help=( + "Specify the target GPU architecture. Available choices are: " + "'all-major', 'all', 'native', a comma-separated list of " + "numbers: '60' or '70, 80', or comma-separated list of names " + "'ampere' or 'hopper, blackwell'" + ), + ), + cmake_var=CMAKE_VARIABLE("CMAKE_CUDA_ARCHITECTURES", CMakeList), + ) + + def __init__(self, manager: ConfigurationManager) -> None: + r"""Construct a CUDA Package. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this package. + """ + super().__init__(manager=manager, name="CUDA") + + def configure(self) -> None: + r"""Configure CUDA.""" + super().configure() + if not self.state.enabled(): + return + + self.set_flag_if_user_set(self.CMAKE_CUDA_COMPILER, self.cl_args.CUDAC) + self._configure_language_flags( + self.CMAKE_CUDA_FLAGS, self.cl_args.CUDAFLAGS + ) + self.append_flags_if_set( + self.CMAKE_CUDA_ARCHITECTURES, self.cl_args.cuda_arch + ) + self.set_flag_if_user_set(self.CUDAToolkit_ROOT, self.cl_args.cuda_dir) + + def summarize(self) -> str: + r"""Summarize CUDA. + + Returns + ------- + summary : str + A summary of configured CUDA. + """ + if not self.state.enabled(): + return "" + + arches: list[str] | str | None = self.manager.get_cmake_variable( + self.CMAKE_CUDA_ARCHITECTURES + ) + if not arches: + arches = [] + if isinstance(arches, (list, tuple)): + arches = " ".join(arches) + ret = [("Architectures", arches)] + + if cuda_dir := self.manager.get_cmake_variable(self.CUDAToolkit_ROOT): + ret.append(("CUDA Dir", cuda_dir)) + + cc = self.manager.get_cmake_variable(self.CMAKE_CUDA_COMPILER) + assert cc is not None + ret.append(("Executable", cc)) + + version = self.log_execute_command([cc, "--version"]).stdout + ret.append(("Version", version)) + + ccflags = self.manager.get_cmake_variable(self.CMAKE_CUDA_FLAGS) + if not ccflags: + ccflags = "[]" + ret.append(("Flags", ccflags)) + return self.create_package_summary(ret) + + +def create_package(manager: ConfigurationManager) -> CUDA: + return CUDA(manager) diff --git a/config/aedifix/package/packages/gasnet.py b/config/aedifix/package/packages/gasnet.py new file mode 100644 index 0000000000..351c9ebb0d --- /dev/null +++ b/config/aedifix/package/packages/gasnet.py @@ -0,0 +1,104 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakePath, CMakeString +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class GASNet(Package): + With_GASNET: Final = ConfigArgument( + name="--with-gasnet", + spec=ArgSpec( + dest="with_gasnet", type=bool, help="Build with GASNet support." 
+ ), + enables_package=True, + primary=True, + ) + GASNet_ROOT_DIR: Final = ConfigArgument( + name="--with-gasnet-dir", + spec=ArgSpec( + dest="gasnet_dir", + type=Path, + help="Path to GASNet installation directory.", + ), + cmake_var=CMAKE_VARIABLE("GASNet_ROOT_DIR", CMakePath), + enables_package=True, + ) + GASNet_CONDUIT: Final = ConfigArgument( + name="--gasnet-conduit", + spec=ArgSpec( + dest="gasnet_conduit", + # TODO: To support UDP conduit, we would need to add a special case + # on the legate launcher. See + # https://github.com/nv-legate/legate.core/issues/294. + choices=("ibv", "ucx", "aries", "mpi", "ofi"), + help="Build with specified GASNet conduit.", + ), + cmake_var=CMAKE_VARIABLE("GASNet_CONDUIT", CMakeString), + enables_package=True, + ) + GASNet_SYSTEM: Final = ConfigArgument( + name="--gasnet-system", + spec=ArgSpec( + dest="gasnet_system", + help="Specify a system-specific configuration to use for GASNet", + ), + cmake_var=CMAKE_VARIABLE("GASNet_SYSTEM", CMakeString), + enables_package=True, + ) + + def __init__(self, manager: ConfigurationManager) -> None: + r"""Construct a GASNet Package. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this package. + """ + super().__init__(manager=manager, name="GASNet") + + def configure(self) -> None: + r"""Configure GASNet.""" + super().configure() + if not self.state.enabled(): + return + + self.set_flag_if_user_set( + self.GASNet_ROOT_DIR, self.cl_args.gasnet_dir + ) + self.set_flag_if_user_set( + self.GASNet_CONDUIT, self.cl_args.gasnet_conduit + ) + self.set_flag_if_user_set( + self.GASNet_SYSTEM, self.cl_args.gasnet_system + ) + + def summarize(self) -> str: + r"""Summarize GASNet. + + Returns + ------- + summary : str + A summary of configured GASNet. + """ + lines = [] + if root_dir := self.manager.get_cmake_variable(self.GASNet_ROOT_DIR): + lines.append(("Root directory", root_dir)) + if conduit := self.manager.get_cmake_variable(self.GASNet_CONDUIT): + lines.append(("Conduit(s)", conduit)) + if system := self.manager.get_cmake_variable(self.GASNet_SYSTEM): + lines.append(("System", system)) + return self.create_package_summary(lines) + + +def create_package(manager: ConfigurationManager) -> GASNet: + return GASNet(manager) diff --git a/config/aedifix/package/packages/hdf5.py b/config/aedifix/package/packages/hdf5.py new file mode 100644 index 0000000000..1d63431c21 --- /dev/null +++ b/config/aedifix/package/packages/hdf5.py @@ -0,0 +1,85 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import shutil
+from pathlib import Path
+from typing import TYPE_CHECKING, Final
+
+from ...cmake import CMAKE_VARIABLE, CMakePath
+from ...util.argument_parser import ArgSpec, ConfigArgument
+from ..package import Package
+
+if TYPE_CHECKING:
+    from ...manager import ConfigurationManager
+
+
+class HDF5(Package):
+    With_HDF5: Final = ConfigArgument(
+        name="--with-hdf5",
+        spec=ArgSpec(
+            dest="with_hdf5",
+            type=bool,
+            help="Build with HDF5 support.",
+            default=bool(shutil.which("h5dump")),
+        ),
+        enables_package=True,
+        primary=True,
+    )
+    HDF5_ROOT: Final = ConfigArgument(
+        name="--with-hdf5-dir",
+        spec=ArgSpec(
+            dest="hdf5_dir",
+            type=Path,
+            help="Path to HDF5 installation directory.",
+        ),
+        cmake_var=CMAKE_VARIABLE("HDF5_ROOT", CMakePath),
+        enables_package=True,
+    )
+    HDF5_DIR: Final = CMAKE_VARIABLE("HDF5_DIR", CMakePath)
+
+    def __init__(self, manager: ConfigurationManager) -> None:
+        r"""Construct an HDF5 Package.
+
+        Parameters
+        ----------
+        manager : ConfigurationManager
+            The configuration manager to manage this package.
+        """
+        super().__init__(manager=manager, name="HDF5")
+
+    def configure(self) -> None:
+        r"""Configure HDF5."""
+        super().configure()
+        if not self.state.enabled():
+            return
+
+        self.set_flag_if_user_set(self.HDF5_ROOT, self.cl_args.hdf5_dir)
+
+    def summarize(self) -> str:
+        r"""Summarize HDF5.
+
+        Returns
+        -------
+        summary : str
+            A summary of configured HDF5.
+        """
+        if not self.state.enabled():
+            return ""
+
+        lines = []
+
+        def get_root_dir() -> str | None:
+            # Depending on how HDF5 was located, CMake may record the root
+            # under HDF5_DIR rather than HDF5_ROOT, so check both.
+            root_dir = self.manager.get_cmake_variable(self.HDF5_ROOT)
+            if not root_dir:
+                root_dir = self.manager.get_cmake_variable(self.HDF5_DIR)
+            return root_dir
+
+        if root_dir := get_root_dir():
+            lines.append(("Root directory", root_dir))
+        return self.create_package_summary(lines)
+
+
+def create_package(manager: ConfigurationManager) -> HDF5:
+    return HDF5(manager)
diff --git a/config/aedifix/package/packages/legion.py b/config/aedifix/package/packages/legion.py
new file mode 100644
index 0000000000..2ae5a8ec45
--- /dev/null
+++ b/config/aedifix/package/packages/legion.py
@@ -0,0 +1,407 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Any, Final, cast + +from ...cmake import ( + CMAKE_VARIABLE, + CMakeBool, + CMakeInt, + CMakeList, + CMakePath, + CMakeString, +) +from ...util.argument_parser import ( + ArgSpec, + ConfigArgument, + ExclusiveArgumentGroup, +) +from ...util.exception import UnsatisfiableConfigurationError +from ...util.utility import dest_to_flag +from ..package import Package +from .cuda import CUDA +from .gasnet import GASNet +from .mpi import MPI +from .openmp import OpenMP +from .python import Python +from .ucx import UCX +from .zlib import ZLIB + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class Legion(Package): + DirGroup: Final = ExclusiveArgumentGroup( + Legion_ROOT=ConfigArgument( + name="--with-legion-dir", + spec=ArgSpec( + dest="legion_dir", + type=Path, + help="Path to an existing Legion build directory.", + ), + cmake_var=CMAKE_VARIABLE("Legion_ROOT", CMakePath), + ), + CPM_Legion_SOURCE=ConfigArgument( + name="--with-legion-src-dir", + spec=ArgSpec( + dest="with_legion_src_dir", + type=Path, + help="Path to an existing Legion source directory.", + ), + cmake_var=CMAKE_VARIABLE("CPM_Legion_SOURCE", CMakePath), + ), + ) + Legion_BRANCH: Final = ConfigArgument( + name="--legion-branch", + spec=ArgSpec( + dest="legion_branch", help="Git branch to download for Legion" + ), + ) + Legion_MAX_DIM: Final = ConfigArgument( + name="--legion-max-dim", + spec=ArgSpec( + dest="legion_max_dim", + type=int, + default=4, + help="Maximum number of dimensions that Legion will support", + ), + cmake_var=CMAKE_VARIABLE("Legion_MAX_DIM", CMakeInt), + ) + Legion_MAX_FIELDS: Final = ConfigArgument( + name="--legion-max-fields", + spec=ArgSpec( + dest="legion_max_fields", + type=int, + default=256, + help="Maximum number of fields that Legion will support", + ), + cmake_var=CMAKE_VARIABLE("Legion_MAX_FIELDS", CMakeInt), + ) + Legion_SPY: Final = ConfigArgument( + name="--legion-spy", + spec=ArgSpec( + dest="legion_spy", + type=bool, + help="Build with detailed Legion Spy enabled.", + ), + cmake_var=CMAKE_VARIABLE("Legion_SPY", CMakeBool), + ) + Legion_BOUNDS_CHECKS: Final = ConfigArgument( + name="--legion-bounds-checks", + spec=ArgSpec( + dest="legion_bounds_checks", + type=bool, + help=( + "Build Legion with bounds checking enabled " + "(warning: expensive)." 
+            ),
+        ),
+        cmake_var=CMAKE_VARIABLE("Legion_BOUNDS_CHECKS", CMakeBool),
+    )
+    Legion_BUILD_RUST_PROFILER: Final = ConfigArgument(
+        name="--legion-rust-profiler",
+        spec=ArgSpec(
+            dest="legion_rust_profiler",
+            type=bool,
+            help="Build the Legion profiler (requires Rust).",
+        ),
+        cmake_var=CMAKE_VARIABLE("Legion_BUILD_RUST_PROFILER", CMakeBool),
+    )
+    Legion_CXX_FLAGS: Final = ConfigArgument(
+        name="--legion-cxx-flags",
+        spec=ArgSpec(
+            dest="legion_cxx_flags", nargs=1, help="C++ flags for Legion"
+        ),
+        cmake_var=CMAKE_VARIABLE("Legion_CXX_FLAGS", CMakeList),
+    )
+    Legion_CUDA_FLAGS: Final = ConfigArgument(
+        name="--legion-cuda-flags",
+        spec=ArgSpec(
+            dest="legion_cuda_flags", nargs=1, help="CUDA flags for Legion"
+        ),
+        cmake_var=CMAKE_VARIABLE("Legion_CUDA_FLAGS", CMakeList),
+    )
+
+    Legion_EMBED_GASNet_CONFIGURE_ARGS: Final = CMAKE_VARIABLE(
+        "Legion_EMBED_GASNet_CONFIGURE_ARGS", CMakeList
+    )
+    Legion_USE_CUDA: Final = CMAKE_VARIABLE("Legion_USE_CUDA", CMakeBool)
+    Legion_USE_OpenMP: Final = CMAKE_VARIABLE("Legion_USE_OpenMP", CMakeBool)
+    Legion_USE_Python: Final = CMAKE_VARIABLE("Legion_USE_Python", CMakeBool)
+    Legion_USE_ZLIB: Final = CMAKE_VARIABLE("Legion_USE_ZLIB", CMakeBool)
+    Legion_Python_Version: Final = CMAKE_VARIABLE(
+        "Legion_Python_Version", CMakeString
+    )
+    Legion_NETWORKS: Final = CMAKE_VARIABLE("Legion_NETWORKS", CMakeString)
+    Legion_BUILD_JUPYTER: Final = CMAKE_VARIABLE(
+        "Legion_BUILD_JUPYTER", CMakeBool
+    )
+    Legion_BUILD_BINDINGS: Final = CMAKE_VARIABLE(
+        "Legion_BUILD_BINDINGS", CMakeBool
+    )
+    CPM_DOWNLOAD_Legion: Final = CMAKE_VARIABLE(
+        "CPM_DOWNLOAD_Legion", CMakeBool
+    )
+    Legion_DIR: Final = CMAKE_VARIABLE("Legion_DIR", CMakePath)
+
+    def __init__(self, manager: ConfigurationManager) -> None:
+        r"""Construct a Legion Package.
+
+        Parameters
+        ----------
+        manager : ConfigurationManager
+            The configuration manager to manage this package.
+        """
+        super().__init__(
+            manager=manager,
+            name="Legion",
+            always_enabled=True,
+            dependencies=(CUDA, GASNet, OpenMP, Python, MPI, UCX, ZLIB),
+        )
+
+    def check_conflicting_options(self) -> None:
+        r"""Check for conflicting options that are too complicated to
+        describe statically.
+
+        Raises
+        ------
+        UnsatisfiableConfigurationError
+            If both --with-legion-src-dir and --legion-branch are set.
+        """
+        with_legion_src_dir = self.cl_args.with_legion_src_dir
+        legion_branch = self.cl_args.legion_branch
+        if with_legion_src_dir.value and legion_branch.value:
+            msg = (
+                "Cannot specify both "
+                f"{dest_to_flag(with_legion_src_dir.name)} and "
+                f"{dest_to_flag(legion_branch.name)}, their combined meaning "
+                "is ambiguous. If the source dir is given, the contents of "
+                "the directory are used as-is (i.e. using whatever branch or "
+                "commit that dir is currently on), so the chosen Legion "
+                "branch would have no effect."
+            )
+            raise UnsatisfiableConfigurationError(msg)
+
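+    # To make the conflict above concrete, a hypothetical invocation (the
+    # flag spellings come from the ConfigArguments above; the script name
+    # and paths are illustrative only):
+    #
+    #   ./configure --with-legion-src-dir=/path/to/legion --legion-branch=main
+    #
+    # The on-disk source tree already pins a branch/commit, so the requested
+    # branch could never take effect; configure refuses the combination.
+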
+    def configure_root_dirs(self) -> None:
+        r"""Configure the various "root" directories that Legion requires."""
+        dir_group = self.DirGroup
+        if (lg_dir := self.cl_args.legion_dir).cl_set:
+            self.manager.set_cmake_variable(
+                dir_group.Legion_ROOT,  # type: ignore[attr-defined]
+                lg_dir.value,
+            )
+        elif (lg_src_dir := self.cl_args.with_legion_src_dir).cl_set:
+            self.manager.set_cmake_variable(
+                dir_group.CPM_Legion_SOURCE,  # type: ignore[attr-defined]
+                lg_src_dir.value,
+            )
+
+    def configure_variables(self) -> None:
+        r"""Configure the assorted Legion variables, forwarding any values
+        the user set on the command line.
+        """
+        self.set_flag_if_user_set(
+            self.Legion_MAX_DIM, self.cl_args.legion_max_dim
+        )
+        self.set_flag_if_user_set(
+            self.Legion_MAX_FIELDS, self.cl_args.legion_max_fields
+        )
+        self.set_flag_if_user_set(self.Legion_SPY, self.cl_args.legion_spy)
+        self.set_flag_if_user_set(
+            self.Legion_BOUNDS_CHECKS, self.cl_args.legion_bounds_checks
+        )
+        self.set_flag_if_user_set(
+            self.Legion_BUILD_RUST_PROFILER, self.cl_args.legion_rust_profiler
+        )
+
+        self.append_flags_if_set(
+            self.Legion_CXX_FLAGS, self.cl_args.legion_cxx_flags
+        )
+
+    def configure_cuda(self) -> None:
+        r"""If CUDA is enabled, set the various CUDA flags for Legion;
+        explicitly disable CUDA if the user opted out.
+
+        Raises
+        ------
+        RuntimeError
+            If CUDA flags are requested but CUDA is not enabled.
+        """
+        cuda_state = self.deps.CUDA.state
+        if cuda_state.enabled():
+            self.manager.set_cmake_variable(self.Legion_USE_CUDA, True)
+            self.append_flags_if_set(
+                self.Legion_CUDA_FLAGS, self.cl_args.legion_cuda_flags
+            )
+        elif self.cl_args.legion_cuda_flags.cl_set:
+            msg = (
+                "--legion-cuda-flags set "
+                f"({self.cl_args.legion_cuda_flags.value}), "
+                "but CUDA is not enabled."
+            )
+            raise RuntimeError(msg)
+        elif cuda_state.explicitly_disabled():
+            self.manager.set_cmake_variable(self.Legion_USE_CUDA, False)
+
+    def configure_gasnet(self) -> None:
+        r"""Configure Legion to use GASNet. Does nothing if GASNet is not
+        enabled.
+        """
+        if self.deps.GASNet.state.enabled():
+            self.manager.append_cmake_variable(
+                self.Legion_EMBED_GASNet_CONFIGURE_ARGS,
+                ["--with-ibv-max-hcas=8"],
+            )
+
+    def configure_openmp(self) -> None:
+        r"""Configure Legion to use OpenMP. Explicitly disables OpenMP if
+        the user opted out.
+        """
+        omp_state = self.deps.OpenMP.state
+        if omp_state.enabled():
+            self.manager.set_cmake_variable(self.Legion_USE_OpenMP, True)
+        elif omp_state.explicitly_disabled():
+            self.manager.set_cmake_variable(self.Legion_USE_OpenMP, False)
+
+    def configure_python(self) -> None:
+        r"""Configure Legion to use Python. Explicitly disables the Python
+        bindings if the user opted out.
+        """
+        python = cast(Python, self.deps.Python)
+        py_state = python.state
+        if py_state.enabled():
+            self.manager.set_cmake_variable(self.Legion_BUILD_BINDINGS, True)
+            self.manager.set_cmake_variable(self.Legion_USE_Python, True)
+            self.manager.set_cmake_variable(self.Legion_BUILD_JUPYTER, True)
+            self.manager.set_cmake_variable(
+                self.Legion_Python_Version, python.lib_version
+            )
+        elif py_state.explicitly_disabled():
+            self.manager.set_cmake_variable(self.Legion_BUILD_BINDINGS, False)
+            self.manager.set_cmake_variable(self.Legion_BUILD_JUPYTER, False)
+            self.manager.set_cmake_variable(self.Legion_USE_Python, False)
+
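+    # The configure_* helpers above and below share a tri-state pattern. A
+    # sketch of the convention as used here (not an authoritative spec of
+    # Package.EnableState):
+    #
+    #   state = self.deps.<Dep>.state
+    #   if state.enabled():                  # requested, or found by default
+    #       # set the corresponding Legion_USE_* variable to True
+    #   elif state.explicitly_disabled():    # user opted out on command line
+    #       # force the variable to False
+    #   # otherwise: leave the variable unset and let CMake decide
+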
+    def configure_zlib(self) -> None:
+        r"""Configure Legion to use ZLIB. Disables ZLIB if it was
+        explicitly disabled.
+        """
+        zlib_state = self.deps.ZLIB.state
+        if zlib_state.enabled():
+            self.manager.set_cmake_variable(self.Legion_USE_ZLIB, True)
+        elif zlib_state.explicitly_disabled():
+            self.manager.set_cmake_variable(self.Legion_USE_ZLIB, False)
+
+    def configure_networks(self) -> None:
+        r"""Collect all enabled networks and configure Legion to use them."""
+        networks = []
+        explicit_disable = False
+        network_map = {"GASNet": "gasnetex", "UCX": "ucx", "MPI": "mpi"}
+        for py_attr, net_name in network_map.items():
+            state = getattr(self.deps, py_attr).state
+            if state.enabled():
+                networks.append(net_name)
+            elif state.explicit:
+                # explicitly disabled
+                explicit_disable = True
+
+        if len(networks) > 1:
+            self.log_warning(
+                "Building Realm with multiple networking backends "
+                f"({', '.join(networks)}) is not fully supported currently."
+            )
+        if networks:
+            self.manager.set_cmake_variable(
+                self.Legion_NETWORKS, ";".join(networks)
+            )
+        elif explicit_disable:
+            # ensure that it is properly cleared
+            self.manager.set_cmake_variable(self.Legion_NETWORKS, "")
+
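+    # For example (illustrative values only): enabling UCX and MPI together
+    # yields Legion_NETWORKS="ucx;mpi", since CMake encodes lists as
+    # semicolon-separated strings, while explicitly opting out of every
+    # backend pins the variable to the empty string so that no stale value
+    # survives from a previous configure run.
+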
+    def configure(self) -> None:
+        r"""Configure Legion."""
+        super().configure()
+        self.log_execute_func(self.check_conflicting_options)
+        self.log_execute_func(self.configure_root_dirs)
+        self.log_execute_func(self.configure_variables)
+        self.log_execute_func(self.configure_cuda)
+        self.log_execute_func(self.configure_gasnet)
+        self.log_execute_func(self.configure_openmp)
+        self.log_execute_func(self.configure_python)
+        self.log_execute_func(self.configure_zlib)
+        self.log_execute_func(self.configure_networks)
+
+    def summarize(self) -> str:
+        r"""Summarize Legion.
+
+        Returns
+        -------
+        summary : str
+            A summary of configured Legion.
+        """
+        m = self.manager
+
+        def get_location() -> Path | None:
+            dir_group = self.DirGroup
+            root_dir = m.get_cmake_variable(
+                dir_group.Legion_ROOT  # type: ignore[attr-defined]
+            )
+            if root_dir:
+                return Path(root_dir)
+
+            root_dir = m.get_cmake_variable(self.Legion_DIR)
+            if root_dir:
+                return Path(root_dir)
+
+            root_dir = m.get_cmake_variable(
+                dir_group.CPM_Legion_SOURCE  # type: ignore[attr-defined]
+            )
+            if root_dir:
+                root_path = Path(root_dir)
+                # If the source directory is relative to the cmake
+                # directory, then we downloaded Legion, but set
+                # CPM_Legion_SOURCE ourselves.
+                if not root_path.is_relative_to(m.project_cmake_dir):
+                    return root_path
+            return None
+
+        lines: list[tuple[str, Any]] = []
+        root_dir = get_location()
+        downloaded = root_dir is None
+        lines.append(("Downloaded", downloaded))
+        if not downloaded:
+            assert root_dir is not None  # pacify mypy
+            lines.append((" Root dir", root_dir))
+
+        if cxx_flags := m.get_cmake_variable(self.Legion_CXX_FLAGS):
+            lines.append(("C++ flags", cxx_flags))
+
+        lines.append(("With CUDA", m.get_cmake_variable(self.Legion_USE_CUDA)))
+        if cuda_flags := m.get_cmake_variable(self.Legion_CUDA_FLAGS):
+            lines.append(("CUDA flags", cuda_flags))
+
+        networks = m.get_cmake_variable(self.Legion_NETWORKS) or "None"
+        lines.append(("Networks", networks))
+        lines.append(
+            ("Bounds checks", m.get_cmake_variable(self.Legion_BOUNDS_CHECKS))
+        )
+        lines.append(("Max dim", m.get_cmake_variable(self.Legion_MAX_DIM)))
+        lines.append(
+            ("Max fields", m.get_cmake_variable(self.Legion_MAX_FIELDS))
+        )
+        lines.append(("Build Spy", m.get_cmake_variable(self.Legion_SPY)))
+        lines.append(
+            (
+                "Build Rust profiler",
+                m.get_cmake_variable(self.Legion_BUILD_RUST_PROFILER),
+            )
+        )
+        # TODO continue
+
+        return self.create_package_summary(lines)
+
+
+def create_package(manager: ConfigurationManager) -> Legion:
+    return Legion(manager)
diff --git a/config/aedifix/package/packages/mpi.py b/config/aedifix/package/packages/mpi.py
new file mode 100644
index 0000000000..9dd3bde45c
--- /dev/null
+++ b/config/aedifix/package/packages/mpi.py
@@ -0,0 +1,119 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import shutil
+from pathlib import Path
+from typing import TYPE_CHECKING, Final
+
+from ...cmake import CMAKE_VARIABLE, CMakeExecutable, CMakePath
+from ...util.argument_parser import ArgSpec, ConfigArgument
+from ..package import Package
+
+if TYPE_CHECKING:
+    from ...manager import ConfigurationManager
+
+
+def _gen_mpiexec_guesses() -> str | None:
+    for guess in ("mpiexec", "mpirun"):
+        if found := shutil.which(guess):
+            return found
+    return None
+
+
+class MPI(Package):
+    With_MPI: Final = ConfigArgument(
+        name="--with-mpi",
+        spec=ArgSpec(
+            dest="with_mpi", type=bool, help="Build with MPI support."
+        ),
+        enables_package=True,
+        primary=True,
+    )
+    MPI_HOME: Final = ConfigArgument(
+        name="--with-mpi-dir",
+        spec=ArgSpec(
+            dest="mpi_dir",
+            type=Path,
+            help="Path to MPI installation directory.",
+        ),
+        cmake_var=CMAKE_VARIABLE("MPI_HOME", CMakePath),
+        enables_package=True,
+    )
+    MPIEXEC_EXECUTABLE: Final = ConfigArgument(
+        name="--with-mpiexec-executable",
+        spec=ArgSpec(
+            dest="mpiexec",
+            default=_gen_mpiexec_guesses(),
+            type=Path,
+            help="Path to mpiexec executable.",
+        ),
+        cmake_var=CMAKE_VARIABLE("MPIEXEC_EXECUTABLE", CMakeExecutable),
+        enables_package=True,
+    )
+    MPI_CXX_COMPILER: Final = CMAKE_VARIABLE(
+        "MPI_CXX_COMPILER", CMakeExecutable
+    )
+    MPI_CXX_COMPILER_INCLUDE_DIRS: Final = CMAKE_VARIABLE(
+        "MPI_CXX_COMPILER_INCLUDE_DIRS", CMakePath
+    )
+    MPI_C_COMPILER: Final = CMAKE_VARIABLE("MPI_C_COMPILER", CMakeExecutable)
+    MPI_C_COMPILER_INCLUDE_DIRS: Final = CMAKE_VARIABLE(
+        "MPI_C_COMPILER_INCLUDE_DIRS", CMakePath
+    )
+
+    def __init__(self, manager: ConfigurationManager) -> None:
+        r"""Construct an MPI Package.
+
+        Parameters
+        ----------
+        manager : ConfigurationManager
+            The configuration manager to manage this package.
+ """ + super().__init__(manager=manager, name="MPI") + + def configure(self) -> None: + r"""Configure MPI.""" + super().configure() + if not self.state.enabled(): + return + + self.set_flag_if_user_set(self.MPI_HOME, self.cl_args.mpi_dir) + self.set_flag_if_user_set( + self.MPIEXEC_EXECUTABLE, self.cl_args.mpiexec + ) + + def summarize(self) -> str: + r"""Summarize MPI. + + Returns + ------- + summary : str + A summary of configured MPI. + """ + if not self.state.enabled(): + return "" + + lines = [] + if mpi_dir := self.manager.get_cmake_variable(self.MPI_HOME): + lines.append(("Root dir", mpi_dir)) + if mpicc := self.manager.get_cmake_variable(self.MPI_C_COMPILER): + lines.append(("mpicc", mpicc)) + if mpicc_inc := self.manager.get_cmake_variable( + self.MPI_C_COMPILER_INCLUDE_DIRS + ): + lines.append(("C Include Dirs", mpicc_inc)) + if mpicxx := self.manager.get_cmake_variable(self.MPI_CXX_COMPILER): + lines.append(("mpicxx", mpicxx)) + if mpicxx_inc := self.manager.get_cmake_variable( + self.MPI_CXX_COMPILER_INCLUDE_DIRS + ): + lines.append(("C++ Include Dirs", mpicxx_inc)) + if mpiexec := self.manager.get_cmake_variable(self.MPIEXEC_EXECUTABLE): + lines.append(("mpiexec", mpiexec)) + return self.create_package_summary(lines) + + +def create_package(manager: ConfigurationManager) -> MPI: + return MPI(manager) diff --git a/config/aedifix/package/packages/nccl.py b/config/aedifix/package/packages/nccl.py new file mode 100644 index 0000000000..0553230df2 --- /dev/null +++ b/config/aedifix/package/packages/nccl.py @@ -0,0 +1,70 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakePath +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package +from .cuda import CUDA + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class NCCL(Package): + With_NCCL: Final = ConfigArgument( + name="--with-nccl", + spec=ArgSpec( + dest="with_nccl", type=bool, help="Build with NCCL support." + ), + enables_package=True, + primary=True, + ) + NCCL_DIR: Final = ConfigArgument( + name="--with-nccl-dir", + spec=ArgSpec( + dest="nccl_dir", + type=Path, + help="Path to NCCL installation directory.", + ), + cmake_var=CMAKE_VARIABLE("NCCL_DIR", CMakePath), + enables_package=True, + ) + + def __init__(self, manager: ConfigurationManager) -> None: + super().__init__(manager=manager, name="NCCL", dependencies=(CUDA,)) + + def configure(self) -> None: + r"""Configure NCCL.""" + super().configure() + # TODO(jfaibussowit) + # Make this kind of relationship statically declarable from the CTOR, + # by updating the dependencies argument to include a "this dependency + # also enables the current package" + if not self.state.explicit and self.deps.CUDA.state.enabled(): + self._state = Package.EnableState(value=True) + if not self.state.enabled(): + return + + self.set_flag_if_user_set(self.NCCL_DIR, self.cl_args.nccl_dir) + + def summarize(self) -> str: + r"""Summarize NCCL. + + Returns + ------- + summary : str + A summary of configured NCCL. 
+ """ + lines = [] + if nccl_dir := self.manager.get_cmake_variable(self.NCCL_DIR): + lines.append(("Root dir", nccl_dir)) + return self.create_package_summary(lines) + + +def create_package(manager: ConfigurationManager) -> NCCL: + return NCCL(manager) diff --git a/config/aedifix/package/packages/openmp.py b/config/aedifix/package/packages/openmp.py new file mode 100644 index 0000000000..1f16f664ac --- /dev/null +++ b/config/aedifix/package/packages/openmp.py @@ -0,0 +1,51 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakeString +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class OpenMP(Package): + With_OpenMP: Final = ConfigArgument( + name="--with-openmp", + spec=ArgSpec( + dest="with_openmp", type=bool, help="Build with OpenMP support." + ), + enables_package=True, + primary=True, + ) + OpenMP_VERSION = CMAKE_VARIABLE("OpenMP_VERSION", CMakeString) + OpenMP_CXX_FLAGS = CMAKE_VARIABLE("OpenMP_CXX_FLAGS", CMakeString) + + def __init__(self, manager: ConfigurationManager) -> None: + super().__init__(manager=manager, name="OpenMP") + + def summarize(self) -> str: + r"""Summarize configured OpenMP. + + Returns + ------- + summary : str + The summary of OpenMP + """ + if not self.state.enabled(): + return "" + + lines = [] + if version := self.manager.get_cmake_variable(self.OpenMP_VERSION): + lines.append(("Version", version)) + if flags := self.manager.get_cmake_variable(self.OpenMP_CXX_FLAGS): + lines.append(("Flags", flags)) + return self.create_package_summary(lines) + + +def create_package(manager: ConfigurationManager) -> OpenMP: + return OpenMP(manager) diff --git a/config/aedifix/package/packages/python.py b/config/aedifix/package/packages/python.py new file mode 100644 index 0000000000..37afb2755c --- /dev/null +++ b/config/aedifix/package/packages/python.py @@ -0,0 +1,89 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING, Final + +from ...util.argument_parser import ArgSpec, ConfigArgument +from ...util.exception import UnsatisfiableConfigurationError +from ...util.utility import find_active_python_version_and_path +from ..package import Package + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class Python(Package): + With_Python: Final = ConfigArgument( + name="--with-python", + spec=ArgSpec( + dest="with_python", type=bool, help="Build with Python bindings." + ), + enables_package=True, + primary=True, + ) + + def __init__(self, manager: ConfigurationManager) -> None: + super().__init__(manager=manager, name="Python") + + def configure_lib_version_and_paths(self) -> None: + r"""Determine the Python library version and its location.""" + try: + version, lib_path = find_active_python_version_and_path() + except (RuntimeError, FileNotFoundError) as excn: + if self.state.disabled(): + # Not sure how we'd get here + msg = ( + "The Python package does not appear to be enabled, yet we " + "are in the middle of configuring it. 
I'm not sure how we "
+                    "got here, this should not happen"
+                )
+                raise RuntimeError(msg) from excn
+            # Python is requested; now determine whether the user asked for
+            # it, or some other piece of the code did
+            if self.state.explicitly_enabled():
+                # If the user wants python, but we cannot find/use it, then
+                # that's a hard error
+                msg = (
+                    f"{excn}. You have explicitly requested Python via "
+                    f"{self.With_Python.name} {self.cl_args.with_python.value}"
+                )
+                raise UnsatisfiableConfigurationError(msg) from excn
+            # Some other piece of code has set the cl_args to true
+            msg = (
+                f"{excn}. Some other package has implicitly enabled Python"
+                " but could not locate active lib directories for it"
+            )
+            raise RuntimeError(msg) from excn
+
+        self.lib_version = version
+        self.lib_path = lib_path
+        self.log(
+            f"Python: found lib version: {version} and library path {lib_path}"
+        )
+
+    def configure(self) -> None:
+        r"""Configure Python."""
+        super().configure()
+        if not self.state.enabled():
+            return
+
+        self.log_execute_func(self.configure_lib_version_and_paths)
+
+    def summarize(self) -> str:
+        r"""Summarize configured Python.
+
+        Returns
+        -------
+        summary : str
+            The summary of Python.
+        """
+        return self.create_package_summary(
+            [("Executable", sys.executable), ("Version", sys.version)]
+        )
+
+
+def create_package(manager: ConfigurationManager) -> Python:
+    return Python(manager)
diff --git a/config/aedifix/package/packages/ucx.py b/config/aedifix/package/packages/ucx.py
new file mode 100644
index 0000000000..307317edba
--- /dev/null
+++ b/config/aedifix/package/packages/ucx.py
@@ -0,0 +1,65 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from pathlib import Path
+from typing import TYPE_CHECKING, Final
+
+from ...cmake import CMAKE_VARIABLE, CMakePath
+from ...util.argument_parser import ArgSpec, ConfigArgument
+from ..package import Package
+
+if TYPE_CHECKING:
+    from ...manager import ConfigurationManager
+
+
+class UCX(Package):
+    With_UCX: Final = ConfigArgument(
+        name="--with-ucx",
+        spec=ArgSpec(
+            dest="with_ucx", type=bool, help="Build with UCX support."
+        ),
+        enables_package=True,
+        primary=True,
+    )
+    UCX_ROOT: Final = ConfigArgument(
+        name="--with-ucx-dir",
+        spec=ArgSpec(
+            dest="ucx_dir",
+            type=Path,
+            help="Path to UCX installation directory.",
+        ),
+        cmake_var=CMAKE_VARIABLE("UCX_ROOT", CMakePath),
+        enables_package=True,
+    )
+
+    def __init__(self, manager: ConfigurationManager) -> None:
+        super().__init__(manager=manager, name="UCX")
+
+    def configure(self) -> None:
+        r"""Configure UCX."""
+        super().configure()
+        # nothing to configure when UCX is disabled
+        if not self.state.enabled():
+            return
+
+        self.set_flag_if_user_set(self.UCX_ROOT, self.cl_args.ucx_dir)
+
+    def summarize(self) -> str:
+        r"""Summarize UCX.
+
+        Returns
+        -------
+        summary : str
+            The summary of UCX.
+        """
+        lines = []
+        if self.state.enabled() and (
+            root_dir := self.manager.get_cmake_variable(self.UCX_ROOT)
+        ):
+            lines.append(("Root dir", root_dir))
+        return self.create_package_summary(lines)
+
+
+def create_package(manager: ConfigurationManager) -> UCX:
+    return UCX(manager)
diff --git a/config/aedifix/package/packages/zlib.py b/config/aedifix/package/packages/zlib.py
new file mode 100644
index 0000000000..1d74c2c7c4
--- /dev/null
+++ b/config/aedifix/package/packages/zlib.py
@@ -0,0 +1,86 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +# ruff: noqa: A005 +from pathlib import Path +from typing import TYPE_CHECKING, Final + +from ...cmake import CMAKE_VARIABLE, CMakePath, CMakeString +from ...util.argument_parser import ArgSpec, ConfigArgument +from ..package import Package + +if TYPE_CHECKING: + from ...manager import ConfigurationManager + + +class ZLIB(Package): + With_ZLIB: Final = ConfigArgument( + name="--with-zlib", + spec=ArgSpec( + dest="with_zlib", type=bool, help="Build with Zlib support." + ), + enables_package=True, + primary=True, + ) + ZLIB_ROOT: Final = ConfigArgument( + name="--with-zlib-dir", + spec=ArgSpec( + dest="zlib_dir", + type=Path, + help="Path to ZLIB installation directory.", + ), + cmake_var=CMAKE_VARIABLE("ZLIB_ROOT", CMakePath), + enables_package=True, + ) + ZLIB_VERSION = CMAKE_VARIABLE("ZLIB_VERSION", CMakeString) + ZLIB_INCLUDE_DIRS = CMAKE_VARIABLE("ZLIB_INCLUDE_DIRS", CMakePath) + ZLIB_INCLUDE_DIR = CMAKE_VARIABLE("ZLIB_INCLUDE_DIR", CMakePath) + ZLIB_LIBRARIES = CMAKE_VARIABLE("ZLIB_LIBRARIES", CMakeString) + + def __init__(self, manager: ConfigurationManager) -> None: + r"""Construct a ZLIB package. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this package. + """ + super().__init__(manager=manager, name="ZLIB") + + def configure(self) -> None: + r"""Configure ZLIB.""" + super().configure() + if not self.state.enabled(): + return + + self.set_flag_if_user_set(self.ZLIB_ROOT, self.cl_args.zlib_dir) + + def summarize(self) -> str: + r"""Summarize configured ZLIB. + + Returns + ------- + summary : str + The summary of ZLIB + """ + if not self.state.enabled(): + return "" + + lines = [] + # Some versions of FindZLIB don't actually set these variables in the + # cache, so we may or may not find them + if version := self.manager.get_cmake_variable(self.ZLIB_VERSION): + lines.append(("Version", version)) + if inc_dirs := self.manager.get_cmake_variable(self.ZLIB_INCLUDE_DIRS): + lines.append(("Include Dirs", inc_dirs)) + elif inc_dir := self.manager.get_cmake_variable(self.ZLIB_INCLUDE_DIR): + lines.append(("Include Dir", inc_dir)) + if libs := self.manager.get_cmake_variable(self.ZLIB_LIBRARIES): + lines.append(("Libraries", libs)) + return self.create_package_summary(lines) + + +def create_package(manager: ConfigurationManager) -> ZLIB: + return ZLIB(manager) diff --git a/config/aedifix/reconfigure.py b/config/aedifix/reconfigure.py new file mode 100644 index 0000000000..661c9003b2 --- /dev/null +++ b/config/aedifix/reconfigure.py @@ -0,0 +1,165 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +import shutil +import inspect +from datetime import date +from typing import TYPE_CHECKING + +from .base import Configurable +from .util.utility import ( + CMAKE_TEMPLATES_DIR, + cmake_configure_file, + deduplicate_command_line_args, + prune_command_line_args, +) + +if TYPE_CHECKING: + from pathlib import Path + + from .manager import ConfigurationManager + + +class Reconfigure(Configurable): + __slots__ = "_backup", "_file" + + def __init__(self, manager: ConfigurationManager) -> None: + r"""Construct a Reconfigure. + + Parameters + ---------- + manager : ConfigurationManager + The configuration manager to manage this object. 
+ """ + super().__init__(manager=manager) + fname = ( + f"reconfigure-{self.project_arch.replace(' ', '-').casefold()}.py" + ) + self._file = self.project_arch_dir / fname + self._backup: Path | None = None + + @property + def reconfigure_file(self) -> Path: + r"""Get the full path to the reconfigure file. + + Returns + ------- + file : Path + The path to the reconfigure file. + """ + return self._file + + @staticmethod + def get_import_line(main_package_type: type) -> tuple[str, str]: + main_package_module = inspect.getmodule(main_package_type) + assert main_package_module is not None, ( + "Could not determine module containing the main package!" + ) + assert main_package_module.__package__ + return main_package_module.__name__, main_package_type.__name__ + + def sanitized_argv( + self, + argv: tuple[str, ...], + ephemeral_args: set[str], + extra_argv: list[str] | None, + ) -> list[str]: + cl_args = prune_command_line_args(argv, ephemeral_args) + cl_args = deduplicate_command_line_args(cl_args) + # We want to include an explicit --PROJECT_ARCH= in the + # reconfigure script, in case the current project arch was taken via + # environment variables. + arch_flag = f"--{self.project_arch_name}" + if not any(arg.startswith(arch_flag) for arg in cl_args): + cl_args.insert(0, f"{arch_flag}={self.project_arch}") + + if extra_argv: + if "--" not in cl_args: + cl_args.append("--") + cl_args.extend(extra_argv) + return cl_args + + def backup_reconfigure_script(self) -> None: + r"""Create a backup of the reconfigure script for builds where + --with-clean is specified, in case configure fails. + """ + symlink = self.project_dir / self.reconfigure_file.name + self.log(f"Attempting to backup reconfigure script: {symlink}") + if not symlink.exists(): + self.log( + f"Reconfigure script symlink ({symlink}) does not exist, " + "nothing to backup" + ) + return + + self._backup = symlink.with_suffix(symlink.suffix + ".bk") + self.log(f"Copying reconfigure script to backup: {self._backup}") + shutil.copy2(symlink, self._backup, follow_symlinks=True) + + def finalize( # type: ignore [override] + self, + main_package_type: type, + ephemeral_args: set[str], + extra_argv: list[str] | None = None, + ) -> None: + r"""Finalize the reconfigure script (i.e. instantiate it). + + Parameters + ---------- + main_package_type : type + The concrete type of the main package + ephemeral_args : set[str] + A set of arguments which appeared on the command line, but should + not make it into the reconfigure script. + extra_argv : list[str], optional + Additional verbatim commands passed to CMake. + + Notes + ----- + An example of an ephemeral arg is '--with-clean'. This argument should + only be handled once; subsequent reconfigurations should not continue + to delete and recreate the arch directory. 
+ """ + cl_args = self.sanitized_argv( + self.manager.argv, ephemeral_args, extra_argv + ) + cl_args_str = ",".join(f'"{arg}"' for arg in cl_args) + + mod_name, type_name = self.get_import_line(main_package_type) + + defs = { + "PYTHON_EXECUTABLE": sys.executable, + "YEAR": str(date.today().year), + "PROJECT_DIR": self.project_dir, + "MAIN_PACKAGE_MODULE": mod_name, + "MAIN_PACKAGE_TYPE": type_name, + "ARGV_LIST": cl_args_str, + } + + self.log_execute_func( + cmake_configure_file, + self, + CMAKE_TEMPLATES_DIR / "reconfigure_file.py.in", + self.reconfigure_file, + defs, + ) + + symlink = self.project_dir / self.reconfigure_file.name + self.log(f"Symlinking reconfigure script to {symlink}") + try: + symlink.symlink_to( + self.reconfigure_file.relative_to(self.project_dir) + ) + except FileExistsError: + self.log("Symlink destination already exists") + + if self._backup is not None: + self.log( + f"Backup reconfigure script exists ({self._backup}), " + "removing it!" + ) + self._backup.unlink() + self._backup = None diff --git a/config/aedifix/templates/configure_file.cmake b/config/aedifix/templates/configure_file.cmake new file mode 100644 index 0000000000..e84371e117 --- /dev/null +++ b/config/aedifix/templates/configure_file.cmake @@ -0,0 +1,2 @@ +configure_file("${AEDIFIX_CONFIGURE_FILE_SRC}" "${AEDIFIX_CONFIGURE_FILE_DEST}" + USE_SOURCE_PERMISSIONS @ONLY) diff --git a/config/aedifix/templates/get_project_arch.py.in b/config/aedifix/templates/get_project_arch.py.in new file mode 100755 index 0000000000..9f6f7fff55 --- /dev/null +++ b/config/aedifix/templates/get_project_arch.py.in @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) @YEAR@ NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +# This file was automatically generated by: +# @FILE@ +# Any modifications may be lost when configure is next invoked. +from __future__ import annotations + + +def get_@PROJECT_NAME@_arch() -> str: + return "@PROJECT_ARCH_VALUE@" + + +def main() -> None: + print(get_@PROJECT_NAME@_arch(), end="", flush=True) + + +if __name__ == "__main__": + main() diff --git a/config/aedifix/templates/reconfigure_file.py.in b/config/aedifix/templates/reconfigure_file.py.in new file mode 100755 index 0000000000..f6f2e2782d --- /dev/null +++ b/config/aedifix/templates/reconfigure_file.py.in @@ -0,0 +1,22 @@ +#!@PYTHON_EXECUTABLE@ +# SPDX-FileCopyrightText: Copyright (c) @YEAR@ NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys + +sys.path.insert(0, "@PROJECT_DIR@") + +from config.aedifix.main import basic_configure + +from @MAIN_PACKAGE_MODULE@ import @MAIN_PACKAGE_TYPE@ + + +def main() -> int: + argv = [@ARGV_LIST@] + sys.argv[1:] + return basic_configure(tuple(argv), @MAIN_PACKAGE_TYPE@) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/aedifix/tests/__init__.py b/config/aedifix/tests/__init__.py new file mode 100644 index 0000000000..182ad12bb8 --- /dev/null +++ b/config/aedifix/tests/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0
diff --git a/docs/legate/core/source/_static/.keep b/config/aedifix/tests/cmake/__init__.py
similarity index 100%
rename from docs/legate/core/source/_static/.keep
rename to config/aedifix/tests/cmake/__init__.py
diff --git a/config/aedifix/tests/cmake/test_cmake_flags.py b/config/aedifix/tests/cmake/test_cmake_flags.py
new file mode 100644
index 0000000000..2485288ad5
--- /dev/null
+++ b/config/aedifix/tests/cmake/test_cmake_flags.py
@@ -0,0 +1,612 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import re
+import sys
+import copy
+import shutil
+from pathlib import Path
+from typing import Any
+
+import pytest
+
+from ...cmake.cmake_flags import (
+    CMakeBool,
+    CMakeExecutable,
+    CMakeInt,
+    CMakeList,
+    CMakePath,
+    CMakeString,
+)
+
+
+class TestCMakeList:
+    def test_create(self) -> None:
+        var = CMakeList("foo")
+        assert var.name == "foo"
+        assert var.value is None
+        assert var.prefix == "-D"
+        assert var.type == "STRING"
+
+        var = CMakeList("foo", value=[1, 2, 3])
+        assert var.name == "foo"
+        assert var.value == [1, 2, 3]
+        assert var.prefix == "-D"
+        assert var.type == "STRING"
+
+        var = CMakeList("foo", value=(1, 2, 3), prefix="bar")
+        assert var.name == "foo"
+        assert var.value == [1, 2, 3]
+        assert var.prefix == "bar"
+        assert var.type == "STRING"
+
+        var = CMakeList("foo", value="foo;bar")
+        assert var.name == "foo"
+        assert var.value == ["foo", "bar"]
+        assert var.prefix == "-D"
+        assert var.type == "STRING"
+
+    def test_create_bad(self) -> None:
+        # looking for the repr of the offending type, i.e. "<class 'int'>"
+        with pytest.raises(TypeError, match=re.escape(rf"{int}")):
+            CMakeList("foo", value=1)  # type: ignore[arg-type]
+
+    def test_canonicalize(self) -> None:
+        var = CMakeList("foo")
+        canon = var.canonicalize()
+        assert canon is None
+        assert id(canon) != id(var)  # must be distinct
+        assert var.name == "foo"
+        assert var.value is None
+        assert var.prefix == "-D"
+        assert var.type == "STRING"
+
+        var.value = [4, 5, 6]
+        canon = var.canonicalize()
+        assert isinstance(canon, CMakeList)
+        assert id(canon) != id(var)  # must be distinct
+        assert canon.name == var.name
+        assert canon.value == list(map(str, var.value))
+        assert id(canon.value) != id(var.value)  # must be distinct
+        assert canon.prefix == var.prefix
+        assert canon.type == var.type
+
+    @pytest.mark.parametrize(
+        "value", (None, [], ["1"], ["1", "2"], [34, 99, 999])
+    )
+    def test_to_command_line(self, value: list[Any] | None) -> None:
+        val_copy = copy.deepcopy(value)
+        var = CMakeList("foo", value=value)
+        cmd = var.to_command_line()
+        assert isinstance(cmd, str)
+        expected_str = "" if val_copy is None else " ".join(map(str, val_copy))
+        assert cmd == f"-Dfoo:STRING={expected_str}"
+        assert var.value == val_copy
+
+    def test_eq(self) -> None:
+        lhs = CMakeList("foo", value=(1, 2, 3), prefix="bar")
+        rhs = CMakeList("foo", value=(1, 2, 3), prefix="bar")
+        assert lhs == rhs
+
+    def test_neq(self) -> None:
+        lhs = CMakeList("foo", value=(1, 2, 3), prefix="bar")
+
+        rhs = CMakeList("bar", value=(1, 2, 3), prefix="bar")
+        assert lhs != rhs
+
+        rhs = CMakeList("foo", value=(1, 2, 3, 4), prefix="bar")
+        assert lhs != rhs
+
+        rhs = CMakeList("foo", value=(1, 2, 3), prefix="asdasd")
+        assert lhs != rhs
+
+        rhs_b = CMakeBool("foo", value=None, prefix="asdasd")
+        assert lhs != rhs_b
+
+
+class TestCMakeBool:
+    def test_create(self) -> None:
+        var = CMakeBool("foo")
+        assert var.name == "foo"
+        
assert var.value is None + assert var.prefix == "-D" + assert var.type == "BOOL" + + var = CMakeBool("foo", value=True) + assert var.name == "foo" + assert var.value is True + assert var.prefix == "-D" + assert var.type == "BOOL" + + var = CMakeBool("foo", value=1, prefix="bar") + assert var.name == "foo" + assert var.value is True + assert var.prefix == "bar" + assert var.type == "BOOL" + + var = CMakeBool("foo", value=False, prefix="bar") + assert var.name == "foo" + assert var.value is False + assert var.prefix == "bar" + assert var.type == "BOOL" + + def test_create_bad(self) -> None: + with pytest.raises(ValueError): # noqa: PT011 + _ = CMakeBool("foo", value="hello") + + with pytest.raises(ValueError): # noqa: PT011 + _ = CMakeBool("foo", value=400) + + with pytest.raises(TypeError): + _ = CMakeBool("foo", value=1.0) # type: ignore[arg-type] + + def test_canonicalize(self) -> None: + var = CMakeBool("foo") + canon = var.canonicalize() + assert canon is None + assert id(canon) != id(var) # must be distinct + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "BOOL" + + var.value = True + canon = var.canonicalize() + assert isinstance(canon, CMakeBool) + assert id(canon) != id(var) # must be distinct + assert canon.name == var.name + assert canon.value is True + assert canon.prefix == var.prefix + assert canon.type == var.type + + @pytest.mark.parametrize("value", (True, False, 0, 1)) + def test_to_command_line(self, value: Any) -> None: + val_copy = copy.deepcopy(value) + var = CMakeBool("foo", value=value) + cmd = var.to_command_line() + assert isinstance(cmd, str) + expected_str = ( + "" if val_copy is None else ("ON" if val_copy else "OFF") + ) + assert cmd == f"-Dfoo:BOOL={expected_str}" + assert var.value == bool(val_copy) + + def test_to_command_line_bad(self) -> None: + var = CMakeBool("foo", value=None) + with pytest.raises( + ValueError, + match='Cannot convert "foo" to command-line, have empty value', + ): + var.to_command_line() + + def test_eq(self) -> None: + lhs = CMakeBool("foo", value=True, prefix="bar") + rhs = CMakeBool("foo", value=True, prefix="bar") + assert lhs == rhs + + def test_neq(self) -> None: + lhs = CMakeBool("foo", value=True, prefix="bar") + + rhs = CMakeBool("bar", value=True, prefix="bar") + assert lhs != rhs + + rhs = CMakeBool("foo", value=False, prefix="bar") + assert lhs != rhs + + rhs = CMakeBool("foo", value=True, prefix="asdasd") + assert lhs != rhs + + rhs_i = CMakeInt("foo", value=1, prefix="bar") + assert lhs != rhs_i + + +class TestCMakeInt: + def test_create(self) -> None: + var = CMakeInt("foo") + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakeInt("foo", value=0) + assert var.name == "foo" + assert var.value == 0 + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakeInt("foo", value=10, prefix="bar") + assert var.name == "foo" + assert var.value == 10 + assert var.prefix == "bar" + assert var.type == "STRING" + + var = CMakeInt("foo", value=10.0) + assert var.name == "foo" + assert var.value == 10 + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakeInt("foo", value=True) + assert var.name == "foo" + assert var.value == 1 + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakeInt("foo", value="23") + assert var.name == "foo" + assert var.value == 23 + assert var.prefix == "-D" + assert var.type == "STRING" + + def test_create_bad(self) -> None: + with 
pytest.raises(ValueError): # noqa: PT011 + CMakeInt("foo", value="hello") + + with pytest.raises(TypeError): + CMakeInt("foo", value=complex(1, 2)) # type: ignore[arg-type] + + def test_canonicalize(self) -> None: + var = CMakeInt("foo") + canon = var.canonicalize() + assert canon is None + assert id(canon) != id(var) # must be distinct + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "STRING" + + var.value = 45 + canon = var.canonicalize() + assert isinstance(canon, CMakeInt) + assert id(canon) != id(var) # must be distinct + assert canon.name == var.name + assert canon.value == var.value + assert canon.prefix == var.prefix + assert canon.type == var.type + + @pytest.mark.parametrize("value", (0, 1, 10, True, False, 123.45, "38")) + def test_to_command_line(self, value: Any) -> None: + val_copy = copy.deepcopy(int(value)) + var = CMakeInt("foo", value=value) + cmd = var.to_command_line() + assert isinstance(cmd, str) + assert cmd == f"-Dfoo:STRING={val_copy}" + assert var.value == val_copy + + def test_to_command_line_bad(self) -> None: + var = CMakeInt("foo", value=None) + with pytest.raises( + ValueError, + match='Cannot convert "foo" to command-line, have empty value', + ): + var.to_command_line() + + def test_eq(self) -> None: + lhs = CMakeInt("foo", value=12, prefix="bar") + rhs = CMakeInt("foo", value=12, prefix="bar") + assert lhs == rhs + + def test_neq(self) -> None: + lhs = CMakeInt("foo", value=45, prefix="bar") + + rhs = CMakeInt("asdasd", value=45, prefix="bar") + assert lhs != rhs + + rhs = CMakeInt("foo", value=12, prefix="bar") + assert lhs != rhs + + rhs = CMakeInt("foo", value=45, prefix="asdasd") + assert lhs != rhs + + rhs_s = CMakeString("foo", value="", prefix="bar") + assert lhs != rhs_s + + +class TestCMakeString: + def test_create(self) -> None: + var = CMakeString("foo") + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakeString("foo", value="0") + assert var.name == "foo" + assert var.value == "0" + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakeString("foo", value="asdasd", prefix="bar") + assert var.name == "foo" + assert var.value == "asdasd" + assert var.prefix == "bar" + assert var.type == "STRING" + + def test_create_bad(self) -> None: + with pytest.raises(TypeError): + CMakeString("foo", value=1) # type: ignore[arg-type] + + with pytest.raises(TypeError): + CMakeString("foo", value=complex(1, 2)) # type: ignore[arg-type] + + with pytest.raises(TypeError): + CMakeString("foo", value=[1, 2]) # type: ignore[arg-type] + + def test_canonicalize(self) -> None: + var = CMakeString("foo") + canon = var.canonicalize() + assert canon is None + assert id(canon) != id(var) # must be distinct + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "STRING" + + var.value = "hello world" + canon = var.canonicalize() + assert isinstance(canon, CMakeString) + assert id(canon) != id(var) # must be distinct + assert canon.name == var.name + assert canon.value == var.value + assert canon.prefix == var.prefix + assert canon.type == var.type + + @pytest.mark.parametrize("value", ("hello", "goodbye", "38")) + def test_to_command_line(self, value: str) -> None: + val_copy = copy.deepcopy(value) + var = CMakeString("foo", value=value) + cmd = var.to_command_line() + assert isinstance(cmd, str) + assert cmd == f"-Dfoo:STRING={val_copy}" + assert var.value == val_copy + + def 
test_to_command_line_bad(self) -> None: + var = CMakeString("foo", value=None) + with pytest.raises( + ValueError, + match='Cannot convert "foo" to command-line, have empty value', + ): + var.to_command_line() + + def test_eq(self) -> None: + lhs = CMakeString("foo", value="hello", prefix="bar") + rhs = CMakeString("foo", value="hello", prefix="bar") + assert lhs == rhs + + def test_neq(self) -> None: + lhs = CMakeString("foo", value="hello", prefix="bar") + + rhs = CMakeString("asdads", value="hello", prefix="bar") + assert lhs != rhs + + rhs = CMakeString("foo", value="asdasd", prefix="bar") + assert lhs != rhs + + rhs = CMakeString("foo", value="hello", prefix="asdas") + assert lhs != rhs + + rhs_p = CMakePath("foo", value="/foo/bar", prefix="bar") + assert lhs != rhs_p + + +class TestCMakePath: + def test_create(self) -> None: + var = CMakePath("foo") + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakePath("foo", value="/foo/bar/baz") + assert var.name == "foo" + assert var.value == Path("/foo/bar/baz").resolve() + assert var.prefix == "-D" + assert var.type == "STRING" + + var = CMakePath("foo", value=Path("/foo/bar/baz"), prefix="bar") + assert var.name == "foo" + assert var.value == Path("/foo/bar/baz").resolve() + assert var.prefix == "bar" + assert var.type == "STRING" + + var = CMakePath("foo", value=__file__) + assert var.name == "foo" + assert var.value == Path(__file__).resolve() + assert var.prefix == "-D" + assert var.type == "FILEPATH" + + var = CMakePath("foo", value=Path(__file__)) + assert var.name == "foo" + assert var.value == Path(__file__).resolve() + assert var.prefix == "-D" + assert var.type == "FILEPATH" + + var = CMakePath("foo", value=Path(__file__).parent) + assert var.name == "foo" + assert var.value == Path(__file__).resolve().parent + assert var.prefix == "-D" + assert var.type == "PATH" + + def test_create_bad(self) -> None: + with pytest.raises(TypeError): + CMakePath("foo", value=1) # type: ignore[arg-type] + + with pytest.raises(TypeError): + CMakePath("foo", value=complex(1, 2)) # type: ignore[arg-type] + + with pytest.raises(TypeError): + CMakePath("foo", value=[1, 2]) # type: ignore[arg-type] + + def test_canonicalize(self) -> None: + var = CMakePath("foo") + canon = var.canonicalize() + assert canon is None + assert id(canon) != id(var) # must be distinct + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "STRING" + + var.value = Path(__file__) + canon = var.canonicalize() + assert isinstance(canon, CMakePath) + assert id(canon) != id(var) # must be distinct + assert canon.name == var.name + assert canon.value == var.value + assert canon.prefix == var.prefix + assert canon.type == var.type + + @pytest.mark.parametrize( + "value", ("/hello/world", "/goodbye/world", __file__) + ) + def test_to_command_line(self, value: str) -> None: + val_copy = copy.deepcopy(Path(value).resolve()) + var = CMakePath("foo", value=value) + cmd = var.to_command_line() + assert isinstance(cmd, str) + if val_copy.exists(): + type_str = "PATH" if val_copy.is_dir() else "FILEPATH" + else: + type_str = "STRING" + assert cmd == f"-Dfoo:{type_str}={val_copy}" + assert var.value == val_copy + + def test_to_command_line_bad(self) -> None: + var = CMakePath("foo", value=None) + with pytest.raises( + ValueError, + match='Cannot convert "foo" to command-line, have empty value', + ): + var.to_command_line() + + def test_eq(self) -> None: + lhs = 
CMakePath("foo", value="/foo/bar", prefix="bar") + rhs = CMakePath("foo", value="/foo/bar", prefix="bar") + assert lhs == rhs + + def test_neq(self) -> None: + lhs = CMakePath("foo", value="/foo/bar", prefix="bar") + + rhs = CMakePath("asdasd", value="/foo/bar", prefix="bar") + assert lhs != rhs + + rhs = CMakePath("foo", value="/foo/bar/baz", prefix="bar") + assert lhs != rhs + + rhs = CMakePath("foo", value="/foo/bar", prefix="asdasd") + assert lhs != rhs + + rhs_e = CMakeExecutable("foo", value="/foo/bar", prefix="bar") + assert lhs != rhs_e + + +class TestCMakeExecutable: + def test_create(self) -> None: + var = CMakeExecutable("foo") + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "FILEPATH" + + var = CMakeExecutable("foo", value="/foo/bar/baz") + assert var.name == "foo" + assert var.value == Path("/foo/bar/baz").resolve() + assert var.prefix == "-D" + assert var.type == "FILEPATH" + + var = CMakeExecutable("foo", value=Path("/foo/bar/baz"), prefix="bar") + assert var.name == "foo" + assert var.value == Path("/foo/bar/baz").resolve() + assert var.prefix == "bar" + assert var.type == "FILEPATH" + + var = CMakeExecutable("foo", value=sys.executable) + assert var.name == "foo" + assert var.value == Path(sys.executable) + assert var.prefix == "-D" + assert var.type == "FILEPATH" + + def test_create_bad(self) -> None: + with pytest.raises( + ValueError, match="Got a directory as an executable: .*" + ): + CMakeExecutable("foo", value=Path(sys.executable).parent) + + with pytest.raises(TypeError): + CMakeExecutable("foo", value=1) # type: ignore[arg-type] + + with pytest.raises(TypeError): + CMakeExecutable( + "foo", + value=complex(1, 2), # type: ignore[arg-type] + ) + + with pytest.raises(TypeError): + CMakeExecutable("foo", value=[1, 2]) # type: ignore[arg-type] + + def test_canonicalize(self) -> None: + var = CMakeExecutable("foo") + canon = var.canonicalize() + assert canon is None + assert id(canon) != id(var) # must be distinct + assert var.name == "foo" + assert var.value is None + assert var.prefix == "-D" + assert var.type == "FILEPATH" + + var.value = Path(__file__) + canon = var.canonicalize() + assert isinstance(canon, CMakeExecutable) + assert id(canon) != id(var) # must be distinct + assert canon.name == var.name + assert canon.value == var.value + assert canon.prefix == var.prefix + assert canon.type == var.type + + @pytest.mark.parametrize( + "value", (shutil.which("ls"), shutil.which("gcc"), __file__) + ) + def test_to_command_line(self, value: str | None) -> None: + if value is None: + return # test is not meaningful if these are not found + + val_copy = copy.deepcopy(Path(value)) + var = CMakeExecutable("foo", value=value) + cmd = var.to_command_line() + assert isinstance(cmd, str) + assert cmd == f"-Dfoo:FILEPATH={val_copy}" + assert var.value == val_copy + + def test_to_command_line_bad(self) -> None: + var = CMakeExecutable("foo", value=None) + with pytest.raises( + ValueError, + match='Cannot convert "foo" to command-line, have empty value', + ): + var.to_command_line() + + def test_eq(self) -> None: + lhs = CMakeExecutable("foo", value="/foo/bar/baz.py", prefix="bar") + rhs = CMakeExecutable("foo", value="/foo/bar/baz.py", prefix="bar") + assert lhs == rhs + + def test_neq(self) -> None: + lhs = CMakeExecutable("foo", value="/foo/bar/baz.py", prefix="bar") + + rhs = CMakeExecutable("asdasd", value="/foo/bar/baz.py", prefix="bar") + assert lhs != rhs + + rhs = CMakeExecutable("foo", value="/foo/bar/bop.py", 
prefix="bar") + assert lhs != rhs + + rhs = CMakeExecutable("foo", value="/foo/bar/baz.py", prefix="asdasd") + assert lhs != rhs + + rhs_l = CMakeList("foo", value=(1, 2), prefix="bar") + assert lhs != rhs_l + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/cmake/test_cmaker.py b/config/aedifix/tests/cmake/test_cmaker.py new file mode 100644 index 0000000000..6b7542a6d9 --- /dev/null +++ b/config/aedifix/tests/cmake/test_cmaker.py @@ -0,0 +1,142 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +import pytest + +from ...cmake.cmake_flags import CMakeBool, CMakeInt, CMakeList, CMakeString +from ...cmake.cmaker import CMaker +from ...util.exception import WrongOrderError + +if TYPE_CHECKING: + from ..fixtures.dummy_manager import DummyManager + + +@pytest.fixture +def cmaker() -> CMaker: + return CMaker() + + +class TestCMaker: + def test_create(self) -> None: + cmaker = CMaker() + assert cmaker._args == {} + + def test_register_variable( + self, cmaker: CMaker, manager: DummyManager + ) -> None: + var = CMakeString("foo", value="bar") + cmaker.register_variable(manager, var) + assert var.name in cmaker._args + assert cmaker._args[var.name] == var + + var2 = CMakeInt("bar", value=2) + cmaker.register_variable(manager, var2) + assert var.name in cmaker._args + assert var2.name in cmaker._args + assert cmaker._args[var.name] == var + assert cmaker._args[var2.name] == var2 + + def test_register_bad(self, cmaker: CMaker, manager: DummyManager) -> None: + var = CMakeString("foo", value="bar") + cmaker.register_variable(manager, var) + assert var.name in cmaker._args + assert cmaker._args[var.name] == var + + # same name, different kind + var2 = CMakeInt("foo", value=2) + with pytest.raises( + ValueError, + match=( + f"Variable foo already registered as kind {type(var)}, " + "cannot overwrite it!" 
+ ), + ): + cmaker.register_variable(manager, var2) + + def test_set_value(self, cmaker: CMaker, manager: DummyManager) -> None: + var = CMakeBool("foo") + assert var.value is None + cmaker.register_variable(manager, var) + assert cmaker._args[var.name] == var + cmaker.set_value(manager, "foo", False) + assert cmaker._args[var.name] == var + assert cmaker._args[var.name].value is False + + def test_set_value_bad( + self, cmaker: CMaker, manager: DummyManager + ) -> None: + with pytest.raises( + WrongOrderError, + match="No variable with name 'foo' has been registered", + ): + cmaker.set_value(manager, "foo", 1234) + + def test_get_value(self, cmaker: CMaker, manager: DummyManager) -> None: + var = CMakeBool("foo") + assert var.value is None + cmaker.register_variable(manager, var) + assert cmaker._args[var.name] == var + value = cmaker.get_value(manager, "foo") + assert value is None + assert var.value is None + + var.value = True + value = cmaker.get_value(manager, "foo") + assert value is True + + def test_get_value_bad( + self, cmaker: CMaker, manager: DummyManager + ) -> None: + with pytest.raises( + WrongOrderError, + match="No variable with name 'foo' has been registered", + ): + cmaker.get_value(manager, "foo") + + def test_append_value(self, cmaker: CMaker, manager: DummyManager) -> None: + var = CMakeList("foo") + assert var.value is None + cmaker.register_variable(manager, var) + + cmaker.append_value(manager, "foo", [1, 2, 3]) + assert var.value == [1, 2, 3] + assert cmaker._args[var.name].value == [1, 2, 3] + + # no change + cmaker.append_value(manager, "foo", []) + assert var.value == [1, 2, 3] + assert cmaker._args[var.name].value == [1, 2, 3] + + cmaker.append_value(manager, "foo", [4, 5, 6]) + assert var.value == [1, 2, 3, 4, 5, 6] + assert cmaker._args[var.name].value == [1, 2, 3, 4, 5, 6] + + cmaker.append_value(manager, "foo", ["7", "8"]) + assert var.value == [1, 2, 3, 4, 5, 6, "7", "8"] + assert cmaker._args[var.name].value == [1, 2, 3, 4, 5, 6, "7", "8"] + + def test_append_value_bad( + self, cmaker: CMaker, manager: DummyManager + ) -> None: + with pytest.raises( + WrongOrderError, + match="No variable with name 'foo' has been registered", + ): + cmaker.append_value(manager, "foo", [1, 2]) + + var_int = CMakeInt("foo") + cmaker.register_variable(manager, var_int) + + with pytest.raises( + TypeError, match=f"Cannot append to {type(var_int)}" + ): + cmaker.append_value(manager, "foo", [1, 2]) + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/conftest.py b/config/aedifix/tests/conftest.py new file mode 100644 index 0000000000..86a77e960b --- /dev/null +++ b/config/aedifix/tests/conftest.py @@ -0,0 +1,49 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import random +import string +from os import environ +from pathlib import Path + +import pytest + +from .fixtures.dummy_main_package import DummyMainPackage # noqa: F401 +from .fixtures.dummy_manager import DummyManager, manager # noqa: F401 + + +def _id_generator( + size: int = 8, chars: str = string.ascii_uppercase + string.digits +) -> str: + return "".join(random.choice(chars) for _ in range(size)).casefold() + + +@pytest.fixture(scope="session", autouse=True) +def setup_env() -> None: + environ["__AEDIFIX_TESTING_DO_NOT_USE_OR_YOU_WILL_BE_FIRED__"] = "1" + + +@pytest.fixture(autouse=True) +def setup_project_dir(tmp_path_factory: pytest.TempPathFactory) -> None: + tmp_path = tmp_path_factory.mktemp(_id_generator(size=16)) + environ["AEDIFIX_PYTEST_DIR"] = str(tmp_path) + print("\nAEDIFIX_PYTEST_DIR =", tmp_path) # noqa: T201 + + +@pytest.fixture(scope="session", autouse=True) +def setup_project_arch() -> None: + arch_val = "arch-pytest" + environ["AEDIFIX_PYTEST_ARCH"] = arch_val + print("AEDIFIX_PYTEST_ARCH =", arch_val) # noqa: T201 + + +@pytest.fixture +def AEDIFIX_PYTEST_DIR() -> Path: + return Path(environ["AEDIFIX_PYTEST_DIR"]) + + +@pytest.fixture +def AEDIFIX_PYTEST_ARCH() -> str: + return environ["AEDIFIX_PYTEST_ARCH"] diff --git a/legate/py.typed b/config/aedifix/tests/fixtures/__init__.py similarity index 100% rename from legate/py.typed rename to config/aedifix/tests/fixtures/__init__.py diff --git a/config/aedifix/tests/fixtures/dummy_main_module.py b/config/aedifix/tests/fixtures/dummy_main_module.py new file mode 100644 index 0000000000..9c11803d2d --- /dev/null +++ b/config/aedifix/tests/fixtures/dummy_main_module.py @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from os import environ +from pathlib import Path +from tempfile import NamedTemporaryFile +from typing import TYPE_CHECKING, Final + +from .dummy_main_package import DummyMainPackage + +if TYPE_CHECKING: + from collections.abc import Sequence + + from ...manager import ConfigurationManager + +_tmp_file: Final = NamedTemporaryFile() # noqa: SIM115 +_tmp_path: Final = Path(_tmp_file.name) + + +class DummyMainModule(DummyMainPackage): + def __init__( + self, manager: ConfigurationManager, argv: Sequence[str] + ) -> None: + super().__init__( + manager=manager, + argv=argv, + name="DummyMainModule", + arch_name="AEDIFIX_PYTEST_ARCH", + project_dir_name="AEDIFIX_PYTEST_DIR", + project_dir_value=Path(environ["AEDIFIX_PYTEST_DIR"]), + project_config_file_template=_tmp_path, + ) + + @classmethod + def from_argv( + cls, manager: ConfigurationManager, argv: Sequence[str] + ) -> DummyMainModule: + return cls(manager, argv) diff --git a/config/aedifix/tests/fixtures/dummy_main_package.py b/config/aedifix/tests/fixtures/dummy_main_package.py new file mode 100644 index 0000000000..df4f9be3f6 --- /dev/null +++ b/config/aedifix/tests/fixtures/dummy_main_package.py @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from os import environ +from pathlib import Path + +from ...package.main_package import MainPackage + + +class DummyMainPackage(MainPackage): + @property + def arch_value(self) -> str: + return environ[self.arch_name] + + @property + def project_dir_value(self) -> Path: + return Path(environ[self.project_dir_name]) diff --git a/config/aedifix/tests/fixtures/dummy_manager.py b/config/aedifix/tests/fixtures/dummy_manager.py new file mode 100644 index 0000000000..d9b4b40b45 --- /dev/null +++ b/config/aedifix/tests/fixtures/dummy_manager.py @@ -0,0 +1,46 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar + +import pytest + +from ...manager import ConfigurationManager +from .dummy_main_module import DummyMainModule + +if TYPE_CHECKING: + from collections.abc import Callable, Sequence + +_T = TypeVar("_T") +_P = ParamSpec("_P") + + +class DummyManager(ConfigurationManager): + def log(self, *args: Any, **kwargs: Any) -> None: + pass + + def log_divider(self, *args: Any, **kwargs: Any) -> None: + pass + + def log_boxed(self, *args: Any, **kwargs: Any) -> None: + pass + + def log_warning(self, *args: Any, **kwargs: Any) -> None: + pass + + def log_execute_command( + self, cmd: Sequence[_T], live: bool = False + ) -> Any: + pass + + def log_execute_func( # type: ignore[override] + self, func: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs + ) -> _T: + return func(*args, **kwargs) + + +@pytest.fixture +def manager() -> DummyManager: + return DummyManager((), DummyMainModule) diff --git a/config/aedifix/tests/fixtures/dummy_module.py b/config/aedifix/tests/fixtures/dummy_module.py new file mode 100644 index 0000000000..23b3f0b6f7 --- /dev/null +++ b/config/aedifix/tests/fixtures/dummy_module.py @@ -0,0 +1,17 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +MODULE_ATTRIBUTE = "a string" + + +def function() -> None: + pass + + +function.MAGIC_NUMBER = 1 # type: ignore[attr-defined] + + +class Class: + MAGIC_ATTR = complex(1, 3) diff --git a/config/aedifix/tests/package/__init__.py b/config/aedifix/tests/package/__init__.py new file mode 100644 index 0000000000..182ad12bb8 --- /dev/null +++ b/config/aedifix/tests/package/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/config/aedifix/tests/package/packages/__init__.py b/config/aedifix/tests/package/packages/__init__.py new file mode 100644 index 0000000000..182ad12bb8 --- /dev/null +++ b/config/aedifix/tests/package/packages/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/config/aedifix/tests/package/packages/test_cuda.py b/config/aedifix/tests/package/packages/test_cuda.py new file mode 100644 index 0000000000..8c9907c93e --- /dev/null +++ b/config/aedifix/tests/package/packages/test_cuda.py @@ -0,0 +1,32 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys + +import pytest + +from ....package.packages.cuda import CudaArchAction + +ARCH_STR: tuple[tuple[str, list[str]], ...] = ( + ("", []), + (",,", []), + ("70", ["70"]), + ("70,80", ["70", "80"]), + ("ampere", ["80"]), + ("turing,hopper", ["75", "90"]), + ("volta,60,all-major", ["70", "60", "all-major"]), + ("60,,80", ["60", "80"]), +) + + +class TestCudaArchAction: + @pytest.mark.parametrize(("argv", "expected"), ARCH_STR) + def test_map_cuda_arch_names(self, argv: str, expected: list[str]) -> None: + ret = CudaArchAction.map_cuda_arch_names(argv) + assert ret == expected + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/package/test_main_package.py b/config/aedifix/tests/package/test_main_package.py new file mode 100644 index 0000000000..2159950167 --- /dev/null +++ b/config/aedifix/tests/package/test_main_package.py @@ -0,0 +1,62 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +import pytest + +from ...package.main_package import DebugConfigureValue + +if TYPE_CHECKING: + from collections.abc import Iterator + +ALL_DEBUG_CONFIGURE_FLAGS = ( + DebugConfigureValue.NONE, + DebugConfigureValue.DEBUG_FIND, + DebugConfigureValue.TRACE, + DebugConfigureValue.TRACE_EXPAND, +) + +ALL_DEBUG_CMAKE_FLAGS = ("", "--debug-find", "--trace", "--trace-expand") + + +def gen_expected_flags() -> Iterator[list[str]]: + for i in range(len(ALL_DEBUG_CMAKE_FLAGS)): + ret = list(ALL_DEBUG_CMAKE_FLAGS[: i + 1]) + ret.remove("") + yield ret + + +class TestDebugConfigureValue: + @pytest.mark.parametrize( + ("val", "expected"), + list(zip(ALL_DEBUG_CONFIGURE_FLAGS, ALL_DEBUG_CMAKE_FLAGS)), + ) + def test_flag_matches( + self, val: DebugConfigureValue, expected: str + ) -> None: + assert val.to_flag() == expected + + def test_help_str(self) -> None: + help_str = DebugConfigureValue.help_str() + for cmake_flg in ALL_DEBUG_CMAKE_FLAGS: + assert cmake_flg in help_str + for flg in ALL_DEBUG_CONFIGURE_FLAGS: + assert flg.to_flag() in help_str + assert str(flg) in help_str + + @pytest.mark.parametrize( + ("val", "expected"), + list(zip(ALL_DEBUG_CONFIGURE_FLAGS, gen_expected_flags())), + ) + def test_to_flags( + self, val: DebugConfigureValue, expected: list[str] + ) -> None: + assert val.to_flags() == expected + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/test_base.py b/config/aedifix/tests/test_base.py new file mode 100644 index 0000000000..0e82984e0b --- /dev/null +++ b/config/aedifix/tests/test_base.py @@ -0,0 +1,42 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import re +import sys +from typing import TYPE_CHECKING + +import pytest + +from ..base import Configurable +from ..util.exception import WrongOrderError + +if TYPE_CHECKING: + from .fixtures.dummy_manager import DummyManager + + +@pytest.fixture +def configurable(manager: DummyManager) -> Configurable: + return Configurable(manager) + + +class TestConfigurable: + def test_create(self, manager: DummyManager) -> None: + conf = Configurable(manager) + assert conf.manager == manager + assert conf.project_name == manager.project_name + assert conf.project_arch == manager.project_arch + assert conf.project_arch_name == manager.project_arch_name + assert conf.project_dir == manager.project_dir + assert conf.project_dir_name == manager.project_dir_name + assert conf.project_arch_dir == manager.project_arch_dir + assert conf.project_cmake_dir == manager.project_cmake_dir + with pytest.raises( + WrongOrderError, match=re.escape("Must call setup() first") + ): + _ = conf.cl_args + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/test_config.py b/config/aedifix/tests/test_config.py new file mode 100644 index 0000000000..08c1065849 --- /dev/null +++ b/config/aedifix/tests/test_config.py @@ -0,0 +1,40 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +import pytest + +from ..config import ConfigFile + +if TYPE_CHECKING: + from pathlib import Path + + from .fixtures.dummy_manager import DummyManager + + +@pytest.fixture +def config_file(manager: DummyManager, tmp_path: Path) -> ConfigFile: + template = tmp_path / "foo.in" + template.touch() + return ConfigFile(manager=manager, config_file_template=template) + + +class TestConfigFile: + def test_create(self, manager: DummyManager, tmp_path: Path) -> None: + template = tmp_path / "foo.in" + template.touch() + config = ConfigFile(manager=manager, config_file_template=template) + + assert config.template_file.exists() + assert config.template_file.is_file() + assert config.template_file == template + + assert config._default_subst == {"PYTHON_EXECUTABLE": sys.executable} + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/test_logger.py b/config/aedifix/tests/test_logger.py new file mode 100644 index 0000000000..72be9e2b60 --- /dev/null +++ b/config/aedifix/tests/test_logger.py @@ -0,0 +1,122 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING + +import pytest + +from ..logger import Logger + +if TYPE_CHECKING: + from pathlib import Path + + +@pytest.fixture +def tmp_configure_log(AEDIFIX_PYTEST_DIR: Path) -> Path: + return AEDIFIX_PYTEST_DIR / "configure.log" + + +@pytest.fixture +def logger(tmp_configure_log: Path) -> Logger: + return Logger(tmp_configure_log) + + +class TestLogger: + def test_create(self, tmp_configure_log: Path) -> None: + import logging + + logger = Logger(tmp_configure_log) + assert isinstance(logger._file_logger, logging.Logger) + assert logger.file_path == tmp_configure_log + assert len(logger._row_data) == 0 + assert not logger._live.is_started + + def test_flush(self, logger: Logger) -> None: + logger.flush() + + @pytest.mark.parametrize("mess", ("hello world", "goodbye world")) + def test_log_screen( + self, logger: Logger, capsys: pytest.CaptureFixture[str], mess: str + ) -> None: + with logger: + logger.log_screen(mess=mess) + captured = capsys.readouterr() + assert mess in captured.out + assert captured.err == "" + + def test_logger_context(self, logger: Logger) -> None: + assert logger._live.is_started is False + with logger as lg: + # Need to use alias lg since otherwise mypy says the final line is + # unreachable, since I guess it assumes the lifetime of the + # variable "logger" is tied to the with statement? + assert lg is logger + assert lg._live.is_started is True + assert logger._live.is_started is False + + def test_append_live_message(self, logger: Logger) -> None: + row_data = logger._row_data + # make sure that the above access doesn't return a copy or something + # like that + assert row_data is logger._row_data + assert row_data.maxlen is not None + assert len(row_data) == 0 + logger._append_live_message("foo", keep=True) + assert len(row_data) == 1 + assert row_data[0] == ("foo", True) + for i in range(row_data.maxlen - len(row_data)): + row_data.append((f"bar_{i}", False)) + assert len(row_data) == row_data.maxlen + assert row_data[0] == ("foo", True) + logger._append_live_message("new_foo", keep=True) + assert len(row_data) == row_data.maxlen + assert row_data[0] == ("foo", True) + # The oldest non-kept entry (bar_0) was pruned to make room, so + # bar_1 is now next + assert row_data[1] == ("bar_1", False) + + def test_append_live_message_full(self, logger: Logger) -> None: + assert logger._row_data.maxlen is not None + for i in range(logger._row_data.maxlen): + logger._row_data.append((f"foo_{i}", True)) + with pytest.raises( + ValueError, + match=( + "Could not prune row data, every entry was marked as " + "persistent" + ), + ): + logger._append_live_message("oh no", keep=True) + + def test_log_file(self, logger: Logger) -> None: + mess = "foo bar baz" + logger.log_file(mess) + assert logger.file_path.read_text() == mess + "\n" + + mess2 = "asdasdasdasd qwdoiqnwdnqwid\ndqowdqowdqiwodqowdi" + logger.log_file(mess2) + assert logger.file_path.read_text() == mess + "\n" + mess2 + "\n" + + def test_copy_log(self, logger: Logger) -> None: + mess = "foo, bar, baz" + logger.log_file(mess) + orig_log = logger.file_path + other_log = orig_log.parent / "backup_log.log" + assert not other_log.exists() + dest = logger.copy_log(other_log) + full_mess = ( + f"{mess}\nCopying file log from {orig_log} to {other_log}\n" + ) + assert dest == other_log + assert other_log.exists() + assert other_log.is_file() + assert other_log.read_text() == full_mess + assert orig_log.exists() + assert orig_log.is_file() + assert
orig_log.read_text() == full_mess + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/test_main.py b/config/aedifix/tests/test_main.py new file mode 100644 index 0000000000..29114504b4 --- /dev/null +++ b/config/aedifix/tests/test_main.py @@ -0,0 +1,470 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +import json +import shlex +import shutil +import textwrap +from pathlib import Path +from typing import TYPE_CHECKING, Any, TypeAlias + +import pytest + +from ..logger import Logger +from ..main import basic_configure +from ..manager import ConfigurationManager +from ..package.main_package import _detect_num_cpus +from .fixtures.dummy_main_module import DummyMainModule + +if TYPE_CHECKING: + from ..cmake.cmaker import CMakeCommandSpec + + +@pytest.fixture(autouse=True) +def setup_cmake_project(AEDIFIX_PYTEST_DIR: Path) -> None: + main_cpp_template = textwrap.dedent( + r""" + #include <iostream> + + int main(int argc, char *argv[]) + { + std::cout << "hello, world!\n"; + return 0; + } + """ + ).strip() + cmakelists_template = textwrap.dedent( + r""" + cmake_minimum_required(VERSION 3.13...3.16 FATAL_ERROR) + + project(example_exec VERSION 0.0.1 LANGUAGES C CXX) + + add_executable(example_exec src/main.cpp) + target_compile_features(example_exec PRIVATE cxx_auto_type) + + install(TARGETS example_exec) + + set(data "{}") + foreach(var IN LISTS AEDIFIX_EXPORT_VARIABLES) + string(JSON data SET "${data}" "${var}" "\"${${var}}\"") + endforeach() + file(WRITE "${AEDIFIX_EXPORT_CONFIG_PATH}" "${data}") + """ + ).strip() + src_dir = AEDIFIX_PYTEST_DIR / "src" + src_dir.mkdir() + (src_dir / "main.cpp").write_text(main_cpp_template) + (AEDIFIX_PYTEST_DIR / "CMakeLists.txt").write_text(cmakelists_template) + + +def shutil_which(thing: str) -> Path: + ret = shutil.which(thing) + assert ret is not None + return Path(ret) + + +Argv: TypeAlias = list[str] + + +class TestInfo: + # tell pytest to ignore this class, even though it starts with "Test" + __test__ = False + + def __init__( + self, + AEDIFIX_PYTEST_DIR: Path, + AEDIFIX_PYTEST_ARCH: str, + generator: str | None = None, + ) -> None: + self.AEDIFIX_PYTEST_DIR = AEDIFIX_PYTEST_DIR + self.AEDIFIX_PYTEST_ARCH = AEDIFIX_PYTEST_ARCH + self.arch_dir = self.AEDIFIX_PYTEST_DIR / self.AEDIFIX_PYTEST_ARCH + self.configure_log = self.AEDIFIX_PYTEST_DIR / "configure.log" + self.backup_configure_log = self.arch_dir / "configure.log" + self.reconfigure = ( + self.arch_dir / f"reconfigure-{self.AEDIFIX_PYTEST_ARCH}.py" + ) + self.reconfigure_symlink = ( + self.AEDIFIX_PYTEST_DIR / self.reconfigure.name + ) + self.cmake_dir = self.arch_dir / "cmake_build" + self.cmakecache_txt = self.cmake_dir / "CMakeCache.txt" + self.command_spec = self.cmake_dir / "aedifix_cmake_command_spec.json" + self.cmake_exe = Path(shutil_which("cmake")).resolve() + if generator is None: + generator = "Ninja" if shutil.which("ninja") else "Unix Makefiles" + self.generator = generator + self.export_config_path = self.cmake_dir / "aedifix_export_config.json" + + def pre_test(self) -> None: + assert not self.arch_dir.exists() + assert not self.configure_log.exists() + assert not self.backup_configure_log.exists() + assert not self.reconfigure.exists() + assert not self.reconfigure_symlink.exists() + assert not self.cmake_dir.exists() + assert not self.cmakecache_txt.exists() + assert not self.command_spec.exists() + +
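# post_test() verifies the artifacts that a successful basic_configure() +    # run is expected to leave on disk: the arch directory, configure.log and +    # its per-arch backup, the reconfigure script and its symlink, and the +    # CMake build directory with its CMakeCache.txt and JSON command spec +    # (the latter is compared against expected_spec). +    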
def post_test(self, expected_spec: CMakeCommandSpec) -> None: + # basics + assert self.arch_dir.is_dir() + # configure.log + assert self.configure_log.exists() + assert self.configure_log.is_file() + + assert self.backup_configure_log.exists() + assert self.backup_configure_log.is_file() + assert ( + self.configure_log.read_text() + == self.backup_configure_log.read_text() + ) + + # reconfigure + assert self.reconfigure.exists() + assert self.reconfigure.is_file() + + assert self.reconfigure_symlink.exists() + assert self.reconfigure_symlink.is_symlink() + assert ( + self.AEDIFIX_PYTEST_DIR / self.reconfigure_symlink.readlink() + == self.reconfigure + ) + + # cmake dir + assert self.cmake_dir.exists() + assert self.cmake_dir.is_dir() + + # TODO: check more cmake cache! + assert self.cmakecache_txt.exists() + assert self.cmakecache_txt.is_file() + cache_header_lines = [ + "# This is the CMakeCache file.\n", + f"# For build in directory: {self.cmake_dir}\n", + f"# It was generated by CMake: {self.cmake_exe}\n", + "# You can edit this file to change values found and used by cmake.\n", # noqa: E501 + "# If you do not want to change any of the values, simply exit the editor.\n", # noqa: E501 + "# If you do want to change a value, simply edit, save, and exit the editor.\n", # noqa: E501 + "# The syntax for the file is as follows:\n", + "# KEY:TYPE=VALUE\n", + "# KEY is the name of a variable in the cache.\n", + "# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!.\n", # noqa: E501 + "# VALUE is the current value for the KEY.\n", + ] + idx = 0 + with self.cmakecache_txt.open() as fd: + # Exploit the fact that zip() will end when the shortest iterator + # is exhausted (i.e. cache_header_lines in this case) + for line, expected in zip(fd, cache_header_lines): + assert line == expected + idx += 1 + # But double check the fact that cache_header_lines was indeed the + # shortest + assert idx == len(cache_header_lines) + + assert self.command_spec.exists() + assert self.command_spec.is_file() + with self.command_spec.open() as fd: + spec = json.load(fd) + + assert "CMAKE_COMMANDS" in spec + # Remove the export variables entry. 
It's too odious to test properly + # (and would break the second anyone added any new flags), so just do + # some minor tests + cmake_commands = spec["CMAKE_COMMANDS"] + for idx, val in enumerate(cmake_commands): + if val.startswith("-DAEDIFIX_EXPORT_VARIABLES"): + assert "CMAKE_C_COMPILER" in val + assert "CMAKE_CXX_COMPILER" in val + assert "CMAKE_COMMAND" in val + assert "CMAKE_GENERATOR" in val + assert len(val.split(";")) > 20 + del cmake_commands[idx] + break + else: + pytest.fail("Did not find export variables in cmake command") + assert spec == expected_spec + + +class SpecialException(Exception): + pass + + +@pytest.mark.slow +class TestMain: + def test_basic_configure_bad_init( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: + exn_mess = "Throwing from __init__" + + def throwing_init(*args: Any, **kwargs: Any) -> None: + raise SpecialException(exn_mess) + + monkeypatch.setattr(ConfigurationManager, "__init__", throwing_init) + with pytest.raises(SpecialException, match=exn_mess): + basic_configure((), DummyMainModule) + + def test_basic_configure_bad_halfway( + self, monkeypatch: pytest.MonkeyPatch, AEDIFIX_PYTEST_DIR: Path + ) -> None: + exn_mess = "Throwing from setup" + + def throwing_setup(*args: Any, **kwargs: Any) -> None: + raise SpecialException(exn_mess) + + monkeypatch.setattr(ConfigurationManager, "setup", throwing_setup) + + ret = basic_configure((), DummyMainModule) + assert ret != 0 + + configure_log = AEDIFIX_PYTEST_DIR / "configure.log" + assert configure_log.is_file() + config_log_text = configure_log.read_text().strip() + assert config_log_text + + logger = Logger(AEDIFIX_PYTEST_DIR / "dummy.log") + expected_lines = logger.build_multiline_message( + sup_title="***** CONFIGURATION CRASH *****", + divider_char="-", + text=( + f"{exn_mess}, please see {configure_log} for additional " + "details." 
+ ), + ).splitlines() + banner = "=" * (logger.console.width - 1) + expected_lines.insert(0, banner) + expected_lines.append(banner) + config_log_lines = set(config_log_text.splitlines()) + assert set(expected_lines) <= config_log_lines + + def test_basic_configure_bare( + self, AEDIFIX_PYTEST_DIR: Path, AEDIFIX_PYTEST_ARCH: str + ) -> None: + test_info = TestInfo(AEDIFIX_PYTEST_DIR, AEDIFIX_PYTEST_ARCH) + test_info.pre_test() + + num_cpus = _detect_num_cpus() + argv: Argv = [] + expected_spec: CMakeCommandSpec = { + "CMAKE_EXECUTABLE": f"{test_info.cmake_exe}", + "CMAKE_GENERATOR": test_info.generator, + "SOURCE_DIR": f"{AEDIFIX_PYTEST_DIR}", + "BUILD_DIR": f"{test_info.cmake_dir}", + "CMAKE_COMMANDS": [ + "--log-context", + "--log-level=DEBUG", + "-DAEDIFIX:BOOL=ON", + f"-DAEDIFIX_PYTEST_ARCH:STRING='{AEDIFIX_PYTEST_ARCH}'", + f"-DAEDIFIX_PYTEST_DIR:PATH='{AEDIFIX_PYTEST_DIR}'", + "-DDUMMYMAINMODULE_CONFIGURE_OPTIONS:STRING=", + f"-DAEDIFIX_EXPORT_CONFIG_PATH:FILEPATH='{test_info.export_config_path}'", + "-DBUILD_SHARED_LIBS:BOOL=ON", + f"-DCMAKE_BUILD_PARALLEL_LEVEL:STRING={num_cpus}", + "-DCMAKE_BUILD_TYPE:STRING=Release", + "-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON", + "-DCMAKE_COLOR_MAKEFILE:BOOL=ON", + "-DCMAKE_CXX_FLAGS:STRING=-O3", + "-DCMAKE_C_FLAGS:STRING=-O3", + "-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON", + ], + } + + ret = basic_configure(argv, DummyMainModule) + assert ret == 0 + test_info.post_test(expected_spec) + + def test_basic_configure_release( + self, AEDIFIX_PYTEST_DIR: Path, AEDIFIX_PYTEST_ARCH: str + ) -> None: + test_info = TestInfo(AEDIFIX_PYTEST_DIR, AEDIFIX_PYTEST_ARCH) + test_info.pre_test() + + num_cpus = _detect_num_cpus() + argv: Argv = ["--build-type=release"] + argv_str = shlex.join(argv) + expected_spec: CMakeCommandSpec = { + "CMAKE_EXECUTABLE": f"{test_info.cmake_exe}", + "CMAKE_GENERATOR": test_info.generator, + "SOURCE_DIR": f"{AEDIFIX_PYTEST_DIR}", + "BUILD_DIR": f"{test_info.cmake_dir}", + "CMAKE_COMMANDS": [ + "--log-context", + "--log-level=DEBUG", + "-DAEDIFIX:BOOL=ON", + f"-DAEDIFIX_PYTEST_ARCH:STRING='{AEDIFIX_PYTEST_ARCH}'", + f"-DAEDIFIX_PYTEST_DIR:PATH='{AEDIFIX_PYTEST_DIR}'", + f"-DDUMMYMAINMODULE_CONFIGURE_OPTIONS:STRING={argv_str}", + f"-DAEDIFIX_EXPORT_CONFIG_PATH:FILEPATH='{test_info.export_config_path}'", + "-DBUILD_SHARED_LIBS:BOOL=ON", + f"-DCMAKE_BUILD_PARALLEL_LEVEL:STRING={num_cpus}", + "-DCMAKE_BUILD_TYPE:STRING=Release", + "-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON", + "-DCMAKE_COLOR_MAKEFILE:BOOL=ON", + "-DCMAKE_CXX_FLAGS:STRING=-O3", + "-DCMAKE_C_FLAGS:STRING=-O3", + "-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON", + ], + } + + ret = basic_configure(argv, DummyMainModule) + assert ret == 0 + test_info.post_test(expected_spec) + + def test_basic_configure_relwithdebinfo( + self, AEDIFIX_PYTEST_DIR: Path, AEDIFIX_PYTEST_ARCH: str + ) -> None: + test_info = TestInfo(AEDIFIX_PYTEST_DIR, AEDIFIX_PYTEST_ARCH) + test_info.pre_test() + + num_cpus = _detect_num_cpus() + argv: Argv = ["--build-type=relwithdebinfo"] + argv_str = shlex.join(argv) + expected_spec: CMakeCommandSpec = { + "CMAKE_EXECUTABLE": f"{test_info.cmake_exe}", + "CMAKE_GENERATOR": test_info.generator, + "SOURCE_DIR": f"{AEDIFIX_PYTEST_DIR}", + "BUILD_DIR": f"{test_info.cmake_dir}", + "CMAKE_COMMANDS": [ + "--log-context", + "--log-level=DEBUG", + "-DAEDIFIX:BOOL=ON", + f"-DAEDIFIX_PYTEST_ARCH:STRING='{AEDIFIX_PYTEST_ARCH}'", + f"-DAEDIFIX_PYTEST_DIR:PATH='{AEDIFIX_PYTEST_DIR}'", + f"-DDUMMYMAINMODULE_CONFIGURE_OPTIONS:STRING={argv_str}", + 
f"-DAEDIFIX_EXPORT_CONFIG_PATH:FILEPATH='{test_info.export_config_path}'", + "-DBUILD_SHARED_LIBS:BOOL=ON", + f"-DCMAKE_BUILD_PARALLEL_LEVEL:STRING={num_cpus}", + "-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo", + "-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON", + "-DCMAKE_COLOR_MAKEFILE:BOOL=ON", + "-DCMAKE_CXX_FLAGS:STRING='-O0 -g -g3 -O3'", + "-DCMAKE_C_FLAGS:STRING='-O0 -g -g3 -O3'", + "-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON", + ], + } + + ret = basic_configure(argv, DummyMainModule) + assert ret == 0 + test_info.post_test(expected_spec) + + def test_basic_configure_clang_debug( + self, AEDIFIX_PYTEST_DIR: Path, AEDIFIX_PYTEST_ARCH: str + ) -> None: + test_info = TestInfo( + AEDIFIX_PYTEST_DIR, AEDIFIX_PYTEST_ARCH, generator="Unix Makefiles" + ) + test_info.pre_test() + + # This causes it to be joined as a string that is too long + flags = " ".join( # noqa: FLY002 + [ + "-O0", + "-g3", + "-fstack-protector", + "-Walloca", + "-Wdeprecated", + "-Wimplicit-fallthrough", + "-fdiagnostics-show-template-tree", + "-Wignored-qualifiers", + "-Wmissing-field-initializers", + "-Wextra", + "-fsanitize=address,undefined,bounds", + ] + ) + + cc = Path(shutil_which("clang")) + cxx = Path(shutil_which("clang++")) + num_cpus = _detect_num_cpus() + argv: Argv = [ + f"--with-cc={cc}", + f"--with-cxx={cxx}", + "--build-type=debug", + "--library-linkage=static", + f"--cmake-generator={test_info.generator}", + f"--CFLAGS={flags}", + f"--CXXFLAGS={flags}", + ] + argv_str = shlex.join(argv) + expected_spec: CMakeCommandSpec = { + "CMAKE_EXECUTABLE": f"{test_info.cmake_exe}", + "CMAKE_GENERATOR": test_info.generator, + "SOURCE_DIR": f"{AEDIFIX_PYTEST_DIR}", + "BUILD_DIR": f"{test_info.cmake_dir}", + "CMAKE_COMMANDS": [ + "--log-context", + "--log-level=DEBUG", + "-DAEDIFIX:BOOL=ON", + f"-DAEDIFIX_PYTEST_ARCH:STRING='{AEDIFIX_PYTEST_ARCH}'", + f"-DAEDIFIX_PYTEST_DIR:PATH='{AEDIFIX_PYTEST_DIR}'", + f"-DDUMMYMAINMODULE_CONFIGURE_OPTIONS:STRING={argv_str}", + f"-DAEDIFIX_EXPORT_CONFIG_PATH:FILEPATH='{test_info.export_config_path}'", + "-DBUILD_SHARED_LIBS:BOOL=OFF", + f"-DCMAKE_BUILD_PARALLEL_LEVEL:STRING={num_cpus}", + "-DCMAKE_BUILD_TYPE:STRING=Debug", + "-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON", + "-DCMAKE_COLOR_MAKEFILE:BOOL=ON", + f"-DCMAKE_CXX_COMPILER:FILEPATH={cxx}", + f"-DCMAKE_CXX_FLAGS:STRING='{flags}'", + f"-DCMAKE_C_COMPILER:FILEPATH={cc}", + f"-DCMAKE_C_FLAGS:STRING='{flags}'", + "-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON", + ], + } + + ret = basic_configure(argv, DummyMainModule) + assert ret == 0 + test_info.post_test(expected_spec) + + def test_extra_argv( + self, AEDIFIX_PYTEST_DIR: Path, AEDIFIX_PYTEST_ARCH: str + ) -> None: + test_info = TestInfo( + AEDIFIX_PYTEST_DIR, AEDIFIX_PYTEST_ARCH, generator="Unix Makefiles" + ) + test_info.pre_test() + + num_cpus = _detect_num_cpus() + argv: Argv = [ + "--build-type=relwithdebinfo", + f"--cmake-generator={test_info.generator}", + "--", + "-DFOO=BAR", + "-DBAZ=1234", + ] + argv_str = shlex.join(argv) + expected_spec: CMakeCommandSpec = { + "CMAKE_EXECUTABLE": f"{test_info.cmake_exe}", + "CMAKE_GENERATOR": test_info.generator, + "SOURCE_DIR": f"{AEDIFIX_PYTEST_DIR}", + "BUILD_DIR": f"{test_info.cmake_dir}", + "CMAKE_COMMANDS": [ + "--log-context", + "--log-level=DEBUG", + "-DAEDIFIX:BOOL=ON", + f"-DAEDIFIX_PYTEST_ARCH:STRING='{AEDIFIX_PYTEST_ARCH}'", + f"-DAEDIFIX_PYTEST_DIR:PATH='{AEDIFIX_PYTEST_DIR}'", + f"-DDUMMYMAINMODULE_CONFIGURE_OPTIONS:STRING={argv_str}", + f"-DAEDIFIX_EXPORT_CONFIG_PATH:FILEPATH='{test_info.export_config_path}'", + "-DBUILD_SHARED_LIBS:BOOL=ON", + 
f"-DCMAKE_BUILD_PARALLEL_LEVEL:STRING={num_cpus}", + "-DCMAKE_BUILD_TYPE:STRING=RelWithDebInfo", + "-DCMAKE_COLOR_DIAGNOSTICS:BOOL=ON", + "-DCMAKE_COLOR_MAKEFILE:BOOL=ON", + "-DCMAKE_CXX_FLAGS:STRING='-O0 -g -g3 -O3'", + "-DCMAKE_C_FLAGS:STRING='-O0 -g -g3 -O3'", + "-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON", + "-DFOO=BAR", + "-DBAZ=1234", + ], + } + + ret = basic_configure(argv, DummyMainModule) + assert ret == 0 + test_info.post_test(expected_spec) + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/test_manager.py b/config/aedifix/tests/test_manager.py new file mode 100644 index 0000000000..0f15cbc740 --- /dev/null +++ b/config/aedifix/tests/test_manager.py @@ -0,0 +1,177 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import re +import sys +import textwrap +from copy import deepcopy +from typing import TYPE_CHECKING + +import pytest + +from ..manager import ConfigurationManager +from ..package.main_package import ( + DEBUG_CONFIGURE_FLAG, + FORCE_FLAG, + ON_ERROR_DEBUGGER_FLAG, + WITH_CLEAN_FLAG, +) +from ..util.cl_arg import CLArg +from ..util.exception import WrongOrderError +from .fixtures.dummy_main_module import DummyMainModule + +if TYPE_CHECKING: + from pathlib import Path + + +@pytest.fixture +def manager() -> ConfigurationManager: + return ConfigurationManager((), DummyMainModule) + + +class TestConfigurationManager: + @pytest.mark.parametrize( + "argv", ((), ("--foo",), ("-b", "1", "--bar=baz")) + ) + def test_create( + self, + argv: tuple[str, ...], + AEDIFIX_PYTEST_DIR: Path, + AEDIFIX_PYTEST_ARCH: str, + ) -> None: + manager = ConfigurationManager(argv, DummyMainModule) + assert os.environ["AEDIFIX"] == "1" + assert manager.argv == argv + with pytest.raises( + WrongOrderError, match=re.escape("Must call setup() first") + ): + _ = manager.cl_args + assert manager.project_name == "DummyMainModule" + assert manager.project_arch == AEDIFIX_PYTEST_ARCH + assert manager.project_arch_name == "AEDIFIX_PYTEST_ARCH" + assert manager.project_dir == AEDIFIX_PYTEST_DIR + # This dir is created by the pytest fixtures, but let's just double + # check that it still exists + assert manager.project_dir.exists() + assert manager.project_dir.is_dir() + assert manager.project_dir_name == "AEDIFIX_PYTEST_DIR" + assert ( + manager.project_arch_dir + == AEDIFIX_PYTEST_DIR / AEDIFIX_PYTEST_ARCH + ) + assert not manager.project_arch_dir.exists() + assert ( + manager.project_cmake_dir + == AEDIFIX_PYTEST_DIR / AEDIFIX_PYTEST_ARCH / "cmake_build" + ) + assert not manager.project_cmake_dir.exists() + + # This should not exist yet, because the manager should not have + # emitted any logging yet! 
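+        # (presumably the log file is created lazily, on first write; +        # test_setup below checks that it exists once setup() has run)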
+ assert not manager._logger.file_path.exists() + + assert manager._aedifix_root_dir.exists() + assert manager._aedifix_root_dir.is_dir() + assert (manager._aedifix_root_dir / "aedifix").exists() + assert (manager._aedifix_root_dir / "aedifix").is_dir() + + def test_setup( + self, manager: ConfigurationManager, AEDIFIX_PYTEST_ARCH: str + ) -> None: + orig_argv = deepcopy(manager.argv) + assert len(manager._modules) == 1 + manager.setup() + assert len(manager._modules) > 1 + assert manager.argv == orig_argv + assert ( + CLArg( + name="AEDIFIX_PYTEST_ARCH", + value=AEDIFIX_PYTEST_ARCH, + cl_set=False, + ) + == manager.cl_args.AEDIFIX_PYTEST_ARCH + ) + assert manager._ephemeral_args == { + WITH_CLEAN_FLAG, + FORCE_FLAG, + ON_ERROR_DEBUGGER_FLAG, + DEBUG_CONFIGURE_FLAG, + } + assert manager.project_dir.exists() + assert manager.project_dir.is_dir() + assert manager.project_arch_dir.exists() + assert manager.project_arch_dir.is_dir() + assert manager._logger.file_path.exists() + assert manager._logger.file_path.is_file() + + @pytest.mark.slow + def test_manager_extra_args(self, AEDIFIX_PYTEST_DIR: Path) -> None: + main_cpp_template = textwrap.dedent( + r""" + #include <iostream> + + int main(int argc, char *argv[]) + { + std::cout << "hello, world!\n"; + return 0; + } + """ + ).strip() + cmakelists_template = textwrap.dedent( + r""" + cmake_minimum_required(VERSION 3.13...3.16 FATAL_ERROR) + + if(NOT DEFINED MY_VARIABLE) + message( + FATAL_ERROR + "ConfigurationManager failed to forward extra arguments to CMake!" + ) + endif() + + if(NOT ("${MY_VARIABLE}" STREQUAL "foo-bar-baz")) + message( + FATAL_ERROR + "ConfigurationManager failed to forward extra arguments to CMake!" + ) + endif() + + project(example_exec VERSION 0.0.1 LANGUAGES CXX) + + add_executable(example_exec src/main.cpp) + + install(TARGETS example_exec) + + set(data "{}") + foreach(var IN LISTS AEDIFIX_EXPORT_VARIABLES) + string(JSON data SET "${data}" "${var}" "\"${${var}}\"") + endforeach() + file(WRITE "${AEDIFIX_EXPORT_CONFIG_PATH}" "${data}") + """ + ).strip() + src_dir = AEDIFIX_PYTEST_DIR / "src" + src_dir.mkdir() + (src_dir / "main.cpp").write_text(main_cpp_template) + (AEDIFIX_PYTEST_DIR / "CMakeLists.txt").write_text(cmakelists_template) + manager = ConfigurationManager( + ("--", "-DMY_VARIABLE='foo-bar-baz'"), DummyMainModule + ) + manager.setup() + manager.configure() + manager.finalize() + cmake_cache = manager.project_cmake_dir / "CMakeCache.txt" + var = "" + for line in cmake_cache.read_text().splitlines(): + if line.startswith("MY_VARIABLE"): + var = line.split("=")[1].strip() + break + else: + pytest.fail(f"Failed to find 'MY_VARIABLE' in {cmake_cache}") + assert isinstance(var, str) + assert var == "foo-bar-baz" + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/test_reconfigure.py b/config/aedifix/tests/test_reconfigure.py new file mode 100644 index 0000000000..05847812f8 --- /dev/null +++ b/config/aedifix/tests/test_reconfigure.py @@ -0,0 +1,228 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import os +import sys +import random +from pathlib import Path +from typing import TYPE_CHECKING, Any + +import pytest + +from ..cmake.cmake_flags import CMakeString +from ..reconfigure import Reconfigure +from ..util.utility import subprocess_capture_output +from .fixtures.dummy_main_module import DummyMainModule + +if TYPE_CHECKING: + from .fixtures.dummy_manager import DummyManager + + +@pytest.fixture +def reconf(manager: DummyManager) -> Reconfigure: + return Reconfigure(manager) + + +TEST_SANITIZED_ARGV_ARGS: tuple[ + tuple[tuple[str, ...], set[str], list[str], list[str]], ... +] = ( + ( + ( + "--arg_1", + "value_1", + "--arg2=value2", + "--ephem_1", + "--ephem_2=value2", + "--ephem_3", + "value3", + "--arg-end", + ), + {"--ephem_1", "--ephem_2", "--ephem_3"}, + [], + [ + "--AEDIFIX_PYTEST_ARCH={AEDIFIX_PYTEST_ARCH}", + "--arg_1", + "value_1", + "--arg2=value2", + "--arg-end", + ], + ), + ((), set(), [], ["--AEDIFIX_PYTEST_ARCH={AEDIFIX_PYTEST_ARCH}"]), + ( + ("--arg_1", "value_1", "--arg2=value2", "--arg-end"), + set(), + ["-DFOO=BAR", "-DBAZ='BOP BLIP'"], + [ + "--AEDIFIX_PYTEST_ARCH={AEDIFIX_PYTEST_ARCH}", + "--arg_1", + "value_1", + "--arg2=value2", + "--arg-end", + "--", + "-DFOO=BAR", + "-DBAZ='BOP BLIP'", + ], + ), + ( + ( + "--arg_1", + "value_1", + "--arg2=value2", + "--arg-ephem", + "--arg-end", + "--", + "-DFOO=BAZ", + ), + {"--arg-ephem"}, + ["-DFOO=BAR", "-DBAZ='BOP BLIP'"], + [ + "--AEDIFIX_PYTEST_ARCH={AEDIFIX_PYTEST_ARCH}", + "--arg_1", + "value_1", + "--arg2=value2", + "--arg-end", + "--", + "-DFOO=BAZ", + "-DFOO=BAR", + "-DBAZ='BOP BLIP'", + ], + ), +) + + +class TestReconfigure: + def test_create( + self, + manager: DummyManager, + AEDIFIX_PYTEST_DIR: Path, + AEDIFIX_PYTEST_ARCH: str, + ) -> None: + reconf = Reconfigure(manager) + assert isinstance(reconf.reconfigure_file, Path) + assert ( + reconf.reconfigure_file.parent + == AEDIFIX_PYTEST_DIR / AEDIFIX_PYTEST_ARCH + ) + assert not reconf.reconfigure_file.parent.exists() + assert not reconf.reconfigure_file.exists() + + def test_get_import_line(self) -> None: + class DummyClass: + pass + + mod_name, type_name = Reconfigure.get_import_line(DummyClass) + assert mod_name == "config.aedifix.tests.test_reconfigure" + assert type_name == "DummyClass" + + def ensure_reconfigure_file( + self, reconfigure: Reconfigure, link_symlink: bool + ) -> tuple[Path, Path]: + # create project-dir/arch-name/reconfigure.py + reconf_file = reconfigure.reconfigure_file + reconf_file.parent.mkdir(exist_ok=False) + reconf_text = f"foo, bar, baz: {random.random()}" + reconf_file.write_text(reconf_text) + # create the symlink + # project-dir/reconfigure.py -> ./arch-name/reconfigure.py + project_dir = reconfigure.project_dir + symlink = project_dir / reconf_file.name + assert not symlink.exists() + if link_symlink: + symlink.symlink_to(reconf_file.relative_to(project_dir)) + return reconf_file, symlink + + def test_backup_reconfigure_script_with_symlink( + self, reconf: Reconfigure, AEDIFIX_PYTEST_DIR: Path + ) -> None: + reconf_file, symlink = self.ensure_reconfigure_file( + reconfigure=reconf, link_symlink=True + ) + reconf_text = reconf_file.read_text() + + reconf.backup_reconfigure_script() + assert symlink.exists() + assert symlink.is_symlink() + assert (AEDIFIX_PYTEST_DIR / symlink.readlink()) == reconf_file + assert reconf_file.exists() + assert reconf_file.is_file() + assert reconf_file.read_text() == reconf_text + assert hasattr(reconf, "_backup") + 
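# the backup should be a real file: distinct from the original script, +        # but with identical contents +        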
assert isinstance(reconf._backup, Path) + assert reconf._backup.exists() + assert reconf._backup.is_file() + assert reconf._backup != reconf_file + assert reconf._backup.read_text() == reconf_text + + def test_backup_reconfigure_script_without_symlink( + self, reconf: Reconfigure + ) -> None: + reconf_file, symlink = self.ensure_reconfigure_file( + reconfigure=reconf, link_symlink=False + ) + reconf_text = reconf_file.read_text() + + reconf.backup_reconfigure_script() + assert not symlink.exists() + assert reconf_file.exists() + assert reconf_file.is_file() + assert reconf_file.read_text() == reconf_text + assert hasattr(reconf, "_backup") + assert reconf._backup is None + + @pytest.mark.parametrize( + ("argv", "ephemeral_args", "extra_argv", "expected"), + TEST_SANITIZED_ARGV_ARGS, + ) + def test_sanitized_argv( + self, + reconf: Reconfigure, + AEDIFIX_PYTEST_ARCH: str, + argv: tuple[str, ...], + ephemeral_args: set[str], + extra_argv: list[str], + expected: list[str], + ) -> None: + ret = reconf.sanitized_argv(argv, ephemeral_args, extra_argv) + expected = [ + s.format(AEDIFIX_PYTEST_ARCH=AEDIFIX_PYTEST_ARCH) for s in expected + ] + assert ret == expected + + def test_finalize( + self, + reconf: Reconfigure, + AEDIFIX_PYTEST_DIR: Path, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + reconf_file = reconf.reconfigure_file + assert not reconf_file.exists() + reconf_file.parent.mkdir(exist_ok=False) + + def log_execute_command(cmd: list[Any], **kwargs: Any) -> None: + subprocess_capture_output(list(map(str, cmd))) + + monkeypatch.setattr( + reconf.manager, "log_execute_command", log_execute_command + ) + reconf.manager.register_cmake_variable(CMakeString("CMAKE_COMMAND")) + reconf.manager.set_cmake_variable("CMAKE_COMMAND", "cmake") + + reconf.finalize(DummyMainModule, set()) + + assert reconf_file.exists() + assert reconf_file.is_file() + assert os.access(reconf_file, os.X_OK) + text = reconf_file.read_text() + assert "import DummyMainModule" in text + assert "return basic_configure(tuple(argv), DummyMainModule)" in text + + symlink = AEDIFIX_PYTEST_DIR / reconf_file.name + assert symlink.exists() + assert symlink.is_symlink() + assert symlink.parent / symlink.readlink() == reconf_file + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/typings/jupyter_client/__init__.pyi b/config/aedifix/tests/util/__init__.py similarity index 100% rename from typings/jupyter_client/__init__.pyi rename to config/aedifix/tests/util/__init__.py diff --git a/config/aedifix/tests/util/test_argument_parser.py b/config/aedifix/tests/util/test_argument_parser.py new file mode 100644 index 0000000000..9e0535ab58 --- /dev/null +++ b/config/aedifix/tests/util/test_argument_parser.py @@ -0,0 +1,154 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from argparse import ArgumentParser +from typing import Any + +import pytest + +from ...cmake.cmake_flags import CMAKE_VARIABLE, CMakeString, _CMakeVar +from ...util.argument_parser import ( + ArgSpec, + ConfigArgument, + ExclusiveArgumentGroup, + Unset, +) +from ...util.exception import LengthError + + +class TestConfigArgument: + def test_create_bare(self) -> None: + name = "--bar" + spec = ArgSpec(dest="bar") + arg = ConfigArgument(name=name, spec=spec) + + assert arg.name == name + assert arg.spec == spec + assert arg.cmake_var is None + assert arg.ephemeral is False + assert arg.enables_package is False + assert arg.primary is False + + @pytest.mark.parametrize( + "cmake_var", (None, CMAKE_VARIABLE("FOO", CMakeString)) + ) + @pytest.mark.parametrize("ephemeral", (True, False)) + @pytest.mark.parametrize("enables_package", (True, False)) + @pytest.mark.parametrize("primary", (True, False)) + def test_create( + self, + cmake_var: _CMakeVar | None, + ephemeral: bool, + enables_package: bool, + primary: bool, + ) -> None: + arg = ConfigArgument( + name="--foo", + spec=ArgSpec(dest="bar"), + cmake_var=cmake_var, + ephemeral=ephemeral, + enables_package=enables_package, + primary=primary, + ) + assert arg.name == "--foo" + assert arg.spec == ArgSpec(dest="bar") + assert arg.cmake_var == cmake_var + assert arg.ephemeral == ephemeral + assert arg.enables_package == enables_package + assert arg.primary == primary + + @pytest.mark.parametrize( + "cmake_var", (None, CMAKE_VARIABLE("FOO", CMakeString)) + ) + @pytest.mark.parametrize("ephemeral", (True, False)) + @pytest.mark.parametrize("ty", (None, int, str, bool)) + @pytest.mark.parametrize( + ("const", "nargs"), ((Unset, Unset), (True, "?"), (False, "?")) + ) + @pytest.mark.parametrize("default", (Unset, True, False)) + @pytest.mark.parametrize("metavar", (Unset, "foo")) + def test_add_to_argparser( + self, + cmake_var: _CMakeVar | None, + ephemeral: bool, + ty: type, + nargs: str | Any, + const: bool | Any, + default: bool | Any, + metavar: str | Any, + ) -> None: + parser = ArgumentParser() + arg = ConfigArgument( + name="--foo", + spec=ArgSpec( + dest="bar", + type=ty, + nargs=nargs, # type: ignore[arg-type] + const=const, + default=default, + metavar=metavar, + ), + cmake_var=cmake_var, + ephemeral=ephemeral, + ) + + arg.add_to_argparser(parser) + assert len(parser._actions) == 2 # has implicit help action + action = parser._actions[1] + assert action.option_strings == ["--foo"] + assert action.dest == "bar" + if ty is bool: + assert action.nargs == ("?" 
if nargs is Unset else nargs) + assert action.const is (True if const is Unset else const) + assert action.default is (False if default is Unset else default) + assert action.metavar == ("bool" if metavar is Unset else metavar) + assert action.type is ConfigArgument._str_to_bool + else: + assert action.nargs == (None if nargs is Unset else nargs) + assert action.const == (None if const is Unset else const) + assert action.default == (None if default is Unset else default) + assert action.metavar == (None if metavar is Unset else metavar) + assert action.type is ty + assert action.choices is None + assert action.required is False + assert action.help is None + + +class TestExclusiveArgumentGroup: + @pytest.mark.parametrize("required", (True, False)) + def test_create(self, required: bool) -> None: + foo = ConfigArgument(name="--foo", spec=ArgSpec(dest="foo")) + bar = ConfigArgument(name="--bar", spec=ArgSpec(dest="bar")) + group = ExclusiveArgumentGroup(required=required, Foo=foo, Bar=bar) + + assert group.group == {"Foo": foo, "Bar": bar} + assert group.Foo is foo # type: ignore[attr-defined] + assert group.Bar is bar # type: ignore[attr-defined] + assert group.required == required + + def test_create_bad(self) -> None: + foo = ConfigArgument(name="--foo", spec=ArgSpec(dest="foo")) + with pytest.raises( + LengthError, + match="Must supply at least 2 arguments to exclusive group", + ): + ExclusiveArgumentGroup() + + with pytest.raises( + LengthError, + match="Must supply at least 2 arguments to exclusive group", + ): + ExclusiveArgumentGroup(foo=foo) + + bar = 10 + with pytest.raises( + TypeError, match=f"Argument Bar wrong type: {type(bar)}" + ): + ExclusiveArgumentGroup(Foo=foo, Bar=bar) # type: ignore[arg-type] + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/util/test_callables.py b/config/aedifix/tests/util/test_callables.py new file mode 100644 index 0000000000..b209b0ea6c --- /dev/null +++ b/config/aedifix/tests/util/test_callables.py @@ -0,0 +1,96 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from pathlib import Path +from typing import TYPE_CHECKING, Any + +import pytest + +from ...util.callables import classify_callable, get_calling_function + +if TYPE_CHECKING: + from collections.abc import Callable + + +def foo() -> Any: + return get_calling_function() + + +class Foo: + # type hinting these properly would just confuse the type hinters + # (not to mention, recurse infinitely). So these are just any's. + def method(self) -> Any: + return foo() + + @classmethod + def class_method(cls) -> Any: + return foo() + + @property + def prop(self) -> Any: + return foo() + + def __call__(self) -> Any: + return foo() + + +class TestGetCallingFunction: + def test_bare_func(self) -> None: + def bar() -> Callable[[], Any]: + return foo() + + assert foo() == self.test_bare_func + assert bar() == bar + + def test_class(self) -> None: + inst = Foo() + assert inst.method() == inst.method + assert inst.class_method() == inst.class_method + # Error: "Callable[[Foo], Any]" has no attribute "fget" [attr-defined] + # + # ... 
yes it obviously does you absolute dunce + assert inst.prop == Foo.prop.fget # type: ignore[attr-defined] + assert inst() == inst.__call__ + + +class TestClassifyCallable: + def test_func(self) -> None: + qualname, path, lineno = classify_callable(foo) + assert qualname == "config.aedifix.tests.util.test_callables.foo" + assert path == Path(__file__) + assert lineno == 18 # Unfortunately a brittle test... + + qualname, path, lineno = classify_callable(Foo().method) + assert ( + qualname == "config.aedifix.tests.util.test_callables.Foo.method" + ) + assert path == Path(__file__) + assert lineno == 25 # Unfortunately a brittle test... + + qualname, path, lineno = classify_callable(Foo.class_method) + assert ( + qualname + == "config.aedifix.tests.util.test_callables.Foo.class_method" + ) + assert path == Path(__file__) + assert lineno == 28 # Unfortunately a brittle test... + + prop_function = Foo.prop.fget # type: ignore[attr-defined] + qualname, path, lineno = classify_callable(prop_function) + assert qualname == "config.aedifix.tests.util.test_callables.Foo.prop" + assert path == Path(__file__) + assert lineno == 32 # Unfortunately a brittle test... + + qualname, path, lineno = classify_callable(Foo().__call__) + assert ( + qualname == "config.aedifix.tests.util.test_callables.Foo.__call__" + ) + assert path == Path(__file__) + assert lineno == 36 # Unfortunately a brittle test... + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/util/test_cl_arg.py b/config/aedifix/tests/util/test_cl_arg.py new file mode 100644 index 0000000000..f6c5358798 --- /dev/null +++ b/config/aedifix/tests/util/test_cl_arg.py @@ -0,0 +1,46 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from typing import TypeVar + +import pytest + +from ...util.cl_arg import CLArg + +_T = TypeVar("_T") + + +class TestCLArg: + @pytest.mark.parametrize("name", ("foo", "bar")) + @pytest.mark.parametrize( + ("value", "new_val"), + ((True, False), (1, -1), (2.0, 123123123.0), ("three", "four")), + ) + @pytest.mark.parametrize("cl_set", (True, False)) + def test_create( + self, name: str, value: _T, new_val: _T, cl_set: bool + ) -> None: + clarg = CLArg(name=name, value=value, cl_set=cl_set) + assert clarg.name == name + assert clarg.value == value + assert clarg.cl_set == cl_set + + clarg.value = new_val + assert clarg.name == name + assert clarg.value == new_val + assert clarg.cl_set is False + + @pytest.mark.parametrize("name", ("foo", "bar")) + @pytest.mark.parametrize("value", (True, 1, 2.0, "three")) + @pytest.mark.parametrize("cl_set", (True, False)) + def test_eq(self, name: str, value: _T, cl_set: bool) -> None: + lhs = CLArg(name=name, value=value, cl_set=cl_set) + rhs = CLArg(name=name, value=value, cl_set=cl_set) + assert lhs == rhs + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/util/test_load_module.py b/config/aedifix/tests/util/test_load_module.py new file mode 100644 index 0000000000..fe1b11eacc --- /dev/null +++ b/config/aedifix/tests/util/test_load_module.py @@ -0,0 +1,67 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest + +from ...util.load_module import load_module_from_path +from ..fixtures import dummy_module + +if TYPE_CHECKING: + from types import ModuleType + + +@pytest.fixture +def module_path() -> Path: + return ( + Path(__file__).resolve().parent.parent / "fixtures" / "dummy_module.py" + ) + + +class TestLoadModule: + def do_module_checks(self, mod: ModuleType) -> None: + # Unfortunately, due to some idiosyncrasies with the loading + # mechanism, the modules are not the same, because the path-loaded + # module does not retain any __package__ information. So we must resort + # to checking magic numbers and attributes to ensure we've loaded the + # intended module correctly. + assert mod is not dummy_module + assert mod.__file__ == dummy_module.__file__ + assert mod.MODULE_ATTRIBUTE == dummy_module.MODULE_ATTRIBUTE + assert ( + mod.function.__code__.co_filename + == dummy_module.function.__code__.co_filename + ) + assert ( + mod.function.MAGIC_NUMBER == dummy_module.function.MAGIC_NUMBER # type: ignore[attr-defined] + ) + assert mod.Class.MAGIC_ATTR == dummy_module.Class.MAGIC_ATTR + + def test_load_module_from_path(self, module_path: Path) -> None: + mod = load_module_from_path(module_path) + self.do_module_checks(mod) + + def test_load_module_from_str(self, module_path: Path) -> None: + mod = load_module_from_path(str(module_path)) + self.do_module_checks(mod) + + def test_load_module_from_path_bad(self) -> None: + path = Path("/foo/bar/baz") + assert not path.exists(), "Well well well..." + modules_cpy = dict(sys.modules.items()) + with pytest.raises( + (ImportError, FileNotFoundError), + match=rf"\[Errno \d+\] No such file or directory: '{path}'", + ): + load_module_from_path(path) + + assert sys.modules == modules_cpy + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/tests/util/test_utility.py b/config/aedifix/tests/util/test_utility.py new file mode 100644 index 0000000000..3dd3b56a0d --- /dev/null +++ b/config/aedifix/tests/util/test_utility.py @@ -0,0 +1,135 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys + +import pytest + +from ...util.utility import ( + deduplicate_command_line_args, + dest_to_flag, + flag_to_dest, + partition_argv, + prune_command_line_args, +) + + +class TestUtility: + def test_prune_command_line_args_empty(self) -> None: + argv = [ + "--foo", + "--bar=1", + "--baz", + "0", + "1", + "asdasd", + "--remain", + "--foo=yes", + "--bar", + "1", + ] + + new_argv = prune_command_line_args(argv, set()) + assert new_argv == argv + + def test_prune_command_line_args(self) -> None: + to_remove = {"--foo", "--bar", "--baz"} + argv = [ + "--foo", + "--bar=1", + "--baz", + "0", + "1", + "asdasd", + "--remain", + "--foo=yes", + "--bar", + "1", + ] + + new_argv = prune_command_line_args(argv, to_remove) + assert new_argv == ["--remain"] + + def test_prune_command_line_args_bad(self) -> None: + bad_remove = {"asdasd", "asdau999"} + + with pytest.raises( + ValueError, match=r"Argument '.*' must start with '-'" + ): + prune_command_line_args([], bad_remove) + + def test_deduplicate_command_line_args(self) -> None: + argv = [ + "--foo=1", + "--foo", + "--foo=45", + "--hello", + "world", + "--foo", + "2", + "--bar", + "--baz=17", + ] + new_argv = deduplicate_command_line_args(argv) + assert new_argv == [ + "--foo", + "2", + "--hello", + "world", + "--bar", + "--baz=17", + ] + + def test_deduplicate_command_line_args_empty(self) -> None: + new_argv = deduplicate_command_line_args([]) + assert new_argv == [] + + def test_deduplicate_command_line_args_positional_arg(self) -> None: + new_argv = deduplicate_command_line_args(["foo", "--bar", "--foo"]) + assert new_argv == ["foo", "--bar", "--foo"] + + @pytest.mark.parametrize( + ("flag_str", "expected"), + (("", ""), ("--foo", "foo"), ("-f", "f"), ("-foo--bar", "foo__bar")), + ) + def test_flag_to_dest(self, flag_str: str, expected: str) -> None: + assert flag_to_dest(flag_str) == expected + + @pytest.mark.parametrize( + ("dest_str", "expected"), + ( + ("", "--"), + ("foo", "--foo"), + ("f", "--f"), + ("foo_bar", "--foo-bar"), + ("foo-bar", "--foo-bar"), + ), + ) + def test_dest_to_flag(self, dest_str: str, expected: str) -> None: + assert dest_to_flag(dest_str) == expected + + @pytest.mark.parametrize( + ("argv", "expected"), + [ + ([], ([], [])), + (["-foo"], (["-foo"], [])), + (["-foo", "--bar"], (["-foo", "--bar"], [])), + (["--foo", " -- "], (["--foo"], [])), + (["--foo", " -- ", "-baz"], (["--foo"], ["-baz"])), + ([" --", "-baz", "-bop"], ([], ["-baz", "-bop"])), + (["--"], ([], [])), + ], + ) + def test_partition_argv( + self, argv: list[str], expected: tuple[list[str], list[str]] + ) -> None: + main_expected, rest_expected = expected + main_ret, rest_ret = partition_argv(argv) + assert main_ret == main_expected + assert rest_ret == rest_expected + + +if __name__ == "__main__": + sys.exit(pytest.main()) diff --git a/config/aedifix/util/__init__.py b/config/aedifix/util/__init__.py new file mode 100644 index 0000000000..d7cdd7b63e --- /dev/null +++ b/config/aedifix/util/__init__.py @@ -0,0 +1,4 @@ +# SPDX-FileCopyrightText: Copyright (c) 2021-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
diff --git a/config/aedifix/util/argument_parser.py b/config/aedifix/util/argument_parser.py
new file mode 100644
index 0000000000..3a77dae1cb
--- /dev/null
+++ b/config/aedifix/util/argument_parser.py
@@ -0,0 +1,193 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from argparse import (
+    Action,
+    ArgumentParser,
+    ArgumentTypeError,
+    _ArgumentGroup as ArgumentGroup,
+)
+from dataclasses import (
+    dataclass,
+    fields as dataclasses_fields,
+    replace as dataclasses_replace,
+)
+from typing import TYPE_CHECKING, Any, Literal, TypeAlias, TypeVar
+
+from .exception import LengthError
+
+if TYPE_CHECKING:
+    from collections.abc import Callable, Sequence
+
+    from ..cmake.cmake_flags import _CMakeVar
+
+
+# https://docs.python.org/3/library/argparse.html#action
+ActionType: TypeAlias = (
+    Literal[
+        "store",
+        "store_const",
+        "store_true",
+        "append",
+        "append_const",
+        "count",
+        "help",
+        "version",
+        "extend",
+    ]
+    | type[Action]
+)
+
+# https://docs.python.org/3/library/argparse.html#nargs
+NargsType: TypeAlias = Literal["?", "*", "+", "..."]
+
+
+class Unset:
+    pass
+
+
+T = TypeVar("T")
+
+NotRequired: TypeAlias = type[Unset] | T
+
+
+@dataclass(slots=True, frozen=True)
+class ArgSpec:
+    dest: str
+    action: NotRequired[ActionType] = Unset
+    nargs: NotRequired[int | NargsType] = Unset
+    const: NotRequired[Any] = Unset
+    default: NotRequired[Any] = Unset
+    type: NotRequired[type[Any] | Callable[[str], Any]] = Unset
+    choices: NotRequired[Sequence[Any]] = Unset
+    help: NotRequired[str] = Unset
+    metavar: NotRequired[str] = Unset
+    required: NotRequired[bool] = Unset
+
+    def as_pruned_dict(self) -> dict[str, Any]:
+        ret = {}
+        for f in dataclasses_fields(self):
+            name = f.name
+            value = getattr(self, name)
+            if value is not Unset:
+                ret[name] = value
+        return ret
+
+
+@dataclass(slots=True, frozen=True)
+class ConfigArgument:
+    name: str
+    """
+    The command-line flag, e.g. --with-foo. This variable is pretty poorly
+    named (no pun intended); it should really just be 'flag'.
+    """
+
+    spec: ArgSpec
+    """The argparse argument spec."""
+
+    cmake_var: _CMakeVar | None = None
+    """The CMake variable corresponding to this argument."""
+
+    ephemeral: bool = False
+    """
+    Whether the flag should be stored in the reconfigure script. An ephemeral
+    flag (when this is True) will NOT be stored in the reconfigure script.
+    This is used, for example, for the --with-clean flag, since it should
+    only happen once. Running reconfigure should not delete the arch directory
+    over and over again.
+    """
+
+    enables_package: bool = False
+    """
+    Whether this flag 'enables' the package in question. If True, and there is
+    a truthy value given on the command line for the flag, then the package
+    will consider itself enabled. This is commonly used with the
+    --with-foo-dir class of flags. --with-foo-dir implies --with-foo, so the
+    user shouldn't have to pass both.
+    """
+
+    primary: bool = False
+    """
+    Whether this flag is the 'primary' enabler or disabler of the package. The
+    primary flag is the ultimate tie-breaker in deciding if a package is
+    enabled or disabled. The rationale is that --with-cuda-dir=path
+    --with-cudac=nvcc --with-cuda=0 should always result in CUDA being
+    disabled even though all the other flags are truthy.
+ """ + + @staticmethod + def _str_to_bool(v: str | bool) -> bool: + if isinstance(v, bool): + return v + match v.casefold(): + case "yes" | "true" | "t" | "y" | "1": + return True + case "no" | "false" | "f" | "n" | "0" | "": + return False + case _: + pass + msg = f"Boolean value expected, got '{v}'" + raise ArgumentTypeError(msg) + + def add_to_argparser(self, parser: ArgumentParser | ArgumentGroup) -> None: + r"""Add the contents of this ConfigArgument to an argument parser. + + Parameters + ---------- + parser : ArgumentParser | ArgumentGroup + The argument parser to add to. + """ + spec = self.spec + if spec.type is bool: + to_replace: dict[str, Any] = {"type": self._str_to_bool} + + def replace_if_unset(attr_name: str, value: Any) -> None: + if getattr(spec, attr_name) is Unset: + to_replace[attr_name] = value + + replace_if_unset("nargs", "?") + replace_if_unset("const", True) + replace_if_unset("default", False) + replace_if_unset("metavar", "bool") + spec = dataclasses_replace(spec, **to_replace) + + kwargs = spec.as_pruned_dict() + parser.add_argument(self.name, **kwargs) + + +class ExclusiveArgumentGroup: + def __init__( + self, *, required: bool = False, **kwargs: ConfigArgument + ) -> None: + r"""Construct an ExclusiveArgumentGroup. + + Parameters + ---------- + required : bool, False + Whether the argument group requires one of the arguments + to be set. + **kwargs : ConfigArgument + The ConfigArgument's that make up this argument group. + + Raises + ------ + LengthError + If the number of arguments is less than 2 (since that would be + pointless). + TypeError + If any of **kwargs is not a ConfigArgument. + """ + if len(kwargs) < 2: # noqa: PLR2004 + msg = "Must supply at least 2 arguments to exclusive group" + raise LengthError(msg) + self.group = kwargs + self.required = required + for attr, value in kwargs.items(): + if not isinstance(value, ConfigArgument): + # Obviously this _should_ be unreachable, but bugs happen all + # the time :) + msg = f"Argument {attr} wrong type: {type(value)}" # type: ignore[unreachable] + raise TypeError(msg) + setattr(self, attr, value) diff --git a/config/aedifix/util/callables.py b/config/aedifix/util/callables.py new file mode 100644 index 0000000000..d545ce5bc8 --- /dev/null +++ b/config/aedifix/util/callables.py @@ -0,0 +1,225 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import inspect +from functools import lru_cache +from pathlib import Path +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from collections.abc import Callable + from types import CodeType, FrameType + +_get_calling_function_called = 0 + + +class GetCallingFuncRecursionError(Exception): + pass + + +# regular function or staticmethod +def _gcf_function(fr: FrameType, co: CodeType) -> Callable[..., Any]: + return fr.f_globals[co.co_name] + + +def _gcf_method(fr: FrameType, co: CodeType) -> Callable[..., Any]: + obj = fr.f_locals["self"] + name = co.co_name + try: + return getattr(obj, name) + except GetCallingFuncRecursionError: + # it's possible that the "function" is actually a property, e.g.: + # + # class Foo: + # @property + # def foo(self): + # return get_calling_function() + # + # In which case this would infinitely recurse. 
So we have to bypass the + # descriptor, and try and retrieve the actual + # function + prop = inspect.getattr_static(obj, name) + if ( + isinstance(prop, property) + and (prop_getter := prop.fget) is not None + ): + return prop_getter + # Another game is afoot... + raise + + +def _gcf_classmethod(fr: FrameType, co: CodeType) -> Callable[..., Any]: + return getattr(fr.f_locals["cls"], co.co_name) + + +def _gcf_get_local(fr: FrameType, name: str) -> Callable[..., Any]: + f_back = fr.f_back + # Item "None" of "FrameType | None" has no attribute "f_locals" + # [union-attr] + return f_back.f_locals[name] # type: ignore[union-attr] + + +def _gcf_nested(fr: FrameType, co: CodeType) -> Callable[..., Any]: + return _gcf_get_local(fr, co.co_name) + + +def _gcf_functools_wraps(fr: FrameType, _co: CodeType) -> Callable[..., Any]: + return _gcf_get_local(fr, "func") + + +def _gcf_misc_1(fr: FrameType, _co: CodeType) -> Callable[..., Any]: + return _gcf_get_local(fr, "meth") + + +def _gcf_misc_2(fr: FrameType, _co: CodeType) -> Callable[..., Any]: + return _gcf_get_local(fr, "f") + + +def _get_calling_function_impl() -> Callable[..., Any]: + stack = inspect.stack() + maxidx = 20 + for idx in range( + 3, # the caller of the function that called get_calling_function() + maxidx, # if anyone is more than 20 ignores deep, probably a bug + ): + try: + fr = stack[idx].frame + except IndexError: + break + co = fr.f_code + + getters = ( + _gcf_function, + _gcf_method, + _gcf_classmethod, + _gcf_nested, + _gcf_functools_wraps, + _gcf_misc_1, + _gcf_misc_2, + ) + for getter in getters: + try: + func = getter(fr, co) + except (KeyError, AttributeError): + continue + if func.__code__ != co: + continue + if getattr(func, "__config_log_ignore___", False): + # found a passthrough function, continue searching up the stack + break + return func + else: + # we did not break due to ignores, so we failed to find the caller + break + else: + # We exhausted the range iterator + msg = ( + f"Iterated {maxidx} times trying to determine the calling " + "function, but failed to find it. This is likely a bug! " + f"Stack: {stack}" + ) + raise AssertionError(msg) + raise ValueError + + +def get_calling_function() -> Callable[..., Any]: + r"""Finds the calling function in many decent cases. + + Returns + ------- + func : Any + The function or method object that called this function. + + Raises + ------ + ValueError + If the calling function cannot be determined. + """ + global _get_calling_function_called # noqa: PLW0603 + + if _get_calling_function_called: + raise GetCallingFuncRecursionError + + _get_calling_function_called += 1 + try: + return _get_calling_function_impl() + finally: + _get_calling_function_called -= 1 + assert _get_calling_function_called >= 0 + + +def _is_classmethod(method: Any) -> bool: + bound_to = getattr(method, "__self__", None) + if not isinstance(bound_to, type): + # must be bound to a class + return False + name = method.__name__ + for cls in bound_to.__mro__: + descriptor = vars(cls).get(name) + if descriptor is not None: + return isinstance(descriptor, classmethod) + return False + + +@lru_cache +def classify_callable( + fn: Callable[..., Any], *, fully_qualify: bool = True +) -> tuple[str, Path, int]: + r"""Classify a callable object. + + Parameters + ---------- + fn : Callable + The callable object to classify. + fully_qualify : bool, True + Whether to return the fully qualified name, or just the short name + of `fn`. + + Returns + ------- + qualname : str + The qualified name of `fn`. 
+ src_file : Path + The full path to the source file where `fn` was defined. + lineno : int + The line number in `src_file` where `fn` was defined. + + Raises + ------ + TypeError + If `fn` is not a callable object, or not handled by this method. + """ + if inspect.ismethod(fn): # method or classmethod + if _is_classmethod(fn): + class_obj = fn.__self__ + else: + class_obj = fn.__self__.__class__ + assert hasattr(class_obj, "__name__") # appease mypy + if fully_qualify: + qualname = ( + f"{class_obj.__module__}.{class_obj.__name__}.{fn.__name__}" + ) + else: + qualname = f"{class_obj.__name__}.{fn.__name__}" + func_obj = fn.__func__ + elif inspect.isfunction(fn): + module = inspect.getmodule(fn) + assert module is not None, ( + f"Could not determine host module for function {fn}" + ) + if fully_qualify: + qualname = f"{module.__name__}.{fn.__qualname__}" + else: + qualname = fn.__qualname__ + func_obj = fn + else: + raise TypeError(fn) + + src_file = inspect.getsourcefile(fn) + assert src_file is not None, ( + f"Could not determine source file for function {fn}" + ) + lineno = func_obj.__code__.co_firstlineno + return qualname, Path(src_file), lineno diff --git a/config/aedifix/util/cl_arg.py b/config/aedifix/util/cl_arg.py new file mode 100644 index 0000000000..27afdd37b0 --- /dev/null +++ b/config/aedifix/util/cl_arg.py @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class CLArg(Generic[_T]): + __slots__ = "_cl_set", "_name", "_value" + + def __init__(self, name: str, value: _T | None, *, cl_set: bool) -> None: + r"""Construct a ``CLArg``. + + Parameters + ---------- + name : str + The name of the command line argument. + value : T + The value of the command line argument. + cl_set : bool + True if the value was set by the user on the command line, False + otherwise. + """ + self._name = name + self._value = value + self._cl_set = cl_set + + @property + def name(self) -> str: + r"""Get the name of a command line argument. + + Returns + ------- + name : str + The name of the command line argument. + """ + return self._name + + @property + def value(self) -> _T | None: + r"""Get the value of the command line argument. + + Returns + ------- + value : T + The value of the command line argument. + """ + return self._value + + @value.setter + def value(self, val: _T) -> None: + r"""Set the value of a command line argument. + + Parameters + ---------- + val : T + The new value. + """ + self._value = val + self._cl_set = False + + @property + def cl_set(self) -> bool: + r"""Get whether the value was set by the user on command line. + + Returns + ------- + set : bool + True if set by the user, false otherwise. + """ + return self._cl_set + + def __eq__(self, other: object) -> bool: + if not isinstance(other, CLArg): + return NotImplemented + return ( + (self.name == other.name) + and (self.value == other.value) + and (self.cl_set == other.cl_set) + ) + + def __repr__(self) -> str: + return ( + "CLArg(" + f"name={self.name}, " + f"value={self.value}, " + f"cl_set={self.cl_set}" + ")" + ) diff --git a/config/aedifix/util/exception.py b/config/aedifix/util/exception.py new file mode 100644 index 0000000000..beabdc8d65 --- /dev/null +++ b/config/aedifix/util/exception.py @@ -0,0 +1,59 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. 
+# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + + +class BaseError(Exception): + r"""Base exception.""" + + +class UnsatisfiableConfigurationError(BaseError): + r"""An error as a result of user configuration that cannot be satisfied. + For example, the user has requested X, but it does not work. Or the user + has told us to look for Y in Z, but we did not find it there. + """ + + +class CMakeConfigureError(BaseError): + r"""An error as a result of CMake failing.""" + + +class LengthError(BaseError): + r"""An exception to signify an object that is not of the right length.""" + + +class WrongOrderError(BaseError): + r"""An error raised when an operation is performed in the wrong order, e.g. + accessing an object before it has been setup, or retrieving a resource + before it has been registered. + """ + + +class CommandError(BaseError): + r"""An error raised when an external command returns an error.""" + + def __init__( + self, + return_code: int, + stdout: str, + stderr: str, + summary: str | None = None, + ) -> None: + self.return_code = return_code + self.stdout = stdout + self.stderr = stderr + if summary is None: + summary = self._make_summary(return_code, stdout, stderr) + self.summary = summary + + @staticmethod + def _make_summary(return_code: int, stdout: str, stderr: str) -> str: + lines = ( + f"Subprocess error, returned exit-code: {return_code}", + "stdout:", + f"{stdout}", + "stderr:", + f"{stderr}", + ) + return "\n".join(lines) diff --git a/config/aedifix/util/load_module.py b/config/aedifix/util/load_module.py new file mode 100644 index 0000000000..d4023524d6 --- /dev/null +++ b/config/aedifix/util/load_module.py @@ -0,0 +1,45 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import pydoc +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pathlib import Path + from types import ModuleType + + +def load_module_from_path(path: Path | str) -> ModuleType: + r"""Load a module given an absolute path. + + Parameters + ---------- + path : Path | str + The absolute path to the python module to load. + + Returns + ------- + mod : ModuleType + The loaded module. + + Raises + ------ + ImportError + If the module failed to import. + """ + # I tried every given solution at: + # + # https://stackoverflow.com/questions/67631/how-can-i-import-a-module-dynamically-given-the-full-path + # + # and this was -- unironically -- the most complete answer. All of the + # others would produce usable modules, but then some thing or another would + # be "off" about them. For example, inspect.getmodule() would fail to find + # the module (i.e. return None), or sometimes they were missing __name__ + # and __package__ attributes (i.e. empty). + try: + return pydoc.importfile(str(path)) + except pydoc.ErrorDuringImport as edi: + msg = f"Failed to import {path}" + raise ImportError(msg) from edi diff --git a/config/aedifix/util/utility.py b/config/aedifix/util/utility.py new file mode 100644 index 0000000000..99f10499f8 --- /dev/null +++ b/config/aedifix/util/utility.py @@ -0,0 +1,513 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import re +import enum +import shlex +import platform +import sysconfig +import subprocess +from pathlib import Path +from signal import SIGINT +from subprocess import ( + PIPE, + STDOUT, + CalledProcessError, + CompletedProcess, + Popen, + TimeoutExpired, +) +from sys import version_info +from typing import TYPE_CHECKING, Any, Final, TypeVar + +from .exception import CommandError + +if TYPE_CHECKING: + from collections.abc import Callable, Iterable, Sequence + + from ..base import Configurable + +_T = TypeVar("_T") + + +def subprocess_check_returncode( + ret: CompletedProcess[_T], +) -> CompletedProcess[_T]: + r"""Check the return code of a subprocess return value. + + Parameters + ---------- + ret : CompletedProcess + the return value of `subprocess.run()` + + Returns + ------- + ret : CompletedProcess + `ret` unchanged + + Raises + ------ + CommandError + If `ret.returncode` is nonzero + """ + try: + ret.check_returncode() + except CalledProcessError as cpe: + emess = "\n".join( + [ + "Subprocess error:", + "stdout:", + f"{cpe.stdout}", + "stderr:", + f"{cpe.stderr}", + f"{cpe}", + ] + ) + raise CommandError( + return_code=cpe.returncode, + stdout=cpe.stdout, + stderr=cpe.stderr, + summary=emess, + ) from cpe + return ret + + +def subprocess_capture_output( + *args: Any, check: bool = True, **kwargs: Any +) -> CompletedProcess[str]: + r"""Lightweight wrapper over `subprocess.run()`. + + Parameters + ---------- + *args : Any + Arguments to `subprocess.run()`. + check : bool, True + Whether to check the return code. + **kwargs : Any + Keyword arguments to `subprocess.run()` + + Returns + ------- + ret : CompletedProcess + The return value of `subprocess.run()`. + + Raises + ------ + CommandError + If `subprocess.run()` raises a `subprocess.CalledProcessError`, this + routine converts it into a `CommandError` with the output attached. + + Notes + ----- + Turns a `subprocess.CalledProcessError` into a `CommandError` with more + diagnostics. + """ + ret = subprocess.run( + *args, capture_output=True, text=True, check=False, **kwargs + ) + if check: + ret = subprocess_check_returncode(ret) + return ret + + +def _normalize_output(output: bytes | None | str) -> str: + match output: + case None: + return "" + case str(): + return output + case bytes(): + return output.decode() + + +def subprocess_capture_output_live_impl( + callback: Callable[[str, str], None], *args: Any, **kwargs: Any +) -> CompletedProcess[str]: + kwargs.setdefault("stdout", PIPE) + kwargs.setdefault("stderr", STDOUT) + kwargs["universal_newlines"] = True + kwargs["text"] = True + + timeout = kwargs.pop("timeout", 1) + + total_stdout_len = 0 + total_stderr_len = 0 + stdout = "" + stderr = "" + + with Popen(*args, **kwargs) as process: + done = False + while not done: + try: + stdout, stderr = process.communicate(timeout=timeout) + except TimeoutExpired as te_exn: + stdout = _normalize_output(te_exn.stdout) + stderr = _normalize_output(te_exn.stderr) + except KeyboardInterrupt: + process.send_signal(SIGINT) + raise + except: + process.kill() + raise + else: + # Don't break, instead wait for end of loop, in case stdout + # and/or stderr were updated. + done = True + stderr = _normalize_output(stderr) + stdout = _normalize_output(stdout) + + # The streams will always contain the sum-total output of the + # subprocess call, so we need to strip the stuff we've already + # "seen" from the output before sending it to the callback. 
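+            #
+            # A sketch of the bookkeeping (hypothetical values): if a prior
+            # pass saw stdout == "ab" (total_stdout_len == 2) and the stream
+            # now holds "abcd", only the new suffix is forwarded to the
+            # callback:
+            #
+            #   stdout[total_stdout_len:]  ->  "abcd"[2:]  ->  "cd"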
+ new_stdout = stdout[total_stdout_len:] + new_stderr = stderr[total_stderr_len:] + if new_stdout or new_stderr: + # Now we replace the complete stdout + total_stdout_len = len(stdout) + total_stderr_len = len(stderr) + callback(new_stdout, new_stderr) + + retcode = process.poll() + if retcode is None: + retcode = 0 + + return CompletedProcess(process.args, retcode, stdout, stderr) + + +def subprocess_capture_output_live( + *args: Any, + callback: Callable[[str, str], None] | None = None, + check: bool = True, + **kwargs: Any, +) -> CompletedProcess[str]: + r"""Execute a subprocess call with a live callback. + + Parameters + ---------- + *args : Any + Positional arguments to Popen. + callback : Callable[[str, str], None], optional + The callback to intermittently execute. + check : bool, True + Whether to check the returncode. + **kwargs : Any + Keyword arguments to Popen. + + Returns + ------- + ret : CompletedProcess + The object representing the subprocess results. + + Raises + ------ + CommandError + If `subprocess.run()` raises a `subprocess.CalledProcessError`, this + routine converts it into a `CommandError` with the output attached. + + Notes + ----- + The utility of this routine is to be able to monitor the output of the + running subprocess in real time. This is done via the callback argument, + which takes as arguments the stdout and stderr of the executing process. + + If callback is None, this routine is identical to + subprocess_capture_output(). + """ + if callback is None: + return subprocess_capture_output(*args, check=check, **kwargs) + + ret = subprocess_capture_output_live_impl(callback, *args, **kwargs) + if check: + ret = subprocess_check_returncode(ret) + return ret + + +def copy_doc(source: Any) -> Callable[[_T], _T]: + r"""Copy the docstring from one object to another. + + Parameters + ---------- + source : Any + The object to copy the docstring from + + Returns + ------- + wrapper : Callable[[T], T] + A wrapper which takes a target object and sets it docstring to that + of `source` + """ + + def wrapper(target: _T) -> _T: + if (sdoc := getattr(source, "__doc__", None)) is not None: + target.__doc__ = sdoc + return target + + return wrapper + + +class ValueProvenance(enum.Enum): + COMMAND_LINE = enum.auto() + ENVIRONMENT = enum.auto() + GENERATED = enum.auto() + + +def find_active_python_version_and_path() -> tuple[str, Path]: + r"""Determine the current Python version and the path to its shared + library. + + Returns + ------- + version : str + The current Python version as a string. + lib_path : Path + The full path to the python shared library. + + Raises + ------ + FileNotFoundError + If the python shared library could not be located. + """ + # Launching a sub-process to do this in a general way seems hard + version = f"{version_info.major}.{version_info.minor}.{version_info.micro}" + cv = sysconfig.get_config_vars() + # Homebrew or pkg mgr installations may give bad values for LDLIBRARY. + # Uses a fallback default path in case LDLIBRARY fails. 
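+    #
+    # For example (hypothetical value): LDVERSION == "3.12" gives a fallback
+    # name of "libpython3.12.a"; the static ".a" suffix is then swapped for
+    # the platform shared-library suffix below:
+    #
+    #   Path("libpython3.12.a").with_suffix(".so")  # -> libpython3.12.so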
+ default_libname = f"libpython{cv['LDVERSION']}.a" + libdirs = [str(cv["LIBDIR"]), str(cv["LIBPL"])] + libnames = [str(cv["LDLIBRARY"]), default_libname] + paths = [ + libdir / libname + for libdir in map(Path, libdirs) + for libname in libnames + ] + # ensure that static libraries are replaced with the dynamic version + shlib_suffix = ".dylib" if platform.system() == "Darwin" else ".so" + paths = [p.with_suffix(shlib_suffix) for p in paths] + paths = [p for p in paths if p.is_file()] + try: + py_lib_path = paths[0] + except IndexError as ie: + msg = "Could not auto-locate Python library" + raise FileNotFoundError(msg) from ie + + if not py_lib_path.exists(): + msg = ( + "Could not auto-locate Python library, " + f"found library ({py_lib_path}) does not appear to exist" + ) + raise RuntimeError(msg) + return version, py_lib_path + + +def prune_command_line_args( + argv: Sequence[str], remove_args: set[str] +) -> list[str]: + r"""Remove a set of command line arguments from argv. + + Parameters + ---------- + argv : Sequence[str] + The command line arguments to prune. + remove_args : set[str] + The arguments to remove. + + Returns + ------- + argv : list[str] + The pruned command line arguments. + + Raises + ------ + ValueError + If any of the arguments in `remove_args` do not start with '-'. + """ + for arg in remove_args: + if not arg.startswith("-"): + msg = f"Argument '{arg}' must start with '-'" + raise ValueError(msg) + + if not remove_args: + return list(argv) + + idx = 0 + cl_args = [] + nargs = len(argv) + while idx < nargs: + arg = argv[idx] + idx += 1 + if arg.split("=")[0] in remove_args: + # we intend to skip this argument + # if "=" in arg: + # # have --foo=x, can bail now + # continue + # have + # + # --foo[=something] --bar + # + # So we want to iterate through array until we find the + # next flag. + while idx < nargs: + if (arg := argv[idx]).startswith("-"): + # found flag + break + idx += 1 + continue + cl_args.append(arg) + return cl_args + + +def deduplicate_command_line_args(argv: Sequence[str]) -> list[str]: + r"""Deduplicate a set of command-line arguments. + + Parameters + ---------- + argv : Sequence[str] + The command line arguments to deduplicate. + + Returns + ------- + argv : list[str] + The deduplicated command line arguments. + + Notes + ----- + Deduplicates the arguments by keeping only the *last* occurrence of each + command line flag and its values. + """ + # A dummy name that is used only in case the first arguments are + # positional. Currently configure does not actually have any such arguments + # (and, in fact, this function does not handle any remaining positional + # arguments correctly), but good to be forward-looking. + arg_name = "===POSITIONAL=FIRST=ARGUMENTS===" + last_seen: dict[str, list[str]] = {arg_name: []} + for arg in argv: + if arg.startswith("-"): + # --foo=bar + # -> arg_name = --foo + # -> *rest = bar + arg_name, *rest = arg.split("=") + # Clobbering the old last_seen[arg_name] is intentional + last_seen[arg_name] = [] + last_seen[arg_name].append(arg) + + return [v for values in last_seen.values() for v in values] + + +def flag_to_dest(flag: str) -> str: + r"""Convert a command-line flag to a 'dest' argument usable in an + ArgumentParser. + + Parameters + ---------- + flag : str + The flag to convert. + + Returns + ------- + dest : str + The flag in 'dest' form. 
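+
+    Notes
+    -----
+    For example, ``--foo-bar`` becomes ``foo_bar``, and ``-f`` becomes
+    ``f``.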
+ """ + return flag.lstrip("-").casefold().replace("-", "_") + + +def dest_to_flag(dest: str) -> str: + r"""Convert a 'dest' argument usable in an ArgumentParser to the + corresponding command-line flag. + + Parameters + ---------- + dest : str + The argument to convert. + + Returns + ------- + flag : str + The command-line flag. + """ + return "--" + dest.replace("_", "-") + + +def partition_argv(argv: Iterable[str]) -> tuple[list[str], list[str]]: + r"""Split a command-line list of arguments into 2. + + Parameters + ---------- + argv : Iterable[str] + The original argv to split. + + Returns + ------- + main_argv : list[str] + The argument before the first '--' + rest_argv : list[str] + The arguments after the first '--' + """ + main_argv = [] + rest_argv = [] + found_sep = False + for arg in argv: + if arg.strip() == "--": + found_sep = True + continue + + if found_sep: + rest_argv.append(arg) + else: + main_argv.append(arg) + + return main_argv, rest_argv + + +CMAKE_TEMPLATES_DIR: Final = Path(__file__).resolve().parents[1] / "templates" +CMAKE_CONFIGURE_FILE: Final = CMAKE_TEMPLATES_DIR / "configure_file.cmake" + +assert CMAKE_CONFIGURE_FILE.exists(), ( + f"Cmake configure file {CMAKE_CONFIGURE_FILE} does not exist" +) +assert CMAKE_CONFIGURE_FILE.is_file(), ( + f"Cmake configure file {CMAKE_CONFIGURE_FILE} is not a file" +) + + +def cmake_configure_file( + obj: Configurable, src_file: Path, dest_file: Path, defs: dict[str, Any] +) -> None: + r"""Configure a file using CMake's configure_file(). + + Parameters + ---------- + obj : Configurable + The configurable to use to launch the cmake command. + src_file : Path + The input file (i.e. the "template" file) to configure. + dest_file : Path + The destination file, where the output should be written. + defs : dict[str, Any] + A mapping of variable names to values to be replaced. I.e. ``@key@`` + will be replaced by ``value``. 
+ """ + cmake_exe = obj.manager.get_cmake_variable("CMAKE_COMMAND") + base_cmd = [ + cmake_exe, + f"-DAEDIFIX_CONFIGURE_FILE_SRC={src_file}", + f"-DAEDIFIX_CONFIGURE_FILE_DEST={dest_file}", + ] + defs_cmd = [ + f"-D{key}={shlex.quote(str(value))}" for key, value in defs.items() + ] + + if unhandled_subs := { + var_name + for var_name in re.findall(r"@([\w_]+)@", src_file.read_text()) + if var_name not in defs + }: + msg = ( + f"Substitution(s) {unhandled_subs} for {src_file} not found in " + f"defs {defs.keys()}" + ) + raise ValueError(msg) + + cmd = base_cmd + defs_cmd + ["-P", CMAKE_CONFIGURE_FILE] + obj.log_execute_command(cmd) diff --git a/config/examples/arch-ci-linux-docs.py b/config/examples/arch-ci-linux-docs.py new file mode 100755 index 0000000000..c19d940f71 --- /dev/null +++ b/config/examples/arch-ci-linux-docs.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + # legate args + f"--LEGATE_ARCH={Path(__file__).stem}", + "--build-type=debug", + "--cmake-generator=Ninja", + # common options + "--with-cuda=0", + "--with-python", + "--with-docs", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-ci-linux-gcc-py-pkgs-debug-sanitizer.py b/config/examples/arch-ci-linux-gcc-py-pkgs-debug-sanitizer.py new file mode 100755 index 0000000000..65089c16fc --- /dev/null +++ b/config/examples/arch-ci-linux-gcc-py-pkgs-debug-sanitizer.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + arch = Path(__file__).stem + sanitize_flags = [ + "-fsanitize=address,undefined,bounds", + "-fno-sanitize-recover=undefined", + ] + cxx_only_flags = set(sanitize_flags + ["-pedantic"]) + cxx_flags = [ + "-Wall", + "-Wextra", + "-Werror", + "-fstack-protector", + "-Walloca", + "-Wdeprecated", + "-Wimplicit-fallthrough", + "-fdiagnostics-show-template-tree", + "-Wignored-qualifiers", + "-Wmissing-field-initializers", + "-Wshadow", + "-fno-omit-frame-pointer", + "-pedantic", + ] + sanitize_flags + linker_flags = sanitize_flags + cuda_flags = [ + f"--compiler-options={flag}" + for flag in cxx_flags + if flag not in cxx_only_flags + ] + argv = [ + # legate args + f"--LEGATE_ARCH={arch}", + "--build-type=debug-sanitizer", + "--cmake-generator=Ninja", + # compilers and flags + "--with-cc=gcc", + "--with-cxx=g++", + "--with-cudac=nvcc", + "--CFLAGS=-O0 -g -g3", + "--CXXFLAGS=-O0 -g -g3", + "--CUDAFLAGS=-O0 -g -lineinfo -Xcompiler -O0 -Xcompiler -g3", + # common options + "--with-python", + "--with-tests", + # compiler flags + "--legate-cxx-flags=" + " ".join(cxx_flags), + "--legate-cuda-flags=" + " ".join(cuda_flags), + "--legate-linker-flags=" + " ".join(linker_flags), + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-ci-linux-gcc-py-pkgs-debug.py b/config/examples/arch-ci-linux-gcc-py-pkgs-debug.py new file mode 100755 
index 0000000000..2656e24291 --- /dev/null +++ b/config/examples/arch-ci-linux-gcc-py-pkgs-debug.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + arch = Path(__file__).stem + cxx_only_flags = {"-pedantic"} + cxx_flags = [ + "-Wall", + "-Wextra", + "-Werror", + "-fstack-protector", + "-Walloca", + "-Wdeprecated", + "-Wimplicit-fallthrough", + "-fdiagnostics-show-template-tree", + "-Wignored-qualifiers", + "-Wmissing-field-initializers", + "-Wshadow", + "-pedantic", + ] + cuda_flags = [ + f"--compiler-options={flag}" + for flag in cxx_flags + if flag not in cxx_only_flags + ] + argv = [ + # legate args + f"--LEGATE_ARCH={arch}", + "--build-type=debug", + "--cmake-generator=Ninja", + # compilers and flags + "--with-cc=gcc", + "--with-cxx=g++", + "--with-cudac=nvcc", + "--CFLAGS=-O0 -g -g3", + "--CXXFLAGS=-O0 -g -g3", + "--CUDAFLAGS=-O0 -g -lineinfo -Xcompiler -O0 -Xcompiler -g3", + # common options + "--with-python", + "--with-tests", + # compiler flags + "--legate-cxx-flags=" + " ".join(cxx_flags), + "--legate-cuda-flags=" + " ".join(cuda_flags), + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-ci-linux-gcc-py-pkgs-release-debug.py b/config/examples/arch-ci-linux-gcc-py-pkgs-release-debug.py new file mode 100755 index 0000000000..a0967d9818 --- /dev/null +++ b/config/examples/arch-ci-linux-gcc-py-pkgs-release-debug.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + arch = Path(__file__).stem + argv = [ + # legate args + f"--LEGATE_ARCH={arch}", + "--build-type=relwithdebinfo", + "--cmake-generator=Ninja", + # compilers and flags + "--with-cc=gcc", + "--with-cxx=g++", + "--with-cudac=nvcc", + # common options + "--with-python", + "--with-tests", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-ci-linux-gcc-py-pkgs-release.py b/config/examples/arch-ci-linux-gcc-py-pkgs-release.py new file mode 100755 index 0000000000..298214e144 --- /dev/null +++ b/config/examples/arch-ci-linux-gcc-py-pkgs-release.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + arch = Path(__file__).stem + cxx_only_flags = {"-pedantic"} + cxx_flags = [ + "-Wall", + "-Wextra", + "-Werror", + "-Walloca", + "-Wdeprecated", + "-Wimplicit-fallthrough", + "-fdiagnostics-show-template-tree", + "-Wignored-qualifiers", + "-Wmissing-field-initializers", + "-Wshadow", + "-pedantic", + "-fstack-protector-strong", + ] + cuda_flags = [ + f"--compiler-options={flag}" + for flag in cxx_flags + if flag not in cxx_only_flags + ] + argv = [ + # legate 
args + f"--LEGATE_ARCH={arch}", + "--build-type=release", + "--cmake-generator=Ninja", + # compilers and flags + "--with-cc=gcc", + "--with-cxx=g++", + "--with-cudac=nvcc", + "--CFLAGS=-O3", + "--CXXFLAGS=-O3", + "--CUDAFLAGS=-O3", + # common options + "--with-python", + "--with-tests", + # compiler flags + "--legate-cxx-flags=" + " ".join(cxx_flags), + "--legate-cuda-flags=" + " ".join(cuda_flags), + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-darwin-debug.py b/config/examples/arch-darwin-debug.py new file mode 100755 index 0000000000..cd64208908 --- /dev/null +++ b/config/examples/arch-darwin-debug.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + "--with-cc=clang", + "--with-cxx=clang++", + "--build-type=debug", + "--CFLAGS=-O0 -g3", + "--CXXFLAGS=-O0 -g3", + "--legate-cxx-flags=-Wall -Werror -fsanitize=address,undefined,bounds", # noqa: E501 + "--legate-linker-flags=-fsanitize=address,undefined,bounds -fno-sanitize-recover=undefined", # noqa: E501 + "--legion-bounds-check", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-darwin-py-debug.py b/config/examples/arch-darwin-py-debug.py new file mode 100755 index 0000000000..8aa8127dc0 --- /dev/null +++ b/config/examples/arch-darwin-py-debug.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + "--with-cc=clang", + "--with-cxx=clang++", + "--build-type=debug", + "--CFLAGS=-O0 -g3", + "--CXXFLAGS=-O0 -g3", + "--legate-cxx-flags=-Wall -Werror", + "--legion-bounds-check", + "--with-python", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-darwin-py-release.py b/config/examples/arch-darwin-py-release.py new file mode 100755 index 0000000000..64b27d7dd2 --- /dev/null +++ b/config/examples/arch-darwin-py-release.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + "--with-cc=clang", + "--with-cxx=clang++", + "--build-type=release", + "--CFLAGS=-O3", + "--CXXFLAGS=-O3", + "--with-python", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-dgx-superpod-debug.py b/config/examples/arch-dgx-superpod-debug.py new file mode 100755 index 0000000000..7467018298 --- /dev/null +++ 
b/config/examples/arch-dgx-superpod-debug.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + # Specify the build type and enable extensive debugging + "--build-type=debug", + "--legion-bounds-check", + # Enable GPUs + "--with-cuda", + "--cuda-arch=ampere", + # Enable UCX + "--with-ucx", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-legion-src-dir.py b/config/examples/arch-legion-src-dir.py new file mode 100755 index 0000000000..c19364d2e8 --- /dev/null +++ b/config/examples/arch-legion-src-dir.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + "--with-tests", + "--build-type=release", + "--with-legion-src-dir=../legion", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-linux-cuda-local-debug.py b/config/examples/arch-linux-cuda-local-debug.py new file mode 100755 index 0000000000..f92e7aee25 --- /dev/null +++ b/config/examples/arch-linux-cuda-local-debug.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + "--build-type=debug", + "--with-cuda", + "--with-cuda-dir=/usr/local/cuda", + "--with-cudac=/usr/local/cuda/bin/nvcc", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/examples/arch-linux-py-cuda-pkgs-debug.py b/config/examples/arch-linux-py-cuda-pkgs-debug.py new file mode 100755 index 0000000000..4564d0acbb --- /dev/null +++ b/config/examples/arch-linux-py-cuda-pkgs-debug.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main() -> int: + argv = [ + f"--LEGATE_ARCH={Path(__file__).stem}", + "--with-python", + "--with-cuda", + "--with-ucx", + "--with-openmp", + "--build-type=debug", + "--with-tests", + "--with-docs", + ] + sys.argv[1:] + return basic_configure(tuple(argv), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/config/legate_internal/__init__.py b/config/legate_internal/__init__.py new file mode 100644 index 0000000000..d7cdd7b63e --- /dev/null +++ b/config/legate_internal/__init__.py @@ -0,0 +1,4 @@ +# SPDX-FileCopyrightText: Copyright (c) 
2021-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations diff --git a/config/legate_internal/gmakevariables.in b/config/legate_internal/gmakevariables.in new file mode 100644 index 0000000000..c4938d56f8 --- /dev/null +++ b/config/legate_internal/gmakevariables.in @@ -0,0 +1,80 @@ +# -*- mode: makefile-gmake -*- +# WARNING: this file was generated by config/aedifix/config.py. +# Any modifications may be lost when configure is next invoked! + +.NOTPARALLEL: + +MAKEFLAGS += --no-builtin-rules + +export SHELL ?= /usr/bin/env bash +export AWK ?= awk + +export PYTHON ?= @AEDIFIX_PYTHON_EXECUTABLE@ + +export CMAKE ?= @CMAKE_COMMAND@ +export CMAKE_BUILD_PARALLEL_LEVEL ?= @CMAKE_BUILD_PARALLEL_LEVEL@ +export CMAKE_GENERATOR ?= @CMAKE_GENERATOR@ + +LEGATE_BUILD_COMMAND ?= $(CMAKE) --build $(LEGATE_DIR)/$(LEGATE_ARCH)/cmake_build +LEGATE_INSTALL_COMMAND ?= $(CMAKE) --install $(LEGATE_DIR)/$(LEGATE_ARCH)/cmake_build + +ifeq ($(strip $(PREFIX)),) +LEGATE_INSTALL_PREFIX_COMMAND = # nothing +else +LEGATE_INSTALL_PREFIX_COMMAND = --prefix $(PREFIX) +endif + +ifndef NINJA_STATUS +LEGATE_ARCH_COLOR = $(shell $(LEGATE_DIR)/scripts/select_arch_color.py) +COLOR_ARCH = $(shell $(CMAKE) -E cmake_echo_color --switch=$(COLOR) --$(LEGATE_ARCH_COLOR) $(LEGATE_ARCH)) +export NINJA_STATUS = [%f/%t] $(COLOR_ARCH): $(SOME_UNDEFINED_VARIABLE_TO_ADD_A_SPACE) +endif + +.PHONY: default_help +default_help: + @printf "Usage: make [MAKE_OPTIONS] [target] (see 'make --help' for MAKE_OPTIONS)\n" + @printf "" + @$(AWK) ' \ + { \ + if ($$0 ~ /^.PHONY: [a-zA-Z\-\0-9]+$$/) { \ + helpCommand = substr($$0, index($$0, ":") + 2); \ + if (helpMessage) { \ + printf "\033[36m%-20s\033[0m %s\n", helpCommand, helpMessage; \ + helpMessage = ""; \ + } \ + } else if ($$0 ~ /^[a-zA-Z\-\0-9.]+:/) { \ + helpCommand = substr($$0, 0, index($$0, ":")); \ + if (helpMessage) { \ + printf "\033[36m%-20s\033[0m %s\n", helpCommand, helpMessage; \ + helpMessage = ""; \ + } \ + } else if ($$0 ~ /^##/) { \ + if (helpMessage) { \ + helpMessage = helpMessage"\n "substr($$0, 3); \ + } else { \ + helpMessage = substr($$0, 3); \ + } \ + } else { \ + if (helpMessage) { \ + print "\n "helpMessage"\n"; \ + } \ + helpMessage = ""; \ + } \ + }' \ + $(MAKEFILE_LIST) + +.PHONY: default_all +default_all: + @$(LEGATE_BUILD_COMMAND) $(LEGATE_CMAKE_ARGS) + +.PHONY: default_clean +default_clean: + @$(LEGATE_BUILD_COMMAND) --target clean $(LEGATE_CMAKE_ARGS) + +.PHONY: default_install +default_install: + @$(LEGATE_INSTALL_COMMAND) $(LEGATE_INSTALL_PREFIX_COMMAND) $(LEGATE_CMAKE_ARGS) + +.PHONY: default_package +default_package: + @$(LEGATE_BUILD_COMMAND) --target package $(LEGATE_CMAKE_ARGS) diff --git a/config/legate_internal/main_package.py b/config/legate_internal/main_package.py new file mode 100644 index 0000000000..c9b0914fbe --- /dev/null +++ b/config/legate_internal/main_package.py @@ -0,0 +1,512 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import re +import sys +import shutil +import contextlib +from pathlib import Path +from typing import TYPE_CHECKING, Any, Final, cast + +from ..aedifix import ( + CMAKE_VARIABLE, + ArgSpec, + CMakeBool, + CMakeExecutable, + CMakeList, + CMakeString, + ConfigArgument, + ConfigurationManager, + MainPackage, +) +from ..aedifix.package.packages.cal import CAL +from ..aedifix.package.packages.cmake import CMake +from ..aedifix.package.packages.cuda import CUDA +from ..aedifix.package.packages.hdf5 import HDF5 +from ..aedifix.package.packages.legion import Legion +from ..aedifix.package.packages.nccl import NCCL +from ..aedifix.package.packages.python import Python + +if TYPE_CHECKING: + from collections.abc import Sequence + + +class Legate(MainPackage): + legate_BUILD_DOCS: Final = ConfigArgument( + name="--with-docs", + spec=ArgSpec( + dest="with_docs", type=bool, help="Generate docs build makefile" + ), + cmake_var=CMAKE_VARIABLE("legate_BUILD_DOCS", CMakeBool), + ) + legate_BUILD_TESTS: Final = ConfigArgument( + name="--with-tests", + spec=ArgSpec(dest="with_tests", type=bool, help="Build tests"), + cmake_var=CMAKE_VARIABLE("legate_BUILD_TESTS", CMakeBool), + ) + legate_BUILD_EXAMPLES: Final = ConfigArgument( + name="--with-examples", + spec=ArgSpec(dest="with_examples", type=bool, help="Build examples"), + cmake_var=CMAKE_VARIABLE("legate_BUILD_EXAMPLES", CMakeBool), + ) + legate_BUILD_BENCHMARKS: Final = ConfigArgument( + name="--with-benchmarks", + spec=ArgSpec( + dest="with_benchmarks", type=bool, help="Build benchmarks" + ), + cmake_var=CMAKE_VARIABLE("legate_BUILD_BENCHMARKS", CMakeBool), + ) + legate_CXX_FLAGS: Final = ConfigArgument( + name="--legate-cxx-flags", + spec=ArgSpec( + dest="legate_cxx_flags", nargs=1, help="C++ flags for Legate" + ), + cmake_var=CMAKE_VARIABLE("legate_CXX_FLAGS", CMakeList), + ) + legate_CUDA_FLAGS: Final = ConfigArgument( + name="--legate-cuda-flags", + spec=ArgSpec( + dest="legate_cuda_flags", nargs=1, help="CUDA flags for Legate" + ), + cmake_var=CMAKE_VARIABLE("legate_CUDA_FLAGS", CMakeList), + ) + legate_LINKER_FLAGS: Final = ConfigArgument( + name="--legate-linker-flags", + spec=ArgSpec( + dest="legate_linker_flags", nargs=1, help="Linker flags for Legate" + ), + cmake_var=CMAKE_VARIABLE("legate_LINKER_FLAGS", CMakeList), + ) + BUILD_MARCH: Final = ConfigArgument( + name="--build-march", + spec=ArgSpec( + dest="build_march", + default="native", + help="CPU architecture to build for", + ), + cmake_var=CMAKE_VARIABLE("BUILD_MARCH", CMakeString), + ) + LEGATE_CLANG_TIDY: Final = ConfigArgument( + name="--clang-tidy-executable", + spec=ArgSpec( + dest="clang_tidy_executable", + type=Path, + default=shutil.which("clang-tidy"), + help="clang-tidy executable", + ), + cmake_var=CMAKE_VARIABLE("LEGATE_CLANG_TIDY", CMakeExecutable), + ) + legate_LEGION_REPOSITORY: Final = CMAKE_VARIABLE( + "legate_LEGION_REPOSITORY", CMakeString + ) + legate_LEGION_BRANCH: Final = CMAKE_VARIABLE( + "legate_LEGION_BRANCH", CMakeString + ) + legate_ENABLE_SANITIZERS: Final = CMAKE_VARIABLE( + "legate_ENABLE_SANITIZERS", CMakeBool + ) + legate_IGNORE_INSTALLED_PACKAGES: Final = ConfigArgument( + name="--ignore-installed-packages", + spec=ArgSpec( + dest="ignore_installed_packages", + type=bool, + default=True, + help=( + "If true, when deciding to search for, or download third-party" + " packages, never search and always download. 
WARNING: "
+                "setting this option to false may make your builds "
+                "non-idempotent! Prior builds (and installations) may affect "
+                "the current ones in non-trivial ways. Reconfiguring may "
+                "yield different results."
+            ),
+        ),
+        cmake_var=CMAKE_VARIABLE(
+            "legate_IGNORE_INSTALLED_PACKAGES", CMakeBool
+        ),
+    )
+    legate_USE_CPROFILE: Final = ConfigArgument(
+        name="--with-cprofile",
+        spec=ArgSpec(
+            dest="use_cprofile",
+            type=bool,
+            help="If true, Legate will be built with detailed cProfile output."
+            " In particular, this flag will enable profiling Cython code"
+            " in Legate. WARNING: When enabled, this may negatively affect"
+            " program performance. cProfile is a Python built-in module"
+            " for profiling runtime performance, measuring function"
+            " calls and execution time in Python programs.",
+        ),
+        cmake_var=CMAKE_VARIABLE("legate_USE_CPROFILE", CMakeBool),
+    )
+    legate_USE_CAL: Final = CMAKE_VARIABLE("legate_USE_CAL", CMakeBool)
+    legate_USE_HDF5: Final = CMAKE_VARIABLE("legate_USE_HDF5", CMakeBool)
+    legate_USE_HDF5_VFD_GDS: Final = ConfigArgument(
+        name="--with-hdf5-vfd-gds",
+        spec=ArgSpec(
+            dest="with_hdf5_vfd_gds",
+            type=bool,
+            help=(
+                "Enable VFD GPU Direct Storage support in Legate IO. Support "
+                "for this is automatically detected based on the availability "
+                "of both CUDA and HDF5."
+            ),
+        ),
+        cmake_var=CMAKE_VARIABLE("legate_USE_HDF5_VFD_GDS", CMakeBool),
+    )
+    legate_USE_NCCL: Final = CMAKE_VARIABLE("legate_USE_NCCL", CMakeBool)
+
+    def __init__(
+        self, manager: ConfigurationManager, argv: Sequence[str]
+    ) -> None:
+        r"""Construct a Legate main package.
+
+        Parameters
+        ----------
+        manager : ConfigurationManager
+            The configuration manager to manage this package.
+        argv : Sequence[str]
+            The command line arguments for this configuration.
+        """
+        from scripts.get_legate_dir import get_legate_dir
+
+        legate_dir = Path(get_legate_dir())
+        super().__init__(
+            manager=manager,
+            argv=argv,
+            name="Legate",
+            arch_name="LEGATE_ARCH",
+            project_dir_name="LEGATE_DIR",
+            project_dir_value=legate_dir,
+            project_config_file_template=(
+                Path(__file__).parent / "gmakevariables.in"
+            ),
+            project_src_dir=legate_dir / "src",
+            default_arch_file_path=(
+                legate_dir / "scripts" / "get_legate_arch.py"
+            ),
+            dependencies=(CMake, Legion, Python, CAL, HDF5, NCCL, CUDA),
+        )
+
+    @classmethod
+    def from_argv(
+        cls, manager: ConfigurationManager, argv: Sequence[str]
+    ) -> Legate:
+        r"""Construct a Legate main package from argv.
+
+        Parameters
+        ----------
+        manager : ConfigurationManager
+            The configuration manager to manage this package.
+        argv : Sequence[str]
+            The command line arguments for this configuration.
+
+        Returns
+        -------
+        package : Legate
+            The constructed main package.
+        """
+        return cls(manager, argv)
+
+    def maybe_uninstall_legate(self) -> None:
+        r"""Uninstall Legate if --with-clean is given on the command
+        line.
+        """
+        # Returns all the packages in the format:
+        #
+        # Package Version Editable project location
+        # ------- ------- -------------------------
+        # foo 0.7.16
+        # bar 2.4.1
+        # baz 2.15.0 /path/to/baz
+        # ...
+        try:
+            import pip  # noqa: F401
+        except ModuleNotFoundError as mnfe:
+            self.log(
+                f"pip does not appear to be installed: '{mnfe}'. Nothing to do"
+            )
+            return
+
+        installed_packages = self.log_execute_command(
+            [sys.executable, "-m", "pip", "list"]
+        ).stdout.splitlines()
+        # skip the "Package Version" header and divider lines
+        installed_packages = installed_packages[2:]
+        package_names = (
+            line.split(maxsplit=1)[0] for line in installed_packages
+        )
+        found_legate = any(name.startswith("legate") for name in package_names)
+        self.log(f"Have pre-existing legate installation: {found_legate}")
+
+        if not found_legate:
+            return
+
+        if self.cl_args.with_clean.value:
+            cmd = [sys.executable, "-m", "pip", "uninstall", "--yes", "legate"]
+            str_cmd = " ".join(cmd)
+            self.log_warning(
+                f"Running {str_cmd!r} to uninstall legate as part of a clean "
+                "build."
+            )
+            self.log_execute_command(cmd)
+        else:
+            self.log(
+                "No clean requested, leaving potentially installed legate "
+                "in place"
+            )
+            self.log_warning(
+                "You appear to have previously installed Legate, which "
+                "may interfere with the current and/or future "
+                "(re-)configurations of Legate. Issues stemming from "
+                "this are likely to manifest at build-time, not "
+                "configure-time, and so if you encounter confusing build "
+                "errors the culprit is likely this.\n"
+                "\n"
+                "The user is strongly encouraged to run either:\n"
+                "\n"
+                f"$ {sys.executable} -m pip uninstall --yes legate\n"
+                "\n"
+                "(then retry configuration), or, re-run configuration "
+                f"with the {self.WITH_CLEAN.name} flag."
+            )
+
+    def setup(self) -> None:
+        r"""Set up Legate."""
+        self.log_execute_func(self.maybe_uninstall_legate)
+        super().setup()
+
+    def check_min_cmake_version(self) -> None:
+        r"""Assert the minimum cmake version is met."""
+        try:
+            from packaging.version import parse as version_parse
+        except ModuleNotFoundError:
+            # error: All conditional function variants must have identical
+            # signatures
+            #
+            # Yes, I know, but this is good enough.
+            def version_parse(  # type: ignore[misc]
+                version: str,
+            ) -> tuple[int, ...]:
+                args = (a.strip() for a in version.split("."))
+                return tuple(int(a) for a in args if a)
+
+        min_ver_re = re.compile(
+            r"cmake_minimum_required\(.*VERSION\s+([\d\.]+)"
+        )
+        cmakelists_txt = self.project_src_dir / "CMakeLists.txt"
+        with cmakelists_txt.open() as fd:
+            for line in fd:
+                if re_match := min_ver_re.search(line):
+                    min_ver = re_match.group(1)
+                    break
+            else:
+                msg = (
+                    "Failed to parse minimum required CMake version from"
+                    f" {cmakelists_txt}"
+                )
+                raise RuntimeError(msg)
+
+        self.log(f"Minimum cmake version required: {min_ver}")
+        cmake = cast(CMake, self.deps.CMake)
+        if version_parse(cmake.version) < version_parse(min_ver):
+            cmake_exe = self.manager.get_cmake_variable(cmake.CMAKE_COMMAND)
+            msg = (
+                f"CMake executable {cmake_exe} too old!
Expected version " + f"{min_ver}, have {cmake.version}" + ) + raise RuntimeError(msg) + + def configure_legate_variables(self) -> None: + r"""Configure the general variables for Legate.""" + self.append_flags_if_set( + self.legate_CXX_FLAGS, self.cl_args.legate_cxx_flags + ) + self.append_flags_if_set( + self.legate_LINKER_FLAGS, self.cl_args.legate_linker_flags + ) + self.append_flags_if_set( + self.legate_CUDA_FLAGS, self.cl_args.legate_cuda_flags + ) + self.set_flag_if_user_set( + self.legate_BUILD_DOCS, self.cl_args.with_docs + ) + self.set_flag_if_user_set( + self.legate_BUILD_TESTS, self.cl_args.with_tests + ) + self.set_flag_if_user_set( + self.legate_BUILD_EXAMPLES, self.cl_args.with_examples + ) + self.set_flag_if_user_set( + self.legate_BUILD_BENCHMARKS, self.cl_args.with_benchmarks + ) + self.set_flag_if_user_set(self.BUILD_MARCH, self.cl_args.build_march) + build_type = self.cl_args.build_type + if "sanitizer" in build_type.value: + self.manager.set_cmake_variable( + self.legate_ENABLE_SANITIZERS, True + ) + elif build_type.cl_set: + self.manager.set_cmake_variable( + self.legate_ENABLE_SANITIZERS, False + ) + ignore_packages = self.cl_args.ignore_installed_packages + if (not ignore_packages.value) and ignore_packages.cl_set: + flag_name = ignore_packages.name.replace("_", "-") + self.log_warning( + f"Setting --{flag_name} to false may make your builds " + "non-idempotent! Prior builds (and installations) may affect " + "the current one in non-trivial ways." + "\n" + "\n" + "** If you are a developer, building a development build, " + "this is probably not what you want. Please consider removing " + "this flag from your command-line arguments. **" + "\n" + "\n" + "For example, consider the following:" + "\n" + "\n" + f" 1. ./configure --{flag_name}=0 --with-foo (CMake downloads and builds libfoo.so)\n" # noqa: E501 + f" 2. pip install . (CMake -- as a byproduct of installing {self.project_name} -- installs libfoo.so)\n" # noqa: E501 + " 3. ./reconfigure... (CMake now picks up installed libfoo.so instead of reusing the downloaded one)\n" # noqa: E501 + "\n" + "The package can now no longer be built." + "\n" + "\n" + "CMake still has a local target libfoo.so (from step 1), but " + "now due to step 3, libfoo.so is considered 'imported' " + "(because CMake found the installed version first). Imported " + "packages provide no recipes to build their products " + "(libfoo.so) and so the build is broken." 
+ ) + self.set_flag(self.legate_IGNORE_INSTALLED_PACKAGES, ignore_packages) + + def configure_legion(self) -> None: + r"""Configure Legion for use with Legate.""" + self.set_flag_if_user_set( + self.legate_LEGION_BRANCH, self.cl_args.legion_branch + ) + if self.deps.Python.state.enabled(): + self.manager.set_cmake_variable( + cast(Legion, self.deps.Legion).Legion_BUILD_BINDINGS, False + ) + + def configure_clang_tidy(self) -> None: + r"""Configure clang-tidy variables.""" + self.set_flag_if_user_set( + self.LEGATE_CLANG_TIDY, self.cl_args.clang_tidy_executable + ) + + def configure_cal(self) -> None: + r"""Configure CAL variables.""" + state = self.deps.CAL.state + if state.enabled(): + self.manager.set_cmake_variable(self.legate_USE_CAL, True) + elif state.explicitly_disabled(): + self.manager.set_cmake_variable(self.legate_USE_CAL, False) + + def configure_cprofile(self) -> None: + r"""Configure cprofile variables.""" + self.set_flag_if_user_set( + self.legate_USE_CPROFILE, self.cl_args.use_cprofile + ) + + def configure_hdf5(self) -> None: + r"""Configure HDF5 variables.""" + hdf5_state = self.deps.HDF5.state + if hdf5_state.enabled(): + self.manager.set_cmake_variable(self.legate_USE_HDF5, True) + elif hdf5_state.explicitly_disabled(): + self.manager.set_cmake_variable(self.legate_USE_HDF5, False) + + self.set_flag_if_user_set( + self.legate_USE_HDF5_VFD_GDS, self.cl_args.with_hdf5_vfd_gds + ) + + def configure_nccl(self) -> None: + r"""Configure NCCL variables.""" + state = self.deps.NCCL.state + if state.enabled(): + self.manager.set_cmake_variable(self.legate_USE_NCCL, True) + elif state.explicitly_disabled(): + self.manager.set_cmake_variable(self.legate_USE_NCCL, False) + + def configure(self) -> None: + r"""Configure Legate.""" + super().configure() + self.log_execute_func(self.check_min_cmake_version) + self.log_execute_func(self.configure_legate_variables) + self.log_execute_func(self.configure_legion) + self.log_execute_func(self.configure_clang_tidy) + self.log_execute_func(self.configure_cal) + self.log_execute_func(self.configure_cprofile) + self.log_execute_func(self.configure_hdf5) + self.log_execute_func(self.configure_nccl) + + def _summarize_flags(self) -> list[tuple[str, Any]]: + def make_summary( + name: str, cmake_varname: ConfigArgument + ) -> tuple[str, str]: + flags = self.manager.get_cmake_variable(cmake_varname) + match flags: + case list() | tuple(): + flags_str = " ".join(flags) + case None: + flags_str = "" + case str(): + flags_str = flags + case _: + raise TypeError(type(flags)) + return (f"{name} Flags", flags_str.replace(";", " ")) + + return [ + make_summary("C++", self.legate_CXX_FLAGS), + make_summary("Linker", self.legate_LINKER_FLAGS), + make_summary("CUDA", self.legate_CUDA_FLAGS), + ] + + def _summarize_python(self) -> list[tuple[str, Any]]: + python = cast(Python, self.deps.Python) + py_enabled = python.state.enabled() + lines: list[tuple[str, Any]] = [("Python bindings", py_enabled)] + if py_enabled: + with contextlib.suppress(AttributeError): + lines.append(("Python library path", python.lib_path)) + with contextlib.suppress(AttributeError): + lines.append(("Python library version", python.lib_version)) + return lines + + def _summarize_misc(self) -> list[tuple[str, Any]]: + m = self.manager + return [ + ("Tests", m.get_cmake_variable(self.legate_BUILD_TESTS)), + ("Docs", m.get_cmake_variable(self.legate_BUILD_DOCS)), + ("Examples", m.get_cmake_variable(self.legate_BUILD_EXAMPLES)), + ("Benchmarks", 
m.get_cmake_variable(self.legate_BUILD_BENCHMARKS)), + ("CAL", m.get_cmake_variable(self.legate_USE_CAL)), + ("HDF5", m.get_cmake_variable(self.legate_USE_HDF5)), + ( + "HDF5 VFD GDS", + m.get_cmake_variable(self.legate_USE_HDF5_VFD_GDS), + ), + ("NCCL", m.get_cmake_variable(self.legate_USE_NCCL)), + ] + + def summarize(self) -> str: + r"""Summarize Legate. + + Returns + ------- + summary : str + The summary of Legate. + """ + lines = [] + for summarizer in ( + self._summarize_flags, + self._summarize_misc, + self._summarize_python, + ): + lines.extend(summarizer()) + return self.create_package_summary(lines) diff --git a/config/py.typed b/config/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/configure b/configure new file mode 100755 index 0000000000..52ee3b4bdb --- /dev/null +++ b/configure @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +import sys + +MIN_PY = (3, 10) + +if sys.version_info < MIN_PY: + banner_len = 80 + banner_sep = "*" + banner = banner_sep * banner_len + + def _banner_print(line): + print(banner_sep + line.center(banner_len - 2) + banner_sep) # noqa: T201 + + print(banner) # noqa: T201 + _banner_print( + "Python version " + + ".".join(map(str, MIN_PY)) + + "+ is required to run configure" + ) + info = sys.version_info + _banner_print( + "Current Python version: " + + ".".join(map(str, [info.major, info.minor, info.micro])) + ) + print(banner) # noqa: T201 + sys.exit(1) + +import os # noqa: E402 + +sys.path.insert(0, os.path.abspath("config")) # noqa: PTH100 + +from config.aedifix.main import basic_configure # noqa: E402 +from config.legate_internal.main_package import Legate # noqa: E402 + + +def main(): # noqa: D103 + return basic_configure(tuple(sys.argv[1:]), Legate) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/continuous_integration/requirements-build.txt b/continuous_integration/requirements-build.txt new file mode 100644 index 0000000000..4b097feac4 --- /dev/null +++ b/continuous_integration/requirements-build.txt @@ -0,0 +1,10 @@ +--extra-index-url=https://pypi.anaconda.org/rapidsai-wheels-nightly/simple +--extra-index-url=https://pypi.nvidia.com +cmake>=3.26.4,!=3.30.0 +ninja +nvidia-nccl-cu12 +libucx-cu12 +nvidia-libcal-cu12 +scikit-build-core[pyproject]>=0.10.0 +setuptools_scm +cython diff --git a/continuous_integration/scripts/build b/continuous_integration/scripts/build new file mode 100755 index 0000000000..094021a84b --- /dev/null +++ b/continuous_integration/scripts/build @@ -0,0 +1,192 @@ +#!/usr/bin/env bash +set -eou pipefail +{ set +x; } 2>/dev/null + +export LEGATE_SCRIPT_NAME="build" + +. "$(dirname "$0")/tools/pretty_printing.bash" + +setup_legate_env() { + set -eou pipefail + + # rename REPO_DIR + # shellcheck disable=SC2154 + export LEGATE_DIR="${REPO_DIR}" + + # HACK: this should be done much better, and in a much more transparent manner... but + # alas, we no longer have any control of the setup phases in github ci, so we must do + # it here. 
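+  # The arch name is assembled below from BUILD_TYPE, COMPILER, and LEGATE_BUILD_MODE.
+  # Illustrative example (assumed values, not guaranteed): BUILD_TYPE=ci on Linux with
+  # COMPILER=gcc and LEGATE_BUILD_MODE=release-gcc gives
+  # LEGATE_ARCH=arch-ci-linux-gcc-py-pkgs-release, since ${LEGATE_BUILD_MODE%-gcc}
+  # strips a trailing "-gcc".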
+  if [[ -z "${LEGATE_ARCH:-}" ]]; then
+    # shellcheck disable=SC2154
+    case "${BUILD_TYPE}" in
+      ci|nightly|profiler)
+        # shellcheck disable=SC2154
+        local locase_uname
+        locase_uname=$(uname | tr '[:upper:]' '[:lower:]')
+        # shellcheck disable=SC2154
+        LEGATE_ARCH="arch-ci-${locase_uname}-${COMPILER:-gcc}-py-pkgs-${LEGATE_BUILD_MODE%-gcc}"
+        ;;
+      docs)
+        LEGATE_ARCH='arch-ci-linux-docs'
+        ;;
+      *) return 1;;
+    esac
+    export LEGATE_ARCH
+  fi
+  echo "export LEGATE_DIR=${LEGATE_DIR}"
+  echo "export LEGATE_ARCH=${LEGATE_ARCH}"
+
+  # shellcheck disable=SC2154
+  mkdir -p "${ARTIFACTS_DIR}/${LEGATE_ARCH}"
+}
+
+setup_profiler() {
+  cd "${LEGATE_DIR}"
+  LEGATE_GIT_DESCRIBE_TAG=$(git describe --tags | sed 's/-.*//')
+  LEGATE_GIT_DESCRIBE_NUMBER=$(git describe --tags | sed 's/.*-\([^-]*\)-[^-]*$/\1/')
+  LEGATE_GIT_DESCRIBE_HASH="g$(git rev-parse --short HEAD)"
+  export LEGATE_GIT_DESCRIBE_TAG=${LEGATE_GIT_DESCRIBE_TAG}
+  export LEGATE_GIT_DESCRIBE_NUMBER=${LEGATE_GIT_DESCRIBE_NUMBER}
+  # LEGATE_GIT_DESCRIBE_HASH already carries the "g" prefix from above; do not
+  # prepend another one here.
+  export LEGATE_GIT_DESCRIBE_HASH=${LEGATE_GIT_DESCRIBE_HASH}
+
+  LEGION_GIT_REV=$(jq -r '.packages.Legion.git_tag' "${LEGATE_DIR}/src/cmake/versions/legion_version.json")
+  LEGION_GIT_URL=$(jq -r '.packages.Legion.git_url' "${LEGATE_DIR}/src/cmake/versions/legion_version.json")
+  export LEGION_GIT_REV=${LEGION_GIT_REV}
+  export LEGION_GIT_URL=${LEGION_GIT_URL}
+
+  export CONDA_ROOT="/tmp/conda-croot/legate-profiler"
+  export CONDA_OUTPUT="/tmp/conda-build/legate-profiler"
+}
+
+build_legate_profiler() {
+  set -eo pipefail;
+
+  run_command 'Profiler Build Initial config' setup_profiler
+
+  mkdir -p /tmp/conda-build /tmp/conda-croot
+  local conda_build_args=();
+  conda_build_args+=(--override-channels);
+  conda_build_args+=(-c conda-forge);
+  conda_build_args+=(--croot "${CONDA_ROOT}");
+  conda_build_args+=(--output-folder "${CONDA_OUTPUT}");
+  conda_build_args+=(--numpy 1.22);
+
+  mamba install -y boa conda-verify python
+  mamba install -y -c conda-forge gcc gxx make pkg-config
+  conda mambabuild "${conda_build_args[@]}" conda/legate_profiler/dummy_legate
+  conda mambabuild "${conda_build_args[@]}" conda/legate_profiler
+
+  run_command 'Copy Profiler Artifacts' copy_profiler_artifacts
+}
+
+build_legate_release() {
+  set -eo pipefail;
+
+  mkdir -p /tmp/env_yaml /tmp/conda-build
+  local conda_build_args=();
+  conda_build_args+=(--quiet);
+  conda_build_args+=(--override-channels);
+
+  conda_build_args+=(-c legate/label/ucc140);
+  conda_build_args+=(-c conda-forge);
+
+  conda_build_args+=(--croot /tmp/conda-croot/legate);
+  conda_build_args+=(--numpy 1.22);
+  conda_build_args+=(--no-test);
+  conda_build_args+=(--no-verify);
+  conda_build_args+=(--no-build-id);
+  conda_build_args+=("--build-id-pat=''");
+  conda_build_args+=(--no-include-recipe);
+  conda_build_args+=(--no-anaconda-upload);
+  conda_build_args+=(--output-folder /tmp/conda-build/legate);
+
+  GPU_ENABLED=true
+  [[ "${USE_CUDA:-}" = "OFF" ]] && GPU_ENABLED=false
+
+  UPLOAD_BUILD=true
+  [[ "${UPLOAD_ENABLED:-}" = "OFF" ]] && UPLOAD_BUILD=false
+
+  # shellcheck disable=SC2154
+  variantOpts=$(printf '{"build_mode_str": ["%s"], "gpu_enabled": ["%s"], "upload_enabled": ["%s"], "network": ["%s"], "python": ["%s"]}' \
+    "${LEGATE_BUILD_MODE}" "${GPU_ENABLED}" "${UPLOAD_BUILD}" "${LEGATE_NETWORK}" "${PYTHON_VERSION}")
+  conda_build_args+=(--variants "${variantOpts}")
+
+  # Conda is far too chatty by default, spewing a great deal of cruft. Silence it to
+  # only explicit warnings. If you want more verbose output, set this to 2 or higher.
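+  # (CONDA_VERBOSITY seems to correspond to conda's -v flags: 1 ~ -v, 2 ~ -vv,
+  # 3 ~ -vvv. This mapping is an assumption; `conda config --describe verbosity`
+  # has the authoritative description.)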
+ export CONDA_VERBOSITY=0 + # Use the new .conda format. + conda config --set conda_build.pkg_format 2 + # For whatever reason, the default buffering of conda/mamba is not sufficient, and + # leads to garbled output in CI (mixing conda output and whatever build.sh prints). So + # we need to force unbuffered output. + run_command 'Mamba Preamble' stdbuf -o0 -e0 conda mambabuild "${conda_build_args[@]}" conda/conda-build + + run_command 'Copy Artifacts' copy_release_artifacts +} + +copy_release_artifacts() { + echo Copying release artifacts + cp -r /tmp/out "${ARTIFACTS_DIR}" + cp -r /tmp/conda-build "${ARTIFACTS_DIR}" + ls -lahR "${ARTIFACTS_DIR}" +} + +copy_profiler_artifacts() { + echo Copying profiler artifacts + cp -r /tmp/conda-build "${ARTIFACTS_DIR}" + ls -lahR "${ARTIFACTS_DIR}" +} + +copy_docs_artifacts() { + echo Copying artifacts + cp -r "${LEGATE_DIR}"/"${LEGATE_ARCH}"/cmake_build/cpp/docs/legate/sphinx/* "${ARTIFACTS_DIR}/." +} + +build_docs() { + set -eou pipefail + + cd "${LEGATE_DIR}" + set +u + run_command 'Activate Conda' conda activate legate + run_command 'List conda env' conda list + set -u + run_command 'Dump configure.py' cat "./config/examples/${LEGATE_ARCH}.py" + run_command 'Configure Legate' "./config/examples/${LEGATE_ARCH}.py" + # Install Legate Python bindings so the Python docs build works + run_command 'pip install Legate' python3 -m pip install . -v + run_command 'Build Documentation' make docs + run_command 'Copy Artifacts' copy_docs_artifacts +} + +build_project() { + set -eou pipefail + + export LEGATE_CI=1 + export PYTHONUNBUFFERED=1 + export PATH="${PATH}:${REPO_DIR}/continuous_integration/scripts/tools" + + run_command "Source setup-util" . setup-utils + run_command "Init build environment" init_build_env "$@" + run_command "Setup Legate Env" setup_legate_env + + git config --global --add safe.directory "${LEGATE_DIR}/.git" + + # Set up the SCCACHE environment variables + export CI=true + source "${LEGATE_DIR}/continuous_integration/scripts/tools/legate-configure-sccache" + sccache --zero-stats + # Disable the CUDA compiler launcher to avoid issues with the realm CUDA kernels. + unset CMAKE_CUDA_COMPILER_LAUNCHER + + case "${BUILD_TYPE}" in + ci) build_legate_release;; + nightly) build_legate_release;; + profiler) build_legate_profiler;; + docs) build_docs;; + *) return 1;; + esac + + sccache --show-adv-stats +} + +(build_project "$@"); diff --git a/continuous_integration/scripts/build-legate b/continuous_integration/scripts/build-legate deleted file mode 100755 index c38c6cf384..0000000000 --- a/continuous_integration/scripts/build-legate +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash - -set_repo_dir() { - set -xeuo pipefail - - # Resolve the directory of the script - SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - - # Navigate to the parent of the parent of SCRIPT_DIR, then get the full path - REPO_DIR="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" - - export REPO_DIR - - export PATH="${PATH}:${REPO_DIR}/continuous_integration/scripts" - export ARTIFACTS_DIR="${REPO_DIR}/.artifacts" -} - -build_legate_ci() { - build-legate-cpp; - build-legate-wheel; - build-legate-conda; -} - -build_legate_release() { - mkdir -p /tmp/env_yaml /tmp/conda-build - - conda mambabuild --output-folder /tmp/conda-build -c nvidia/label/cuda-${CUDA_VERSION} -c conda-forge --no-include-recipe conda/conda-build -} - -copy_release_artifacts() { - echo Copying release artifacts - cp /tmp/conda-build/linux-64/legate-core-*.tar.bz2 "$ARTIFACTS_DIR" -} - -copy_ci_artifacts() { - echo Copying CI artifacts - - cp -r /tmp/out "$ARTIFACTS_DIR" - cp -r /tmp/conda-build "$ARTIFACTS_DIR" -} - -copy_docs_artifacts() { - echo Copying artifacts - cp -r $REPO_DIR/docs/legate/core/build/html/* "$ARTIFACTS_DIR/." -} - -build_docs() { - set -x; - cd $REPO_DIR; - - echo "Build doxygen documentation through install.py" - conda run -n legate /bin/bash -c "./install.py --docs" - - echo "Build documentation using Makefile" - cd $REPO_DIR/docs/legate/core - conda run -n legate /bin/bash -c "make html" - - echo "Run link checker" - cd $REPO_DIR/docs/legate/core - conda run -n legate /bin/bash -c "make linkcheck" -} - -build_legate() { - set -x; - - set_repo_dir; - . conda-utils; - . setup-utils; - - export BUILD_TYPE=$1 - - set -xeuo pipefail; - setup_build_env; - init_sccache; - cd $REPO_DIR; - - make-conda-env "$BUILD_TYPE"; - - set -xeo pipefail; - activate_conda_env; - conda_info; - - case "$BUILD_TYPE" in - ci) build_legate_ci && copy_ci_artifacts;; - release) build_legate_release && copy_release_artifacts;; - docs) build_docs && copy_docs_artifacts;; - *) return 1;; - esac -} - -(build_legate "$@"); \ No newline at end of file diff --git a/continuous_integration/scripts/build-legate-conda b/continuous_integration/scripts/build-legate-conda deleted file mode 100755 index 487dd41bc4..0000000000 --- a/continuous_integration/scripts/build-legate-conda +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env bash - -build_legate_conda_package() { - set -xeuo pipefail; - - local python_version="${PYTHON_VERSION:-}"; - - if [ -z "${python_version}" ]; then - python_version="$(python3 --version 2>&1 | cut -d' ' -f2 | cut -d'.' 
-f3 --complement)"; - fi - - # Patch post.py - if [[ "$(uname)" == "Darwin" ]]; then - patch "/usr/local/Caskroom/mambaforge/base/envs/legate/lib/python${python_version}/site-packages/conda_build/post.py" < "$REPO_DIR/continuous_integration/post_py.patch" - fi - - local conda_build_args=(); - conda_build_args+=(--override-channels); - conda_build_args+=(-c conda-forge); - conda_build_args+=(-c nvidia/label/cuda-${CUDA_VERSION}); - conda_build_args+=(--croot /tmp/conda-croot/legate_core); - conda_build_args+=(--numpy 1.22); - conda_build_args+=(--python ${python_version}); - conda_build_args+=(--no-test); - conda_build_args+=(--no-verify); - conda_build_args+=(--no-build-id); - conda_build_args+=("--build-id-pat=''"); - conda_build_args+=(--no-include-recipe); - conda_build_args+=(--no-anaconda-upload); - conda_build_args+=(--output-folder /tmp/conda-build/legate_core); - - GPU_ENABLED=true - [ "${USE_CUDA:-}" = "OFF" ] && GPU_ENABLED=false - - UCX_CONFIGURED=true - [ "${UCX_ENABLED:-}" = "OFF" ] && UCX_CONFIGURED=false - - conda_build_args+=(--variants "{gpu_enabled:${GPU_ENABLED},python:${python_version},ucx_configured:${UCX_CONFIGURED}}"); - - rm -rf /tmp/conda-croot/legate_core; - mkdir -p /tmp/conda-croot/legate_core; - rm -rf /tmp/conda-build/legate_core; - mkdir -p /tmp/conda-build/legate_core; - - # Synthesize new legate_core conda-build build.sh script - - cat < $REPO_DIR/conda/conda-build/conda_build_config.yaml -numpy: - - 1.22 -python: - - "${python_version}" -numpy_version: - - ">=1.22" -use_local_path: - - "true" -gpu_enabled: - - "${GPU_ENABLED}" -debug_build: - - "false" -cmake_version: - - ">=3.20.1,!=3.23.0" -package_version: - - "$(git -C $REPO_DIR describe --abbrev=0 --tags | $SED 's/[a-zA-Z]//g' | cut -d '.' -f -2).00" -ucx_configured: - - "${UCX_CONFIGURED}" -EOF -if [ "$UCX_ENABLED" = "ON" ]; then - cat <> $REPO_DIR/conda/conda-build/conda_build_config.yaml -ucx: - - ">=1.14" -EOF -fi - cat < $REPO_DIR/conda/conda-build/build.sh -echo "---------------------build.sh-------------------------" -set -x - -# Install legate_core C++ libs -tar -C "\$PREFIX" --exclude="*.a" --strip-components=1 -xf /tmp/out/legate_core-*-$(uname).tar.gz; -$SED -E -i "s@$CONDA_PREFIX@\$PREFIX@g" "\$PREFIX/share/Legion/cmake/LegionConfigCommon.cmake"; -$SED -E -i "s@$REPO_DIR/build/_CPack_Packages/$(uname)/TGZ/legate_core-(.*)-$(uname)@\$PREFIX@g" "\$SP_DIR/legion_canonical_cffi.py"; - -# Install legate_core Python wheel -pip install --no-deps --root / --prefix "\$PREFIX" /tmp/out/legate_core-*.whl; - -# Legion leaves .egg-info files, which confuses conda trying to pick up the information -# Remove them so legate-core is the only egg-info file added. 
-rm -rf "\$SP_DIR"/legion*egg-info; -EOF - - git -C $REPO_DIR add .; - git -C $REPO_DIR commit --allow-empty --allow-empty-message -n -m ""; - - # Build legate_core conda package - conda mambabuild ${conda_build_args[@]} $REPO_DIR/conda/conda-build; - - git -C $REPO_DIR reset --hard HEAD~1; - - cp /tmp/conda-build/legate_core/${CONDA_PLATFORM}/legate-*.tar.bz2 /tmp/out/; -} - -(build_legate_conda_package "$@"); diff --git a/continuous_integration/scripts/build-legate-cpp b/continuous_integration/scripts/build-legate-cpp deleted file mode 100755 index 06a34bdba7..0000000000 --- a/continuous_integration/scripts/build-legate-cpp +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -build_legate_cpp() { - set -xeuo pipefail; - - # Build + package legate.core C++ libs - local cmake_args=(${CMAKE_ARGS:-}); - cmake_args+=(-DBUILD_SHARED_LIBS=ON); - - cmake_args+=(-DBUILD_MARCH=${BUILD_MARCH}); - - cmake_args+=(-DCMAKE_BUILD_TYPE=Release); - cmake_args+=(-DLegion_CUDA_ARCH=all-major); - cmake_args+=(-DLegion_NETWORKS=${UCX_ENABLED:-ucx}); - cmake_args+=(-DLegion_USE_Python=ON); - cmake_args+=(-DLegion_PYTHON_EXTRA_INSTALL_ARGS="--root;/;--prefix;\"\${CMAKE_INSTALL_PREFIX}\""); - cmake_args+=(-DLegion_Python_Version=$(python3 --version 2>&1 | cut -d' ' -f2 | cut -d'.' -f3 --complement)); - cmake_args+=(-DLegion_REDOP_HALF=ON); - cmake_args+=(-DLegion_REDOP_COMPLEX=ON); - cmake_args+=(-DLegion_BUILD_JUPYTER=ON); - cmake_args+=(-DLegion_BUILD_BINDINGS=ON); - cmake_args+=(-DLegion_BOUNDS_CHECKS=${BOUNDS_CHECKS:-OFF}); - cmake_args+=(-DLegion_EMBED_GASNet_CONFIGURE_ARGS=${GASNet_CONFIGURE_ARGS:---with-ibv-max-hcas=8}); - cmake_args+=(-DLegion_MAX_DIM=${MAX_DIM:-4}); - cmake_args+=(-DLegion_MAX_FIELDS=${MAX_FIELDS:-256}); - cmake_args+=(-DLegion_SPY=${USE_SPY:-OFF}); - cmake_args+=(-DLegion_USE_CUDA=${USE_CUDA:-ON}); - cmake_args+=(-DLegion_USE_HDF5=${USE_HDF5:-OFF}); - cmake_args+=(-DLegion_USE_LLVM=${USE_LLVM:-OFF}); - cmake_args+=(-DLegion_USE_OpenMP=${USE_OPENMP:-OFF}); - cmake_args+=(-DLegion_BUILD_RUST_PROFILER=OFF) - cmake_args+=(-Dlegate_core_BUILD_DOCS=ON); - cmake_args+=(-DCMAKE_BUILD_PARALLEL_LEVEL=${JOBS:-$(nproc --ignore=1)}); - cmake_args+=(${@:-}); - - sccache --show-stats; - - if test -n "${CONDA_PREFIX:-}"; then - export OPENSSL_DIR="${CONDA_PREFIX}"; - fi - - rm -rf $REPO_DIR/build; - - time CMAKE_BUILD_PARALLEL_LEVEL=${JOBS:-$(nproc --ignore=1)} \ - cmake -S $REPO_DIR -B $REPO_DIR/build "${cmake_args[@]}" -GNinja; - - sccache --show-stats; - - time CMAKE_BUILD_PARALLEL_LEVEL=${JOBS:-$(nproc --ignore=1)} \ - cmake --build $REPO_DIR/build --verbose --parallel ${JOBS:-$(nproc --ignore=1)}; - - sccache --show-stats; - - ( - mkdir -p /tmp/out; - pushd $REPO_DIR/build; - cpack -G TGZ; - cp ./*-$(uname).tar.gz /tmp/out/; - ); -} - -(build_legate_cpp "$@"); diff --git a/continuous_integration/scripts/build-legate-wheel b/continuous_integration/scripts/build-legate-wheel deleted file mode 100755 index 300fa914c8..0000000000 --- a/continuous_integration/scripts/build-legate-wheel +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -build_legate_wheel() { - set -xeuo pipefail; - - rm -rf $REPO_DIR/_skbuild; - - mkdir -p /tmp/out; - - local pip_args=(-vv); - pip_args+=(--wheel-dir /tmp/out); - - if type conda 2>&1 >/dev/null; then - pip_args+=(--no-deps); - pip_args+=(--no-build-isolation); - fi - - local ninja_args=(); - ninja_args+=("-v"); - ninja_args+=("-j${JOBS:-$(nproc --ignore=1)}"); - - local cmake_args=(${CMAKE_ARGS:-}); - cmake_args+=("-DFIND_LEGATE_CORE_CPP=ON"); - 
cmake_args+=("-Dlegate_core_ROOT=$REPO_DIR/build");
-    cmake_args+=("-DLegion_USE_CUDA=${USE_CUDA:-OFF}");
-    cmake_args+=("-DLegion_USE_OpenMP=${USE_OPENMP:-OFF}");
-
-
-    # Build + package legate.core Python wheel
-    time CMAKE_GENERATOR="Ninja" \
-         CMAKE_ARGS="${cmake_args[@]}" \
-         SKBUILD_BUILD_OPTIONS="${ninja_args[@]}" \
-        pip wheel ${pip_args[@]} $REPO_DIR;
-
-}
-
-(build_legate_wheel "$@");
diff --git a/continuous_integration/scripts/build_hdf5.sh b/continuous_integration/scripts/build_hdf5.sh
new file mode 100755
index 0000000000..80ff134495
--- /dev/null
+++ b/continuous_integration/scripts/build_hdf5.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# SPDX-FileCopyrightText: Copyright (c) 2025-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+# The HDF5 package doesn't support building against build trees seamlessly.
+# Build and install the library to a prefix that can then be found and used.
+hdf5_version="1.14.6"
+
+wget "https://github.com/HDFGroup/hdf5/releases/download/hdf5_${hdf5_version}/hdf5-${hdf5_version}.tar.gz" -O hdf5.tgz
+mkdir -p hdf5-build
+tar zvxf hdf5.tgz -C hdf5-build --strip-components=2
+cd hdf5-build
+
+install_prefix="$(pwd)/../prefix"
+
+cmake \
+  -DBUILD_TESTING=OFF \
+  -DEXAMPLES_EXTERNALLY_CONFIGURED=OFF \
+  -DH5EX_BUILD_EXAMPLES=OFF \
+  -DH5EX_BUILD_HL_LIB=OFF \
+  -DH5EX_BUILD_TESTING=OFF \
+  -DHDF5_BUILD_EXAMPLES=OFF \
+  -DHDF5_BUILD_HL_LIB=OFF \
+  -DBUILD_SHARED_LIBS=ON \
+  -DBUILD_STATIC_LIBS=OFF \
+  -DHDF5_BUILD_TOOLS=ON \
+  -DHDF5_BUILD_UTILS=ON \
+  -DHDF5_ENABLE_ALL_WARNINGS=OFF \
+  -DCMAKE_INSTALL_PREFIX="${install_prefix}" \
+  -B build -S .
+cmake --build build
+cmake --build build --target install
diff --git a/continuous_integration/scripts/build_wheel_linux.bash b/continuous_integration/scripts/build_wheel_linux.bash
new file mode 100755
index 0000000000..ea23609650
--- /dev/null
+++ b/continuous_integration/scripts/build_wheel_linux.bash
@@ -0,0 +1,132 @@
+#!/usr/bin/env bash
+
+# SPDX-FileCopyrightText: Copyright (c) 2025-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+# Enable sccache for faster builds, but disable it for CUDA due to issues
+# (#1884) with the realm CUDA kernel embedding.
+source legate-configure-sccache
+unset CMAKE_CUDA_COMPILER_LAUNCHER
+
+export CMAKE_BUILD_PARALLEL_LEVEL=${PARALLEL_LEVEL:=8}
+
+if [[ "${CI:-false}" == "true" ]]; then
+  echo "Installing extra system packages"
+  dnf install -y gcc-toolset-11-libatomic-devel openmpi-devel mpich-devel
+  # Enable gcc-toolset-11 environment
+  source /opt/rh/gcc-toolset-11/enable
+  # Verify compiler version
+  gcc --version
+  g++ --version
+fi
+
+echo "PATH: ${PATH}"
+
+if [[ "${LEGATE_DIR:-}" == "" ]]; then
+  # If we are running in an action then GITHUB_WORKSPACE is set.
+  if [[ "${GITHUB_WORKSPACE:-}" == "" ]]; then
+    script_dir="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
+    LEGATE_DIR="$(python "${script_dir}"/../../scripts/get_legate_dir.py)"
+  else
+    # Simple path within GitHub actions workflows.
+    LEGATE_DIR="${GITHUB_WORKSPACE}"
+  fi
+  export LEGATE_DIR
+fi
+package_dir="${LEGATE_DIR}/scripts/build/python/legate"
+package_name="legate"
+
+echo "Installing build requirements"
+python -m pip install -v --prefer-binary -r continuous_integration/requirements-build.txt
+
+# Recreate the missing symlink and add in the cmake config for UCC.
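+# (Assumed rationale: the nvidia-libcal-cu12 wheel ships only versioned libraries
+# such as libcal.so.0 and libucc.so.1, while the link step and CMake's
+# find_library() look for the unversioned .so names, hence the symlinks below.)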
+sitepkgs=$(python -c 'import site; print(site.getsitepackages()[0], end="")') +ln -fs "${sitepkgs}"/nvidia/libcal/cu12/lib/libcal.so.0 "${sitepkgs}"/nvidia/libcal/cu12/lib/libcal.so +ln -fs "${sitepkgs}"/nvidia/libcal/cu12/lib/libucc.so.1 "${sitepkgs}"/nvidia/libcal/cu12/lib/libucc.so +if [[ ! -d "${sitepkgs}/nvidia/libcal/cu12/lib/cmake" ]]; then + mkdir -p "${sitepkgs}/nvidia/libcal/cu12/lib/cmake" + cp -r "${LEGATE_DIR}/continuous_integration/scripts/ucc-cmake-config" "${sitepkgs}/nvidia/libcal/cu12/lib/cmake/ucc" +fi + +cd "${package_dir}" + +echo "Building HDF5 and installing into prefix" +"${LEGATE_DIR}/continuous_integration/scripts/build_hdf5.sh" + +# Build the wrappers and install into their prefix +MPI_WRAPPERS_DIR="${LEGATE_DIR}"/scripts/build/mpi_wrappers +cmake \ + -B "${LEGATE_DIR}/buildompi" \ + -S "${MPI_WRAPPERS_DIR}" \ + -DMPI_HOME=/usr/lib64/openmpi \ + -DLEGATE_WRAPPER_MPI_SUFFIX=ompi \ + -DCMAKE_INSTALL_PREFIX="${LEGATE_DIR}/wrapper-prefix" +cmake --build "${LEGATE_DIR}/buildompi" +cmake --install "${LEGATE_DIR}/buildompi" +cmake \ + -B "${LEGATE_DIR}/buildmpich" \ + -S "${MPI_WRAPPERS_DIR}" \ + -DMPI_HOME=/usr/lib64/mpich \ + -DLEGATE_WRAPPER_MPI_SUFFIX=mpich \ + -DCMAKE_INSTALL_PREFIX="${LEGATE_DIR}/wrapper-prefix" +cmake --build "${LEGATE_DIR}/buildmpich" +cmake --install "${LEGATE_DIR}/buildmpich" + +# build with '--no-build-isolation', for better sccache hit rate +# 0 really means "add --no-build-isolation" (ref: https://github.com/pypa/pip/issues/5735) +export PIP_NO_BUILD_ISOLATION=0 + +echo "Building ${package_name}" +if [[ ! -d "prefix" ]]; then + echo "No prefix, HDF5 may not have built where we thought!" + exit 1 +fi + +# TODO(cryos): https://github.com/nv-legate/legate.internal/issues/1894 +# Improve the use of CMAKE_PREFIX_PATH to find legate and cutensor once +# scikit-build supports it. +CMAKE_ARGS="-DCMAKE_PREFIX_PATH=$(pwd)/prefix;${sitepkgs}/libucx;${sitepkgs}/nvidia/libcal/cu12" +export CMAKE_ARGS +SKBUILD_CMAKE_ARGS="-DLEGATE_WRAPPER_DIR=${LEGATE_DIR}/wrapper-prefix" +export SKBUILD_CMAKE_ARGS +echo "SKBUILD_CMAKE_ARGS='${SKBUILD_CMAKE_ARGS}'" + +sccache --zero-stats + +python -m pip wheel \ + -w "${LEGATE_DIR}/dist" \ + -v \ + --no-deps \ + --disable-pip-version-check \ + . + +sccache --show-adv-stats + +echo "Show dist contents" +pwd +ls -lh "${LEGATE_DIR}/dist" + +echo "Repairing the wheel" +mkdir -p "${LEGATE_DIR}/final-dist" +export LD_LIBRARY_PATH="${LEGATE_DIR}/scripts/build/python/legate/prefix/lib" +python -m auditwheel repair \ + --exclude libcal.so.* \ + --exclude libcrypto.so.* \ + --exclude libcuda.so.* \ + --exclude libcudart.so.* \ + --exclude libevent_core.so.* \ + --exclude libevent_pthreads-2.so.* \ + --exclude libhwloc.so.* \ + --exclude libmpi.so.* \ + --exclude libmpi_cxx.so.* \ + --exclude libmpicxx.so.* \ + --exclude libnccl.so.* \ + --exclude libopen-*.so.* \ + --exclude libucc.so.* \ + -w "${LEGATE_DIR}/final-dist" \ + "${LEGATE_DIR}"/dist/*.whl + +echo "Wheel has been repaired. 
Contents:" +ls -lh "${LEGATE_DIR}/final-dist" diff --git a/continuous_integration/scripts/conda-utils b/continuous_integration/scripts/conda-utils deleted file mode 100755 index 04c6c22c86..0000000000 --- a/continuous_integration/scripts/conda-utils +++ /dev/null @@ -1,97 +0,0 @@ -make_conda_env_from_yaml() { - mamba env create -n "${CONDA_ENV}" -f "${yaml_file}" --force; -} - -generate_yaml_file() { - local python_version="${PYTHON_VERSION:-}"; - - if [ -z "${python_version}" ]; then - python_version="$(python3 --version 2>&1 | cut -d' ' -f2 | cut -d'.' -f3 --complement)"; - fi - - UCX_PKG=ucx - [ "${UCX_ENABLED:-}" = "OFF" ] && UCX_PKG=no-ucx - - if [[ "$USE_CUDA" == "OFF" ]]; then - yaml_file="$(\ - $REPO_DIR/scripts/generate-conda-envs.py \ - --os "$OS_SHORT_NAME" \ - --compilers \ - --python ${python_version} \ - --openmpi \ - --${UCX_PKG} \ - | head -n1 | cut -d' ' -f3 \ - )" - else - local cuda_version="${CUDA_VERSION:-${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}}"; - yaml_file="$(\ - $REPO_DIR/scripts/generate-conda-envs.py \ - --os "$OS_SHORT_NAME" \ - --compilers \ - --ctk ${cuda_version} \ - --python ${python_version} \ - --openmpi \ - --${UCX_PKG} \ - | head -n1 | cut -d' ' -f3 \ - )" - fi - - $SED -i -re "s/legate-test/${CONDA_ENV}/g" "${yaml_file}"; - echo " - boa" >> "${yaml_file}"; - - mkdir -p /tmp/out/ - cp "${yaml_file}" /tmp/out/ - mkdir -p /tmp/env_yaml - cp "${yaml_file}" /tmp/env_yaml -} - -find_yaml_file() { - pattern="/tmp/env_yaml/*.yaml"; - files=( $pattern ); - yaml_file="${files[0]}"; - - if [ -z "${yaml_file:-}" ] || [ ! -f "$yaml_file" ]; then - return 1; - fi - - return 0; -} - -get_yaml_and_make_conda_env() { - set -e; - - local yaml_file=""; - - generate_yaml_file; - - echo YAML file: ${yaml_file} - cat "${yaml_file}"; - - make_conda_env_from_yaml; -} - -install_legate_core_with_war() { - # WAR: legate-core depends on a different version of numpy than what is already installed. - # The correct version will be installed when legate-core is installed below. - # See github issue: https://github.com/nv-legate/legate.core/issues/812 - mamba uninstall -y -n "${CONDA_ENV}" numpy; - - mamba install -y -n "${CONDA_ENV}" -c nvidia/label/cuda-${CUDA_VERSION} -c conda-forge -c "${ARTIFACTS_DIR}/conda-build/legate_core" legate-core; -} - -activate_conda_env() { - set +xu - eval "$(conda shell.bash hook)" - conda activate ${CONDA_ENV}; - set -xu -} - -conda_info() { - set +x - conda info - set -x -} - -make_release_env() { - mamba create -y -n "${CONDA_ENV}" -c conda-forge boa -} \ No newline at end of file diff --git a/continuous_integration/scripts/make-conda-env b/continuous_integration/scripts/make-conda-env index 31d62879c2..ffbf4e4f82 100755 --- a/continuous_integration/scripts/make-conda-env +++ b/continuous_integration/scripts/make-conda-env @@ -1,13 +1,14 @@ #!/usr/bin/env bash -. conda-utils +set -euo pipefail -make_conda_env() { - set -xeuo pipefail +. 
"$(dirname "$0")/tools/legate-conda-utils" +make_conda_env() { case "$1" in - ci) get_yaml_and_make_conda_env;; - release) make_release_env;; + ci) make_release_env;; + nightly) make_release_env;; + profiler) make_release_env ;; docs) get_yaml_and_make_conda_env;; *) return 1;; esac @@ -15,4 +16,4 @@ make_conda_env() { return 0; } -(make_conda_env "$@"); \ No newline at end of file +(make_conda_env "$@"); diff --git a/continuous_integration/scripts/run-test-or-analysis b/continuous_integration/scripts/run-test-or-analysis deleted file mode 100755 index f38f119281..0000000000 --- a/continuous_integration/scripts/run-test-or-analysis +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -set_repo_dir() { - set -xeuo pipefail - - # Resolve the directory of the script - SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - - # Navigate to the parent of the parent of SCRIPT_DIR, then get the full path - REPO_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)" - - export REPO_DIR - - export PATH="${PATH}:${REPO_DIR}/continuous_integration/scripts" - export ARTIFACTS_DIR="${REPO_DIR}/.artifacts" -} - -run_test_or_analysis() { - set -x - - set_repo_dir; - - . conda-utils - . setup-utils; - - set_base_defs; - install_test_tools; - - install_legate_core_with_war; - - activate_conda_env; - - conda_info; - - set -xeuo pipefail - - case "$1" in - "unit") - echo "Executing unit tests..." - ucx_libs= - [ "${UCX_ENABLED:-}" = "ON" ] && ucx_libs=ucx\ openmpi - mamba install -y -n "${CONDA_ENV}" -c conda-forge pytest pytest-mock ipython jupyter_client $ucx_libs - cd $REPO_DIR/tests/unit - pytest - ;; - "mypy") - echo "Executing mypy..." - # Keep mypy version identical to mypy version in .pre-commit.yaml. The only - # reason we don't read it directly here is because when this is run in CI, it - # is done on a job which does not checkout the repo (and hence cannot read the - # .pre-commit.yaml). - mamba install -y -n "${CONDA_ENV}" mypy=1.5.1 - cd $REPO_DIR - mypy legate - ;; - *) - echo "Invalid command: $1" - return 1 - ;; - esac - - return 0 -} - -(run_test_or_analysis "$@"); diff --git a/continuous_integration/scripts/setup-utils b/continuous_integration/scripts/setup-utils deleted file mode 100644 index 02dd137615..0000000000 --- a/continuous_integration/scripts/setup-utils +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/env bash - -set_darwin_build_env() { - set -xeuo pipefail - - export USE_CUDA=OFF - export OS_SHORT_NAME=osx - export PATH="/usr/local/opt/coreutils/libexec/gnubin:${PATH}" -} - -install_darwin_mamba() { - set -xeuo pipefail - - if [ "${GITHUB_ACTIONS:-}" == "true" ]; then - conda install -y -n base anaconda-clean - conda run -n base anaconda-clean --yes - sudo rm -rf /usr/local/miniconda - fi - - brew install --cask mambaforge -} - -install_darwin_tools() { - set -xeuo pipefail - - export SED=gsed - export READLINK=greadlink - - brew update - brew install cmake coreutils git gnu-getopt gnu-sed jq ninja wget sccache - install_darwin_mamba; -} - -install_darwin_test_tools() { - set -xeuo pipefail - - export SED=gsed - export READLINK=greadlink - - brew update - brew install coreutils git gnu-getopt gnu-sed jq wget - install_darwin_mamba; -} - -# Function to compare version numbers -version_greater_equal() { - set -xeuo pipefail - - set +x - IFS='.' read -ra ver1 <<< "$1" - IFS='.' 
read -ra ver2 <<< "$2" - - for i in "${!ver1[@]}"; do - if [[ -z ${ver2[i]} ]]; then - # ver1 has more segments and is greater - set -x - return 0 - fi - - if ((10#${ver1[i]} > 10#${ver2[i]})); then - set -x - return 0 - elif ((10#${ver1[i]} < 10#${ver2[i]})); then - set -x - return 1 - fi - done - - return 0 -} - -install_from_apt() { - set -xeuo pipefail - - export DEBIAN_FRONTEND=non-interactive - - # Run package updates and install packages - apt-get update - apt-get install -y wget curl jq sudo ninja-build vim numactl rsync -} - -install_sccache_linux() { - set -xeuo pipefail - - wget https://github.com/mozilla/sccache/releases/download/v0.5.4/sccache-v0.5.4-x86_64-unknown-linux-musl.tar.gz && \ - tar -xf sccache-v0.5.4-x86_64-unknown-linux-musl.tar.gz && \ - sudo mv sccache-v0.5.4-x86_64-unknown-linux-musl/sccache /usr/bin/sccache -} - -maybe_install_sccache_linux() { - set -xeuo pipefail - - if ! command -v sccache &> /dev/null; then - echo "sccache not found, proceeding with installation." - install_sccache_linux - else - sccache_version=$(sccache --version 2>&1 | awk '/sccache/ {print $2}') - if [[ -z "$sccache_version" ]] || ! version_greater_equal "$sccache_version" "0.5.4"; then - echo "sccache version less than 0.5.4, proceeding with installation." - install_sccache_linux - else - echo "sccache version is 0.5.4 or greater, no need to install." - fi - fi -} - - -install_cmake() { - set -xeuo pipefail - - wget https://github.com/Kitware/CMake/releases/download/v3.26.5/cmake-3.26.5-linux-x86_64.tar.gz - - tar -xzf cmake-3.26.5-linux-x86_64.tar.gz -} - -setup_linux_build_env() { - set -xeuo pipefail - export OS_SHORT_NAME=linux - export PATH="${PATH}:${PREBUILD_DIR}/cmake-3.26.5-linux-x86_64/bin" - - mkdir -p /tmp/out /tmp/env_yaml -} - -install_linux_tools() { - set -xeuo pipefail - - export SED=sed - export READLINK=readlink - - install_from_apt; - maybe_install_sccache_linux; - install_cmake; - - mkdir -p /tmp/out /tmp/env_yaml -} - -install_linux_test_tools() { - set -xeuo pipefail - - export SED=sed - export READLINK=readlink -} - -set_base_defs() { - set -xeuo pipefail - - export USE_CUDA=${USE_CUDA:-OFF} - export CONDA_ENV=legate - - CONDA_PLATFORM=$(conda info | grep 'platform' | awk -F ' : ' '{print $2}') - export CONDA_PLATFORM - - export PROJECT=legate.core - export PREBUILD_DIR=/tmp/prebuild - - export BUILD_MARCH=$(uname -m | tr '_' '-') - - export CUDA_VERSION=12.2.2 - export CUDA_VERSION_MAJOR=12 - export CUDA_VERSION_MINOR=0 - - export PYTHON_VERSION=3.11 - - export USE_OPENMP=ON -} - -# ----------------------------------------------------------------------------- - -prep_git() { - local current_email=$(git config --global user.email) - local current_name=$(git config --global user.name) - - if [ -z "$current_email" ]; then - git config --global --add user.email "users.noreply.github.com" - else - echo "Note: git user.email is already set to $current_email" - fi - - if [ -z "$current_name" ]; then - git config --global --add user.name "anon" - else - echo "Note: git user.name is already set to $current_name" - fi - - # Fix "fatal: detected dubious ownership in repository at '/tmp/legate.core'" - # during local builds. 
- git config --global --add safe.directory "$REPO_DIR" -} - -install_tools() { - if [[ "$(uname)" == "Darwin" ]]; then - install_darwin_tools; - elif [[ "$(uname)" == "Linux" ]]; then - install_linux_tools; - else - echo "Unknown OS" - exit 1 - fi -} - -install_test_tools() { - if [[ "$(uname)" == "Darwin" ]]; then - install_darwin_test_tools; - elif [[ "$(uname)" == "Linux" ]]; then - install_linux_test_tools; - else - echo "Unknown OS" - exit 1 - fi -} - -setup_os_specific_env() { - if [[ "$(uname)" == "Darwin" ]]; then - set_darwin_build_env; - elif [[ "$(uname)" == "Linux" ]]; then - setup_linux_build_env; - else - echo "Unknown OS" - exit 1 - fi -} - -setup_build_env() { - set -xeuo pipefail - - set_base_defs; - - install_tools; - - setup_os_specific_env; - - rm -rf "$PREBUILD_DIR" - mkdir -p "$PREBUILD_DIR" - cd $PREBUILD_DIR - - prep_git; -} - -sccache_stop_server_and_show_stats() { - set -xeuo pipefail - sccache --stop-server || true && sccache --show-stats; -} - -init_sccache() { - set -xeuo pipefail - - export SCCACHE_REGION="us-east-2" - export SCCACHE_BUCKET="rapids-sccache-east" - export SCCACHE_S3_KEY_PREFIX=legate-cunumeric-dev - export VAULT_HOST=https://vault.ops.k8s.rapids.ai - CMAKE_C_COMPILER_LAUNCHER=$(which sccache) - export CMAKE_C_COMPILER_LAUNCHER - export CMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} - export CMAKE_CUDA_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER} - - echo AWS_REGION="${AWS_REGION:-}" - echo AWS_SESSION_TOKEN="${AWS_SESSION_TOKEN:-}" - echo AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID:-}" - echo AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY:-}" - - mkdir -p ~/.cache; - - local secrets_dir="$REPO_DIR/.creds" - - if [ -d "$secrets_dir" ] && [ "$(ls -A "$secrets_dir")" ]; then - vault-s3-init; - else - sccache_stop_server_and_show_stats - fi -} \ No newline at end of file diff --git a/continuous_integration/scripts/test b/continuous_integration/scripts/test new file mode 100755 index 0000000000..e06c4c5844 --- /dev/null +++ b/continuous_integration/scripts/test @@ -0,0 +1,285 @@ +#!/usr/bin/env bash +set -euo pipefail + +export LEGATE_SCRIPT_NAME="test" + +run_test_or_analysis() { + # rename + # shellcheck disable=SC2154 + export LEGATE_DIR="${REPO_DIR}" + + export PATH="${PATH}:${LEGATE_DIR}/continuous_integration/scripts/tools" + + . "$(dirname "$0")/tools/legate-conda-utils" + . setup-utils; + + set_base_defs; + # shellcheck disable=SC2154 + cd "${PREBUILD_DIR}" + + install_test_tools; + + install_legate_with_war; + + activate_conda_env; + + conda_info; + + if command -v "legate-issue" &> /dev/null; then + legate-issue + else + echo "WARNING: legate-issue not found." + fi + + # HACK: this should be done much better, and in a much more transparent manner... but + # alas, we no longer have any control of the setup phases in github ci, so we must do + # it here. + if [[ -z "${LEGATE_ARCH:-}" ]]; then + local locase_uname + locase_uname="$(uname | tr '[:upper:]' '[:lower:]')" + # shellcheck disable=SC2154 + LEGATE_ARCH="arch-ci-${locase_uname}-${COMPILER:-gcc}-py-pkgs-${LEGATE_BUILD_MODE%-gcc}" + + export LEGATE_ARCH + fi + + cd "${LEGATE_DIR}" + + if [[ ${LEGATE_ARCH} == *sanitizer* ]]; then + # Needed to fix AddressSanitizer:DEADLYSIGNAL bug in GCC libsanitizer. 
+ # See https://stackoverflow.com/questions/77894856/possible-bug-in-gcc-sanitizers + sysctl vm.mmap_rnd_bits=28 + fi + + # Need to LD_PRELOAD for both C++ and Python tests now, as the legate tester transitively loads liblegate.so + function set_ld_preload() + { + if [[ ${LEGATE_ARCH} == *gcc-*-sanitizer* ]]; then + # Trying to reproduce this on macOS? See + # https://stackoverflow.com/questions/47619097/address-sanitizing-boost-python-modules + # + # The TL;DR is, however: + # + # asan_lib='/Library/Developer/CommandLineTools/usr/lib/clang/15.0.0/lib/darwin/libclang_rt.asan_osx_dynamic.dylib' + # + # py_paths='/path/to/legate' (optional, only if you are using a venv) + # + # DYLD_INSERT_LIBRARIES=$asan_lib PYTHONPATH=$py_paths + # /opt/homebrew/Cellar/python@3.11/3.11.5/Frameworks/Python.framework/Versions/3.11/Resources/Python.app/Contents/MacOS/Python + # /path/to/your/file.py + # shellcheck disable=SC2154 + local libasan_path="${CONDA_PREFIX}/lib/libasan.so" + + if [[ ! -f "${libasan_path}" ]]; then + echo "Sanitizer libraries not found." + echo "Expected to find ${libasan_path} but this seemingly does not exist" + return 1 + fi + + export LD_PRELOAD="${libasan_path}" + + local asan_opts=() + asan_opts+=("check_initialization_order=1") + asan_opts+=("alloc_dealloc_mismatch=1") + asan_opts+=("abort_on_error=1") + asan_opts+=("strict_string_checks=1") + asan_opts+=("color=always") + asan_opts+=("detect_odr_violation=2") + [[ -f ${CONDA_PREFIX}/bin/tests_with_gpus ]] && { + # GPU build + asan_opts+=("protect_shadow_gap=0") + } + asan_opts+=("symbolize=1") + + # Note trailing ':' on this! This is deliberate so we can do + # ASAN_OPTIONS+='foo:bar:baz' + # v~~ trailing ':' here + ASAN_OPTIONS="$(IFS=':'; echo "${asan_opts[*]}"):" + export ASAN_OPTIONS + export UBSAN_OPTIONS="include=${LEGATE_DIR}/share/legate/sanitizers/ubsan_default_options.txt" + export LSAN_OPTIONS="suppressions=${LEGATE_DIR}/share/legate/sanitizers/lsan_suppressions.txt" + export TSAN_OPTIONS="include=${LEGATE_DIR}/share/legate/sanitizers/tsan_default_options.txt:suppressions=${LEGATE_DIR}/share/legate/sanitizers/tsan_suppressions.txt" + + elif [[ ${LEGATE_ARCH} == *clang-*-sanitizer* ]]; then + echo "Must properly implement Address sanitizer lib injection for Clang-based presets!" + echo "See impls for GCC above" + return 1 + fi + } + + case "$2" in + "cpu") + export GPUS_COUNT=0 + ;; + "gpu") + export GPUS_COUNT=1 + ;; + "2gpu") + export GPUS_COUNT=2 + ;; + *) + echo "Invalid argument: $2" + return 2 + ;; + esac + + case "$1" in + "python") + echo "Executing python tests..." + + ucx_libs=(ucx openmpi openssh) + + set +u + # shellcheck disable=SC2154 + legate-mamba-retry install \ + -y \ + -n "${CONDA_ENV}" \ + -c conda-forge \ + psutil pytest pytest-mock ipython jupyter_client cupy "${ucx_libs[@]}" + + hdf5_libs=(zarr fsspec kerchunk) + # shellcheck disable=SC2154 + legate-mamba-retry install \ + -y \ + -n "${CONDA_ENV}" \ + "${hdf5_libs[@]}" + # Conda packages for h5py are very outdated only going up to 3.11. See + # pyproject.toml for reason to skip h5py 3.13.0 + python3 -m pip install 'h5py!=3.13.0' + set -u + + cd "${LEGATE_DIR}" + # pytest doesn't truncate output if "CI" is defined in the env: + # https://doc.pytest.org/en/latest/explanation/ci.html + export CI=1 + + set_ld_preload + + case "${GPUS_COUNT}" in + "0") + echo "Using CPUs ..." + LEGATE_AUTO_CONFIG=0 pytest --color=yes tests/python -s + ;; + "1") + echo "Using 1 GPU ..." 
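+                # Assumed flag semantics: --fbmem sets the per-GPU framebuffer
+                # pool (in MiB), and LEGATE_AUTO_CONFIG=0 keeps these explicit
+                # LEGATE_CONFIG values in force instead of auto-sizing.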
+ LEGATE_AUTO_CONFIG=0 LEGATE_CONFIG="--fbmem 4000 --gpus 1" pytest --color=yes tests/python -s + ;; + "2") + echo "Using 2 GPUs ..." + LEGATE_AUTO_CONFIG=0 LEGATE_CONFIG="--fbmem 4000 --gpus 2" pytest --color=yes tests/python -s + ;; + *) + echo "Invalid case: $2" + return 2 + ;; + esac + ;; + "cpp") + echo "Running C++ unit tests..." + legate-mamba-retry install \ + -y \ + -n "${DEFAULT_CONDA_ENV:-legate}" \ + -c conda-forge \ + psutil + + set +u + legate-mamba-retry install \ + -y \ + -n "${CONDA_ENV}" \ + ucx openmpi openssh + set -u + + cd "${LEGATE_DIR}" + + function run_legate_tests() + { + local gtest_file="${1}" + local proc_type="${2}" + local proc_count="${3}" + local worker_count="${4:-2}" + LEGATE_TEST=1 \ + LEGATE_AUTO_CONFIG=0 \ + LEGATE_CONFIG="--${proc_type} ${proc_count} --sysmem 4000 --fbmem 1000" \ + "${gtest_file}" --gtest_color=yes + } + + function run_legate_tests_with_driver() + { + local gtest_file="${1}" + local proc_type="${2}" + local proc_count="${3}" + local worker_count="${4:-2}" + python3 "${LEGATE_DIR}/test.py" \ + --gtest-file "${gtest_file}" \ + --verbose \ + --debug \ + --color \ + --timeout 90 \ + --"${proc_type}" "${proc_count}" \ + --sysmem 100 \ + -j "${worker_count}" + } + + function run_sequential_tests() + { + # shellcheck disable=SC2154 + run_legate_tests "${CONDA_PREFIX}/bin/tests_wo_runtime" cpus 1 + # shellcheck disable=SC2154 + run_legate_tests_with_driver "${CONDA_PREFIX}/bin/tests_non_reentrant_with_runtime" cpus 1 + # shellcheck disable=SC2154 + run_legate_tests_with_driver "${CONDA_PREFIX}/bin/tests_non_reentrant_wo_runtime" cpus 1 + } + + function run_cpp_tests() + { + run_legate_tests "${CONDA_PREFIX}/bin/tests_with_runtime" "$@" + } + + function run_gpu_tests() + { + run_legate_tests "${CONDA_PREFIX}/bin/tests_with_gpus" "$@" + } + + function run_all_runtime_tests() + { + if [[ "${GPUS_COUNT}" -ne 2 ]]; then + run_cpp_tests cpus 1 + # Also do some multi-CPU testing + uname_value=$(uname) + if [[ "${uname_value}" == "Darwin" ]]; then + run_cpp_tests cpus 2 + else + run_cpp_tests cpus 4 + fi + fi + + if [[ "${GPUS_COUNT}" -gt 0 ]]; then + if [[ -f ${CONDA_PREFIX}/bin/tests_with_gpus ]]; then + run_gpu_tests gpus "${GPUS_COUNT}" + fi + fi + } + + set_ld_preload + + if [[ "${GPUS_COUNT}" -ne 2 ]]; then + run_sequential_tests + fi + + export LEGATE_WINDOW_SIZE=1 + run_all_runtime_tests + + # Run tests with the scheduling window enabled too + export LEGATE_WINDOW_SIZE=1024 + run_all_runtime_tests + ;; + *) + echo "Invalid case: $1" + return 1 + ;; + esac + + return 0 +} + +(run_test_or_analysis "$@"); diff --git a/continuous_integration/scripts/test_wheel_linux.bash b/continuous_integration/scripts/test_wheel_linux.bash new file mode 100755 index 0000000000..2a4f55d7fc --- /dev/null +++ b/continuous_integration/scripts/test_wheel_linux.bash @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# SPDX-FileCopyrightText: Copyright (c) 2025-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +echo "Are my wheels there???" 
+
+ls -lh
+
+ls -lh final-dist
+
+pip install final-dist/*.whl
+
+echo "Lamest of proof of life tests for legate"
+export LEGATE_SHOW_CONFIG=1
+export LEGATE_CONFIG="--fbmem 512"
+export LEGION_DEFAULT_ARGS="-ll:show_rsrv"
+python -c 'import legate.core'
+echo "Maybe that worked"
diff --git a/continuous_integration/scripts/tools/legate-conda-retry b/continuous_integration/scripts/tools/legate-conda-retry
new file mode 100755
index 0000000000..63dd87ad3d
--- /dev/null
+++ b/continuous_integration/scripts/tools/legate-conda-retry
@@ -0,0 +1,236 @@
+#!/usr/bin/env bash
+
+. pretty_printing.bash
+
+# Shamelessly stolen from https://github.com/rapidsai/gha-tools/blob/main/tools/rapids-conda-retry
+
+# legate-conda-retry
+#
+# wrapper for conda that retries the command after a CondaHTTPError,
+# ChecksumMismatchError, or JSONDecodeError (ideally, any conda error that
+# is normally resolved by retrying)
+#
+# pipefail must be set in order for the script to recognize failing exit codes
+# when output is piped to tee
+#
+# Example usage:
+# $ legate-conda-retry install cudatoolkit=11.0 rapids=0.16
+#
+# Configurable options are set using the following env vars:
+#
+# LEGATE_CONDA_EXE - override the conda executable
+# Default is "conda"
+#
+# LEGATE_CONDA_RETRY_MAX - set to a positive integer to set the max number of retry
+# attempts (attempts after the initial try).
+# Default is 3 retries
+#
+# LEGATE_CONDA_RETRY_SLEEP - set to a positive integer to set the duration, in
+# seconds, to wait between retries.
+# Default is a 10 second sleep
+#
+# LEGATE_CONDA_RETRY_TIMEOUT - set to a positive integer to specify the timeout in seconds
+# for the conda command execution.
+# Default is 600 seconds (10 minutes)
+#
+set -eou pipefail
+export LEGATE_SCRIPT_NAME="legate-conda-retry"
+
+# Global variables for error propagation
+LEGATE_EXITCODE=0
+LEGATE_NEED_TO_RETRY=0
+
+# Default timeout: 10 minutes
+LEGATE_CONDA_RETRY_TIMEOUT=${LEGATE_CONDA_RETRY_TIMEOUT:=600}
+
+# Function to run conda and check output for specific retryable errors
+# input variables:
+# conda_cmd: the command used for running conda, which accepts the args
+# passed to this script
+# outfile: file to tee output to for checking, likely a temp file
+# output variables:
+# LEGATE_EXITCODE: the exit code from running ${conda_cmd} ${args}
+# LEGATE_NEED_TO_RETRY: 1 if the command should be retried, 0 if it should not be
+legate_run_conda() {
+  # shellcheck disable=SC2086
+  # LEGATE_OTEL_WRAPPER is optionally passed in as an env var. It is
+  # used to instrument conda-build or mambabuild for finer-grained spans.
+  local outfile
+  outfile=$(mktemp)
+
+  # Momentarily disable error returns because the conda command might... fail
+  set +e
+  # shellcheck disable=SC2086
+  timeout ${LEGATE_CONDA_RETRY_TIMEOUT} ${LEGATE_OTEL_WRAPPER:-} ${conda_cmd} ${args} 2>&1 | tee "${outfile}"
+  LEGATE_EXITCODE=${PIPESTATUS[0]}
+  set -e
+  LEGATE_NEED_TO_RETRY=0
+  local need_to_clean=0
+  local retrying_msg=""
+
+  if (( LEGATE_EXITCODE == 124 )); then
+    retrying_msg="Retrying, conda command timed out after ${LEGATE_CONDA_RETRY_TIMEOUT} seconds..."
+    LEGATE_NEED_TO_RETRY=1
+  elif (( LEGATE_EXITCODE != 0 )); then
+    # Show exit code
+    legate-echo-stderr "conda returned exit code: ${LEGATE_EXITCODE}"
+
+    if grep -q ChecksumMismatchError: "${outfile}"; then
+      retrying_msg="Retrying after cleaning tarball and package caches, found 'ChecksumMismatchError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    elif grep -q ChunkedEncodingError: "${outfile}"; then
+      retrying_msg="Retrying, found 'ChunkedEncodingError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q CondaHTTPError: "${outfile}"; then
+      retrying_msg="Retrying, found 'CondaHTTPError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q CondaMultiError: "${outfile}"; then
+      retrying_msg="Retrying after cleaning tarball cache, found 'CondaMultiError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    elif grep -q CondaSSLError: "${outfile}"; then
+      retrying_msg="Retrying, found 'CondaSSLError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q "Connection broken:" "${outfile}"; then
+      retrying_msg="Retrying, found 'Connection broken:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q ConnectionError: "${outfile}"; then
+      retrying_msg="Retrying, found 'ConnectionError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q DependencyNeedsBuildingError: "${outfile}"; then
+      retrying_msg="Retrying, found 'DependencyNeedsBuildingError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q EOFError: "${outfile}"; then
+      retrying_msg="Retrying, found 'EOFError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q JSONDecodeError: "${outfile}"; then
+      retrying_msg="Retrying, found 'JSONDecodeError:' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q "Multi-download failed" "${outfile}"; then
+      retrying_msg="Retrying, found 'Multi-download failed' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q "Response ended prematurely" "${outfile}"; then
+      retrying_msg="Retrying, found 'Response ended prematurely' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q "Timeout was reached" "${outfile}"; then
+      retrying_msg="Retrying, found 'Timeout was reached' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q "Unexpected error .* on netlink descriptor" "${outfile}"; then
+      retrying_msg="Retrying, found 'Unexpected error .* on netlink descriptor' in output..."
+      LEGATE_NEED_TO_RETRY=1
+    elif grep -q "File not valid: SHA256 sum doesn't match expectation" "${outfile}"; then
+      retrying_msg="Retrying after cleaning tarball and package caches, found 'File not valid: SHA256 sum doesn't match expectation' in output..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    elif grep -q "Error when extracting package: basic_filebuf::underflow error reading the file: Bad file descriptor" "${outfile}"; then
+      retrying_msg="Retrying after cleaning tarball and package caches, found 'Error when extracting package: basic_filebuf::underflow error reading the file: Bad file descriptor' in output..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    elif grep -q 'Download error.*Could not read a file' "${outfile}"; then
+      retrying_msg="Retrying after cleaning tarball and package caches, found 'Download error.*Could not read a file' in output..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    elif grep -q 'Download error.*Timeout was reached' "${outfile}"; then
+      retrying_msg="Retrying after cleaning tarball and package caches, found 'Download error.*Timeout was reached' in output..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    elif [[ ${LEGATE_EXITCODE} -eq 139 ]]; then
+      retrying_msg="Retrying, command resulted in a segfault. This may be an intermittent failure..."
+      LEGATE_NEED_TO_RETRY=1
+      need_to_clean=1
+    else
+      legate-echo-stderr "Exiting, no retryable ${LEGATE_CONDA_EXE} errors detected"
+    fi
+
+    if (( LEGATE_NEED_TO_RETRY == 1 )) && \
+       (( retries >= max_retries )); then
+      # Catch instance where we run out of retries
+      legate-echo-stderr "Exiting, reached max retries..."
+    else
+      # Give reason for retry
+      legate-echo-stderr "${retrying_msg}"
+      if (( need_to_clean == 1 )); then
+        legate-echo-stderr "Cleaning tarball and package caches before retrying..."
+        ${conda_cmd} clean --tarballs --packages -y
+      fi
+    fi
+  fi
+  rm -f "${outfile}"
+}
+
+legate_conda_retry_fn() {
+  local condaretry_help="
+legate-conda-retry options:
+
+   --condaretry_max_retries=n      Retry the conda command at most n times (default is 3)
+   --condaretry_sleep_interval=n   Sleep n seconds between retries (default is 10)
+   --condaretry_timeout=n          Time out the conda command after n seconds (default is 600)
+
+These options can also be set using the following env vars:
+
+   LEGATE_CONDA_RETRY_MAX     - set to a positive integer to set the max number of retry
+                                attempts (attempts after the initial try).
+                                Default is 3 retries
+
+   LEGATE_CONDA_RETRY_SLEEP   - set to a positive integer to set the duration, in
+                                seconds, to wait between retries.
+                                Default is a 10 second sleep
+
+   LEGATE_CONDA_RETRY_TIMEOUT - set to a positive integer to specify the timeout, in
+                                seconds, for the conda command execution.
+                                Default is 600 seconds (10 minutes)
+==========
+"
+  local max_retries=${LEGATE_CONDA_RETRY_MAX:=3}
+  local sleep_interval=${LEGATE_CONDA_RETRY_SLEEP:=10}
+  local retries=0
+
+  # Temporarily set this to something else (eg. a script called "testConda" that
+  # prints "CondaHTTPError:" and exits with 1) for testing this script.
+  #conda_cmd=./testConda
+  local conda_cmd="${LEGATE_CONDA_EXE:=conda}"
+
+  # Process and remove args recognized only by this script, save others for conda
+  # Process help separately
+  local args=""
+
+  for arg in "$@"; do
+    local opt=${arg%%=*}
+    local val=${arg##*=}
+    if [[ ${opt} == "--help" ]] || [[ ${opt} == "-h" ]]; then
+      echo "${condaretry_help}"
+      ${conda_cmd} --help
+      exit $?
+    elif [[ ${opt} == "--condaretry_max_retries" ]]; then
+      max_retries=${val}
+    elif [[ ${opt} == "--condaretry_sleep_interval" ]]; then
+      sleep_interval=${val}
+    elif [[ ${opt} == "--condaretry_timeout" ]]; then
+      LEGATE_CONDA_RETRY_TIMEOUT=${val}
+    else
+      args="${args} ${arg}"
+    fi
+  done
+
+  # shellcheck disable=SC2086
+  legate_run_conda ${args}
+
+  # Retry loop, only if needed
+  while (( LEGATE_NEED_TO_RETRY == 1 )) && \
+        (( retries < max_retries )); do
+
+    retries=$(( retries + 1 ))
+    legate-echo-stderr "Waiting, retry ${retries} of ${max_retries} -> sleeping for ${sleep_interval} seconds..."
+    sleep "${sleep_interval}"
+    legate-echo-stderr "Starting, retry ${retries} of ${max_retries} -> sleep done..."
+
+    # shellcheck disable=SC2086
+    legate_run_conda ${args}
+  done
+
+  return "${LEGATE_EXITCODE}"
+}
+
+run_command "${LEGATE_CONDA_EXE:-conda} '$*'" legate_conda_retry_fn "$@"
+
+# Cleanup global variables
+unset LEGATE_EXITCODE
+unset LEGATE_NEED_TO_RETRY
diff --git a/continuous_integration/scripts/tools/legate-conda-utils b/continuous_integration/scripts/tools/legate-conda-utils
new file mode 100755
index 0000000000..3f2ebafa41
--- /dev/null
+++ b/continuous_integration/scripts/tools/legate-conda-utils
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+
+set -eou pipefail
+
+. conda-utils
+. pretty_printing.bash
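+
+# The legate-*-retry wrappers invoked below accept the same arguments as
+# conda/mamba; their retry behavior is tuned through the environment variables
+# documented in legate-conda-retry. An illustrative invocation (this exact
+# package list is hypothetical, not one this file installs):
+#
+#   LEGATE_CONDA_RETRY_MAX=5 LEGATE_CONDA_RETRY_SLEEP=30 \
+#     legate-mamba-retry install -y -n "${CONDA_ENV}" cmake ninja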
+
+make_conda_env_from_yaml() {
+  # shellcheck disable=SC2154
+  legate-mamba-retry env create -q -n "${CONDA_ENV}" -f "${yaml_file}";
+}
+
+generate_yaml_file() {
+  # shellcheck disable=SC2154
+  if [[ "${USE_CUDA}" == "OFF" ]]; then
+    # shellcheck disable=SC2154
+    output=$("${REPO_DIR}/scripts/generate-conda-envs.py" \
+      --os "${OS_SHORT_NAME}" \
+      --compilers \
+      --openmpi \
+      --ucx)
+  else
+    # shellcheck disable=SC2154
+    output=$("${REPO_DIR}/scripts/generate-conda-envs.py" \
+      --os "${OS_SHORT_NAME}" \
+      --compilers \
+      --ctk "${CUDA_VERSION}" \
+      --openmpi \
+      --ucx)
+  fi
+
+  yaml_output=$(echo "${output}" | head -n1)
+  yaml_file=$(echo "${yaml_output}" | cut -d' ' -f3)
+
+  echo Dumping: "${yaml_file}"
+  cat "${yaml_file}";
+
+  # shellcheck disable=SC2154
+  ${SED} -i -re "s/legate-test/${CONDA_ENV}/g" "${yaml_file}";
+  echo "  - boa" >> "${yaml_file}";
+
+  # shellcheck disable=SC2154
+  if [[ ${LEGATE_BUILD_MODE} == *sanitizer* ]]; then
+    # shellcheck disable=SC2154
+    echo "  - libsanitizer <=${MAX_LIBSANITIZER_VERSION}" >> "${yaml_file}";
+  fi
+
+  mkdir -p /tmp/out/
+  cp "${yaml_file}" /tmp/out/
+  mkdir -p /tmp/env_yaml
+  cp "${yaml_file}" /tmp/env_yaml
+}
+
+find_yaml_file() {
+  # Note: the glob must be unquoted so that it actually expands; quoting the
+  # pattern would leave the literal string in files[0] and the -f check below
+  # would always fail.
+  local files=( /tmp/env_yaml/*.yaml );
+  yaml_file="${files[0]}";
+
+  if [[ -z "${yaml_file:-}" ]] || [[ ! -f "${yaml_file}" ]]; then
+    return 1;
+  fi
+
+  return 0;
+}
+
+get_yaml_and_make_conda_env() {
+  set -eou pipefail
+
+  # Set by generate_yaml_file via bash dynamic scoping
+  local yaml_file=""
+
+  generate_yaml_file
+
+  echo YAML file: "${yaml_file}"
+  cat "${yaml_file}"
+
+  make_conda_env_from_yaml
+}
+
+install_legate_with_war() {
+  # WAR: legate depends on a different version of numpy than what is already installed.
+  # The correct version will be installed when legate is installed below.
+  # See github issue: https://github.com/nv-legate/legate.core/issues/812
+  legate-mamba-retry uninstall -q -y -n "${CONDA_ENV}" numpy
+
+  # shellcheck disable=SC2154
+  legate-mamba-retry install \
+    -y \
+    -n "${CONDA_ENV}" \
+    -c legate/label/ucc140 \
+    -c conda-forge \
+    -c "${ARTIFACTS_DIR}/conda-build/legate" \
+    legate
+}
+
+make_release_env() {
+  legate-mamba-retry create -q -y -n "${CONDA_ENV}" -c conda-forge boa
+}
diff --git a/continuous_integration/scripts/tools/legate-configure-sccache b/continuous_integration/scripts/tools/legate-configure-sccache
new file mode 100755
index 0000000000..bd7a5e0be5
--- /dev/null
+++ b/continuous_integration/scripts/tools/legate-configure-sccache
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# A utility script that configures sccache environment variables
+
+export CMAKE_CUDA_COMPILER_LAUNCHER=sccache
+export CMAKE_CXX_COMPILER_LAUNCHER=sccache
+export CMAKE_C_COMPILER_LAUNCHER=sccache
+export RUSTC_WRAPPER=sccache
+export PARALLEL_LEVEL=${PARALLEL_LEVEL:-$(nproc --all --ignore=2)}
+export SCCACHE_BUCKET=rapids-sccache-east
+export SCCACHE_IDLE_TIMEOUT=32768
+export SCCACHE_REGION=us-east-2
+export SCCACHE_S3_KEY_PREFIX=legate-cunumeric-dev
+export SCCACHE_S3_NO_CREDENTIALS=false
+export SCCACHE_S3_USE_SSL=true
+
+if [[ "${CI:-false}" == "false" ]]; then
+  # Configure sccache for read-only mode since no credentials
+  # are available in local builds.
+  export SCCACHE_S3_NO_CREDENTIALS=true
+fi
diff --git a/continuous_integration/scripts/tools/legate-echo-stderr b/continuous_integration/scripts/tools/legate-echo-stderr
new file mode 100755
index 0000000000..3c371bca22
--- /dev/null
+++ b/continuous_integration/scripts/tools/legate-echo-stderr
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+# Shamelessly stolen from https://github.com/rapidsai/gha-tools/blob/main/tools/rapids-echo-stderr
+# Echo to stderr helper function
+STR=""
+if [[ -n "${LEGATE_SCRIPT_NAME:-}" ]]; then
+  STR+="[${LEGATE_SCRIPT_NAME}] "
+fi
+STR+="$*"
+
+echo "${STR}" >&2
diff --git a/continuous_integration/scripts/tools/legate-mamba-retry b/continuous_integration/scripts/tools/legate-mamba-retry
new file mode 100755
index 0000000000..af2a73a0a0
--- /dev/null
+++ b/continuous_integration/scripts/tools/legate-mamba-retry
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Shamelessly stolen from https://github.com/rapidsai/gha-tools/blob/main/tools/rapids-mamba-retry
+
+# legate-mamba-retry
+#
+# Same as legate-conda-retry, but sets LEGATE_CONDA_EXE="mamba" so that both
+# wrappers share a single implementation
+#
+# Configurable options are set using the following env vars:
+#
+# LEGATE_MAMBA_BIN         - override the mamba binary
+#                            Default is "mamba"
+#
+# LEGATE_MAMBA_RETRY_MAX   - set to a positive integer to set the max number of retry
+#                            attempts (attempts after the initial try).
+#                            Default is 3 retries
+#
+# LEGATE_MAMBA_RETRY_SLEEP - set to a positive integer to set the duration, in
+#                            seconds, to wait between retries.
+#                            Default is a 10 second sleep
+#
+# These are copied to LEGATE_CONDA_RETRY_MAX and LEGATE_CONDA_RETRY_SLEEP
+
+set -eou pipefail
+
+export LEGATE_SCRIPT_NAME="legate-mamba-retry"
+export LEGATE_CONDA_EXE=${LEGATE_MAMBA_BIN:=mamba}
+
+if [[ -v LEGATE_MAMBA_RETRY_MAX ]]; then
+  export LEGATE_CONDA_RETRY_MAX="${LEGATE_MAMBA_RETRY_MAX}"
+fi
+
+if [[ -v LEGATE_MAMBA_RETRY_SLEEP ]]; then
+  export LEGATE_CONDA_RETRY_SLEEP="${LEGATE_MAMBA_RETRY_SLEEP}"
+fi
+
+legate-conda-retry "$@"
diff --git a/continuous_integration/scripts/tools/pretty_printing.bash b/continuous_integration/scripts/tools/pretty_printing.bash
new file mode 100644
index 0000000000..84962f309a
--- /dev/null
+++ b/continuous_integration/scripts/tools/pretty_printing.bash
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+# begin_group: Start a named section of log output, possibly with color.
+# Usage: begin_group "Group Name" [Color]
+# Group Name: A string specifying the name of the group.
+# Color (optional): ANSI color code to set text color. Default is blue (34).
+function begin_group()
+{
+  # See options for colors here: https://gist.github.com/JBlond/2fea43a3049b38287e5e9cefc87b2124
+  local blue='34'
+  local name="${1:-(unnamed group)}"
+  local color="${2:-${blue}}"
+
+  if [[ "${LEGATE_CI_GROUP:-}" == '' ]]; then
+    LEGATE_CI_GROUP=0
+  fi
+
+  if [[ "${LEGATE_CI_GROUP}" == '0' ]]; then
+    echo -e "::group::\e[${color}m${name}\e[0m"
+  else
+    echo -e "\e[${color}m== ${name} ===========================================================================\e[0m"
+  fi
+  export LEGATE_CI_GROUP=$((LEGATE_CI_GROUP+1))
+}
+export -f begin_group
+
+# end_group: End a named section of log output and print status based on exit status.
+# Usage: end_group "Group Name" [Exit Status]
+# Group Name: A string specifying the name of the group.
+# Exit Status (optional): The exit status of the command run within the group. Default is 0.
+function end_group()
+{
+  local name="${1:-(unnamed group)}"
+  local build_status="${2:-0}"
+  local duration="${3:-}"
+  local red='31'
+  local blue='34'
+
+  if [[ "${LEGATE_CI_GROUP:-}" == '' ]]; then
+    echo 'end_group called without matching begin_group!'
+    exit 1
+  fi
+
+  export LEGATE_CI_GROUP=$((LEGATE_CI_GROUP-1))
+  if [[ "${LEGATE_CI_GROUP}" == '0' ]]; then
+    echo -e "::endgroup::\e[${blue}m (took ${duration}s)\e[0m"
+  else
+    echo -e "\e[${blue}m== ${name} ===========================================================================\e[0m"
+  fi
+
+  if [[ "${build_status}" != '0' ]]; then
+    echo -e "::error::\e[${red}m ${name} - Failed (⬆️ click above for full log ⬆️)\e[0m"
+    exit "${build_status}"
+  fi
+}
+export -f end_group
+
+# Runs a command within a named group, handles the exit status, and prints appropriate
+# messages based on the result.
+# Usage: run_command "Group Name" command [arguments...]
+function run_command()
+{
+  { set +x; } 2>/dev/null;
+  local old_opts
+  old_opts=$(set +o)
+  set +e
+
+  local group_name="${1:-}"
+  shift
+  local command=("$@")
+  local status
+
+  begin_group "${group_name}"
+  local start_time
+  start_time=$(date +%s)
+  echo "Running command:" "${command[@]}"
+  "${command[@]}"
+  status=$?
+  # In case the command enables either of these, we want to disable them so that we can
+  # finish up here -- we will be restoring the old options at function end anyways.
+  { set +xe; } 2>/dev/null;
+  local end_time
+  end_time=$(date +%s)
+  local duration
+  duration=$((end_time - start_time))
+  end_group "${group_name}" "${status}" "${duration}"
+  eval "${old_opts}"
+  return "${status}"
+}
+export -f run_command
diff --git a/continuous_integration/scripts/ucc-cmake-config/ucc-config-version.cmake b/continuous_integration/scripts/ucc-cmake-config/ucc-config-version.cmake
new file mode 100644
index 0000000000..91f9e667c0
--- /dev/null
+++ b/continuous_integration/scripts/ucc-cmake-config/ucc-config-version.cmake
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+
+# This is a basic version file for the Config-mode of find_package().
+#
+# This file sets PACKAGE_VERSION_EXACT if the current version string and
+# the requested version string are exactly the same and it sets
+# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version.
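+#
+# For example, with the PACKAGE_VERSION of 1.3.0 set below, a consumer's
+# find_package() calls would resolve as follows (the requested versions here
+# are illustrative):
+#
+#   find_package(ucc 1.2 CONFIG)    # compatible: 1.3.0 >= 1.2
+#   find_package(ucc 1.3.0 CONFIG)  # compatible, and PACKAGE_VERSION_EXACT is set
+#   find_package(ucc 1.4 CONFIG)    # incompatible: 1.3.0 < 1.4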
+ +set(PACKAGE_VERSION 1.3.0) + +if (PACKAGE_FIND_VERSION_RANGE) + # Package version must be in the requested version range + if ((PACKAGE_FIND_VERSION_RANGE_MIN STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MIN) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_GREATER PACKAGE_FIND_VERSION_MAX) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_GREATER_EQUAL PACKAGE_FIND_VERSION_MAX)) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + endif() +else() + if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + else() + set(PACKAGE_VERSION_COMPATIBLE TRUE) + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() + endif() +endif() diff --git a/continuous_integration/scripts/ucc-cmake-config/ucc-config.cmake b/continuous_integration/scripts/ucc-cmake-config/ucc-config.cmake new file mode 100644 index 0000000000..8fb305bc4b --- /dev/null +++ b/continuous_integration/scripts/ucc-cmake-config/ucc-config.cmake @@ -0,0 +1,8 @@ +# +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# + +include("${CMAKE_CURRENT_LIST_DIR}/ucc-targets.cmake") + +set(UCC_LIBRARIES "${exec_prefix}/lib") +set(UCC_INCLUDE_DIRS "${prefix}/include") diff --git a/continuous_integration/scripts/ucc-cmake-config/ucc-targets.cmake b/continuous_integration/scripts/ucc-cmake-config/ucc-targets.cmake new file mode 100644 index 0000000000..2fd55da72b --- /dev/null +++ b/continuous_integration/scripts/ucc-cmake-config/ucc-targets.cmake @@ -0,0 +1,13 @@ +# +# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# + +set(prefix ${CMAKE_CURRENT_LIST_DIR}/../../..) +set(exec_prefix "${prefix}") + +add_library(ucc::ucc SHARED IMPORTED) + +set_target_properties(ucc::ucc PROPERTIES + IMPORTED_LOCATION "${exec_prefix}/lib/libucc.so" + INTERFACE_INCLUDE_DIRECTORIES "${prefix}/include" +) diff --git a/continuous_integration/scripts/vault-s3-init b/continuous_integration/scripts/vault-s3-init deleted file mode 100755 index 179adc3cf4..0000000000 --- a/continuous_integration/scripts/vault-s3-init +++ /dev/null @@ -1,109 +0,0 @@ -#! /usr/bin/env bash - -set -xeuo pipefail; - -get_vault_token() { - set -eo pipefail - local VAULT_HOST="$1"; - local user_org="$2"; - local gh_token="$3"; - - local vault_token=null; - - vault_token="$( \ - curl -s \ - -X POST \ - -H "Content-Type: application/json" \ - -d "{\"token\": \"$gh_token\"}" \ - "$VAULT_HOST/v1/auth/github-${user_org}/login" \ - | jq -r '.auth.client_token' \ - )"; - - echo "vault_token='$vault_token'"; -} - -vault_s3_init() { - set -eo pipefail - # Attempt to retrieve temporary AWS credentials from a vault - # instance using GitHub OAuth. - - eval "export $(find $REPO_DIR/.creds -type f -exec bash -c 'echo $(basename $0)=$(<$0)' {} \;)"; - - if [[ -z "${VAULT_HOST:-}" ]]; then return; fi - if [[ -z "${SCCACHE_BUCKET:-}" ]]; then return; fi - if [[ -z "${GH_TOKEN:-}" ]]; then return; fi - - echo "" - echo "Attempting to use your GitHub account to authenticate"; - echo "with vault at '${VAULT_HOST}'."; - echo "" - - local vault_token=null; - local user_orgs=nv-legate; - - # Attempt to authenticate with GitHub - eval "$(get_vault_token "${VAULT_HOST}" ${user_orgs} $GH_TOKEN)"; - - if [[ "${vault_token:-null}" == null ]]; then - echo "Your GitHub user was not recognized by vault. Exiting." 
>&2;
-        return;
-    fi
-
-    echo "Successfully authenticated with vault!";
-
-    local ttl="${VAULT_S3_TTL:-"43200s"}";
-    local uri="${VAULT_S3_URI:-"v1/aws/creds/devs"}";
-
-    # Generate temporary AWS creds
-    local aws_creds="$( \
-        curl -s \
-            -X GET \
-            -H "X-Vault-Token: $vault_token" \
-            -H "Content-Type: application/json" \
-            "${VAULT_HOST}/$uri?ttl=$ttl" \
-        | jq -r '.data' \
-    )";
-
-    export AWS_ACCESS_KEY_ID="$(echo "$aws_creds" | jq -r '.access_key')";
-    export AWS_SECRET_ACCESS_KEY="$(echo "$aws_creds" | jq -r '.secret_key')";
-
-    if [[ "${AWS_ACCESS_KEY_ID:-null}" == null ]]; then
-        echo "Failed to generate temporary AWS S3 credentials. Exiting." >&2;
-        return;
-    fi
-
-    if [[ "${AWS_SECRET_ACCESS_KEY:-null}" == null ]]; then
-        echo "Failed to generate temporary AWS S3 credentials. Exiting." >&2;
-        return;
-    fi
-
-    # Generate AWS config files
-    mkdir -p ~/.aws;
-
-    echo "$(date '+%s')" > ~/.aws/stamp;
-
-    cat <<EOF > ~/.aws/config
-[default]
-${SCCACHE_BUCKET:+bucket=$SCCACHE_BUCKET}
-${SCCACHE_REGION:+region=$SCCACHE_REGION}
-EOF
-
-    cat <<EOF > ~/.aws/credentials
-[default]
-aws_access_key_id=$AWS_ACCESS_KEY_ID
-aws_secret_access_key=$AWS_SECRET_ACCESS_KEY
-EOF
-
-    chmod 0600 ~/.aws/{config,credentials};
-
-    echo "Successfully generated temporary AWS S3 credentials!";
-
-    # Stop server and reset sccache stats.
-    sccache --stop-server || true
-
-    # Wait for AWS credentials to propagate
-    sleep 10
-    sccache --show-stats
-}
-
-(vault_s3_init "$@");
\ No newline at end of file
diff --git a/docs/legate/CMakeLists.txt b/docs/legate/CMakeLists.txt
new file mode 100644
index 0000000000..071c1f2f1d
--- /dev/null
+++ b/docs/legate/CMakeLists.txt
@@ -0,0 +1,84 @@
+#=============================================================================
+# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#=============================================================================
+
+# cmake-format: off
+list(APPEND CMAKE_MODULE_PATH
+     "${LEGATE_CMAKE_DIR}/thirdparty/sphinx" # codespell:ignore thirdparty
+)
+# cmake-format: on
+
+list(APPEND CMAKE_MESSAGE_CONTEXT "docs")
+
+find_package(Doxygen REQUIRED)
+find_package(Sphinx REQUIRED)
+
+# Only set "dynamic" settings here. Any settings which would have a hard-coded value
+# should just be directly set in the Doxyfile.in
+set(DOXYGEN_INPUT_DIR "${legate_LOCAL_INCLUDE_DIR}")
+set(DOXYGEN_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/doxygen")
+set(DOXYGEN_INDEX_FILE "${DOXYGEN_OUTPUT_DIR}/xml/index.xml")
+set(DOXYGEN_STRIP_FROM_INC_PATH "${LEGATE_DIR}/src/cpp")
+set(DOXYGEN_EXAMPLE_PATH "\"${LEGATE_DIR}/tests/cpp\" \"${LEGATE_DIR}/src/cpp\"")
+
+set(DOXYFILE_IN "${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in")
+set(DOXYFILE_OUT "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile")
+
+configure_file("${DOXYFILE_IN}" "${DOXYFILE_OUT}" @ONLY)
+
+file(MAKE_DIRECTORY "${DOXYGEN_OUTPUT_DIR}") # Doxygen won't create this for us
+
+add_custom_command(OUTPUT "${DOXYGEN_INDEX_FILE}" DEPENDS "${DOXYFILE_OUT}"
+                   COMMAND "${DOXYGEN_EXECUTABLE}" "${DOXYFILE_OUT}"
+                   COMMENT "Generating doxygen output")
+
+add_custom_target(Doxygen DEPENDS "${DOXYGEN_INDEX_FILE}" COMMENT "Running doxygen")
+
+set(SPHINX_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/source")
+set(SPHINX_BUILD "${CMAKE_CURRENT_BINARY_DIR}/sphinx")
+
+file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/switcher.json"
+     DESTINATION "${SPHINX_BUILD}/legate")
+
+if(NOT legate_LICENSE_FILE)
+  message(FATAL_ERROR "Don't know where Legate license file is. 
" + "Expected cmake variable legate_LICENSE_FILE to contain it's location, but that variable is empty. " + "Presumably somebody changed the name?") +endif() + +# Need to make this every time because git does not allow you to check in empty +# directories +file(MAKE_DIRECTORY "${SPHINX_SOURCE}/generated") + +# cmake-format: off +# Need to do this symlink because the .rst files have no way to know where the build +# directory is. I tried using rst_prolog (while setting an environment variable +# LEGATE_SPHINX_BUILD): +# +# rst_prolog=""" +# .. |BUILD_DIR| replace:: {os.environ["LEGATE_SPHINX_BUILD"]} +# """ +# +# and then: +# +# .. include:: |BUILD_DIR|/path/to/license.txt +# +# But this didn't work because you can't do replacements inside other directives in rst +# apparently. The only way I found that works is just symlinking it somewhere into the +# source directory then... +# cmake-format: on +add_custom_target(symlink_license_file + COMMAND ${CMAKE_COMMAND} -E create_symlink "${legate_LICENSE_FILE}" + "${SPHINX_SOURCE}/generated/licenses.txt" + BYPRODUCTS "${SPHINX_SOURCE}/generated/licenses.txt" + COMMENT "Symlinking licenses.txt -> ${SPHINX_SOURCE}/generated") +add_dependencies(symlink_license_file legate_write_licenses) + +add_custom_target(Sphinx + COMMAND "${SPHINX_EXECUTABLE}" -b html + -Dbreathe_projects.legate="${DOXYGEN_OUTPUT_DIR}/xml" -W + "${SPHINX_SOURCE}" "${SPHINX_BUILD}/legate/latest" + WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" + COMMENT "Generating documentation with Sphinx") +add_dependencies(Sphinx symlink_license_file) diff --git a/docs/legate/Doxyfile.in b/docs/legate/Doxyfile.in new file mode 100644 index 0000000000..b08d5d20da --- /dev/null +++ b/docs/legate/Doxyfile.in @@ -0,0 +1,2863 @@ +# Doxyfile 1.10.0 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). +# +# Note: +# +# Use doxygen to compare the used configuration file with the template +# configuration file: +# doxygen -x [configFile] +# Use doxygen to compare the used configuration file with the template +# configuration file without replacing the environment variables or CMake type +# replacement variables: +# doxygen -x_noenv [configFile] + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. 
This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = Legate + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = @PROJECT_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = @PROJECT_DESCRIPTION@ + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# With the PROJECT_ICON tag one can specify an icon that is included in the tabs +# when the HTML document is shown. Doxygen will copy the logo to the output +# directory. + +PROJECT_ICON = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = "@DOXYGEN_OUTPUT_DIR@" + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 +# sub-directories (in 2 levels) under the output directory of each output format +# and will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to +# control the number of sub-directories. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# Controls the number of sub-directories that will be created when +# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every +# level increment doubles the number of directories, resulting in 4096 +# directories at level 8 which is the default and also the maximum value. The +# sub-directories are organized in 2 levels, the first level always has a fixed +# number of 16 directories. +# Minimum value: 0, maximum value: 8, default value: 8. +# This tag requires that the tag CREATE_SUBDIRS is set to YES. + +CREATE_SUBDIRS_LEVEL = 8 + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, +# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English +# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, +# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with +# English messages), Korean, Korean-en (Korean with English messages), Latvian, +# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, +# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, +# Swedish, Turkish, Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. 
+ +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = @DOXYGEN_STRIP_FROM_INC_PATH@ + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. 
An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:^^" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. 
+ +EXTENSION_MAPPING = cu=C++ cuh=C++ + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 5 + +# The MARKDOWN_ID_STYLE tag can be used to specify the algorithm used to +# generate identifiers for the Markdown headings. Note: Every identifier is +# unique. +# Possible values are: DOXYGEN use a fixed 'autotoc_md' string followed by a +# sequence number starting at 0 and GITHUB use the lower case version of title +# with any whitespace replaced by '-' and punctuation characters removed. +# The default value is: DOXYGEN. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +MARKDOWN_ID_STYLE = DOXYGEN + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = YES + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. 
+ +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use +# during processing. 
When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +# If the TIMESTAMP tag is set different from NO then each generated page will +# contain the date or date and time when the page was generated. Setting this to +# NO can help when comparing the output of multiple runs. +# Possible values are: YES, NO, DATETIME and DATE. +# The default value is: NO. + +TIMESTAMP = NO + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. 
By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# will also hide undocumented C++ concepts if enabled. This option has no effect +# if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# Possible values are: SYSTEM, NO and YES. +# The default value is: SYSTEM. + +CASE_SENSE_NAMES = SYSTEM + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. 
+# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = NO + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. 
This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if <section_label> ... \endif and \cond <section_label> +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using the \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output is used as +# the file version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. See also section "Changing the +# layout of pages" for information. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions.
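The special-list and conditional-section machinery configured above is driven by commands in ordinary doc comments; a small hypothetical sketch (invented names):

  /**
   * @brief Legacy entry point.
   *
   * \deprecated Collected on the deprecated page (GENERATE_DEPRECATEDLIST = YES).
   * \todo Remove in the next major release (GENERATE_TODOLIST = YES).
   * \bug Ignores the process locale (GENERATE_BUGLIST = YES).
   */
  void old_api();

  /// \if internal_section
  /// Emitted only when ENABLED_SECTIONS lists internal_section; with the
  /// empty ENABLED_SECTIONS above, this paragraph is skipped.
  /// \endif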
This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as documenting some parameters in +# a documented function twice, or documenting parameters that don't exist or +# using markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, doxygen will warn about +# undocumented enumeration values. If set to NO, doxygen will accept +# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: NO. + +WARN_IF_UNDOC_ENUM_VAL = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. 
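Concretely, the WARN_IF_* options above key off doc comments like the following (a hypothetical function):

  /**
   * @brief Scales a value by a constant factor.
   *
   * @param value  The value to scale.
   * @param factor The multiplicative factor.
   *
   * @return The scaled value.
   */
  double scale(double value, double factor);

  // Documenting a parameter that does not exist (say "@param factr")
  // triggers WARN_IF_DOC_ERROR; leaving "@param factor" out triggers
  // WARN_IF_INCOMPLETE_DOC. With WARN_AS_ERROR = FAIL_ON_WARNINGS
  // (configured just below), either mistake fails the documentation build.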
+# If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS_PRINT then doxygen behaves +# like FAIL_ON_WARNINGS, except for where the warnings are written: if no +# WARN_LOGFILE is defined, doxygen will not write the warning messages in +# between other messages but will write them at the end of the run; if a +# WARN_LOGFILE is defined, the warning messages will, besides being written to +# the defined file, also be shown at the end of the run, unless the WARN_LOGFILE +# is defined as - (i.e. standard output (stdout)), in which case the behavior is +# the same as with the setting FAIL_ON_WARNINGS. +# Possible values are: NO, YES, FAIL_ON_WARNINGS and FAIL_ON_WARNINGS_PRINT. +# The default value is: NO. + +WARN_AS_ERROR = FAIL_ON_WARNINGS + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER). +# See also: WARN_LINE_FORMAT +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# In the $text part of the WARN_FORMAT command it is possible that a reference +# to a more specific place is given. To make it easier to jump to this place +# (outside of doxygen) the user can define a custom "cut" / "paste" string. +# Example: +# WARN_LINE_FORMAT = "'vi $file +$line'" +# See also: WARN_FORMAT +# The default value is: at line $line of file $file. + +WARN_LINE_FORMAT = "at line $line of file $file" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. When the file - is +# specified the warning and error messages are written to standard output +# (stdout). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = "@DOXYGEN_INPUT_DIR@" + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# See also: INPUT_FILE_ENCODING +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# The INPUT_FILE_ENCODING tag can be used to specify the character encoding of +# the source files that doxygen parses on a per file pattern basis. Doxygen will +# compare the file name with each pattern and apply the encoding (instead of the +# default INPUT_ENCODING) if there is a match. The character encodings are a +# list of the form: pattern=encoding (like *.php=ISO-8859-1).
See INPUT_ENCODING +# for further information on supported encodings. + +INPUT_FILE_ENCODING = + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cxxm, +# *.cpp, *.cppm, *.ccm, *.c++, *.c++m, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, +# *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, *.h++, *.ixx, *.l, *.cs, *.d, +# *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to +# be provided as doxygen C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f18, *.f, *.for, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cxxm \ + *.cpp \ + *.cppm \ + *.ccm \ + *.c++ \ + *.c++m \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.cuh \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.ixx \ + *.l \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f18 \ + *.f \ + *.for \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.ice + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# ANamespace::AClass, ANamespace::*Test + +EXCLUDE_SYMBOLS = detail + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = @DOXYGEN_EXAMPLE_PATH@ + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories.
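The practical effect of EXCLUDE_SYMBOLS = detail above is that any symbol scoped inside a namespace named detail vanishes from the output, doc comment or not. A hypothetical sketch (the function names are invented):

  namespace legate {

  /**
   * @brief Public API; documented as usual.
   */
  void launch();

  namespace detail {

  /**
   * @brief Never shown: EXCLUDE_SYMBOLS = detail filters this scope out.
   */
  void launch_impl();

  }  // namespace detail

  }  // namespace legate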
If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that doxygen will use the data processed and written to standard output +# for further processing, therefore nothing else, like debug statements or used +# commands (so in case of a Windows batch file always use @echo OFF), should be +# written to standard output. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on, for instance, +# GitHub and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +# The Fortran standard specifies that for fixed formatted Fortran code all +# characters from position 72 are to be considered as comment.
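Because the filter contract above is strict (one output line per input line, result on standard output only), a filter is easiest to keep correct when it rewrites lines in place. A minimal line-preserving sketch in C++; the macro name and the INPUT_FILTER path are hypothetical:

  #include <fstream>
  #include <iostream>
  #include <regex>
  #include <string>

  // Hypothetical usage: INPUT_FILTER = /path/to/strip_export_macro
  int main(int argc, char** argv) {
    if (argc < 2) {
      return 1;  // doxygen passes the input file as the sole argument
    }
    std::ifstream in{argv[1]};
    const std::regex macro{R"(MY_EXPORT_API\s*)"};  // hypothetical macro
    std::string line;
    while (std::getline(in, line)) {
      // Exactly one output line per input line, so anchors stay correct.
      std::cout << std::regex_replace(line, macro, "") << '\n';
    }
  }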
A common +# extension is to allow longer lines before the automatic comment starts. The +# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can +# be processed before the automatic comment starts. +# Minimum value: 7, maximum value: 10000, default value: 72. + +FORTRAN_COMMENT_AFTER = 72 + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# multi-line macros, enums or list initialized variables directly into the +# documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. 
+ +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes) +# that should be ignored while generating the index headers. The IGNORE_PREFIX +# tag works for classes, function and member names. The entity will be placed in +# the alphabetical list under the first letter of the entity name that remains +# after removing the prefix. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output. +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# For the generated HTML to be valid, the header file must include any scripts +# and style sheets that doxygen needs, which depend on the configuration options +# used (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start +# with a default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer.
See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). +# Note: Since the styling of scrollbars can currently not be overruled in +# Webkit/Chromium, the styling will be left out of the default doxygen.css if +# one or more extra stylesheets have been specified. So if scrollbar +# customization is desired it has to be added explicitly. For an example see the +# documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output +# should be rendered with a dark or light theme. +# Possible values are: LIGHT always generate light mode output, DARK always +# generate dark mode output, AUTO_LIGHT automatically set the mode according to +# the user preference, use light mode if no preference is set (the default), +# AUTO_DARK automatically set the mode according to the user preference, use +# dark mode if no preference is set and TOGGLE allows the user to switch between +# light and dark mode via a button. +# The default value is: AUTO_LIGHT. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE = AUTO_LIGHT + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a color-wheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use gray-scales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8. The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consist of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# If the HTML_CODE_FOLDING tag is set to YES then classes and functions can be +# dynamically folded and expanded in the generated HTML source code. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_CODE_FOLDING = YES + +# If the HTML_COPY_CLIPBOARD tag is set to YES then doxygen will show an icon in +# the top right corner of code and text fragments that allows the user to copy +# its content to the clipboard. Note this only works if supported by the browser +# and the web page is served via a secure context (see: +# https://www.w3.org/TR/secure-contexts/), i.e. using the https: or file: +# protocol. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COPY_CLIPBOARD = YES + +# Doxygen stores a couple of settings persistently in the browser (via e.g. +# cookies). By default these settings apply to all HTML pages generated by +# doxygen across all projects. The HTML_PROJECT_COOKIE tag can be used to store +# the settings under a project specific key, such that the user preferences will +# be stored separately. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_PROJECT_COOKIE = + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount).
So setting the number of +# entries to 1 will produce a fully collapsed tree by default; 0 is a special +# value representing an infinite number of entries and will result in a fully +# expanded tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# on Windows. In early 2021 Microsoft took the original page, including the +# download links, offline (the HTML help workshop was already many years +# in maintenance mode). You can download the HTML help workshop from the web +# archives at Installation executable (see: +# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo +# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
+# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compressed HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls whether a separate .chi index file is generated +# (YES) or whether it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# The SITEMAP_URL tag is used to specify the full URL of the place where the +# generated documentation will be placed on the server by the user during the +# deployment of the documentation. The generated sitemap is called sitemap.xml +# and placed on the directory specified by HTML_OUTPUT. In case no SITEMAP_URL +# is specified no sitemap is generated. For information about the sitemap +# protocol see https://www.sitemaps.org +# This tag requires that the tag GENERATE_HTML is set to YES. + +SITEMAP_URL = + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES.
+ +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated; together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files need +# to be copied into the plugins directory of Eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying, Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at the +# top of each HTML page. A value of NO enables the index and the value YES +# disables it.
Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine tune the look of the index (see "Fine-tuning the output"). As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. +# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. 
+# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want formulas to look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = YES + +# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. +# Note that the different versions of MathJax have different requirements with +# regards to the different settings, so it is possible that also other MathJax +# settings have to be changed when switching between the different MathJax +# versions. +# Possible values are: MathJax_2 and MathJax_3. +# The default value is: MathJax_2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_VERSION = MathJax_3 + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. For more details about the output format see MathJax +# version 2 (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 +# (see: +# http://docs.mathjax.org/en/latest/web/components/output.html). +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility. This is the name for Mathjax version 2, for MathJax version 3 +# this will be translated into chtml), NativeMML (i.e. MathML. Only supported +# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This +# is the name for Mathjax version 3, for MathJax version 2 this will be +# translated into HTML-CSS) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment.
The default value is: +# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 +# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# for MathJax version 2 (see +# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# For example for MathJax version 3 (see +# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): +# MATHJAX_EXTENSIONS = ams +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/<option> or both)
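Finally, since USE_MATHJAX = YES is enabled above, LaTeX in doc comments is rendered client-side in the HTML output; a hypothetical sketch of the inline (\f$) and display (\f[) forms:

  #include <vector>

  /**
   * @brief Computes the Euclidean norm of a vector.
   *
   * For a vector \f$x\f$ of length \f$n\f$ this returns
   * \f[
   *   \|x\|_2 = \sqrt{\sum_{i=1}^{n} x_i^2}
   * \f]
   * which MathJax renders in the browser instead of a pre-rendered bitmap.
   */
  double euclidean_norm(const std::vector<double>& x);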