From 3d4e5464c1d3fef4f87226676176ea90ddeb9b1d Mon Sep 17 00:00:00 2001 From: Kaelyn Ferris <43348706+kaelynj@users.noreply.github.com> Date: Mon, 22 Sep 2025 11:22:33 -0400 Subject: [PATCH 1/9] Add draft pages of slurm plugin --- docs/guides/_toc.json | 16 ++- docs/guides/slurm-hpc-ux.mdx | 213 +++++++++++++++++++++++++++++++++++ docs/guides/slurm-plugin.mdx | 151 +++++++++++++++++++++++++ 3 files changed, 379 insertions(+), 1 deletion(-) create mode 100644 docs/guides/slurm-hpc-ux.mdx create mode 100644 docs/guides/slurm-plugin.mdx diff --git a/docs/guides/_toc.json b/docs/guides/_toc.json index 57e29a70ca0..78daa2b8140 100644 --- a/docs/guides/_toc.json +++ b/docs/guides/_toc.json @@ -127,7 +127,6 @@ "title": "Set up custom roles", "url": "/docs/guides/custom-roles" } - ] }, { @@ -613,6 +612,21 @@ } ] }, + { + "title": "High-Performance Compute", + "childre": [ + { + "title": "Spank Plugins for Slurm", + "url": "/docs/guides/slurm-plugin", + "isNew": "true" + }, + { + "title": "HPX User Experience", + "url": "/docs/guides/slurm-hpc-ux", + "isNew": "true" + } + ] + }, { "title": "Visualization", "children": [ diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx new file mode 100644 index 00000000000..5e8f0b2ec12 --- /dev/null +++ b/docs/guides/slurm-hpc-ux.mdx @@ -0,0 +1,213 @@ +HPC user experience, HPC developer experience and usage patterns +================================================================ + +## Content + +- [Principles](#principles) +- [Connecting physical resources to Slurm resoures and how to use them](#connecting-physical-resources-to-slurm-resources-and-how-to-use-them) + - [HPC admin scope](#hpc-admin-scope) + - [HPC user scope](#hpc-user-scope) + - [HPC application scope](#hpc-application-scope) + - [Backend specifics](#backend-specifics) + - [IBM Direct Access API](#ibm-direct-access-api) + - [Qiskit Runtime Service](#qiskit-runtime-service) +- [Examples](#examples) + - [Running jobs with 
dependencies](#running-jobs-with-dependencies) + - [Running a job with several Slurm QPU resources](#running-a-job-with-several-slurm-qpu-resources) + - [Running primitives directly](#running-primitives-directly) + - [Other workflow tools](#other-workflow-tools) + +See [Overview](./overview.md) for a glossary of terms. + +## Principles + +Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs. +User source code should be agnostic to specific backend instances and even backend types as far as possible. +This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration as opposed to source code). +The source code does not have to take care resp. is not involved in resource reservation handling (that is done when Slurm jobs are assigned QPU resources and start running, if applicable on the backend) or execution modes like sessions (these are automatically in place while the job is running, if applicable on the backend). +This makes the source code more portable between similar QPU resource types through different backend access methods (such as IBM's Direct Access API and IBM's Qiskit Runtime service through IBM Quantum Platform). +All backend types (such as IBM's Direct Access API, IBM's Qiskit Runtime service, or Pasqal's backends) follow these principles. + +## Connecting physical resources to Slurm resources and how to use them + +Note the exact syntax is subject to change -- this is a sketch of the UX at this time. + +### HPC admin scope + +HPC administrators configure the SPANK plugin, what physical resources can be provided to Slurm jobs. +This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints, and access credentials -- note some parts of the configuration such as credentials can be sensitive information. 
+ +See the file [qrmi_config.json.example](../plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example showing. + +In `slurm.conf`, qpu generic resources can be assigned to some or all nodes for usage: +``` +... +GresTypes=qpu,name +NodeName=node[1-5000] Gres=qpu,name:ibm_fez +... +``` + +### HPC user scope + +HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. +The name attribute references what the HPC administrator has defined. +Mid-term, backend selection can be based on criteria other than a predefined name which refers to a specific backend (e.g. by capacity and error rate qualifiers which help downselect between the defined set of backends). + +There might be additional environment variables required, depending on the backend type. + +SBATCH parameters will point to one or more QPU resource assigned to the application as generic resources. +Environment variables provided through the plugin will provide the necessary information to the application (see the [HPC application scope](#hpc-application-scope) section for details). + +```shell +#SBATCH --time=100 +#SBATCH --output= +#SBATCH --gres=qpu:1 +#SBATCH --qpu=ibm_fez +#SBATCH --... # other options + +srun ... +``` + +To use more QPU resources, add more QPUs to the `--qpu` parameter: + +```shell +#SBATCH --time=100 +#SBATCH --output= +#SBATCH --gres=qpu:3 +#SBATCH --qpu=my_local_qpu,ibm_fez,ibm_marrakesh +#SBATCH --... # other options + +srun ... +``` + +### HPC application scope + +HPC applications use the Slurm QPU resources assigned to the Slurm job. + +Environment variables provide more details for use by the appliction, e.g. `SLURM_JOB_QPU_RESOURCES` listing the quantum resource names (comma separated if there are several provided). +These variables will be used by QRMI. 
+See the README files in the various QRMI flavor directories ([ibm](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/ibm/README.md), [pasqal](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/pasqal/README.md)) for details. + +```python +from qiskit import QuantumCircuit +# using an IBM QRMI flavor: +from qrmi.primitives import QRMIService +from qrmi.primitives.ibm import SamplerV2, get_target + +# define circuit + +circuit = QuantumCircuit(2) +circuit.h(0) +circuit.cx(0, 1) +circuit.measure_all() + +# instantiate QRMI service and get quantum resource (we'll take the first one should there be serveral of them) +# inject credentials needed for accessing the service at this point +load_dotenv() +service = QRMIService() + +resources = service.resources() +qrmi = resources[0] + +# Generate transpiler target from backend configuration & properties and transpile +target = get_target(qrmi) +pm = generate_preset_pass_manager( + optimization_level=1, + target=target, +) + +isa_circuit = pm.run(circuit) + +# run the circuit +options = {} +sampler = SamplerV2(qrmi, options=options) + +job = sampler.run([(isa_circuit, isa_observable, param_values)]) +print(f">>> Job ID: {job.job_id()}") + +result = job.result() +print(f">>> {result}") +``` + +See [examples directory](https://github.com/qiskit-community/qrmi/tree/main/examples/qiskit_primitives/) for example files. + +### Backend specifics +#### IBM Direct Access API +##### HPC admin scope +Configuration of Direct Access API backends (HPC admin scope) includes endpoints and credentials to the Direct Access endpoint, authentication services as well as the S3 endpoint. +Specifically, this includes: + +* IBM Cloud API key for creating bearer tokens +* endpoint of Direct Access API +* S3 bucket and access details + +Access credentials should not be visible to HPC users or other non-privileged users on the system. 
+Therefore, sensitive data can be put in separate files which can be access protected accordingly. + +Note that Slurm has got full access to the backend. +This has several implications: + +* the Slurm plugin is responsible for multi-tenancy (ensuring that users don't see results of other users' jobs) +* vetting of users (who is allowed to access the QPU) and ensuring according access is up to the HPC cluster side +* the capacity and priority of the QPU usage is solely managed through Slurm; there is not other scheduling of users involved outside of Slurm + +##### HPC user scope +Execution lanes are not exposed to the HPC administrator or user directly. +Instead, mid term, there can be two different modes that HPC users can specify: + +* `exclusive=true` specifies that no other jobs can use the resource at the same time. An exclusive mode job gets all execution lanes and can not run at the same time as a non-exclusive job +* `exclusive=false` allows other jobs to run in parallel. In that case, there can be as many jobs as there are execution lanes at the same time, and the job essentially only gets one lane + +#### Qiskit Runtime Service +##### HPC user scope + +It is expected, that users specify additional access details in environment variables. +Specifically, this includes + +* Qiskit Runtime service instance (CRN, Cloud Resource Name) +* Endpoint for Qiskit Runtime (unless auto-detected from the CRN) +* API key which has access to the CRN +* S3 instance, bucket and access token/credentials for data transfers + +This determines under which user and service instance the Qiskit Runtime service is used +Accordingly, IBM Quantum Platform's scheduling considers the user's and service instance's capabilities for scheduling. + +At this time, users have to provide the above details (no shared cluster-wide Quantum access). + +#### Pasqal + +#### Pasqal Cloud Services +##### HPC admin scope +There is no specific set-up required from HPC admins for PCS usage. 
+ +##### HPC user scope +It is expected, that users specify additional access details in environment variables. +Specifically, this currently includes + +* PCS resource to target (FRESNEL, EMU_FRESNEL, EMU_MPS) +* Authorization token + +#### Pasqal on-prem devices +TBD. + +## Examples + +### Running jobs with dependencies + +FIXME: show example with 1 classical job => 1 quantum job (python pseudo code)=> 1 classical job. +Main topic: show dependencies + +### Running a job with several Slurm QPU resources + +FIXME: show example (quantum only, python, is good enough) where several backends are defined, referenced and used +Main topic: show how ids play an important role in that case + +### Running primitives directly + +FIXME: show example of qrun -- same SBATCH, but different executable. +Main topic: present qrun as an option +FIXME: define/finalize qrun at some time (parameters etc) + +### Other workflow tools + +FIXME: show how other workflow tooling could play into that diff --git a/docs/guides/slurm-plugin.mdx b/docs/guides/slurm-plugin.mdx new file mode 100644 index 00000000000..82eb0f1de35 --- /dev/null +++ b/docs/guides/slurm-plugin.mdx @@ -0,0 +1,151 @@ +Spank plugins for Slurm to support quantum resources +==================================================== + +## Content + +- [Context](#content) +- [Definitions](#definitions) + - [QPU](#qpu) + - [Quantum computer](#quantum-computer) + - [Spank plugins](#spank-plugins) + - [Spank quantum plugin](#spank-quantum-plugin) + - [Qiskit primitives (Sampler and Estimator)](#qiskit-primitives-sampler-and-estimator) +- [Vendor-Specific Context: IBM](#vendor-specific-context-ibm) +- [Vendor-Specific Definitions: IBM](#vendor-specific-definitions-ibm) + - [IBM Quantum Platform](#ibm-quantum-platform) + - [Direct Access API](#direct-access-api) +- [High Level Structure](#high-level-structure) +- [Quantum resource for workload management systems](#quantum-resource-for-workload-management-system) +- [Quantum resource 
API](#quantum-resource-api) +- [Integration Flow](#integration-flow) +- [High Level Flow of Quantum Plugin](#high-level-flow-of-quantum-plugin) +- [General architecture](#general-architecture-of-plugin) +- [Architectural Tenets](#architectural-tenents) + +See [UX](./ux.md) for HPC user experience, HPC developer experience and usage patterns. + +## Context + +Overview of involved components, personas and backend service options: +![context diagram](./images/context_diagram.png) + +## Definitions + +### QPU +A `QPU` includes all of the hardware responsible for accepting an executable quantum instruction set, or a quantum circuit, and returning an accurate answer. That means the QPU includes the quantum chip(s) in a superconducting quantum computer, as well as additional components such as the amplifiers, control electronics, instruments. + +### Quantum Computer +A `Quantum Computer` is comprised of the QPU and the classical compute needed to execute requests coming in through an API (its endpoint). + +### Spank plugins +`SPANK` provides a very generic interface for stackable plug-ins which may be used to dynamically modify the job launch code in Slurm. +https://slurm.schedmd.com/spank.html + +### Spank quantum plugin +A plugin in Slurm that manages the operation of quantum jobs in Slurm. It handles Slurm resources related to quantum and is configured so that jobs can execute on Quantum Computers. + +### Qiskit primitives (Sampler and Estimator) +The two most common tasks for quantum computers are sampling quantum states and calculating expectation values. These tasks motivated the design of the Qiskit primitives: `Estimator` and `Sampler`. + +- Estimator computes expectation values of observables with respect to states prepared by quantum circuits. +- Sampler samples the output register from quantum circuit execution. 
+ +In short, the computational model introduced by the Qiskit primitives moves quantum programming one step closer to where classical programming is today, where the focus is less on the hardware details and more on the results you are trying to achieve. + +## Vendor-Specific Context: IBM + +Extension of the context overview of involved components, personas and backend service options for IBM: +![context diagram IBM](./images/context_diagram_ibm.png) + +## Vendor-Specific Definitions: IBM + +### IBM Quantum Platform +Cloud-based quantum computing service providing access to IBM's fleet of quantum backends. Sometimes abbreviated as IQP. + +### Direct Access API +Local interface to an IBM Quantum Computer. Sometimes abbreviated as DA API. Below the Direct Access API, classical preparation of jobs prior to the actual quantum execution can run in parallel (called *lanes* in the API definition). + +## Vendor-Specific Definitions: Pasqal + +### Pasqal Cloud Service +Cloud-based quantum computing service providing access to Pasqal QPUs and emulators. Sometimes abbreviated as PCS. + +### Pulser +Pasqal's native programming library [GitHub](https://github.com/pasqal-io/pulser). Supported in the low-level interfaces such as the QRMI. + +## High Level Structure + +At large, there are three domains: +* HPC users, consuming slurm resources and using access to Quantum Computers through these resources +* HPC admins, configuring slurm and managing access and mapping to available Quantum Computers +* Quantum Computer providers, offering access to Quantum/QPU resources on Quantum Computers + +![High Level Structure](./images/high_level_structure.png) + +## Quantum resource for workload management system +General GRES (custom resource) for quantum computers is QPU. +All quantum resources will have an identity and map to a Quantum Computer's quantum resource (i.e. map to a QPU). + +Additional resource definition might be needed depending on implementation from hardware vendors.
Some vendors expose parallelism within a quantum computer as execution lanes, threads, parts of devices, etc. Therefore we define a quantum resource as an abstraction composed of a physical device and a parallelism notion. + +![resource definition](./images/resource_definition.png) + +The QPU resource definition does not expose individual parallelism abstracts. Each backend flavor can have specific qualifiers for how to use backend-specific capabilities (e.g. for common use case: if a user wants to exclusively use a backend, all parallel job preparation units will be available for use -- if not, several users could submit jobs and share these units. As execution lanes in DA API do not have any identities that could be managed explicitly, only quantities resp. exclusive/shared use should be user controlled). + +![resource mapping](./images/resource_mapping.png) + +## Quantum resource API +Any type of resource should implement the resource control interface. The flow of working with a resource follows the pattern: `acquire resource` → `execute` → `release resource`. Implementation of this interface might vary from platform to platform. + +![resource control api](./images/resource_control_api.png) + +## Integration Flow + +Similar to any Gres resource (GPU, AIU, etc), we treat QPU as gres and acquire it for the whole duration of the job. +Primitive calls will manage the data and call towards the Quantum Computer (for most cases through Slurm to govern user access to the slurm-level quantum resource and potentially inject Quantum Computer access credentials). + +![integration Flow](./images/integration_flow.png) + +This avoids drawbacks of other options, e.g. when the user application's primitive call will create other Slurm jobs that send primitive data towards the Quantum Computer. +Having this logic of sending data towards the Quantum Computer in qiskit-level code reduces complexity and latency, and avoids complexity in error handling.
+ +## High Level Flow of Quantum Plugin + +This is the high level flow from when Slurm jobs are started to how requests find their way to a Quantum Computer. +Requests refer to any interaction between application and Quantum Computer (e.g. including getting configuration information that is needed for transpilation). + +![High Level Flow -- png editable with draw.io, please keep it that way](./images/high-level-plugin-flow.png) + +## General architecture of plugin + +Quantum plugin will be using Spank architecture events sequence of call during job execution to inject necessary logic to work with quantum computer API. + +1. Job execution flow + 1. Prolog + 1. Handle secrets + 2. Acquire resource + 3. Create network middleware + 2. Task init + 1. Handle options + 2. Set env variables + 3. Epilog + 1. Remove middleware + 2. Release resource + +![general architecture](./images/general_architecture.png) + +## Architectural Tenents + +* A Slurm-level QPU resource maps to physical resource of a Quantum Computer + * Quantum backend selection is part of that Slurm resource definition and is considered configuration, not source code + * Qiskit-level code can refer to that Slurm resource and access/use the quantum resource behind it. Qiskit-level code should avoid naming the desired backend directly (=> it should be part of the Slurm resource definition instead) + * Slurm QPU resources have an identity (allowing to bind against it from qiskit) + * additional qualifiers of the Slurm QPU resource are backend type specific + * parallelism abstracts (such as execution lanes which are anonymous units to prepare jobs in parallel for quantum execution which is still serialized) are abstracted behind the Slurm QPU resource. 
Qualifiers may be used to deal with specifics (such as: are these lanes held exclusive for one user, or is there a shared access possible) +* Quantum resources are acquired/locked before usage + * as identification/selection of the quantum resource is through the Slurm resource, transpilation can only happen after that + * initially, transpilation will happen after acquiring the resource, which can lead to slightly lower QPU utilization, as other jobs may be locked out. This may (should!) be improved in a later phase and requires an extended concept (such as first step is define the resource, which may or may not result in actions, second step is lock for execution) -- more details required at a later time! +* Primitive calls will trigger submission towards the Quantum Computer + * Flow is triggered from the qiskit-level code, without scheduling additional intermediate Slurm jobs + * The network flow can go through the Slurm plugin, to govern user access or manage access to the Quantum Computer + * The network/data flow can be specific to a backend type (using intermediate storage to hold input/output data, or sending data in-line) From 647748b3ad3aa62c1e50af8b1ba8673d368cb933 Mon Sep 17 00:00:00 2001 From: Kaelyn Ferris <43348706+kaelynj@users.noreply.github.com> Date: Tue, 25 Nov 2025 16:30:19 -0500 Subject: [PATCH 2/9] Finish draft of slurm plugin docs --- docs/guides/_toc.json | 21 +- docs/guides/qrmi.mdx | 141 ++++++++++ docs/guides/slurm-hpc-ux.mdx | 98 ++----- docs/guides/slurm-plugin.mdx | 240 ++++++++++-------- qiskit_bot.yaml | 6 + scripts/config/cspell/dictionaries/qiskit.txt | 1 + scripts/js/commands/checkPatternsIndex.ts | 3 + 7 files changed, 329 insertions(+), 181 deletions(-) create mode 100644 docs/guides/qrmi.mdx diff --git a/docs/guides/_toc.json b/docs/guides/_toc.json index 4bb0124eeb7..e0d93c484e9 100644 --- a/docs/guides/_toc.json +++ b/docs/guides/_toc.json @@ -206,7 +206,6 @@ ], "collapsible": false }, - { "title": "Qiskit Functions",
"children": [ @@ -643,6 +642,26 @@ } ] }, + { + "title": "High-Performance Compute", + "children": [ + { + "title": "Quantum resource management interface (QRMI)", + "url": "/docs/guides/qrmi", + "isNew": true + }, + { + "title": "SPANK plugin for QRMI", + "url": "/docs/guides/slurm-plugin", + "isNew": true + }, + { + "title": "SPANK plugin user guide", + "url": "/docs/guides/slurm-hpc-ux", + "isNew": true + } + ] + }, { "title": "Visualization", "children": [ diff --git a/docs/guides/qrmi.mdx b/docs/guides/qrmi.mdx new file mode 100644 index 00000000000..0d76c1efcb2 --- /dev/null +++ b/docs/guides/qrmi.mdx @@ -0,0 +1,141 @@ +--- +title: Quantum resource management interface (QRMI) +description: Overview of the Quantum Resource Management Interface for integrating quantum resources to high-performance compute systems +--- +{/* cspell:ignore QRMI, stubgen, maturin, Doxyfile, rowser */} + +# Quantum resource management interface (QRMI) + +The Quantum resource management interface is a vendor-agnostic library for high-performance compute (HPC) systems to control state, run tasks, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. + +The source code to build and deploy this interface can be found in this [GitHub repository](https://github.com/qiskit-community/qrmi). + +There is also an optional `task_runner` command line tool to execute quantum payloads against quantum hardware which is included in the Python package. The full documentation can be found [here](https://github.com/qiskit-community/qrmi/blob/main/python/qrmi/tools/task_runner/README.md). + +## Build the QRMI libraries + +At its core, QRMI is a set of libraries to control state for a set of quantum resources.
Written in Rust with a C and Python API exposed for ease of integration to any compute infrastructure. + +This section shows how to build QRMI for C and Python. + +### Requirements + +QRMI supports the following operating systems: + +``` +AlmaLinux 9, Amazon Linux 2023, CentOS Stream 9, CentOS Stream 10, +RedHat Enterprise Linux 8, RedHat Enterprise Linux 9, +RedHat Enterprise Linux 10, Rocky Linux 8, Rocky Linux 9, SuSE 15, +Ubuntu 22.04, Ubuntu 24.04, MacOS Sequoia 15.1 or above +``` + +#### Compiling environment: +* Rust compiler 1.91 or above [Link](https://www.rust-lang.org/tools/install) +* A C compiler: for example, GCC (`gcc`) on Linux and Clang (`clang-tools-extra`) for Rust unknown targets/cross compilations. QRMI is compatible with a compiler conforming to the C11 standard. +* `make/cmake` (make/cmake RPM for RHEL compatible OS) +* `openssl` (openssl-devel RPM for RHEL compatible OS) +* `zlib` (zlib-devel RPM for RHEL compatible OS) +* Python 3.11, 3.12 or 3.13 (For Python API) + * Libraries and header files needed for Python development(python3.1x-devel RPM for RHEL compatible OS) + * /usr/include/python3.1x + * /usr/lib64/libpython3.1x.so +* Doxygen (for generating C API document), depending on the OS + * ```dnf install doxygen``` for Linux(RHEL/CentOS/Rocky Linux etc) + * ```apt install doxygen``` for Linux(Ubuntu etc.) + * ```brew install doxygen```for MacOS + +#### Runtime environment +* gcc (libgcc RPM for RHEL compatible OS) +* openssl (openssl-libs RPM for RHEL compatible OS) +* zlib (zlib RPM for RHEL compatible OS) +* Python 3.11, 3.12 or 3.13 (For Python API) + * Libraries and header files needed for Python development(python3.1x-devel RPM for RHEL compatible OS) + +--- + +Build the Rust/C API library with the following commands wherever you have saved the QRMI repository. +```shell-session +. 
~/.cargo/env +cargo clean +cargo build --release +``` + + + +Then to build the Python package, first set up a python environment and install the required dependencies. +```shell-session +. ~/.cargo/env +cargo clean +python3.12 -m venv ~/py312_qrmi_venv +source ~/py312_qrmi_venv/bin/activate +pip install --upgrade pip +pip install -r requirements-dev.txt +``` + +Create the stub files for the python code. +```shell-session +. ~/.cargo/env +cargo run --bin stubgen --features=pyo3 +``` + +And lastly, build the python wheels for distribution to your hosts. +```shell-session +source ~/py312_qrmi_venv/bin/activate +CARGO_TARGET_DIR=./target/release/maturin maturin build --release +``` + +The wheel is created in the `./target/release/maturin/wheels` directory. You can distribute and install on your hosts by `pip install `. + + + +## Logging + +QRMI supports [log crate](https://crates.io/crates/log) for logging. You can find the detailed QRMI runtime logs by specifying `RUST_LOG` environment variable with log level. Supported levels are `error`, `warn`, `info`, `debug` and `trace`. Default level is `warn`. + +If you specify `trace`, you can find underlying HTTP transaction logs. + + +```shell-session +RUST_LOG=trace +``` + +Example logs: +```shell-session +[2025-08-16T03:47:38Z DEBUG request::connect] starting new connection: https://iam.cloud.ibm.com/ +[2025-08-16T03:47:38Z DEBUG direct_access_api::middleware::auth] current token ... +``` + + +## Build the API documentation + +The Rust API documentation can be created by running +```shell-session +. ~/.cargo/env +cargo doc --no-deps --open +``` + +And the C API documentation can be created using doxygen +```shell-session +doxygen Doxyfile +``` + +This will create an HTML document under the `./html` directory which you can open in a web browser. + + +The Python API documentation is generated with `pydoc`. 
After entering the virtual environment with the QRMI packaged installed, run the following commands: +```shell-session +python -m pydoc -p 8290 +Server ready at http://localhost:8290/ +Server commands: [b]rowser, [q]uit +server> b +``` + +Then open the following page in your browser +```shell-session +http://localhost:8290/qrmi.html +``` + +and stop the server with +```shell-session +server> q +``` \ No newline at end of file diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx index 5e8f0b2ec12..52ceb8438a7 100644 --- a/docs/guides/slurm-hpc-ux.mdx +++ b/docs/guides/slurm-hpc-ux.mdx @@ -1,45 +1,27 @@ -HPC user experience, HPC developer experience and usage patterns -================================================================ +--- +title: SPANK plugin user guide +description: User guide for the quantum resource management SPANK plugin +--- +{/* cspell:ignore QRMI, SBATCH, srun, Pasqal, slurmd, Doxyfile, Gres */} -## Content +# SPANK plugin user guide -- [Principles](#principles) -- [Connecting physical resources to Slurm resoures and how to use them](#connecting-physical-resources-to-slurm-resources-and-how-to-use-them) - - [HPC admin scope](#hpc-admin-scope) - - [HPC user scope](#hpc-user-scope) - - [HPC application scope](#hpc-application-scope) - - [Backend specifics](#backend-specifics) - - [IBM Direct Access API](#ibm-direct-access-api) - - [Qiskit Runtime Service](#qiskit-runtime-service) -- [Examples](#examples) - - [Running jobs with dependencies](#running-jobs-with-dependencies) - - [Running a job with several Slurm QPU resources](#running-a-job-with-several-slurm-qpu-resources) - - [Running primitives directly](#running-primitives-directly) - - [Other workflow tools](#other-workflow-tools) +Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs in high-performance compute (HPC) environments. 
User source code should be agnostic to specific backend instances and even backend types as far as possible. This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration as opposed to source code). -See [Overview](./overview.md) for a glossary of terms. +## Configuring QPU resources in job creation -## Principles + +Note that this plugin is under active development and the exact syntax is subject to change. + -Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs. -User source code should be agnostic to specific backend instances and even backend types as far as possible. -This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration as opposed to source code). -The source code does not have to take care resp. is not involved in resource reservation handling (that is done when Slurm jobs are assigned QPU resources and start running, if applicable on the backend) or execution modes like sessions (these are automatically in place while the job is running, if applicable on the backend). -This makes the source code more portable between similar QPU resource types through different backend access methods (such as IBM's Direct Access API and IBM's Qiskit Runtime service through IBM Quantum Platform). -All backend types (such as IBM's Direct Access API, IBM's Qiskit Runtime service, or Pasqal's backends) follow these principles. +### Administrator scope -## Connecting physical resources to Slurm resources and how to use them +HPC administrators configure the SPANK plugin, i.e. what physical resources can be provided to Slurm jobs. +This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints, and access credentials. -Note the exact syntax is subject to change -- this is a sketch of the UX at this time. 
+You can read over the [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example configuration. -### HPC admin scope - -HPC administrators configure the SPANK plugin, what physical resources can be provided to Slurm jobs. -This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints, and access credentials -- note some parts of the configuration such as credentials can be sensitive information. - -See the file [qrmi_config.json.example](../plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example showing. - -In `slurm.conf`, qpu generic resources can be assigned to some or all nodes for usage: +In `slurm.conf`, qpu resources can be assigned to some or all nodes for usage: ``` ... GresTypes=qpu,name @@ -47,15 +29,13 @@ NodeName=node[1-5000] Gres=qpu,name:ibm_fez ... ``` -### HPC user scope +### User scope -HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. -The name attribute references what the HPC administrator has defined. -Mid-term, backend selection can be based on criteria other than a predefined name which refers to a specific backend (e.g. by capacity and error rate qualifiers which help downselect between the defined set of backends). +HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. The name attribute references what the HPC administrator has defined. Mid-term, backend selection can be based on criteria other than a predefined name which refers to a specific backend (e.g. by capacity and error rate qualifiers which help downselect between the defined set of backends). There might be additional environment variables required, depending on the backend type. -SBATCH parameters will point to one or more QPU resource assigned to the application as generic resources. 
+`SBATCH` parameters will point to one or more QPU resource assigned to the application as generic resources. Environment variables provided through the plugin will provide the necessary information to the application (see the [HPC application scope](#hpc-application-scope) section for details). ```shell @@ -84,9 +64,8 @@ srun ... HPC applications use the Slurm QPU resources assigned to the Slurm job. -Environment variables provide more details for use by the appliction, e.g. `SLURM_JOB_QPU_RESOURCES` listing the quantum resource names (comma separated if there are several provided). -These variables will be used by QRMI. -See the README files in the various QRMI flavor directories ([ibm](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/ibm/README.md), [pasqal](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/pasqal/README.md)) for details. +Environment variables provide more details for use by the application, e.g. `SLURM_JOB_QPU_RESOURCES` listing the quantum resource names (comma separated if there are several provided). +These variables will be used by QRMI. (See the README files in the various QRMI directories ([ibm](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/ibm/README.md), [pasqal](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/pasqal/README.md)) for more details.) ```python from qiskit import QuantumCircuit @@ -129,11 +108,11 @@ result = job.result() print(f">>> {result}") ``` -See [examples directory](https://github.com/qiskit-community/qrmi/tree/main/examples/qiskit_primitives/) for example files. +See the [examples directory](https://github.com/qiskit-community/qrmi/tree/main/examples/qiskit_primitives/) for example files. 
### Backend specifics #### IBM Direct Access API -##### HPC admin scope +##### Administrator scope Configuration of Direct Access API backends (HPC admin scope) includes endpoints and credentials to the Direct Access endpoint, authentication services as well as the S3 endpoint. Specifically, this includes: @@ -151,7 +130,7 @@ This has several implications: * vetting of users (who is allowed to access the QPU) and ensuring according access is up to the HPC cluster side * the capacity and priority of the QPU usage is solely managed through Slurm; there is not other scheduling of users involved outside of Slurm -##### HPC user scope +##### User scope Execution lanes are not exposed to the HPC administrator or user directly. Instead, mid term, there can be two different modes that HPC users can specify: @@ -159,7 +138,7 @@ Instead, mid term, there can be two different modes that HPC users can specify: * `exclusive=false` allows other jobs to run in parallel. In that case, there can be as many jobs as there are execution lanes at the same time, and the job essentially only gets one lane #### Qiskit Runtime Service -##### HPC user scope +##### User scope It is expected, that users specify additional access details in environment variables. Specifically, this includes @@ -174,8 +153,6 @@ Accordingly, IBM Quantum Platform's scheduling considers the user's and service At this time, users have to provide the above details (no shared cluster-wide Quantum access). -#### Pasqal - #### Pasqal Cloud Services ##### HPC admin scope There is no specific set-up required from HPC admins for PCS usage. @@ -186,28 +163,3 @@ Specifically, this currently includes * PCS resource to target (FRESNEL, EMU_FRESNEL, EMU_MPS) * Authorization token - -#### Pasqal on-prem devices -TBD. - -## Examples - -### Running jobs with dependencies - -FIXME: show example with 1 classical job => 1 quantum job (python pseudo code)=> 1 classical job. 
-Main topic: show dependencies - -### Running a job with several Slurm QPU resources - -FIXME: show example (quantum only, python, is good enough) where several backends are defined, referenced and used -Main topic: show how ids play an important role in that case - -### Running primitives directly - -FIXME: show example of qrun -- same SBATCH, but different executable. -Main topic: present qrun as an option -FIXME: define/finalize qrun at some time (parameters etc) - -### Other workflow tools - -FIXME: show how other workflow tooling could play into that diff --git a/docs/guides/slurm-plugin.mdx b/docs/guides/slurm-plugin.mdx index 82eb0f1de35..ac0f5d59124 100644 --- a/docs/guides/slurm-plugin.mdx +++ b/docs/guides/slurm-plugin.mdx @@ -1,151 +1,177 @@ -Spank plugins for Slurm to support quantum resources -==================================================== +--- +title: SPANK plugin for QRMI +description: Overview of the SPANK plugin for quantum resource management in HPC systems. +--- +{/* cspell:ignore QRMI, SBATCH, srun, Pasqal, slurmd, Doxyfile, Gres, ntasks, fffffffb */} -## Content +# SPANK plugin for QRMI -- [Context](#content) -- [Definitions](#definitions) - - [QPU](#qpu) - - [Quantum computer](#quantum-computer) - - [Spank plugins](#spank-plugins) - - [Spank quantum plugin](#spank-quantum-plugin) - - [Qiskit primitives (Sampler and Estimator)](#qiskit-primitives-sampler-and-estimator) -- [Vendor-Specific Context: IBM](#vendor-specific-context-ibm) -- [Vendor-Specific Definitions: IBM](#vendor-specific-definitions-ibm) - - [IBM Quantum Platform](#ibm-quantum-platform) - - [Direct Access API](#direct-access-api) -- [High Level Structure](#high-level-structure) -- [Quantum resource for workload management systems](#quantum-resource-for-workload-management-system) -- [Quantum resource API](#quantum-resource-api) -- [Integration Flow](#integration-flow) -- [High Level Flow of Quantum Plugin](#high-level-flow-of-quantum-plugin) -- [General 
architecture](#general-architecture-of-plugin) -- [Architectural Tenets](#architectural-tenents) +The [SPANK plugin](https://github.com/qiskit-community/spank-plugins/) for the [Quantum Resource Management Interface (QRMI)](./qrmi) is used to configure access to quantum resources from user jobs in a compute environment administrated by the Slurm workload manager. It handles the acquisition and release of access to quantum resources and sets the necessary environment variables for executing quantum workloads. The available quantum resources are specified in a `qrmi_config.json` file, which is managed by an administrator. -See [UX](./ux.md) for HPC user experience, HPC developer experience and usage patterns. +Once installed, this plugin registers the following option. A Slurm user can specify which quantum resources are used for the Slurm job script. -## Context +```bash +--qpu=names Comma separated list of QPU resources to use. +``` -Overview of involved components, personas and backend service options: -![context diagram](./images/context_diagram.png) +For example, +```bash +#!/bin/bash -## Definitions +#SBATCH --job-name=sampler_job +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=1 +#SBATCH --qpu=ibm_quebec,ibm_sherbrooke -### QPU -A `QPU` includes all of the hardware responsible for accepting an executable quantum instruction set, or a quantum circuit, and returning an accurate answer. That means the QPU includes the quantum chip(s) in a superconducting quantum computer, as well as additional components such as the amplifiers, control electronics, instruments. +# Your script goes here... +``` -### Quantum Computer -A `Quantum Computer` is comprised of the QPU and the classical compute needed to execute requests coming in through an API (its endpoint). -### Spank plugins -`SPANK` provides a very generic interface for stackable plug-ins which may be used to dynamically modify the job launch code in Slurm. 
-https://slurm.schedmd.com/spank.html +## Requirements and configuration -### Spank quantum plugin -A plugin in Slurm that manages the operation of quantum jobs in Slurm. It handles Slurm resources related to quantum and is configured so that jobs can execute on Quantum Computers. +The following tools are required for the compiling environment: +* Rust compiler 1.86 or above [Link](https://www.rust-lang.org/tools/install) +* A C compiler: for example, GCC(gcc) on Linux and Clang(clang-tools-extra) for Rust unknown targets/cross compilations. QRMI and its Spank plugin are compatible with a compiler conforming to the C11 standard. +* make/cmake (make/cmake RPM for RHEL compatible OS +* openssl (openssl-devel RPM for RHEL compatible OS) +* zlib (zlib-devel RPM for RHEL compatible OS) +* Slurm header files(slurm/slurm.h etc.) must be available on your host -### Qiskit primitives (Sampler and Estimator) -The two most common tasks for quantum computers are sampling quantum states and calculating expectation values. These tasks motivated the design of the Qiskit primitives: `Estimator` and `Sampler`. -- Estimator computes expectation values of observables with respect to states prepared by quantum circuits. -- Sampler samples the output register from quantum circuit execution. +The runtime environment requires: +* gcc (libgcc RPM for RHEL compatible OS) +* openssl (openssl-libs RPM for RHEL compatible OS) +* zlib (zlib RPM for RHEL compatible OS) -In short, the computational model introduced by the Qiskit primitives moves quantum programming one step closer to where classical programming is today, where the focus is less on the hardware details and more on the results you are trying to achieve. +### Configure available Quantum Resources -## Vendor-Specific Context: IBM +A [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) is provided in the repository as an example configuration. 
-Extension of the context overview of involved components, personas and backend service options for IBM: -![context diagram IBM](./images/context_diagram_ibm.png) +The `resources` array contains a set of available Quantum Resources which can be used by Slurm users in the jobs. Each Quantum Resource definition contains: -## Vendor-Specific Definitions: IBM +| Property | Description | +| ---- | ---- | +| name | Quantum resource name. e.g. Quantum backend name. | +| type | Resource type (`direct-access`, `qiskit-runtime-service` and `pasqal-cloud`) | +| environment | A set of environment variables to work with QRMI. Current implementations assume API endpoint and credentials are specified via environment variable setting. | -### IBM Quantum Platform -Cloud-based quantum computing service providing access to IBM's fleet of quantum backends. Sometimes abbreviated as IQP. +If a user specifies a resource with the `--qpu` option that is not defined in the `qrmi_config.json` file, the specification will be ignored. -### Direct Access API -Local interface to am IBM Quantum Computer. Sometimes abbreviated as DA API. Below the Direct Access API, classical preparation of jobs prior to the actual quantum execution can run in parallel (called *lanes* in the API definition). +If the user sets the necessary environment variables for job execution themselves, it is not required to specify them in this file. In this case, the environment property will be `{}`. -## Vendor-Specific Definitions: Pasqal + +If you are using a QPU resource with the resource type `qiskit-runtime-service`, use an account that supports [opening a session](https://quantum.cloud.ibm.com/docs/en/guides/run-jobs-session#open-a-session), such as a Premium plan. -### Pasqal Cloud Service -Cloud-based quantum computing service providing access to Pasqal QPU's and emulators. Sometimes abbreviated as PCS. 
+If you are using an account that does not support opening a session, such as an Open plan account, add `QRMI_IBM_QRS_SESSION_MODE="batch"` to the environment variable list in qrmi_config.json as workaround. + -### Pulser -Pasqal's native programming library [GitHub](https://github.com/pasqal-io/pulser). Supported in the low-level interfaces such as the QRMI. +## Installation -## High Level Structure +Run the build using `make` and `cmake` +```shell-session +. ~/.cargo/env +mkdir build +cd build +cmake .. +make +``` -At large, there are three domains: -* HPC users, consuming slurm resources and using access to Quantum Computers through these resources -* HPC admins, configuring slurm and managing access and mapping to available Quantum Computers -* Quantum Computer providers, offering access to Quantum/QPU resources on Quantum Computers +By default, the `CMakeLists.txt` file expects the Slurm header file (`slurm.h`) to be located in `/usr/include/slurm`, but this can be customized as shown below. -![High Level Structure](./images/high_level_structure.png) +```shell-session +SLURM_INCLUDE_DIRS= cmake .. +``` -## Quantum resource for workload management system -General GRES (custom resource) for quantum computers is QPU. -All quantum resources will have an identity and map to a Quantum Computer's quantum resource (i.e. map to a QPU). -Additional resource definition might be needed depending on implementation from hardware vendors. Some vendors expose to parallelism within quantum computer as execution lanes, threads, parts of devices, etc. Therefore we define quantum resource as an abstract that composed out of physical device and parallelism notion. +If the above build step is successful, a Linux shared library named `spank_qrmi.so` will be created under the `build/` directory. -![resource definition](./images/resource_definition.png) +In addition, add the following 1 line to the `/etc/slurm/plugstack.conf` on the nodes where this plugin is installed. 
-The QPU resource definition does not expose individual parallelism abstracts. Each backend flavor can have specific qualifiers how to use backend specific capabilities (e.g. for common use case: if a user wants to exclusively use a backend, all parallel job preparation units will be available for use -- if not, several users could submit jobs and share these units. As execution lanes in DA API do not have any identities that could be managed explicitly, only quantities resp. exclusive/shared use should be user controlled). +Note that administrator needs to create `qrmi_config.json` file and specify the path as plugin argument like below. -![resource mapping](./images/resource_mapping.png) +```bash +optional /usr/lib64/slurm/spank_qrmi.so /etc/slurm/qrmi_config.json +``` -## Quantum resource API -Any type of resource should implement resource control interface. Flow of working with resource following pattern: `acquire resource` → `execute` → `release resource`. Implementation of this interface might vary from platform to platform. + +There are optional arguments available. It allows you to add environment variables to the Slurm process where the SPANK plugin is loaded. The format for specifying environment variables is defined as follows. +```bash +--env:{variable name}={value} +``` +For example, when interacting with Quantum resources via an HTTP proxy, the environment variables `http_proxy`, `https_proxy`, and `no_proxy` are required. These can be added as shown below. +```bash +optional /usr/lib64/slurm/spank_qrmi.so /etc/slurm/qrmi_config.json --env:http_proxy=http://192.168.1.128:3128 --env:https_proxy=http://192.168.1.128:3128 +``` + +For allocator node, your don't need to specify the path to qrmi_config.json like below. 
-![resource control api](./images/resource_control_api.png) +```bash +optional /usr/lib64/slurm/spank_qrmi.so +``` -## Integration Flow -Similar to any Gres resource (GPU, AIU, etc), we treat QPU as gres and acquire it for whole duration of the job. -Primitive calls will manage the data and call towards the Quantum Computer (for most cases through Slurm to govern user access to the slurm-level quantum resource and potentially inject Quantum Computer access credentials) +The following table shows which Slurm context nodes these files should be copied to. For more details of each context, refer to the [SPANK Plugin documentation](https://slurm.schedmd.com/spank.html#SECTION_SPANK-PLUGINS) +| Files | Slurm contexts | +| ------- | ------- | +| `plugstack.conf` | **local**, **remote**, **allocator**, **slurmd** and **job_script**. | +| `qrmi_config.json` | **remote** (Compute nodes) | +| `spank_qrmi.so` | **allocator** and **remote** (Login nodes and Compute nodes) | -![integration Flow](./images/integration_flow.png) -This avoids drawbacks of other options, e.g. when the user application's primitive call will create other Slurm jobs that send primitive data towards the Quantum Computer. -Having this logic of sending data towards the Quantum Computer in qiskit level code reduces complexity and latency, and avoids complexity in error handling. + +Once `plugstack.conf` is updated, spank plugins will be loaded at runtime during the next job launch, which means administrators do not need to restart Slurm cluster. + -## High Level Flow of Quantum Plugin +Once you complete installation, you should find `--qpu=names` option in the sbatch help message. -This is the high level flow from when Slurm jobs are started to how requests find their way to a Quantum Computer. -Requests refer to any interaction between application and Quantum Computer (e.g. including getting configuration information that is needed for transpilation). 
+```bash +Options provided by plugins: + --qpu=names Comma separated list of QPU resources to use. +``` -![High Level Flow -- png editable with draw.io, please keep it that way](./images/high-level-plugin-flow.png) +## Logging -## General architecture of plugin +This plugin uses Slurm logger for logging. Log messages from this plugin can be found in `/var/log/slurm/slurmd.log`, etc. -Quantum plugin will be using Spank architecture events sequence of call during job execution to inject necessary logic to work with quantum computer API. +```bash +[2025-07-31T09:43:34.019] [21.batch] debug: spank: /etc/slurm/plugstack.conf:1: Loaded plugin spank_qrmi.so +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c(6582, 0): -> slurm_spank_init argc=1 remote=1 +[2025-07-31T09:43:34.019] [21.batch] debug: SPANK: appending plugin option "qpu" +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c(6582,0): <- slurm_spank_init rc=0 +[2025-07-31T09:43:34.019] [21.batch] debug2: spank: spank_qrmi.so: init = 0 +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c: --qpu=[ibm_sherbrooke,ibm_torino] +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c(6582, 0): -> slurm_spank_init_post_opt argc=1 remote=1 +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c, fffffffb +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c: argv[0] = [/etc/slurm/qrmi_config.json] +[2025-07-31T09:43:34.020] [21.batch] debug: spank_qrmi_c: name(ibm_sherbrooke), type(1) found in qrmi_config +``` -1. Job execution flow - 1. Prolog - 1. Handle secrets - 2. Acquire resource - 3. Create network middleware - 2. Task init - 1. Handle options - 2. Set env variables - 3. Epilog - 1. Remove middleware - 2. Release resource +You can enable QRMI runtime log by specifying the following `srun` arguments. 
-![general architecture](./images/general_architecture.png) +| sbatch/srun option | Slurm log level (SRUN_DEBUG) | QRMI log level (RUST_LOG) | +| ---- | ---- | ---- | +| (default) | 3 | info | +| `--quiet` | 2 | error | +| `--verbose` | 4 | debug | +| `-vv` or more | 5 | trace | -## Architectural Tenents -* A Slurm-level QPU resource maps to physical resource of a Quantum Computer - * Quantum backend selection is part of that Slurm resource definition and is considered configuration, not source code - * Qiskit-level code can refer to that Slurm resource and access/use the quantum resource behind it. Qiskit-level code should avoid naming the desired backend directly (=> it should be part of the Slurm resource definition instead) - * Slurm QPU resources have an identity (allowing to bind against it from qiskit) - * additional qualifiers of the Slurm QPU resource are backend type specific - * parallelism abstracts (such as execution lanes which are anonymous units to prepare jobs in parallel for quantum execution which is still serialized) are abstracted behind the Slurm QPU resource. Qualifiers may be used to deal with specifics (such as: are these lanes held exclusive for one user, or is there a shared access possible) -* Quantum resources are acquired/locked before usage - * as identification/selection of the quantum resource is through the Slurm resource, transpilation can only happen after that - * initially, transpilation will happen after acquiring the resource, which can can lead to slightly lower QPU utilization, as other jobs may be locked out. This may (should!) be improved in a later phase and requires an extended concept (such as first step is define the resource, which may or may not result in actions, second step is lock for execution) -- more details required at a later time! 
-* Primitive calls will trigger submission towards the Quantum Computer - * Flow is triggered from the qiskit-level code, without scheduling additional intermediate Slurm jobs - * The network flow can go through the Slurm plugin, to govern user access or manage access to the Quantum Computer - * The network/data flow can be specific to a backend type (using intermediate storage to hold input/output data, or sending data in-line) +## Considerations for multiple QPUs + +At runtime, each QRMI instance is linked to a single QPU resource. To enable the use of multiple Quantum resources within a single job script, this plugin sets environment variables with the resource name as a prefix. For example, if `--qpu=qpu1,qpu2` is specified, the environment variables will be set as follows: + +```bash +qpu1_QRMI_IBM_DA_ENDPOINT=http://test1 +qpu2_QRMI_IBM_DA_ENDPOINT=http://test2 +``` + +This ensures that each QRMI instance operates with the configuration parameters set for its respective resource during the execution of the Slurm job. + +The above environment variable settings are applied only to jobs where the `--qpu=names` option is specified. + +This plugin also set the following 2 environment variables which will be referred by QRMI primitives code. + +| Environment variable | Description | +| ---- | ---- | +| `SLURM_JOB_QPU_RESOURCES` | Comma separated list of QPU resources to use at runtime. Undocumented resources will be filtered out. For example, `qpu1,qpu2`. | +| `SLURM_JOB_QPU_TYPES` | Comma separated list of Resource type (`direct-access`, `qiskit-runtime-service` and `pasqal-cloud`). 
For example, `direct-access,pasqal-cloud` | diff --git a/qiskit_bot.yaml b/qiskit_bot.yaml index 079b8a66dfd..bd1a1a6d0f1 100644 --- a/qiskit_bot.yaml +++ b/qiskit_bot.yaml @@ -535,6 +535,12 @@ notifications: "docs/guides/open-source": - "@beckykd" - "@abbycross" + "docs/guides/slurm-hpc-ux": + - "@kaelynj" + "docs/guides/slurm-plugin": + - "@kaelynj" + "docs/guides/qrmi": + - "@kaelynj" "docs/tutorials/index": - "@miamico" - "@annaliese-estes" diff --git a/scripts/config/cspell/dictionaries/qiskit.txt b/scripts/config/cspell/dictionaries/qiskit.txt index 2a5b375b3a1..d1eec649983 100644 --- a/scripts/config/cspell/dictionaries/qiskit.txt +++ b/scripts/config/cspell/dictionaries/qiskit.txt @@ -88,6 +88,7 @@ QKNAME QNN QNNs QPU's +QRMI QRTE QSCI QUBO diff --git a/scripts/js/commands/checkPatternsIndex.ts b/scripts/js/commands/checkPatternsIndex.ts index 9e63674c48c..6e10e9123fb 100644 --- a/scripts/js/commands/checkPatternsIndex.ts +++ b/scripts/js/commands/checkPatternsIndex.ts @@ -32,6 +32,9 @@ const ALLOWLIST_MISSING_FROM_INDEX: Set = new Set([ "/docs/guides/faq", "/docs/guides/execution-modes-faq", "/docs/guides/open-source", + "/docs/guides/qrmi", + "/docs/guides/slurm-plugin", + "/docs/guides/slurm-hpc-ux", ]); // URLs that show up in the INDEX_PAGES, but are not in the left ToC under From f1c4e7a0acff816a4a344b08ae8cd1ba5d44c549 Mon Sep 17 00:00:00 2001 From: abbycross Date: Tue, 25 Nov 2025 16:50:46 -0500 Subject: [PATCH 3/9] tiny copyedits --- docs/guides/qrmi.mdx | 48 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/guides/qrmi.mdx b/docs/guides/qrmi.mdx index 0d76c1efcb2..63bdf8e9406 100644 --- a/docs/guides/qrmi.mdx +++ b/docs/guides/qrmi.mdx @@ -6,11 +6,11 @@ description: Overview of the Quantum Resource Management Interface for integrati # Quantum resource management interface (QMRI) -The Quantum resource management interface is a vendor-agnostic library for high-performance compute (HPC) 
systems to control state, run tasks, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. +The Quantum resource management interface (QRMI) is a vendor-agnostic library for high-performance compute (HPC) systems to control state, run tasks, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. -The source code to build and deploy this interface can be found in this [GitHub repository](https://github.com/qiskit-community/qrmi). +Find the source code to build and deploy QMRI in this [GitHub repository](https://github.com/qiskit-community/qrmi). -There is also an optional `task_runner` command line tool to execute quantum payloads against quantum hardware which is included in the Python package. The full documentation can be found [here](https://github.com/qiskit-community/qrmi/blob/main/python/qrmi/tools/task_runner/README.md). +An optional `task_runner` command line tool to execute quantum payloads against quantum hardware is included in the Python package. Find the [full documentation](https://github.com/qiskit-community/qrmi/blob/main/python/qrmi/tools/task_runner/README.md) in the GitHub repository. 
## Build the QRMI libraries @@ -29,27 +29,27 @@ RedHat Enterprise Linux 10, Rocky Linux 8, Rocky Linux 9, SuSE 15, Ubuntu 22.04, Ubuntu 24.04, MacOS Sequoia 15.1 or above ``` -#### Compiling environment: +#### Compiling environment * Rust compiler 1.91 or above [Link](https://www.rust-lang.org/tools/install) * A C compiler: for example, GCC (`gcc`) on Linux and Clang (`clang-tools-extra`) for Rust unknown targets/cross compilations. QRMI is compatible with a compiler conforming to the C11 standard. -* `make/cmake` (make/cmake RPM for RHEL compatible OS) -* `openssl` (openssl-devel RPM for RHEL compatible OS) -* `zlib` (zlib-devel RPM for RHEL compatible OS) -* Python 3.11, 3.12 or 3.13 (For Python API) - * Libraries and header files needed for Python development(python3.1x-devel RPM for RHEL compatible OS) +* `make/cmake` (make/cmake RPM for RHEL-compatible OS) +* `openssl` (openssl-devel RPM for RHEL-compatible OS) +* `zlib` (zlib-devel RPM for RHEL-compatible OS) +* Python 3.11, 3.12, or 3.13 (For Python API) + * Libraries and header files needed for Python development (python3.1x-devel RPM for RHEL-compatible OS): * /usr/include/python3.1x * /usr/lib64/libpython3.1x.so -* Doxygen (for generating C API document), depending on the OS - * ```dnf install doxygen``` for Linux(RHEL/CentOS/Rocky Linux etc) +* Doxygen (for generating C API document), depending on the OS: + * ```dnf install doxygen``` for Linux(RHEL/CentOS/Rocky Linux etc.) * ```apt install doxygen``` for Linux(Ubuntu etc.) 
* ```brew install doxygen```for MacOS #### Runtime environment -* gcc (libgcc RPM for RHEL compatible OS) -* openssl (openssl-libs RPM for RHEL compatible OS) -* zlib (zlib RPM for RHEL compatible OS) -* Python 3.11, 3.12 or 3.13 (For Python API) - * Libraries and header files needed for Python development(python3.1x-devel RPM for RHEL compatible OS) +* gcc (libgcc RPM for RHEL-compatible OS) +* openssl (openssl-libs RPM for RHEL-compatible OS) +* zlib (zlib RPM for RHEL-compatible OS) +* Python 3.11, 3.12, or 3.13 (For Python API) + * Libraries and header files needed for Python development (python3.1x-devel RPM for RHEL-compatible OS) --- @@ -62,7 +62,7 @@ cargo build --release -Then to build the Python package, first set up a python environment and install the required dependencies. +To build the Python package, first set up a Python environment and install the required dependencies. ```shell-session . ~/.cargo/env cargo clean @@ -72,13 +72,13 @@ pip install --upgrade pip pip install -r requirements-dev.txt ``` -Create the stub files for the python code. +Create the stub files for the Python code. ```shell-session . ~/.cargo/env cargo run --bin stubgen --features=pyo3 ``` -And lastly, build the python wheels for distribution to your hosts. +Lastly, build the Python wheels for distribution to your hosts. ```shell-session source ~/py312_qrmi_venv/bin/activate CARGO_TARGET_DIR=./target/release/maturin maturin build --release @@ -90,7 +90,7 @@ The wheel is created in the `./target/release/maturin/wheels` directory. You can ## Logging -QRMI supports [log crate](https://crates.io/crates/log) for logging. You can find the detailed QRMI runtime logs by specifying `RUST_LOG` environment variable with log level. Supported levels are `error`, `warn`, `info`, `debug` and `trace`. Default level is `warn`. +QRMI supports [log crate](https://crates.io/crates/log) for logging. 
You can find the detailed QRMI runtime logs by specifying `RUST_LOG` environment variable with the log level. Supported levels are `error`, `warn`, `info`, `debug` and `trace`. The default level is `warn`. If you specify `trace`, you can find underlying HTTP transaction logs. @@ -114,12 +114,12 @@ The Rust API documentation can be created by running cargo doc --no-deps --open ``` -And the C API documentation can be created using doxygen +The C API documentation can be created by using doxygen: ```shell-session doxygen Doxyfile ``` -This will create an HTML document under the `./html` directory which you can open in a web browser. +This will create an HTML document under the `./html` directory, which you can open in a web browser. The Python API documentation is generated with `pydoc`. After entering the virtual environment with the QRMI packaged installed, run the following commands: @@ -130,12 +130,12 @@ Server commands: [b]rowser, [q]uit server> b ``` -Then open the following page in your browser +Then, open the following page in your browser: ```shell-session http://localhost:8290/qrmi.html ``` -and stop the server with +Stop the server with ```shell-session server> q ``` \ No newline at end of file From 6fadee18473a2e823fad2cdf02c88378973db792 Mon Sep 17 00:00:00 2001 From: abbycross Date: Tue, 25 Nov 2025 17:05:57 -0500 Subject: [PATCH 4/9] Apply suggestions from code review --- docs/guides/slurm-hpc-ux.mdx | 42 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx index 52ceb8438a7..b7fdb9e5748 100644 --- a/docs/guides/slurm-hpc-ux.mdx +++ b/docs/guides/slurm-hpc-ux.mdx @@ -8,7 +8,7 @@ description: User guide for the quantum resource management SPANK plugin Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs in high-performance compute (HPC) environments. 
User source code should be agnostic to specific backend instances and even backend types as far as possible. This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration as opposed to source code). -## Configuring QPU resources in job creation +## Configure QPU resources in job creation Note that this plugin is under active development and the exact syntax is subject to change. @@ -17,11 +17,11 @@ Note that this plugin is under active development and the exact syntax is subjec ### Administrator scope HPC administrators configure the SPANK plugin, i.e. what physical resources can be provided to Slurm jobs. -This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints, and access credentials. +This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints and access credentials. -You can read over the [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example configuration. +Read the [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example configuration. -In `slurm.conf`, qpu resources can be assigned to some or all nodes for usage: +In `slurm.conf`, QPU resources can be assigned to some or all nodes for usage: ``` ... GresTypes=qpu,name @@ -31,11 +31,11 @@ NodeName=node[1-5000] Gres=qpu,name:ibm_fez ### User scope -HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. The name attribute references what the HPC administrator has defined. Mid-term, backend selection can be based on criteria other than a predefined name which refers to a specific backend (e.g. 
by capacity and error rate qualifiers which help downselect between the defined set of backends). +HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. The name attribute references what the HPC administrator has defined. Mid-term, backend selection can be based on criteria other than a predefined name referring to a specific backend (for example, by capacity and error rate qualifiers, to help down-select among the defined set of backends). There might be additional environment variables required, depending on the backend type. -`SBATCH` parameters will point to one or more QPU resource assigned to the application as generic resources. +`SBATCH` parameters will point to one or more QPU resources assigned to the application as generic resources. Environment variables provided through the plugin will provide the necessary information to the application (see the [HPC application scope](#hpc-application-scope) section for details). ```shell @@ -64,8 +64,8 @@ srun ... HPC applications use the Slurm QPU resources assigned to the Slurm job. -Environment variables provide more details for use by the application, e.g. `SLURM_JOB_QPU_RESOURCES` listing the quantum resource names (comma separated if there are several provided). -These variables will be used by QRMI. (See the README files in the various QRMI directories ([ibm](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/ibm/README.md), [pasqal](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/pasqal/README.md)) for more details.) +Environment variables provide more details for use by the application; for example, `SLURM_JOB_QPU_RESOURCES` lists the quantum resource names (comma-separated if several are provided). +These variables will be used by QRMI. 
(See the README files in the various QRMI directories ([IBM](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/ibm/README.md), [pasqal](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/pasqal/README.md)) for more details.) ```python from qiskit import QuantumCircuit @@ -116,14 +116,14 @@ See the [examples directory](https://github.com/qiskit-community/qrmi/tree/main/ Configuration of Direct Access API backends (HPC admin scope) includes endpoints and credentials to the Direct Access endpoint, authentication services as well as the S3 endpoint. Specifically, this includes: -* IBM Cloud API key for creating bearer tokens -* endpoint of Direct Access API +* IBM Cloud® API key for creating bearer tokens +* Endpoint of the Direct Access API * S3 bucket and access details Access credentials should not be visible to HPC users or other non-privileged users on the system. -Therefore, sensitive data can be put in separate files which can be access protected accordingly. +Therefore, sensitive data can be put in separate files, which can be access-protected accordingly. -Note that Slurm has got full access to the backend. +Note that Slurm has full access to the backend. This has several implications: * the Slurm plugin is responsible for multi-tenancy (ensuring that users don't see results of other users' jobs) @@ -140,26 +140,26 @@ Instead, mid term, there can be two different modes that HPC users can specify: #### Qiskit Runtime Service ##### User scope -It is expected, that users specify additional access details in environment variables. -Specifically, this includes +It is expected that users specify additional access details in environment variables. 
+Specifically, this includes the following: * Qiskit Runtime service instance (CRN, Cloud Resource Name) * Endpoint for Qiskit Runtime (unless auto-detected from the CRN) -* API key which has access to the CRN -* S3 instance, bucket and access token/credentials for data transfers +* API key, which has access to the CRN +* S3 instance, bucket, and access token/credentials for data transfers -This determines under which user and service instance the Qiskit Runtime service is used -Accordingly, IBM Quantum Platform's scheduling considers the user's and service instance's capabilities for scheduling. +These details determine under which user and service instance the Qiskit Runtime service is used. +Accordingly, IBM Quantum® Platform scheduling considers the user's and service instance's capabilities for scheduling. -At this time, users have to provide the above details (no shared cluster-wide Quantum access). +At this time, users must provide the above details (no shared cluster-wide quantum access). #### Pasqal Cloud Services ##### HPC admin scope -There is no specific set-up required from HPC admins for PCS usage. +There is no specific setup required from HPC admins for PCS usage. ##### HPC user scope It is expected, that users specify additional access details in environment variables. 
-Specifically, this currently includes +Currently, this includes the following: * PCS resource to target (FRESNEL, EMU_FRESNEL, EMU_MPS) * Authorization token From cca02f5836387a7cdbb02b3639c84fdb6338732b Mon Sep 17 00:00:00 2001 From: abbycross Date: Tue, 25 Nov 2025 17:06:57 -0500 Subject: [PATCH 5/9] missed one --- docs/guides/slurm-hpc-ux.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx index b7fdb9e5748..31080ad9941 100644 --- a/docs/guides/slurm-hpc-ux.mdx +++ b/docs/guides/slurm-hpc-ux.mdx @@ -158,7 +158,7 @@ At this time, users must provide the above details (no shared cluster-wide quant There is no specific setup required from HPC admins for PCS usage. ##### HPC user scope -It is expected, that users specify additional access details in environment variables. +It is expected that users specify additional access details in environment variables. Currently, this includes the following: * PCS resource to target (FRESNEL, EMU_FRESNEL, EMU_MPS) From dfa3aa874f5ca0c8a19840f046cfbf45cf7583e6 Mon Sep 17 00:00:00 2001 From: abbycross Date: Tue, 25 Nov 2025 17:14:45 -0500 Subject: [PATCH 6/9] missed one --- docs/guides/_toc.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/_toc.json b/docs/guides/_toc.json index e0d93c484e9..04bf5b6407f 100644 --- a/docs/guides/_toc.json +++ b/docs/guides/_toc.json @@ -643,7 +643,7 @@ ] }, { - "title": "High-Performance Compute", + "title": "High-performance compute", "children": [ { "title": "Quantum resource management interface (QRMI)", From fe035297ae9459d144b044cd5c0abfcebc033cd5 Mon Sep 17 00:00:00 2001 From: Kaelyn Ferris <43348706+kaelynj@users.noreply.github.com> Date: Thu, 4 Dec 2025 10:25:18 -0500 Subject: [PATCH 7/9] Apply suggestions from code review Co-authored-by: abbycross --- docs/guides/qrmi.mdx | 1 - docs/guides/slurm-hpc-ux.mdx | 16 ++++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) 
diff --git a/docs/guides/qrmi.mdx b/docs/guides/qrmi.mdx index 63bdf8e9406..c6a022a2267 100644 --- a/docs/guides/qrmi.mdx +++ b/docs/guides/qrmi.mdx @@ -14,7 +14,6 @@ An optional `task_runner` command line tool to execute quantum payloads against ## Build the QRMI libraries -At its core, QRMI is a set of libraries to control state for a set of quantum resources. Written in Rust with a C and Python API exposed for ease of integration to any compute infrastructure. This section shows how to build QRMI for C and Python. diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx index 31080ad9941..000692edefb 100644 --- a/docs/guides/slurm-hpc-ux.mdx +++ b/docs/guides/slurm-hpc-ux.mdx @@ -6,7 +6,7 @@ description: User guide for the quantum resource management SPANK plugin # SPANK plugin user guide -Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs in high-performance compute (HPC) environments. User source code should be agnostic to specific backend instances and even backend types as far as possible. This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration as opposed to source code). +Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs in high-performance compute (HPC) environments. User source code should be agnostic to specific backend instances, and even backend types whenever possible. This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration rather than source code). ## Configure QPU resources in job creation @@ -16,7 +16,7 @@ Note that this plugin is under active development and the exact syntax is subjec ### Administrator scope -HPC administrators configure the SPANK plugin, i.e. what physical resources can be provided to Slurm jobs. 
+HPC administrators configure the SPANK plugin to specify what physical resources can be provided to Slurm jobs. This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints and access credentials. Read the [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example configuration. @@ -113,7 +113,7 @@ See the [examples directory](https://github.com/qiskit-community/qrmi/tree/main/ ### Backend specifics #### IBM Direct Access API ##### Administrator scope -Configuration of Direct Access API backends (HPC admin scope) includes endpoints and credentials to the Direct Access endpoint, authentication services as well as the S3 endpoint. +Configuration of Direct Access API backends (HPC admin scope) includes endpoints and credentials to the Direct Access endpoint and authentication services as well as to the S3 endpoint. Specifically, this includes: * IBM Cloud® API key for creating bearer tokens @@ -126,16 +126,16 @@ Therefore, sensitive data can be put in separate files, which can be access-prot Note that Slurm has full access to the backend. 
This has several implications: -* the Slurm plugin is responsible for multi-tenancy (ensuring that users don't see results of other users' jobs) -* vetting of users (who is allowed to access the QPU) and ensuring according access is up to the HPC cluster side -* the capacity and priority of the QPU usage is solely managed through Slurm; there is not other scheduling of users involved outside of Slurm +* The Slurm plugin is responsible for multi-tenancy (ensuring that users don't see results of other users' jobs) +* The HPC cluster side is responsible for vetting users (who is allowed to access the QPU) and ensuring according access +* the capacity and priority of the QPU usage is solely managed through Slurm; there is notother scheduling of users involved outside of Slurm ##### User scope Execution lanes are not exposed to the HPC administrator or user directly. Instead, mid term, there can be two different modes that HPC users can specify: -* `exclusive=true` specifies that no other jobs can use the resource at the same time. An exclusive mode job gets all execution lanes and can not run at the same time as a non-exclusive job -* `exclusive=false` allows other jobs to run in parallel. In that case, there can be as many jobs as there are execution lanes at the same time, and the job essentially only gets one lane +* `exclusive=true` specifies that no other jobs can use the resource at the same time. An exclusive mode job gets all execution lanes and cannot run at the same time as a non-exclusive job +* `exclusive=false` allows other jobs to run in parallel. 
In this case, there can be as many jobs as there are execution lanes, all running at the same time, and the job is assigned one lane #### Qiskit Runtime Service ##### User scope From 56440b206f8fca9c4282d375b06542de245f6615 Mon Sep 17 00:00:00 2001 From: Kaelyn Ferris <43348706+kaelynj@users.noreply.github.com> Date: Thu, 4 Dec 2025 11:33:42 -0500 Subject: [PATCH 8/9] Address feedback, fix some typos --- docs/guides/qrmi.mdx | 8 ++++---- docs/guides/slurm-hpc-ux.mdx | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/guides/qrmi.mdx b/docs/guides/qrmi.mdx index c6a022a2267..7b34bdac0d7 100644 --- a/docs/guides/qrmi.mdx +++ b/docs/guides/qrmi.mdx @@ -1,14 +1,14 @@ --- -title: Quantum resource management interface (QMRI) +title: Quantum resource management interface (QRMI) description: Overview of the Quantum Resource Management Interface for integrating quantum resources to high-performance compute systems --- {/* cspell:ignore QRMI, stubgen, maturin, Doxyfile, rowser */} -# Quantum resource management interface (QMRI) +# Quantum resource management interface (QRMI) The Quantum resource management interface (QRMI) is a vendor-agnostic library for high-performance compute (HPC) systems to control state, run tasks, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. -Find the source code to build and deploy QMRI in this [GitHub repository](https://github.com/qiskit-community/qrmi). +Find the source code to build and deploy QRMI in this [GitHub repository](https://github.com/qiskit-community/qrmi). An optional `task_runner` command line tool to execute quantum payloads against quantum hardware is included in the Python package. 
Find the [full documentation](https://github.com/qiskit-community/qrmi/blob/main/python/qrmi/tools/task_runner/README.md) in the GitHub repository.
@@ -137,4 +137,4 @@
http://localhost:8290/qrmi.html
```
Stop the server with
```shell-session
server> q
-```
\ No newline at end of file
+```
diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx
index 000692edefb..039d3090331 100644
--- a/docs/guides/slurm-hpc-ux.mdx
+++ b/docs/guides/slurm-hpc-ux.mdx
@@ -31,7 +31,7 @@
 
 ### User scope
 
-HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. The name attribute references what the HPC administrator has defined. Mid-term, backend selection can be based on criteria other than a predefined name referring to a specific backend (for example, by capacity and error rate qualifiers, to help down-select among the defined set of backends).
+HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. The name attribute references what the HPC administrator has defined. During a Slurm job's runtime, backend selection can be based on criteria other than a predefined name referring to a specific backend (for example, by capacity and error rate qualifiers, to help down-select among the defined set of backends).
 
 There might be additional environment variables required, depending on the backend type.
@@ -128,11 +128,11 @@ This has several implications: * The Slurm plugin is responsible for multi-tenancy (ensuring that users don't see results of other users' jobs) * The HPC cluster side is responsible for vetting users (who is allowed to access the QPU) and ensuring according access -* the capacity and priority of the QPU usage is solely managed through Slurm; there is notother scheduling of users involved outside of Slurm +* The capacity and priority of the QPU usage is solely managed through Slurm; there is no other scheduling of users involved outside of Slurm ##### User scope Execution lanes are not exposed to the HPC administrator or user directly. -Instead, mid term, there can be two different modes that HPC users can specify: +Instead, during runtime, there can be two different modes that HPC users can specify: * `exclusive=true` specifies that no other jobs can use the resource at the same time. An exclusive mode job gets all execution lanes and cannot run at the same time as a non-exclusive job * `exclusive=false` allows other jobs to run in parallel. 
In this case, there can be as many jobs as there are execution lanes, all running at the same time, and the job is assigned one lane From d8e6419100c7ff1cf790182e554a898ee6e37f2f Mon Sep 17 00:00:00 2001 From: Kaelyn Ferris <43348706+kaelynj@users.noreply.github.com> Date: Tue, 6 Jan 2026 10:38:45 -0500 Subject: [PATCH 9/9] Address comments from code review --- docs/guides/qrmi.mdx | 2 +- docs/guides/slurm-hpc-ux.mdx | 15 +++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/docs/guides/qrmi.mdx b/docs/guides/qrmi.mdx index 7b34bdac0d7..25d44a05a0f 100644 --- a/docs/guides/qrmi.mdx +++ b/docs/guides/qrmi.mdx @@ -6,7 +6,7 @@ description: Overview of the Quantum Resource Management Interface for integrati # Quantum resource management interface (QRMI) -The Quantum resource management interface (QRMI) is a vendor-agnostic library for high-performance compute (HPC) systems to control state, run tasks, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. +The Quantum resource management interface (QRMI) is a vendor-agnostic library for high-performance compute (HPC) systems to access, control, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. Find the source code to build and deploy QRMI in this [GitHub repository](https://github.com/qiskit-community/qrmi). 
diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx index 039d3090331..f23f6fd50f1 100644 --- a/docs/guides/slurm-hpc-ux.mdx +++ b/docs/guides/slurm-hpc-ux.mdx @@ -71,7 +71,7 @@ These variables will be used by QRMI. (See the README files in the various QRMI from qiskit import QuantumCircuit # using an IBM QRMI flavor: from qrmi.primitives import QRMIService -from qrmi.primitives.ibm import SamplerV2, get_target +from qrmi.primitives.ibm import SamplerV2, get_backend # define circuit @@ -89,10 +89,10 @@ resources = service.resources() qrmi = resources[0] # Generate transpiler target from backend configuration & properties and transpile -target = get_target(qrmi) +backend = get_backend(qrmi) pm = generate_preset_pass_manager( optimization_level=1, - target=target, + backend=backend, ) isa_circuit = pm.run(circuit) @@ -105,7 +105,14 @@ job = sampler.run([(isa_circuit, isa_observable, param_values)]) print(f">>> Job ID: {job.job_id()}") result = job.result() -print(f">>> {result}") + +if job.done(): + pub_result = result[0] + print(f"Counts for the 'meas' output register: {pub_result.data.meas.get_counts()}") +elif job.cancelled(): + print("Cancelled") +elif job.errored(): + print(qrmi.task_logs(job.job_id())) ``` See the [examples directory](https://github.com/qiskit-community/qrmi/tree/main/examples/qiskit_primitives/) for example files.