diff --git a/docs/guides/_toc.json b/docs/guides/_toc.json index 849be7271ae..99c78a7c0eb 100644 --- a/docs/guides/_toc.json +++ b/docs/guides/_toc.json @@ -204,7 +204,6 @@ ], "collapsible": false }, - { "title": "Qiskit Functions", "children": [ @@ -637,6 +636,26 @@ } ] }, + { + "title": "High-performance compute", + "children": [ + { + "title": "Quantum resource management interface (QRMI)", + "url": "/docs/guides/qrmi", + "isNew": true + }, + { + "title": "SPANK plugin for QRMI", + "url": "/docs/guides/slurm-plugin", + "isNew": true + }, + { + "title": "SPANK plugin user guide", + "url": "/docs/guides/slurm-hpc-ux", + "isNew": true + } + ] + }, { "title": "Visualization", "children": [ diff --git a/docs/guides/qrmi.mdx b/docs/guides/qrmi.mdx new file mode 100644 index 00000000000..25d44a05a0f --- /dev/null +++ b/docs/guides/qrmi.mdx @@ -0,0 +1,140 @@ +--- +title: Quantum resource management interface (QRMI) +description: Overview of the Quantum Resource Management Interface for integrating quantum resources to high-performance compute systems +--- +{/* cspell:ignore QRMI, stubgen, maturin, Doxyfile, rowser */} + +# Quantum resource management interface (QRMI) + +The Quantum resource management interface (QRMI) is a vendor-agnostic library for high-performance compute (HPC) systems to access, control, and monitor the behavior of quantum computational resources. It acts as a thin middleware layer that abstracts away the complexities associated with controlling quantum resources through a set of simple APIs. Written in Rust, this interface also exposes Python and C APIs for ease of integration into nearly any computational environment. + +Find the source code to build and deploy QRMI in this [GitHub repository](https://github.com/qiskit-community/qrmi). + +An optional `task_runner` command line tool to execute quantum payloads against quantum hardware is included in the Python package. 
Find the [full documentation](https://github.com/qiskit-community/qrmi/blob/main/python/qrmi/tools/task_runner/README.md) in the GitHub repository. + +## Build the QRMI libraries + + +This section shows how to build QRMI for C and Python. + +### Requirements + +QRMI supports the following operating systems: + +``` +AlmaLinux 9, Amazon Linux 2023, CentOS Stream 9, CentOS Stream 10, +RedHat Enterprise Linux 8, RedHat Enterprise Linux 9, +RedHat Enterprise Linux 10, Rocky Linux 8, Rocky Linux 9, SuSE 15, +Ubuntu 22.04, Ubuntu 24.04, MacOS Sequoia 15.1 or above +``` + +#### Compiling environment +* Rust compiler 1.91 or above [Link](https://www.rust-lang.org/tools/install) +* A C compiler: for example, GCC (`gcc`) on Linux and Clang (`clang-tools-extra`) for Rust unknown targets/cross compilations. QRMI is compatible with a compiler conforming to the C11 standard. +* `make/cmake` (make/cmake RPM for RHEL-compatible OS) +* `openssl` (openssl-devel RPM for RHEL-compatible OS) +* `zlib` (zlib-devel RPM for RHEL-compatible OS) +* Python 3.11, 3.12, or 3.13 (For Python API) + * Libraries and header files needed for Python development (python3.1x-devel RPM for RHEL-compatible OS): + * /usr/include/python3.1x + * /usr/lib64/libpython3.1x.so +* Doxygen (for generating C API document), depending on the OS: + * ```dnf install doxygen``` for Linux(RHEL/CentOS/Rocky Linux etc.) + * ```apt install doxygen``` for Linux(Ubuntu etc.) + * ```brew install doxygen```for MacOS + +#### Runtime environment +* gcc (libgcc RPM for RHEL-compatible OS) +* openssl (openssl-libs RPM for RHEL-compatible OS) +* zlib (zlib RPM for RHEL-compatible OS) +* Python 3.11, 3.12, or 3.13 (For Python API) + * Libraries and header files needed for Python development (python3.1x-devel RPM for RHEL-compatible OS) + +--- + +Build the Rust/C API library with the following commands wherever you have saved the QRMI repository. +```shell-session +. 
~/.cargo/env +cargo clean +cargo build --release +``` + + + +To build the Python package, first set up a Python environment and install the required dependencies. +```shell-session +. ~/.cargo/env +cargo clean +python3.12 -m venv ~/py312_qrmi_venv +source ~/py312_qrmi_venv/bin/activate +pip install --upgrade pip +pip install -r requirements-dev.txt +``` + +Create the stub files for the Python code. +```shell-session +. ~/.cargo/env +cargo run --bin stubgen --features=pyo3 +``` + +Lastly, build the Python wheels for distribution to your hosts. +```shell-session +source ~/py312_qrmi_venv/bin/activate +CARGO_TARGET_DIR=./target/release/maturin maturin build --release +``` + +The wheel is created in the `./target/release/maturin/wheels` directory. You can distribute and install on your hosts by `pip install `. + + + +## Logging + +QRMI supports [log crate](https://crates.io/crates/log) for logging. You can find the detailed QRMI runtime logs by specifying `RUST_LOG` environment variable with the log level. Supported levels are `error`, `warn`, `info`, `debug` and `trace`. The default level is `warn`. + +If you specify `trace`, you can find underlying HTTP transaction logs. + + +```shell-session +RUST_LOG=trace +``` + +Example logs: +```shell-session +[2025-08-16T03:47:38Z DEBUG request::connect] starting new connection: https://iam.cloud.ibm.com/ +[2025-08-16T03:47:38Z DEBUG direct_access_api::middleware::auth] current token ... +``` + + +## Build the API documentation + +The Rust API documentation can be created by running +```shell-session +. ~/.cargo/env +cargo doc --no-deps --open +``` + +The C API documentation can be created by using doxygen: +```shell-session +doxygen Doxyfile +``` + +This will create an HTML document under the `./html` directory, which you can open in a web browser. + + +The Python API documentation is generated with `pydoc`. 
After entering the virtual environment with the QRMI packaged installed, run the following commands: +```shell-session +python -m pydoc -p 8290 +Server ready at http://localhost:8290/ +Server commands: [b]rowser, [q]uit +server> b +``` + +Then, open the following page in your browser: +```shell-session +http://localhost:8290/qrmi.html +``` + +Stop the server with +```shell-session +server> q +``` diff --git a/docs/guides/slurm-hpc-ux.mdx b/docs/guides/slurm-hpc-ux.mdx new file mode 100644 index 00000000000..f23f6fd50f1 --- /dev/null +++ b/docs/guides/slurm-hpc-ux.mdx @@ -0,0 +1,172 @@ +--- +title: SPANK plugin user guide +description: User guide for the quantum resource management SPANK plugin +--- +{/* cspell:ignore QRMI, SBATCH, srun, Pasqal, slurmd, Doxyfile, Gres */} + +# SPANK plugin user guide + +Slurm QPU resource definitions determine what physical resources can be used by Slurm jobs in high-performance compute (HPC) environments. User source code should be agnostic to specific backend instances, and even backend types whenever possible. This keeps source code portable while the QPU selection criteria are part of the resource definition (which is considered configuration rather than source code). + +## Configure QPU resources in job creation + + +Note that this plugin is under active development and the exact syntax is subject to change. + + +### Administrator scope + +HPC administrators configure the SPANK plugin to specify what physical resources can be provided to Slurm jobs. +This configuration contains all the information needed to have Slurm jobs access the physical resources, such as endpoints and access credentials. + +Read the [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) for a comprehensive example configuration. + +In `slurm.conf`, QPU resources can be assigned to some or all nodes for usage: +``` +... 
+GresTypes=qpu,name +NodeName=node[1-5000] Gres=qpu,name:ibm_fez +... +``` + +### User scope + +HPC users submit jobs using QPU resources that are tied to Slurm QPU resources. The name attribute references what the HPC administrator has defined. During a slurm job's runtime, backend selection can be based on criteria other than a predefined name referring to a specific backend (for example, by capacity and error rate qualifiers, to help down-select among the defined set of backends). + +There might be additional environment variables required, depending on the backend type. + +`SBATCH` parameters will point to one or more QPU resources assigned to the application as generic resources. +Environment variables provided through the plugin will provide the necessary information to the application (see the [HPC application scope](#hpc-application-scope) section for details). + +```shell +#SBATCH --time=100 +#SBATCH --output= +#SBATCH --gres=qpu:1 +#SBATCH --qpu=ibm_fez +#SBATCH --... # other options + +srun ... +``` + +To use more QPU resources, add more QPUs to the `--qpu` parameter: + +```shell +#SBATCH --time=100 +#SBATCH --output= +#SBATCH --gres=qpu:3 +#SBATCH --qpu=my_local_qpu,ibm_fez,ibm_marrakesh +#SBATCH --... # other options + +srun ... +``` + +### HPC application scope + +HPC applications use the Slurm QPU resources assigned to the Slurm job. + +Environment variables provide more details for use by the application; for example, `SLURM_JOB_QPU_RESOURCES` lists the quantum resource names (comma-separated if several are provided). +These variables will be used by QRMI. (See the README files in the various QRMI directories ([IBM](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/ibm/README.md), [pasqal](https://github.com/qiskit-community/qrmi/blob/main/examples/qiskit_primitives/pasqal/README.md)) for more details.) 
+ +```python +from qiskit import QuantumCircuit +# using an IBM QRMI flavor: +from qrmi.primitives import QRMIService +from qrmi.primitives.ibm import SamplerV2, get_backend + +# define circuit + +circuit = QuantumCircuit(2) +circuit.h(0) +circuit.cx(0, 1) +circuit.measure_all() + +# instantiate QRMI service and get quantum resource (we'll take the first one should there be serveral of them) +# inject credentials needed for accessing the service at this point +load_dotenv() +service = QRMIService() + +resources = service.resources() +qrmi = resources[0] + +# Generate transpiler target from backend configuration & properties and transpile +backend = get_backend(qrmi) +pm = generate_preset_pass_manager( + optimization_level=1, + backend=backend, +) + +isa_circuit = pm.run(circuit) + +# run the circuit +options = {} +sampler = SamplerV2(qrmi, options=options) + +job = sampler.run([(isa_circuit, isa_observable, param_values)]) +print(f">>> Job ID: {job.job_id()}") + +result = job.result() + +if job.done(): + pub_result = result[0] + print(f"Counts for the 'meas' output register: {pub_result.data.meas.get_counts()}") +elif job.cancelled(): + print("Cancelled") +elif job.errored(): + print(qrmi.task_logs(job.job_id())) +``` + +See the [examples directory](https://github.com/qiskit-community/qrmi/tree/main/examples/qiskit_primitives/) for example files. + +### Backend specifics +#### IBM Direct Access API +##### Administrator scope +Configuration of Direct Access API backends (HPC admin scope) includes endpoints and credentials to the Direct Access endpoint and authentication services as well as to the S3 endpoint. +Specifically, this includes: + +* IBM Cloud® API key for creating bearer tokens +* Endpoint of the Direct Access API +* S3 bucket and access details + +Access credentials should not be visible to HPC users or other non-privileged users on the system. +Therefore, sensitive data can be put in separate files, which can be access-protected accordingly. 
+ +Note that Slurm has full access to the backend. +This has several implications: + +* The Slurm plugin is responsible for multi-tenancy (ensuring that users don't see results of other users' jobs) +* The HPC cluster side is responsible for vetting users (who is allowed to access the QPU) and ensuring according access +* The capacity and priority of the QPU usage is solely managed through Slurm; there is no other scheduling of users involved outside of Slurm + +##### User scope +Execution lanes are not exposed to the HPC administrator or user directly. +Instead, during runtime, there can be two different modes that HPC users can specify: + +* `exclusive=true` specifies that no other jobs can use the resource at the same time. An exclusive mode job gets all execution lanes and cannot run at the same time as a non-exclusive job +* `exclusive=false` allows other jobs to run in parallel. In this case, there can be as many jobs as there are execution lanes, all running at the same time, and the job is assigned one lane + +#### Qiskit Runtime Service +##### User scope + +It is expected that users specify additional access details in environment variables. +Specifically, this includes the following: + +* Qiskit Runtime service instance (CRN, Cloud Resource Name) +* Endpoint for Qiskit Runtime (unless auto-detected from the CRN) +* API key, which has access to the CRN +* S3 instance, bucket, and access token/credentials for data transfers + +These details determine under which user and service instance the Qiskit Runtime service is used. +Accordingly, IBM Quantum® Platform scheduling considers the user's and service instance's capabilities for scheduling. + +At this time, users must provide the above details (no shared cluster-wide quantum access). + +#### Pasqal Cloud Services +##### HPC admin scope +There is no specific setup required from HPC admins for PCS usage. 
+ +##### HPC user scope +It is expected that users specify additional access details in environment variables. +Currently, this includes the following: + +* PCS resource to target (FRESNEL, EMU_FRESNEL, EMU_MPS) +* Authorization token diff --git a/docs/guides/slurm-plugin.mdx b/docs/guides/slurm-plugin.mdx new file mode 100644 index 00000000000..ac0f5d59124 --- /dev/null +++ b/docs/guides/slurm-plugin.mdx @@ -0,0 +1,177 @@ +--- +title: SPANK plugin for QRMI +description: Overview of the SPANK plugin for quantum resource management in HPC systems. +--- +{/* cspell:ignore QRMI, SBATCH, srun, Pasqal, slurmd, Doxyfile, Gres, ntasks, fffffffb */} + +# SPANK plugin for QRMI + +The [SPANK plugin](https://github.com/qiskit-community/spank-plugins/) for the [Quantum Resource Management Interface (QRMI)](./qrmi) is used to configure access to quantum resources from user jobs in a compute environment administrated by the Slurm workload manager. It handles the acquisition and release of access to quantum resources and sets the necessary environment variables for executing quantum workloads. The available quantum resources are specified in a `qrmi_config.json` file, which is managed by an administrator. + +Once installed, this plugin registers the following option. A Slurm user can specify which quantum resources are used for the Slurm job script. + +```bash +--qpu=names Comma separated list of QPU resources to use. +``` + +For example, +```bash +#!/bin/bash + +#SBATCH --job-name=sampler_job +#SBATCH --ntasks=1 +#SBATCH --cpus-per-task=1 +#SBATCH --qpu=ibm_quebec,ibm_sherbrooke + +# Your script goes here... +``` + + +## Requirements and configuration + +The following tools are required for the compiling environment: +* Rust compiler 1.86 or above [Link](https://www.rust-lang.org/tools/install) +* A C compiler: for example, GCC(gcc) on Linux and Clang(clang-tools-extra) for Rust unknown targets/cross compilations. 
+QRMI and its SPANK plugin are compatible with a compiler conforming to the C11 standard.
+* make/cmake (make/cmake RPM for RHEL-compatible OS)
+* openssl (openssl-devel RPM for RHEL-compatible OS)
+* zlib (zlib-devel RPM for RHEL-compatible OS)
+* Slurm header files (slurm/slurm.h, etc.) must be available on your host
+
+
+The runtime environment requires:
+* gcc (libgcc RPM for RHEL-compatible OS)
+* openssl (openssl-libs RPM for RHEL-compatible OS)
+* zlib (zlib RPM for RHEL-compatible OS)
+
+### Configure available Quantum Resources
+
+A [`qrmi_config.json.example`](https://github.com/qiskit-community/spank-plugins/blob/main/plugins/spank_qrmi/qrmi_config.json.example) is provided in the repository as an example configuration.
+
+The `resources` array contains a set of available Quantum Resources that can be used by Slurm users in their jobs. Each Quantum Resource definition contains:
+
+| Property | Description |
+| ---- | ---- |
+| name | Quantum resource name; for example, a quantum backend name. |
+| type | Resource type (`direct-access`, `qiskit-runtime-service`, or `pasqal-cloud`) |
+| environment | A set of environment variables to work with QRMI. Current implementations assume the API endpoint and credentials are specified via environment variable settings. |
+
+If a user specifies a resource with the `--qpu` option that is not defined in the `qrmi_config.json` file, the specification will be ignored.
+
+If the user sets the necessary environment variables for job execution themselves, it is not required to specify them in this file. In this case, the environment property will be `{}`.
+
+
+If you are using a QPU resource with the resource type `qiskit-runtime-service`, use an account that supports [opening a session](https://quantum.cloud.ibm.com/docs/en/guides/run-jobs-session#open-a-session), such as a Premium plan.
+
+If you are using an account that does not support opening a session, such as an Open plan account, add `QRMI_IBM_QRS_SESSION_MODE="batch"` to the environment variable list in `qrmi_config.json` as a workaround.
+
+
+## Installation
+
+Run the build using `make` and `cmake`:
+```shell-session
+. ~/.cargo/env
+mkdir build
+cd build
+cmake ..
+make
+```
+
+By default, the `CMakeLists.txt` file expects the Slurm header file (`slurm.h`) to be located in `/usr/include/slurm`, but this can be customized as shown below.
+
+```shell-session
+SLURM_INCLUDE_DIRS=<path-to-slurm-headers> cmake ..
+```
+
+
+If the above build step is successful, a Linux shared library named `spank_qrmi.so` will be created under the `build/` directory.
+
+In addition, add the following line to the `/etc/slurm/plugstack.conf` file on the nodes where this plugin is installed.
+
+Note that the administrator needs to create the `qrmi_config.json` file and specify its path as a plugin argument, as shown below.
+
+```bash
+optional /usr/lib64/slurm/spank_qrmi.so /etc/slurm/qrmi_config.json
+```
+
+
+Optional arguments are also available. They allow you to add environment variables to the Slurm process where the SPANK plugin is loaded. The format for specifying environment variables is defined as follows.
+```bash
+--env:{variable name}={value}
+```
+For example, when interacting with Quantum resources via an HTTP proxy, the environment variables `http_proxy`, `https_proxy`, and `no_proxy` are required. These can be added as shown below.
+```bash
+optional /usr/lib64/slurm/spank_qrmi.so /etc/slurm/qrmi_config.json --env:http_proxy=http://192.168.1.128:3128 --env:https_proxy=http://192.168.1.128:3128
+```
+
+For the allocator node, you don't need to specify the path to `qrmi_config.json`, as shown below.
+
+```bash
+optional /usr/lib64/slurm/spank_qrmi.so
+```
+
+
+The following table shows which Slurm context nodes these files should be copied to.
For more details of each context, refer to the [SPANK Plugin documentation](https://slurm.schedmd.com/spank.html#SECTION_SPANK-PLUGINS) +| Files | Slurm contexts | +| ------- | ------- | +| `plugstack.conf` | **local**, **remote**, **allocator**, **slurmd** and **job_script**. | +| `qrmi_config.json` | **remote** (Compute nodes) | +| `spank_qrmi.so` | **allocator** and **remote** (Login nodes and Compute nodes) | + + + +Once `plugstack.conf` is updated, spank plugins will be loaded at runtime during the next job launch, which means administrators do not need to restart Slurm cluster. + + +Once you complete installation, you should find `--qpu=names` option in the sbatch help message. + +```bash +Options provided by plugins: + --qpu=names Comma separated list of QPU resources to use. +``` + +## Logging + +This plugin uses Slurm logger for logging. Log messages from this plugin can be found in `/var/log/slurm/slurmd.log`, etc. + +```bash +[2025-07-31T09:43:34.019] [21.batch] debug: spank: /etc/slurm/plugstack.conf:1: Loaded plugin spank_qrmi.so +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c(6582, 0): -> slurm_spank_init argc=1 remote=1 +[2025-07-31T09:43:34.019] [21.batch] debug: SPANK: appending plugin option "qpu" +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c(6582,0): <- slurm_spank_init rc=0 +[2025-07-31T09:43:34.019] [21.batch] debug2: spank: spank_qrmi.so: init = 0 +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c: --qpu=[ibm_sherbrooke,ibm_torino] +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c(6582, 0): -> slurm_spank_init_post_opt argc=1 remote=1 +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c, fffffffb +[2025-07-31T09:43:34.019] [21.batch] debug: spank_qrmi_c: argv[0] = [/etc/slurm/qrmi_config.json] +[2025-07-31T09:43:34.020] [21.batch] debug: spank_qrmi_c: name(ibm_sherbrooke), type(1) found in qrmi_config +``` + +You can enable QRMI runtime log by specifying the following `srun` arguments. 
+| sbatch/srun option | Slurm log level (SRUN_DEBUG) | QRMI log level (RUST_LOG) |
+| ---- | ---- | ---- |
+| (default) | 3 | info |
+| `--quiet` | 2 | error |
+| `--verbose` | 4 | debug |
+| `-vv` or more | 5 | trace |
+
+
+## Considerations for multiple QPUs
+
+At runtime, each QRMI instance is linked to a single QPU resource. To enable the use of multiple Quantum resources within a single job script, this plugin sets environment variables with the resource name as a prefix. For example, if `--qpu=qpu1,qpu2` is specified, the environment variables will be set as follows:
+
+```bash
+qpu1_QRMI_IBM_DA_ENDPOINT=http://test1
+qpu2_QRMI_IBM_DA_ENDPOINT=http://test2
+```
+
+This ensures that each QRMI instance operates with the configuration parameters set for its respective resource during the execution of the Slurm job.
+
+The above environment variable settings are applied only to jobs where the `--qpu=names` option is specified.
+
+This plugin also sets the following two environment variables, which are referred to by the QRMI primitives code.
+
+| Environment variable | Description |
+| ---- | ---- |
+| `SLURM_JOB_QPU_RESOURCES` | Comma-separated list of QPU resources to use at runtime. Undocumented resources will be filtered out. For example, `qpu1,qpu2`. |
+| `SLURM_JOB_QPU_TYPES` | Comma-separated list of resource types (`direct-access`, `qiskit-runtime-service`, and `pasqal-cloud`).
For example, `direct-access,pasqal-cloud` | diff --git a/qiskit_bot.yaml b/qiskit_bot.yaml index 68e9be4d8a6..c6bc8a40e2f 100644 --- a/qiskit_bot.yaml +++ b/qiskit_bot.yaml @@ -537,6 +537,12 @@ notifications: "docs/guides/open-source": - "@beckykd" - "@abbycross" + "docs/guides/slurm-hpc-ux": + - "@kaelynj" + "docs/guides/slurm-plugin": + - "@kaelynj" + "docs/guides/qrmi": + - "@kaelynj" "docs/tutorials/index": - "@miamico" "docs/tutorials/ai-transpiler-introduction": diff --git a/scripts/config/cspell/dictionaries/qiskit.txt b/scripts/config/cspell/dictionaries/qiskit.txt index 331b6d2c955..10edc429a19 100644 --- a/scripts/config/cspell/dictionaries/qiskit.txt +++ b/scripts/config/cspell/dictionaries/qiskit.txt @@ -83,6 +83,7 @@ QKNAME QNN QNNs QPU's +QRMI QRTE QSCI QUBO diff --git a/scripts/js/commands/checkPatternsIndex.ts b/scripts/js/commands/checkPatternsIndex.ts index 7abf85c2e32..c17f0634601 100644 --- a/scripts/js/commands/checkPatternsIndex.ts +++ b/scripts/js/commands/checkPatternsIndex.ts @@ -33,6 +33,9 @@ const ALLOWLIST_MISSING_FROM_INDEX: Set = new Set([ "/docs/guides/faq", "/docs/guides/execution-modes-faq", "/docs/guides/open-source", + "/docs/guides/qrmi", + "/docs/guides/slurm-plugin", + "/docs/guides/slurm-hpc-ux", ]); // URLs that show up in the INDEX_PAGES, but are not in the left ToC under