|
| 1 | +# Copyright 2026 NVIDIA Corporation. All rights reserved. |
| 2 | +# SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE |
| 3 | + |
| 4 | + |
| 5 | +# ################################################################################ |
| 6 | +# |
| 7 | +# This example demonstrates the core cuda.bindings.nvml functionality by |
| 8 | +# implementing a subset of the NVIDIA System Management Interface (nvidia-smi) |
| 9 | +# command line tool in Python. |
| 10 | +# |
| 11 | +# ################################################################################ |
| 12 | + |
| 13 | + |
| 14 | +import sys |
| 15 | + |
| 16 | +from cuda.bindings import nvml |
| 17 | + |
| 18 | + |
def format_size(bytes_val: int) -> str:
    """Render a byte count as a whole number of MiB, e.g. 1048576 -> "1MiB"."""
    mib = bytes_val / (1 << 20)  # 1 << 20 == 1024 * 1024
    return f"{mib:.0f}MiB"
| 22 | + |
| 23 | + |
| 24 | +LINES = [[[4, 27, 6], [18, 3], [20]], [[4, 6, 13, 13], [22], [9, 10]]] |
| 25 | + |
| 26 | + |
| 27 | +class TableFormatter: |
| 28 | + def __init__(self, lines): |
| 29 | + self.formats, self.sizes, self.counts = zip(*[self._create_line_format(line) for line in lines]) |
| 30 | + |
| 31 | + def _create_line_format(self, descriptor): |
| 32 | + parts = [] |
| 33 | + sizes = [] |
| 34 | + for section in descriptor: |
| 35 | + parts.append("| ") |
| 36 | + sizes.append(1) |
| 37 | + for i, align in enumerate(section): |
| 38 | + if i == len(section) - 1: |
| 39 | + direct = ">" |
| 40 | + else: |
| 41 | + direct = "<" |
| 42 | + parts.append(f"{{:{direct}{align}}} ") |
| 43 | + sizes[-1] += align + 1 |
| 44 | + parts.append("|") |
| 45 | + return "".join(parts), sizes, sum(len(x) for x in descriptor) |
| 46 | + |
| 47 | + def print_line(self, char="-"): |
| 48 | + parts = ["+"] |
| 49 | + for size in self.sizes[0]: |
| 50 | + parts.append(char * size) |
| 51 | + parts.append("+") |
| 52 | + print("".join(parts)) |
| 53 | + |
| 54 | + def print_values(self, *args): |
| 55 | + for line_format, count in zip(self.formats, self.counts): |
| 56 | + print(line_format.format(*args[:count])) |
| 57 | + args = args[count:] |
| 58 | + |
| 59 | + |
def _on_off(state):
    """Map an nvml.EnableState to nvidia-smi's "On"/"Off" strings."""
    return "On" if state == nvml.EnableState.FEATURE_ENABLED else "Off"


def _query(getter, fmt=None, default="N/A"):
    """Run a zero-argument NVML getter, optionally formatting the result.

    Returns ``default`` when the query raises nvml.NvmlError, mirroring
    nvidia-smi's "N/A" cells for fields a device does not report.
    """
    try:
        value = getter()
    except nvml.NvmlError:
        return default
    return fmt(value) if fmt is not None else value


def print_table():
    """Query every GPU via NVML and print an nvidia-smi-style table."""
    formatter = TableFormatter(LINES)

    driver_version = nvml.system_get_driver_version()
    # The CUDA driver version is encoded as 1000*major + 10*minor.
    cuda_version_int = nvml.system_get_cuda_driver_version()
    cuda_major = cuda_version_int // 1000
    cuda_minor = (cuda_version_int % 1000) // 10
    cuda_version = f"{cuda_major}.{cuda_minor}"

    print("+-----------------------------------------------------------------------------------------+")
    print(
        f"| NVIDIA-MINI-SMI {driver_version:<16} Driver Version: {driver_version:<15} CUDA Version: {cuda_version:<9}|"
    )
    formatter.print_line()
    print("| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |")
    print("| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |")
    formatter.print_line("=")

    # Display names for the compute-mode enum; anything else prints "Unknown".
    compute_mode_labels = {
        nvml.ComputeMode.COMPUTEMODE_DEFAULT: "Default",
        nvml.ComputeMode.COMPUTEMODE_EXCLUSIVE_PROCESS: "E. Process",
        nvml.ComputeMode.COMPUTEMODE_PROHIBITED: "Prohibited",
    }

    device_count = nvml.device_get_count_v2()

    for i in range(device_count):
        handle = nvml.device_get_handle_by_index_v2(i)
        name = nvml.device_get_name(handle)

        # Persistence mode distinguishes "unsupported" from other failures,
        # so it cannot go through the generic _query helper.
        try:
            persistence_str = _on_off(nvml.device_get_persistence_mode(handle))
        except nvml.NotSupportedError:
            persistence_str = "Unsupp."
        except nvml.NvmlError:
            persistence_str = "N/A"

        bus_id = _query(lambda: nvml.device_get_pci_info_v3(handle).bus_id)
        disp_str = _query(lambda: nvml.device_get_display_active(handle), _on_off)
        ecc_str = _query(lambda: nvml.device_get_ecc_mode(handle)[0], _on_off)
        fan_str = _query(lambda: nvml.device_get_fan_speed(handle), lambda fan: f"{fan: >3}%")
        temp_str = _query(
            lambda: nvml.device_get_temperature_v(handle, nvml.TemperatureSensors.TEMPERATURE_GPU),
            lambda t: f"{t}C",
        )
        perf_str = _query(lambda: nvml.device_get_performance_state(handle), lambda p: f"P{p}")
        # NVML reports power in milliwatts; nvidia-smi shows whole watts.
        usage_str = _query(lambda: nvml.device_get_power_usage(handle), lambda mw: f"{mw // 1000}W")
        cap_str = _query(lambda: nvml.device_get_power_management_limit(handle), lambda mw: f"{mw // 1000}W")
        pwr_str = f"{usage_str} / {cap_str}"
        mem_str = _query(
            lambda: nvml.device_get_memory_info_v2(handle),
            lambda m: f"{format_size(m.used)} / {format_size(m.total)}",
        )
        util_str = _query(lambda: nvml.device_get_utilization_rates(handle).gpu, lambda u: f"{u: >3}%")
        comp_str = _query(
            lambda: nvml.device_get_compute_mode(handle),
            lambda mode: compute_mode_labels.get(mode, "Unknown"),
        )

        formatter.print_values(
            str(i),
            name,
            persistence_str,
            bus_id,
            disp_str,
            ecc_str,
            fan_str,
            temp_str,
            perf_str,
            pwr_str,
            mem_str,
            util_str,
            comp_str,
        )
        formatter.print_line()
| 187 | + |
| 188 | + |
def main():
    """Entry point: bring NVML up, print the table, always shut NVML down."""
    try:
        nvml.init_v2()
    except nvml.NvmlError as exc:
        # Without a working NVML there is nothing to report.
        print(f"Failed to initialize NVML: {exc}")
        sys.exit(1)

    try:
        print_table()
    finally:
        # Release NVML resources even if table printing fails.
        nvml.shutdown()
| 200 | + |
| 201 | + |
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
0 commit comments