Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 21 additions & 67 deletions eessi/testsuite/tests/apps/MetalWalls.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,60 +31,50 @@
from reframe.core.builtins import run_after
from reframe.core.parameters import TestParam as parameter

from eessi.testsuite import hooks
from eessi.testsuite.constants import COMPUTE_UNITS, DEVICE_TYPES, SCALES, TAGS
from eessi.testsuite.eessi_mixin import EESSI_Mixin
from eessi.testsuite.constants import COMPUTE_UNITS, DEVICE_TYPES
from eessi.testsuite.hpctestlib.sciapps.metalwalls.benchmarks import MetalWallsCheck
from eessi.testsuite.utils import find_modules, log
from eessi.testsuite.utils import find_modules


@rfm.simple_test
class EESSI_MetalWalls_MW(MetalWallsCheck):
class EESSI_MetalWalls_MW(MetalWallsCheck, EESSI_Mixin):
"""MetalWalls benchmark tests.

`MetalWalls <https://gitlab.com/ampere2/metalwalls>`__ """

scale = parameter(SCALES.keys())

valid_systems = ['*']
valid_prog_environs = ['default']
time_limit = '60m'
# input files are downloaded
readonly_files = ['']

module_name = parameter(find_modules('MetalWalls'))
# For now, MetalWalls is being built for CPU targets only
# compute_device = parameter([DEVICE_TYPES.CPU, DEVICE_TYPES.GPU])
compute_device = parameter([DEVICE_TYPES.CPU])
num_tasks_per_compute_unit = 1
used_cpus_per_task = None
device_type = parameter([DEVICE_TYPES.CPU])

def required_mem_per_node(self):
mem_per_task = 0.4
if self.benchmark_info[0] == 'hackathonGPU/benchmark5':
mem_per_task = 1.2
return self.num_tasks_per_node * mem_per_task + 2

@run_after('init')
def run_after_init(self):
"""Hooks to run after the init phase"""

# Filter on which scales are supported by the partitions defined in the ReFrame configuration
hooks.filter_supported_scales(self)

# Support selecting modules on the cmd line.
hooks.set_modules(self)

# Make sure that GPU tests run in partitions that support running on a GPU,
# and that CPU-only tests run in partitions that support running CPU-only.
# Also support setting valid_systems on the cmd line.
hooks.filter_valid_systems_by_device_type(self, required_device_type=self.compute_device)

# Make sure the test is not run on offline partitions
hooks.filter_valid_systems_for_offline_partitions(self)

# Support selecting scales on the cmd line via tags.
hooks.set_tag_scale(self)
# Launch 1 task per CPU (when run on CPUs) or 1 task per GPU (when run on GPUs)
if self.device_type == DEVICE_TYPES.CPU:
self.compute_unit = COMPUTE_UNITS.CPU
elif self.device_type == DEVICE_TYPES.GPU:
self.compute_unit = COMPUTE_UNITS.GPU
else:
raise NotImplementedError(
f"Device type {self.device_type} was not implemented for test {self.name}"
)

@run_after('init')
def set_tag_ci(self):
"""Set tag CI on smallest benchmark, so it can be selected on the cmd line via --tag CI"""
if self.benchmark_info[0] == 'hackathonGPU/benchmark':
self.tags.add(TAGS.CI)
log(f'tags set to {self.tags}')
self.is_ci_test = True

@run_after('init')
def set_increased_walltime(self):
Expand All @@ -94,34 +84,6 @@ def set_increased_walltime(self):
if self.num_tasks <= 4 and self.benchmark_info[0] in large_benchmarks:
self.time_limit = '120m'

@run_after('setup')
def run_after_setup(self):
"""Hooks to run after the setup phase"""

# Calculate default requested resources based on the scale:
# 1 task per CPU for CPU-only tests, 1 task per GPU for GPU tests.
# Also support setting the resources on the cmd line.
if self.compute_device == DEVICE_TYPES.GPU:
self.compute_unit = COMPUTE_UNITS.GPU
hooks.assign_tasks_per_compute_unit(test=self)
else:
self.compute_unit = COMPUTE_UNITS.CPU
hooks.assign_tasks_per_compute_unit(test=self)

@run_after('setup')
def set_binding(self):
"""Set binding to compact to improve performance reproducibility."""
hooks.set_compact_process_binding(self)

@run_after('setup')
def request_mem(self):
"""Request memory per node based on the benchmark."""
mem_per_task = 0.4
if self.benchmark_info[0] == 'hackathonGPU/benchmark5':
mem_per_task = 1.2
memory_required = self.num_tasks_per_node * mem_per_task + 2
hooks.req_memory_per_node(test=self, app_mem_req=memory_required * 1024)

@run_after('setup')
def skip_max_corecnt(self):
"""Skip tests if number of tasks per node exceeds maximum core count."""
Expand All @@ -131,11 +93,3 @@ def skip_max_corecnt(self):
self.num_tasks > max_task_cnt,
f'Number of tasks {self.num_tasks} exceeds maximum task count {max_task_cnt} for {bench_name}'
)

@run_after('setup')
def set_omp_num_threads(self):
"""
Set number of OpenMP threads via OMP_NUM_THREADS.
Set default number of OpenMP threads equal to number of CPUs per task.
"""
hooks.set_omp_num_threads(self)
Loading