Commit 714569b1 authored by MarkLTurner

Refactor

parent 0bbb4f37
Branch: master
@@ -3,18 +3,19 @@ import reframe.core.launchers.mpi
 import common
 
-@rfm.parameterized_test(*([tasks, threads, iteration, launcher]
-                          for tasks in [2]
-                          for threads in list(reversed([56]))
+# 4 ranks and 64 threads were found to be an optimal way to use a
+# node on DINE for the pmill 768 benchmark
+@rfm.parameterized_test(*([tasks, tasks_per_node, threads, iteration, launcher]
+                          for tasks in [16]
+                          for tasks_per_node in [4]
+                          for threads in list(reversed([64]))
                           for iteration in [1]
-                          for launcher in ['mpirun','perf-report']))
-class PMillenniumTest(rfm.RegressionTest):
-    def __init__(self, num_tasks, num_threads, iteration, launcher):
-        ic_dir = common.setup(self, launcher, num_tasks, num_tasks_per_node=num_tasks)
+                          for launcher in ['profiler:Vtune','profiler:Allinea']))
+class PMillennium768MultiNode(rfm.RegressionTest):
+    def __init__(self, num_tasks, tasks_per_node, num_threads, iteration, launcher):
+        ic_dir = common.setup(self, launcher, num_tasks, tasks_per_node)
         test_dir = 'swiftsim/examples/PMillennium/PMillennium-768'
-        self.cpus_per_task = 32  # added
         print(f'Running with threads : <{num_threads}>')
         self.keep_files = [test_dir]
@@ -24,23 +25,22 @@ class PMillenniumTest(rfm.RegressionTest):
             '--with-parmetis'
         ]
         self.prerun_cmds = [f'pushd {test_dir}']
-        num_iterations = 5
-        if launcher == 'mpirun':
+        num_steps = 10
+        self.time_limit = '3h'
+        if launcher == 'profiler:Vtune':
             self.executable = 'aps'
             self.executable_opts = ['--collection-mode=mpi,omp', '../../swift_mpi']
-        elif launcher == 'perf-report':
+        elif launcher == 'profiler:Allinea':
             self.executable = 'perf-report'
             self.executable_opts = ['--mpi=intel-mpi', '../../swift_mpi']
-            self.time_limit = '2h40m'
         self.executable_opts += [
             '--cosmology',
             '--self-gravity',
             '-v', '1',
             f'--threads={num_threads}',
-            '-n', f'{num_iterations}',
+            '-n', f'{num_steps}',
             '-P', 'Restarts:enable:0',
             f'-PInitialConditions:file_name:{ic_dir}/pmillenium/PMill-768.hdf5',
             'p-mill-768.yml'
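As an aside on the parameterization above: a minimal sketch in plain Python (not part of the commit) of how the generator handed to @rfm.parameterized_test expands; each emitted list becomes the positional arguments of one test variant's __init__.

# Minimal sketch, plain Python: expansion of the generator passed to
# @rfm.parameterized_test above. Each list becomes the positional
# arguments of one variant's __init__.
variants = [[tasks, tasks_per_node, threads, iteration, launcher]
            for tasks in [16]
            for tasks_per_node in [4]
            for threads in list(reversed([64]))
            for iteration in [1]
            for launcher in ['profiler:Vtune', 'profiler:Allinea']]
for v in variants:
    print(v)
# [16, 4, 64, 1, 'profiler:Vtune']
# [16, 4, 64, 1, 'profiler:Allinea']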
@@ -2,11 +2,10 @@ import reframe as rfm
 import reframe.utility.sanity as sn
 
 def setup(test, launcher, num_tasks, num_tasks_per_node=1):
     test.time_limit = '1h'
-    test.num_tasks = 1  # num_tasks
-    test.num_tasks_per_node = 1
+    test.num_tasks = num_tasks
+    test.num_tasks_per_node = num_tasks_per_node
     test.valid_prog_environs = ['*']
@@ -28,26 +27,31 @@ def setup(test, launcher, num_tasks, num_tasks_per_node=1):
     if rfm.utility.osext.osuser() == 'dc-turn5':
         if test.current_system.name == 'cosma7':
             ic_dir = '/cosma7/data/ds007/dc-turn5/swift_initial_conditions'
-            if launcher == 'mpirun':
+            if launcher == 'profiler:Vtune':
                 test.valid_systems = ['cosma7:cpu_multi_node_aps']
-            elif launcher == 'perf-report':
+            elif launcher == 'profiler:Allinea':
                 test.valid_systems = ['cosma7:cpu_multi_node_perf_report']
         elif test.current_system.name == 'dine':
             ic_dir = '/cosma5/data/durham/dc-turn5/swift_initial_conditions'
-            if launcher == 'mpirun':
+            if launcher == 'profiler:Vtune':
                 test.valid_systems = ['dine:cpu_multi_node_aps']
-            elif launcher == 'perf-report':
+            elif launcher == 'profiler:Allinea':
                 test.valid_systems = ['dine:cpu_multi_node_perf_report']
-            elif launcher == 'scalasca':
+            elif launcher == 'profiler:ScoreP':
+                # ScoreP only works with the non-MPI version of SWIFT
+                # because MPI_THREAD_MULTIPLE (needed by SWIFT) is not supported
                 test.valid_systems = ['dine:cpu_single_node']
         else:
             raise ValueError(f'Need to handle {test.current_system.name} for {rfm.utility.osext.osuser()}')
-    elif rfm.utility.osext.osuser() == 'dc-fraw1':
-        if test.current_system.name == 'cosma7' or test.current_system.name == 'dine':
-            ic_dir = '/cosma5/data/do008/dc-fraw1/swift_initial_conditions'
-        else:
-            raise ValueError(f'Need to handle {test.current_system.name} for {rfm.utility.osext.osuser()}')
+    # TODO: ADD FILE PATHS
+    # elif rfm.utility.osext.osuser() == 'dc-fraw1':
+    #     if test.current_system.name == 'cosma7' or test.current_system.name == 'dine':
+    #         ic_dir = '/cosma5/data/do008/dc-fraw1/swift_initial_conditions'
+    #     else:
+    #         raise ValueError(f'Need to handle {test.current_system.name} for {rfm.utility.osext.osuser()}')
     else:
         raise ValueError(f'Need to handle {test.current_system.name} for {rfm.utility.osext.osuser()}')
     return ic_dir
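A side note on the dispatch above: the repeated if/elif chains could be collapsed into a lookup table. A hypothetical sketch (an alternative, not code from this commit), reusing the system and launcher names from the diff; PARTITIONS and pick_partitions are names invented here.

# Hypothetical alternative to the if/elif chains: map (system, launcher)
# pairs to the valid partitions.
PARTITIONS = {
    ('cosma7', 'profiler:Vtune'):   ['cosma7:cpu_multi_node_aps'],
    ('cosma7', 'profiler:Allinea'): ['cosma7:cpu_multi_node_perf_report'],
    ('dine',   'profiler:Vtune'):   ['dine:cpu_multi_node_aps'],
    ('dine',   'profiler:Allinea'): ['dine:cpu_multi_node_perf_report'],
    # ScoreP needs the non-MPI SWIFT build, hence a single-node partition.
    ('dine',   'profiler:ScoreP'):  ['dine:cpu_single_node'],
}

def pick_partitions(system_name, launcher):
    try:
        return PARTITIONS[(system_name, launcher)]
    except KeyError:
        raise ValueError(f'Need to handle {system_name} with {launcher}')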
File moved
@@ -3,18 +3,19 @@ import reframe.core.launchers.mpi
 import common
 
-@rfm.parameterized_test(*([tasks, threads, iteration, launcher]
-                          for tasks in [2]
-                          for threads in list(reversed([32]))
+# 4 ranks and 64 threads were found to be an optimal way to use a
+# node on DINE for the pmill 768 benchmark
+@rfm.parameterized_test(*([tasks, tasks_per_node, threads, iteration, launcher]
+                          for tasks in [16]
+                          for tasks_per_node in [4]
+                          for threads in list(reversed([64]))
                           for iteration in [1]
-                          for launcher in ['mpirun', 'perf-report', 'scalasca']))
-class SodShock3dTest(rfm.RegressionTest):
-    def __init__(self, num_tasks, num_threads, iteration, launcher):
-        ic_dir = common.setup(self, launcher, num_tasks, num_tasks_per_node=num_tasks)
+                          for launcher in ['profiler:Vtune', 'profiler:Allinea']))
+class SodShock3dTestMultiNode(rfm.RegressionTest):
+    def __init__(self, num_tasks, tasks_per_node, num_threads, iteration, launcher):
+        ic_dir = common.setup(self, launcher, num_tasks, tasks_per_node)
         test_dir = 'swiftsim/examples/HydroTests/SodShock_3D'
-        self.cpus_per_task = 32  # added
         print(f'Running with threads : <{num_threads}>')
         self.keep_files = [test_dir]
@@ -26,18 +27,14 @@ class SodShock3dTest(rfm.RegressionTest):
         self.prerun_cmds = [f'pushd {test_dir}']
         num_iterations = 10000
-        if launcher == 'mpirun':
+        if launcher == 'profiler:Vtune':
             self.executable = 'aps'
             self.executable_opts = ['--collection-mode=mpi,omp', '../../swift_mpi',
                                     '-P', 'Snapshots:time_first:999999999']
-        elif launcher == 'perf-report':
+        elif launcher == 'profiler:Allinea':
             self.executable = 'perf-report'
             self.executable_opts = ['--mpi=intel-mpi', '../../swift_mpi']
             self.time_limit = '2h40m'
-        elif launcher == 'scalasca':
-            self.build_system.make_opts = ['CC="scorep --user --thread=pthread icc"']
-            self.executable = '../../swift'
-            self.executable_opts = ['-P', 'Snapshots:time_first:999999999']
         self.executable_opts += [
             '--hydro',
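For orientation, roughly the run command the 'profiler:Vtune' variant of this test assembles. This is a sketch: ReFrame prefixes self.executable and self.executable_opts with the partition's parallel launcher, here assumed to be mpirun, and the option list is abbreviated.

# Sketch of the assembled command line for the Vtune/aps variant.
# The mpirun prefix and '-np 16' come from the partition's launcher and
# the test's num_tasks; option list abbreviated for clarity.
executable = 'aps'
executable_opts = ['--collection-mode=mpi,omp', '../../swift_mpi',
                   '-P', 'Snapshots:time_first:999999999',
                   '--hydro', '-v', '1', '--threads=64',
                   '-n', '10000', '-P', 'Restarts:enable:0']
print('mpirun -np 16 ' + ' '.join([executable] + executable_opts))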
@@ -3,42 +3,38 @@ import reframe.core.launchers.mpi
 import common
 
+# ScoreP is only compatible with SWIFT if we build with no MPI
 @rfm.parameterized_test(*([tasks, threads, iteration, launcher]
-                          for tasks in [2]
-                          for threads in list(reversed([32]))
+                          for tasks in [1]
+                          for threads in list(reversed([64]))
                           for iteration in [1]
-                          for launcher in ['scalasca']))
-class SodShock3dTest(rfm.RegressionTest):
+                          for launcher in ['profiler:ScoreP']))
+class SodShock3dTestSingleNode(rfm.RegressionTest):
     def __init__(self, num_tasks, num_threads, iteration, launcher):
         ic_dir = common.setup(self, launcher, num_tasks, num_tasks_per_node=num_tasks)
         test_dir = 'swiftsim/examples/HydroTests/SodShock_3D'
-        # self.cpus_per_task = 32  # added
         print(f'Running with threads : <{num_threads}>')
         self.keep_files = [test_dir]
         self.build_system.config_opts = [
-            '--disable-ipo',  # temporary disable - support is forthcoming
+            '--disable-ipo',  # temporary disable - ScoreP support is forthcoming
             '--with-tbbmalloc',
             '--with-parmetis'
         ]
         self.prerun_cmds = [f'pushd {test_dir}']
-        num_iterations = 1
-        # self.build_system.options = ['CC=scorep mpicc', 'CFLAGS=-fopenmp']  # Added for scalasca
-        # self.build_system.make_opts = ['CC="scorep --user --thread=pthread mpicc"']  # change mpicc to icc for non-mpi
+        num_steps = 10000
         self.build_system.make_opts = ['CC="scorep --user --thread=pthread icc"']
-        if launcher == 'scalasca':
-            self.executable = '../../swift'
+        self.executable = '../../swift'
         self.executable_opts += [
             '-P', 'Snapshots:time_first:999999999',
             '--hydro',
             '-v', '1',
             f'--threads={num_threads}',
-            '-n', f'{num_iterations}',
+            '-n', f'{num_steps}',
             '-P', 'Restarts:enable:0',
             '-P', f'InitialConditions:file_name:{ic_dir}/sodshock/glassCube_64.hdf5',
             'sodShock.yml'
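A note on the instrumented build above: Score-P instruments the code by wrapping the compiler, so the make_opts line amounts to replacing CC for the whole build. A sketch of the effective invocation, assuming ReFrame's Autotools build system passes make_opts straight through to make:

# Sketch: effective make invocation produced by the make_opts above.
# Score-P instruments the code by wrapping the compiler (icc here).
make_opts = ['CC="scorep --user --thread=pthread icc"']
print('make ' + ' '.join(make_opts))
# -> make CC="scorep --user --thread=pthread icc"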