From 4cebd4229dbfc804830c92bc304ee6d723df5b6a Mon Sep 17 00:00:00 2001 From: Joerg Henrichs Date: Tue, 19 Nov 2024 20:22:20 +1100 Subject: [PATCH 1/2] Improved MPI/OpenMP support (#336) --- .gitignore | 6 +- Documentation/source/site-specific-config.rst | 12 + run_configs/gcom/build_gcom_ar.py | 2 +- run_configs/gcom/build_gcom_so.py | 4 +- run_configs/jules/build_jules.py | 19 +- run_configs/lfric/atm.py | 2 +- run_configs/lfric/grab_lfric.py | 10 +- run_configs/lfric/gungho.py | 6 +- run_configs/lfric/mesh_tools.py | 2 +- .../tiny_fortran/build_tiny_fortran.py | 12 +- run_configs/um/build_um.py | 7 +- source/fab/build_config.py | 110 ++++++--- source/fab/cli.py | 4 +- source/fab/steps/compile_c.py | 66 +++-- source/fab/steps/compile_fortran.py | 174 +++++++++----- source/fab/steps/link.py | 46 ++-- source/fab/tools/__init__.py | 7 +- source/fab/tools/compiler.py | 173 +++++++++++--- source/fab/tools/linker.py | 4 + source/fab/tools/tool.py | 9 +- source/fab/tools/tool_box.py | 23 +- source/fab/tools/tool_repository.py | 73 +++++- tests/conftest.py | 3 +- .../test_incremental_fortran.py | 17 +- tests/unit_tests/parse/c/test_c_analyser.py | 3 +- tests/unit_tests/steps/test_analyse.py | 78 +++--- .../unit_tests/steps/test_archive_objects.py | 14 +- tests/unit_tests/steps/test_compile_c.py | 2 +- tests/unit_tests/steps/test_link.py | 12 +- .../steps/test_link_shared_object.py | 1 + tests/unit_tests/steps/test_preprocess.py | 2 +- tests/unit_tests/test_config.py | 8 +- tests/unit_tests/tools/test_compiler.py | 225 ++++++++++++++---- tests/unit_tests/tools/test_linker.py | 35 ++- tests/unit_tests/tools/test_tool_box.py | 8 +- .../unit_tests/tools/test_tool_repository.py | 91 ++++++- 36 files changed, 927 insertions(+), 343 deletions(-) diff --git a/.gitignore b/.gitignore index 9fd85da1..e4f65d63 100644 --- a/.gitignore +++ b/.gitignore @@ -4,9 +4,9 @@ __pycache__/ *$py.class # Build directory for documentation -docs/build -docs/source/api -docs/source/apidoc +Documentation/build +Documentation/source/api +Documentation/source/apidoc # C extensions *.so diff --git a/Documentation/source/site-specific-config.rst b/Documentation/source/site-specific-config.rst index c6363f21..0fd0f840 100644 --- a/Documentation/source/site-specific-config.rst +++ b/Documentation/source/site-specific-config.rst @@ -148,6 +148,18 @@ rsync, ar, ...). tool_box = ToolBox() default_c_compiler = tool_box.get_tool(Category.C_COMPILER) +There is special handling for compilers and linkers: the build +configuration stores the information if an MPI and/or OpenMP build +is requested. So when a default tool is requested by the ToolBox +from the ToolRepository (i.e. when the user has not added specific +compilers or linkers), this information is taken into account, and +only a compiler that will fulfil the requirements is returned. For +example, if you have `gfortran` and `mpif90-gfortran` defined in this +order in the ToolRepository, and request the default compiler for an +MPI build, the `mpif90-gfortran` instance is returned, not `gfortran`. +On the other hand, if no MPI is requested, an MPI-enabled compiler +might be returned, which does not affect the final result, since +an MPI compiler just adds include- and library-paths. 
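As a minimal sketch of the default-selection behaviour described above (illustrative only, not part of the patch; it assumes `gfortran` and `mpif90-gfortran` are both registered in the default ToolRepository and that `Category`, `ToolBox` and `ToolRepository` are importable from `fab.tools` as in the other examples):

.. code-block:: python

    from fab.tools import Category, ToolBox, ToolRepository

    tool_box = ToolBox()          # no compiler added explicitly
    repo = ToolRepository()

    # An MPI build asks the repository for an MPI-capable default:
    fc_mpi = repo.get_default(Category.FORTRAN_COMPILER,
                              mpi=True, openmp=False)
    # -> the 'mpif90-gfortran' instance, not 'gfortran'

    # When the ToolBox has no compiler set, get_tool falls back to the
    # same repository lookup, taking the mpi/openmp settings into account:
    fc = tool_box.get_tool(Category.FORTRAN_COMPILER,
                           mpi=False, openmp=False)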
TODO ==== diff --git a/run_configs/gcom/build_gcom_ar.py b/run_configs/gcom/build_gcom_ar.py index f89b4380..c52a2048 100755 --- a/run_configs/gcom/build_gcom_ar.py +++ b/run_configs/gcom/build_gcom_ar.py @@ -15,7 +15,7 @@ if __name__ == '__main__': with BuildConfig(project_label='gcom object archive $compiler', - tool_box=ToolBox()) as state: + mpi=True, openmp=False, tool_box=ToolBox()) as state: common_build_steps(state) archive_objects(state, output_fpath='$output/libgcom.a') cleanup_prebuilds(state, all_unused=True) diff --git a/run_configs/gcom/build_gcom_so.py b/run_configs/gcom/build_gcom_so.py index 09a97af1..a5110536 100755 --- a/run_configs/gcom/build_gcom_so.py +++ b/run_configs/gcom/build_gcom_so.py @@ -20,7 +20,7 @@ parsed_args = arg_parser.parse_args() with BuildConfig(project_label='gcom shared library $compiler', - tool_box=ToolBox()) as state: + mpi=True, openmp=False, tool_box=ToolBox()) as state: common_build_steps(state, fpic=True) - link_shared_object(state, output_fpath='$output/libgcom.so'), + link_shared_object(state, output_fpath='$output/libgcom.so') cleanup_prebuilds(state, all_unused=True) diff --git a/run_configs/jules/build_jules.py b/run_configs/jules/build_jules.py index f3fc983c..aba22c7c 100755 --- a/run_configs/jules/build_jules.py +++ b/run_configs/jules/build_jules.py @@ -42,12 +42,15 @@ def __init__(self): tool_box.add_tool(Linker(compiler=fc)) with BuildConfig(project_label=f'jules {revision} $compiler', - tool_box=tool_box) as state: - # grab the source. todo: use some checkouts instead of exports in these configs. - fcm_export(state, src='fcm:jules.xm_tr/src', revision=revision, dst_label='src') - fcm_export(state, src='fcm:jules.xm_tr/utils', revision=revision, dst_label='utils') + mpi=False, openmp=False, tool_box=tool_box) as state: + # grab the source. todo: use some checkouts instead of exports + # in these configs. + fcm_export(state, src='fcm:jules.xm_tr/src', revision=revision, + dst_label='src') + fcm_export(state, src='fcm:jules.xm_tr/utils', revision=revision, + dst_label='utils') - grab_pre_build(state, path='/not/a/real/folder', allow_fail=True), + grab_pre_build(state, path='/not/a/real/folder', allow_fail=True) # find the source files find_source_files(state, path_filters=[ @@ -61,9 +64,11 @@ def __init__(self): # move inc files to the root for easy tool use root_inc_files(state) - preprocess_fortran(state, common_flags=['-P', '-DMPI_DUMMY', '-DNCDF_DUMMY', '-I$output']) + preprocess_fortran(state, common_flags=['-P', '-DMPI_DUMMY', + '-DNCDF_DUMMY', '-I$output']) - analyse(state, root_symbol='jules', unreferenced_deps=['imogen_update_carb']) + analyse(state, root_symbol='jules', + unreferenced_deps=['imogen_update_carb']) compile_fortran(state) diff --git a/run_configs/lfric/atm.py b/run_configs/lfric/atm.py index f1c31017..3f93c588 100755 --- a/run_configs/lfric/atm.py +++ b/run_configs/lfric/atm.py @@ -172,7 +172,7 @@ def file_filtering(config): gpl_utils_source = gpl_utils_source_config.source_root / 'gpl_utils' with BuildConfig(project_label='atm $compiler $two_stage', - tool_box=ToolBox()) as state: + mpi=False, openmp=False, tool_box=ToolBox()) as state: # todo: use different dst_labels because they all go into the same folder, # making it hard to see what came from where? 
diff --git a/run_configs/lfric/grab_lfric.py b/run_configs/lfric/grab_lfric.py index c649ada2..82a18897 100755 --- a/run_configs/lfric/grab_lfric.py +++ b/run_configs/lfric/grab_lfric.py @@ -16,10 +16,12 @@ # these configs are interrogated by the build scripts # todo: doesn't need two separate configs, they use the same project workspace tool_box = ToolBox() -lfric_source_config = BuildConfig(project_label=f'lfric source {LFRIC_REVISION}', - tool_box=tool_box) -gpl_utils_source_config = BuildConfig(project_label=f'lfric source {LFRIC_REVISION}', - tool_box=tool_box) +lfric_source_config = BuildConfig( + project_label=f'lfric source {LFRIC_REVISION}', + tool_box=tool_box) +gpl_utils_source_config = BuildConfig( + project_label=f'lfric source {LFRIC_REVISION}', + tool_box=tool_box) if __name__ == '__main__': diff --git a/run_configs/lfric/gungho.py b/run_configs/lfric/gungho.py index caf59216..7f075c10 100755 --- a/run_configs/lfric/gungho.py +++ b/run_configs/lfric/gungho.py @@ -33,7 +33,7 @@ gpl_utils_source = gpl_utils_source_config.source_root / 'gpl_utils' with BuildConfig(project_label='gungho $compiler $two_stage', - tool_box=ToolBox()) as state: + mpi=True, openmp=True, tool_box=ToolBox()) as state: grab_folder(state, src=lfric_source / 'infrastructure/source/', dst_label='') grab_folder(state, src=lfric_source / 'components/driver/source/', dst_label='') grab_folder(state, src=lfric_source / 'components' / 'inventory' / 'source', dst_label='') @@ -87,7 +87,7 @@ state, common_flags=[ '-c', - '-ffree-line-length-none', '-fopenmp', + '-ffree-line-length-none', '-g', '-std=f2008', @@ -104,8 +104,6 @@ link_exe( state, flags=[ - '-fopenmp', - '-lyaxt', '-lyaxt_c', '-lnetcdff', '-lnetcdf', '-lhdf5', # EXTERNAL_DYNAMIC_LIBRARIES '-lxios', # EXTERNAL_STATIC_LIBRARIES '-lstdc++', diff --git a/run_configs/lfric/mesh_tools.py b/run_configs/lfric/mesh_tools.py index f49aa43b..fde5b793 100755 --- a/run_configs/lfric/mesh_tools.py +++ b/run_configs/lfric/mesh_tools.py @@ -25,7 +25,7 @@ psyclone_overrides = Path(__file__).parent / 'mesh_tools_overrides' with BuildConfig(project_label='mesh tools $compiler $two_stage', - tool_box=ToolBox()) as state: + mpi=True, openmp=False, tool_box=ToolBox()) as state: grab_folder(state, src=lfric_source / 'infrastructure/source/', dst_label='') grab_folder(state, src=lfric_source / 'mesh_tools/source/', dst_label='') grab_folder(state, src=lfric_source / 'components/science/source/', dst_label='') diff --git a/run_configs/tiny_fortran/build_tiny_fortran.py b/run_configs/tiny_fortran/build_tiny_fortran.py index 17907cdd..09a6ad49 100755 --- a/run_configs/tiny_fortran/build_tiny_fortran.py +++ b/run_configs/tiny_fortran/build_tiny_fortran.py @@ -33,13 +33,13 @@ def __init__(self): with BuildConfig(project_label='tiny_fortran $compiler', tool_box=tool_box) as state: git_checkout(state, src='https://github.com/metomi/fab-test-data.git', - revision='main', dst_label='src'), + revision='main', dst_label='src') - find_source_files(state), + find_source_files(state) - preprocess_fortran(state), + preprocess_fortran(state) - analyse(state, root_symbol='my_prog'), + analyse(state, root_symbol='my_prog') - compile_fortran(state), - link_exe(state), + compile_fortran(state) + link_exe(state) diff --git a/run_configs/um/build_um.py b/run_configs/um/build_um.py index 05177bd2..4cf38e4c 100755 --- a/run_configs/um/build_um.py +++ b/run_configs/um/build_um.py @@ -124,8 +124,11 @@ def replace_in_file(inpath, outpath, find, replace): revision = 'vn12.1' um_revision = 
revision.replace('vn', 'um') - state = BuildConfig(project_label=f'um atmos safe {revision} $compiler $two_stage', - tool_box=ToolBox()) + # The original build script disabled openmp, so for now + # we keep this disabled. + state = BuildConfig( + project_label=f'um atmos safe {revision} $compiler $two_stage', + mpi=True, openmp=False, tool_box=ToolBox()) # compiler-specific flags compiler = state.tool_box[Category.FORTRAN_COMPILER] diff --git a/source/fab/build_config.py b/source/fab/build_config.py index 614c4328..c98c8d9b 100644 --- a/source/fab/build_config.py +++ b/source/fab/build_config.py @@ -22,7 +22,8 @@ from fab.artefacts import ArtefactSet, ArtefactStore from fab.constants import BUILD_OUTPUT, SOURCE_ROOT, PREBUILD -from fab.metrics import send_metric, init_metrics, stop_metrics, metrics_summary +from fab.metrics import (send_metric, init_metrics, stop_metrics, + metrics_summary) from fab.tools.category import Category from fab.tools.tool_box import ToolBox from fab.steps.cleanup_prebuilds import CLEANUP_COUNT, cleanup_prebuilds @@ -41,36 +42,55 @@ class BuildConfig(): """ def __init__(self, project_label: str, tool_box: ToolBox, - multiprocessing: bool = True, n_procs: Optional[int] = None, + mpi: bool = False, + openmp: bool = False, + multiprocessing: bool = True, + n_procs: Optional[int] = None, reuse_artefacts: bool = False, - fab_workspace: Optional[Path] = None, two_stage=False, - verbose=False): + fab_workspace: Optional[Path] = None, + two_stage: bool = False, + verbose: bool = False): """ :param project_label: - Name of the build project. The project workspace folder is created from this name, with spaces replaced - by underscores. + Name of the build project. The project workspace folder is + created from this name, with spaces replaced by underscores. :param tool_box: The ToolBox with all tools to use in the build. + :param mpi: whether the project uses MPI or not. This is used to + pick a default compiler (if none is explicitly set in the + ToolBox), and controls PSyclone parameters. + :param openmp: as with `mpi`, this controls whether the project is + using OpenMP or not. This is used to pick a default compiler + (if none is explicitly set in the ToolBox). The compiler-specific + flag to enable OpenMP will automatically be added when compiling + and linking. :param multiprocessing: An option to disable multiprocessing to aid debugging. :param n_procs: - The number of cores to use for multiprocessing operations. Defaults to the number of available cores. + The number of cores to use for multiprocessing operations. + Defaults to the number of available cores. :param reuse_artefacts: A flag to avoid reprocessing certain files on subsequent runs. - WARNING: Currently unsophisticated, this flag should only be used by Fab developers. - The logic behind flag will soon be improved, in a work package called "incremental build". + WARNING: Currently unsophisticated, this flag should only be + used by Fab developers. The logic behind flag will soon be + improved, in a work package called "incremental build". :param fab_workspace: Overrides the FAB_WORKSPACE environment variable. - If not set, and FAB_WORKSPACE is not set, the fab workspace defaults to *~/fab-workspace*. + If not set, and FAB_WORKSPACE is not set, the fab workspace + defaults to *~/fab-workspace*. :param two_stage: - Compile .mod files first in a separate pass. Theoretically faster in some projects.. + Compile .mod files first in a separate pass. Theoretically faster + in some projects. 
:param verbose: DEBUG level logging. """ self._tool_box = tool_box + self._mpi = mpi + self._openmp = openmp self.two_stage = two_stage self.verbose = verbose - compiler = tool_box[Category.FORTRAN_COMPILER] + compiler = tool_box.get_tool(Category.FORTRAN_COMPILER, mpi=mpi, + openmp=openmp) project_label = Template(project_label).safe_substitute( compiler=compiler.name, two_stage=f'{int(two_stage)+1}stage') @@ -83,7 +103,8 @@ def __init__(self, project_label: str, logger.info(f"fab workspace is {fab_workspace}") self.project_workspace: Path = fab_workspace / self.project_label - self.metrics_folder: Path = self.project_workspace / 'metrics' / self.project_label + self.metrics_folder: Path = (self.project_workspace / 'metrics' / + self.project_label) # source config self.source_root: Path = self.project_workspace / SOURCE_ROOT @@ -93,7 +114,8 @@ def __init__(self, project_label: str, self.multiprocessing = multiprocessing # turn off multiprocessing when debugging - # todo: turn off multiprocessing when running tests, as a good test runner will run using mp + # todo: turn off multiprocessing when running tests, as a good test + # runner will run using mp if 'pydevd' in str(sys.gettrace()): logger.info('debugger detected, running without multiprocessing') self.multiprocessing = False @@ -129,7 +151,8 @@ def __enter__(self): self._start_time = datetime.now().replace(microsecond=0) self._run_prep() - with TimerLogger(f'running {self.project_label} build steps') as build_timer: + with TimerLogger(f'running {self.project_label} ' + f'build steps') as build_timer: # this will return to the build script self._build_timer = build_timer return self @@ -138,10 +161,12 @@ def __exit__(self, exc_type, exc_val, exc_tb): if not exc_type: # None if there's no error. if CLEANUP_COUNT not in self.artefact_store: - logger.info("no housekeeping step was run, using a default hard cleanup") + logger.info("no housekeeping step was run, using a " + "default hard cleanup") cleanup_prebuilds(config=self, all_unused=True) - logger.info(f"Building '{self.project_label}' took {datetime.now() - self._start_time}") + logger.info(f"Building '{self.project_label}' took " + f"{datetime.now() - self._start_time}") # always self._finalise_metrics(self._start_time, self._build_timer) @@ -164,9 +189,20 @@ def build_output(self) -> Path: ''' return self.project_workspace / BUILD_OUTPUT + @property + def mpi(self) -> bool: + ''':returns: whether MPI is requested or not in this config.''' + return self._mpi + + @property + def openmp(self) -> bool: + ''':returns: whether OpenMP is requested or not in this config.''' + return self._openmp + def add_current_prebuilds(self, artefacts: Iterable[Path]): """ - Mark the given file paths as being current prebuilds, not to be cleaned during housekeeping. + Mark the given file paths as being current prebuilds, not to be + cleaned during housekeeping. 
""" self.artefact_store[ArtefactSet.CURRENT_PREBUILDS].update(artefacts) @@ -193,7 +229,8 @@ def _prep_folders(self): def _init_logging(self): # add a file logger for our run self.project_workspace.mkdir(parents=True, exist_ok=True) - log_file_handler = RotatingFileHandler(self.project_workspace / 'log.txt', backupCount=5, delay=True) + log_file_handler = RotatingFileHandler( + self.project_workspace / 'log.txt', backupCount=5, delay=True) log_file_handler.doRollover() logging.getLogger('fab').addHandler(log_file_handler) @@ -207,9 +244,11 @@ def _init_logging(self): def _finalise_logging(self): # remove our file logger fab_logger = logging.getLogger('fab') - log_file_handlers = list(by_type(fab_logger.handlers, RotatingFileHandler)) + log_file_handlers = list(by_type(fab_logger.handlers, + RotatingFileHandler)) if len(log_file_handlers) != 1: - warnings.warn(f'expected to find 1 RotatingFileHandler for removal, found {len(log_file_handlers)}') + warnings.warn(f'expected to find 1 RotatingFileHandler for ' + f'removal, found {len(log_file_handlers)}') fab_logger.removeHandler(log_file_handlers[0]) def _finalise_metrics(self, start_time, steps_timer): @@ -249,14 +288,16 @@ def __init__(self, match: str, flags: List[str]): # For source in the um folder, add an absolute include path AddFlags(match="$source/um/*", flags=['-I$source/include']), - # For source in the um folder, add an include path relative to each source file. + # For source in the um folder, add an include path relative to + # each source file. AddFlags(match="$source/um/*", flags=['-I$relative/include']), """ self.match: str = match self.flags: List[str] = flags - # todo: we don't need the project_workspace, we could just pass in the output folder + # todo: we don't need the project_workspace, we could just pass in the + # output folder def run(self, fpath: Path, input_flags: List[str], config): """ Check if our filter matches a given file. If it does, add our flags. @@ -269,12 +310,16 @@ def run(self, fpath: Path, input_flags: List[str], config): Contains the folders for templating `$source` and `$output`. """ - params = {'relative': fpath.parent, 'source': config.source_root, 'output': config.build_output} + params = {'relative': fpath.parent, + 'source': config.source_root, + 'output': config.build_output} # does the file path match our filter? - if not self.match or fnmatch(str(fpath), Template(self.match).substitute(params)): + if not self.match or fnmatch(str(fpath), + Template(self.match).substitute(params)): # use templating to render any relative paths in our flags - add_flags = [Template(flag).substitute(params) for flag in self.flags] + add_flags = [Template(flag).substitute(params) + for flag in self.flags] # add our flags input_flags += add_flags @@ -284,15 +329,18 @@ class FlagsConfig(): """ Return command-line flags for a given path. - Simply allows appending flags but may evolve to also replace and remove flags. + Simply allows appending flags but may evolve to also replace and + remove flags. """ - def __init__(self, common_flags: Optional[List[str]] = None, path_flags: Optional[List[AddFlags]] = None): + def __init__(self, common_flags: Optional[List[str]] = None, + path_flags: Optional[List[AddFlags]] = None): """ :param common_flags: List of flags to apply to all files. E.g `['-O2']`. :param path_flags: - List of :class:`~fab.build_config.AddFlags` objects which apply flags to selected paths. + List of :class:`~fab.build_config.AddFlags` objects which apply + flags to selected paths. 
""" self.common_flags = common_flags or [] @@ -311,8 +359,8 @@ def flags_for_path(self, path: Path, config): """ # We COULD make the user pass these template params to the constructor - # but we have a design requirement to minimise the config burden on the user, - # so we take care of it for them here instead. + # but we have a design requirement to minimise the config burden on + # the user, so we take care of it for them here instead. params = {'source': config.source_root, 'output': config.build_output} flags = [Template(i).substitute(params) for i in self.common_flags] diff --git a/source/fab/cli.py b/source/fab/cli.py index 07154eec..e998638b 100644 --- a/source/fab/cli.py +++ b/source/fab/cli.py @@ -34,7 +34,7 @@ def _generic_build_config(folder: Path, kwargs=None) -> BuildConfig: # Set the default Fortran compiler as linker (otherwise e.g. the # C compiler might be used in linking, requiring additional flags) tr = ToolRepository() - fc = tr.get_default(Category.FORTRAN_COMPILER) + fc = tr.get_default(Category.FORTRAN_COMPILER, mpi=False, openmp=False) # TODO: This assumes a mapping of compiler name to the corresponding # linker name (i.e. `linker-gfortran` or `linker-ifort`). Still, that's # better than hard-coding gnu here. @@ -44,7 +44,7 @@ def _generic_build_config(folder: Path, kwargs=None) -> BuildConfig: tool_box.add_tool(linker) # Within the fab workspace, we'll create a project workspace. # Ideally we'd just use folder.name, but to avoid clashes, we'll use the full absolute path. - with BuildConfig(project_label=project_label, + with BuildConfig(project_label=project_label, mpi=False, openmp=False, tool_box=tool_box, **kwargs) as config: grab_folder(config, folder) find_source_files(config) diff --git a/source/fab/steps/compile_c.py b/source/fab/steps/compile_c.py index 8ac03f65..41332dda 100644 --- a/source/fab/steps/compile_c.py +++ b/source/fab/steps/compile_c.py @@ -13,7 +13,8 @@ from typing import List, Dict, Optional, Tuple from fab import FabException -from fab.artefacts import ArtefactsGetter, ArtefactSet, FilterBuildTrees +from fab.artefacts import (ArtefactsGetter, ArtefactSet, ArtefactStore, + FilterBuildTrees) from fab.build_config import BuildConfig, FlagsConfig from fab.metrics import send_metric from fab.parse.c import AnalysedC @@ -36,9 +37,11 @@ class MpCommonArgs: @step def compile_c(config, common_flags: Optional[List[str]] = None, - path_flags: Optional[List] = None, source: Optional[ArtefactsGetter] = None): + path_flags: Optional[List] = None, + source: Optional[ArtefactsGetter] = None): """ - Compiles all C files in all build trees, creating or extending a set of compiled files for each target. + Compiles all C files in all build trees, creating or extending a set of + compiled files for each target. This step uses multiprocessing. All C files are compiled in a single pass. @@ -47,22 +50,22 @@ def compile_c(config, common_flags: Optional[List[str]] = None, Uses multiprocessing, unless disabled in the *config*. :param config: - The :class:`fab.build_config.BuildConfig` object where we can read settings - such as the project workspace folder or the multiprocessing flag. + The :class:`fab.build_config.BuildConfig` object where we can read + settings such as the project workspace folder or the multiprocessing + flag. :param common_flags: - A list of strings to be included in the command line call, for all files. + A list of strings to be included in the command line call, for all + files. 
:param path_flags: - A list of :class:`~fab.build_config.AddFlags`, defining flags to be included in the command line call - for selected files. + A list of :class:`~fab.build_config.AddFlags`, defining flags to be + included in the command line call for selected files. :param source: - An :class:`~fab.artefacts.ArtefactsGetter` which give us our c files to process. + An :class:`~fab.artefacts.ArtefactsGetter` which give us our c files + to process. """ # todo: tell the compiler (and other steps) which artefact name to create? - compiler = config.tool_box[Category.C_COMPILER] - logger.info(f'C compiler is {compiler}') - env_flags = os.getenv('CFLAGS', '').split() common_flags = env_flags + (common_flags or []) @@ -74,6 +77,14 @@ def compile_c(config, common_flags: Optional[List[str]] = None, to_compile: list = sum(build_lists.values(), []) logger.info(f"compiling {len(to_compile)} c files") + if len(to_compile) == 0: + # No need to look for compiler etc if there is nothing to do + return + + compiler = config.tool_box.get_tool(Category.C_COMPILER, mpi=config.mpi, + openmp=config.openmp) + logger.info(f'C compiler is {compiler}') + mp_payload = MpCommonArgs(config=config, flags=flags) mp_items = [(fpath, mp_payload) for fpath in to_compile] @@ -83,7 +94,8 @@ def compile_c(config, common_flags: Optional[List[str]] = None, compiled_c = list(by_type(compilation_results, CompiledFile)) logger.info(f"compiled {len(compiled_c)} c files") - # record the prebuild files as being current, so the cleanup knows not to delete them + # record the prebuild files as being current, so the cleanup knows not + # to delete them prebuild_files = {r.output_fpath for r in compiled_c} config.add_current_prebuilds(prebuild_files) @@ -92,9 +104,12 @@ def compile_c(config, common_flags: Optional[List[str]] = None, # todo: very similar code in fortran compiler -def store_artefacts(compiled_files: List[CompiledFile], build_lists: Dict[str, List], artefact_store): +def store_artefacts(compiled_files: List[CompiledFile], + build_lists: Dict[str, List], + artefact_store: ArtefactStore): """ - Create our artefact collection; object files for each compiled file, per root symbol. + Create our artefact collection; object files for each compiled file, + per root symbol. """ # add the new object files to the artefact store, by target @@ -117,25 +132,31 @@ def _compile_file(arg: Tuple[AnalysedC, MpCommonArgs]): config=config)) obj_combo_hash = _get_obj_combo_hash(compiler, analysed_file, flags) - obj_file_prebuild = config.prebuild_folder / f'{analysed_file.fpath.stem}.{obj_combo_hash:x}.o' + obj_file_prebuild = (config.prebuild_folder / + f'{analysed_file.fpath.stem}.' + f'{obj_combo_hash:x}.o') # prebuild available? 
if obj_file_prebuild.exists(): - log_or_dot(logger, f'CompileC using prebuild: {analysed_file.fpath}') + log_or_dot(logger, f'CompileC using prebuild: ' + f'{analysed_file.fpath}') else: obj_file_prebuild.parent.mkdir(parents=True, exist_ok=True) log_or_dot(logger, f'CompileC compiling {analysed_file.fpath}') try: compiler.compile_file(analysed_file.fpath, obj_file_prebuild, + openmp=config.openmp, add_flags=flags) - except Exception as err: - return FabException(f"error compiling {analysed_file.fpath}:\n{err}") + except RuntimeError as err: + return FabException(f"error compiling " + f"{analysed_file.fpath}:\n{err}") send_metric( group="compile c", name=str(analysed_file.fpath), value={'time_taken': timer.taken, 'start': timer.start}) - return CompiledFile(input_fpath=analysed_file.fpath, output_fpath=obj_file_prebuild) + return CompiledFile(input_fpath=analysed_file.fpath, + output_fpath=obj_file_prebuild) def _get_obj_combo_hash(compiler, analysed_file, flags: Flags): @@ -146,6 +167,7 @@ def _get_obj_combo_hash(compiler, analysed_file, flags: Flags): flags.checksum(), compiler.get_hash(), ]) - except TypeError: - raise ValueError("could not generate combo hash for object file") + except TypeError as err: + raise ValueError("could not generate combo hash for " + "object file") from err return obj_combo_hash diff --git a/source/fab/steps/compile_fortran.py b/source/fab/steps/compile_fortran.py index 734abad9..4b065811 100644 --- a/source/fab/steps/compile_fortran.py +++ b/source/fab/steps/compile_fortran.py @@ -33,7 +33,8 @@ @dataclass class MpCommonArgs: - """Arguments to be passed into the multiprocessing function, alongside the filenames.""" + """Arguments to be passed into the multiprocessing function, + alongside the filenames.""" config: BuildConfig flags: FlagsConfig mod_hashes: Dict[str, int] @@ -41,50 +42,61 @@ class MpCommonArgs: @step -def compile_fortran(config: BuildConfig, common_flags: Optional[List[str]] = None, - path_flags: Optional[List] = None, source: Optional[ArtefactsGetter] = None): +def compile_fortran(config: BuildConfig, + common_flags: Optional[List[str]] = None, + path_flags: Optional[List] = None, + source: Optional[ArtefactsGetter] = None): """ - Compiles all Fortran files in all build trees, creating/extending a set of compiled files for each build target. + Compiles all Fortran files in all build trees, creating/extending a set + of compiled files for each build target. - Files are compiled in multiple passes, with each pass enabling further files to be compiled in the next pass. + Files are compiled in multiple passes, with each pass enabling further + files to be compiled in the next pass. Uses multiprocessing, unless disabled in the config. :param config: - The :class:`fab.build_config.BuildConfig` object where we can read settings - such as the project workspace folder or the multiprocessing flag. + The :class:`fab.build_config.BuildConfig` object where we can read + settings such as the project workspace folder or the multiprocessing + flag. :param common_flags: - A list of strings to be included in the command line call, for all files. + A list of strings to be included in the command line call, for + all files. :param path_flags: - A list of :class:`~fab.build_config.AddFlags`, defining flags to be included in the command line call - for selected files. + A list of :class:`~fab.build_config.AddFlags`, defining flags to be + included in the command line call for selected files. 
:param source: - An :class:`~fab.artefacts.ArtefactsGetter` which gives us our Fortran files to process. + An :class:`~fab.artefacts.ArtefactsGetter` which gives us our Fortran + files to process. """ - compiler, flags_config = handle_compiler_args(config, common_flags, - path_flags) - # Set module output folder: - compiler.set_module_output_path(config.build_output) - source_getter = source or DEFAULT_SOURCE_GETTER mod_hashes: Dict[str, int] = {} # get all the source to compile, for all build trees, into one big lump build_lists: Dict[str, List] = source_getter(config.artefact_store) + # compile everything in multiple passes + compiled: Dict[Path, CompiledFile] = {} + uncompiled: Set[AnalysedFortran] = set(sum(build_lists.values(), [])) + logger.info(f"compiling {len(uncompiled)} fortran files") + + # No need to do anything else if there are no files to compile + if len(uncompiled) == 0: + return + + compiler, flags_config = handle_compiler_args(config, common_flags, + path_flags) + # Set module output folder: + compiler.set_module_output_path(config.build_output) + syntax_only = compiler.has_syntax_only and config.two_stage # build the arguments passed to the multiprocessing function mp_common_args = MpCommonArgs( config=config, flags=flags_config, mod_hashes=mod_hashes, syntax_only=syntax_only) - # compile everything in multiple passes - compiled: Dict[Path, CompiledFile] = {} - uncompiled: Set[AnalysedFortran] = set(sum(build_lists.values(), [])) - logger.info(f"compiling {len(uncompiled)} fortran files") - if syntax_only: logger.info("Starting two-stage compile: mod files, multiple passes") elif config.two_stage: @@ -92,16 +104,19 @@ def compile_fortran(config: BuildConfig, common_flags: Optional[List[str]] = Non f"disabling two-stage compile.") while uncompiled: - uncompiled = compile_pass(config=config, compiled=compiled, uncompiled=uncompiled, - mp_common_args=mp_common_args, mod_hashes=mod_hashes) + uncompiled = compile_pass(config=config, compiled=compiled, + uncompiled=uncompiled, + mp_common_args=mp_common_args, + mod_hashes=mod_hashes) log_or_dot_finish(logger) if syntax_only: logger.info("Finalising two-stage compile: object files, single pass") mp_common_args.syntax_only = False - # a single pass should now compile all the object files in one go - uncompiled = set(sum(build_lists.values(), [])) # todo: order by last compile duration + # A single pass should now compile all the object files in one go + # todo: order by last compile duration + uncompiled = set(sum(build_lists.values(), [])) mp_args = [(fpath, mp_common_args) for fpath in uncompiled] results_this_pass = run_mp(config, items=mp_args, func=process_file) log_or_dot_finish(logger) @@ -127,29 +142,36 @@ def handle_compiler_args(config: BuildConfig, common_flags=None, # Collate the flags from 1) flags env and 2) parameters. env_flags = os.getenv('FFLAGS', '').split() common_flags = env_flags + (common_flags or []) - flags_config = FlagsConfig(common_flags=common_flags, path_flags=path_flags) + flags_config = FlagsConfig(common_flags=common_flags, + path_flags=path_flags) return compiler, flags_config -def compile_pass(config, compiled: Dict[Path, CompiledFile], uncompiled: Set[AnalysedFortran], +def compile_pass(config, compiled: Dict[Path, CompiledFile], + uncompiled: Set[AnalysedFortran], mp_common_args: MpCommonArgs, mod_hashes: Dict[str, int]): # what can we compile next? 
compile_next = get_compile_next(compiled, uncompiled) # compile - logger.info(f"\ncompiling {len(compile_next)} of {len(uncompiled)} remaining files") + logger.info(f"\ncompiling {len(compile_next)} of {len(uncompiled)} " + f"remaining files") mp_args = [(fpath, mp_common_args) for fpath in compile_next] results_this_pass = run_mp(config, items=mp_args, func=process_file) - # there's a compilation result and a list of prebuild files for each compiled file - compilation_results, prebuild_files = zip(*results_this_pass) if results_this_pass else (tuple(), tuple()) + # there's a compilation result and a list of prebuild files for each + # compiled file + compilation_results, prebuild_files = (zip(*results_this_pass) + if results_this_pass + else (tuple(), tuple())) check_for_errors(compilation_results, caller_label="compile_pass") compiled_this_pass = list(by_type(compilation_results, CompiledFile)) logger.debug(f"compiled {len(compiled_this_pass)} files") - # record the prebuild files as being current, so the cleanup knows not to delete them + # record the prebuild files as being current, so the cleanup knows + # not to delete them config.add_current_prebuilds(chain(*prebuild_files)) # hash the modules we just created @@ -164,15 +186,19 @@ def compile_pass(config, compiled: Dict[Path, CompiledFile], uncompiled: Set[Ana return uncompiled -def get_compile_next(compiled: Dict[Path, CompiledFile], uncompiled: Set[AnalysedFortran]) \ - -> Set[AnalysedFortran]: - - # find what to compile next +def get_compile_next(compiled: Dict[Path, CompiledFile], + uncompiled: Set[AnalysedFortran]) -> Set[AnalysedFortran]: + '''Find what to compile next. + :param compiled: A dictionary with already compiled files. + :param uncompiled: The set of still to be compiled files. + :returns: A set with all files that can now be compiled. + ''' compile_next = set() not_ready: Dict[Path, List[Path]] = {} for af in uncompiled: # all deps ready? - unfulfilled = [dep for dep in af.file_deps if dep not in compiled and dep.suffix == '.f90'] + unfulfilled = [dep for dep in af.file_deps + if dep not in compiled and dep.suffix == '.f90'] if unfulfilled: not_ready[af.fpath] = unfulfilled else: @@ -195,7 +221,8 @@ def store_artefacts(compiled_files: Dict[Path, CompiledFile], build_lists: Dict[str, List], artefact_store: ArtefactStore): """ - Create our artefact collection; object files for each compiled file, per root symbol. + Create our artefact collection; object files for each compiled file, per + root symbol. """ # add the new object files to the artefact store, by target @@ -208,32 +235,40 @@ def store_artefacts(compiled_files: Dict[Path, CompiledFile], def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ -> Union[Tuple[CompiledFile, List[Path]], Tuple[Exception, None]]: """ - Prepare to compile a fortran file, and compile it if anything has changed since it was last compiled. + Prepare to compile a fortran file, and compile it if anything has changed + since it was last compiled. Object files are created directly as artefacts in the prebuild folder. - Mod files are created in the module folder and copied as artefacts into the prebuild folder. - If nothing has changed, prebuilt mod files are copied *from* the prebuild folder into the module folder. + Mod files are created in the module folder and copied as artefacts into + the prebuild folder. If nothing has changed, prebuilt mod files are copied + *from* the prebuild folder into the module folder. .. 
note:: - Prebuild filenames include a "combo-hash" of everything that, if changed, must trigger a recompile. - For mod and object files, this includes a checksum of: *source code, compiler*. - For object files, this also includes a checksum of: *compiler flags, modules on which we depend*. + Prebuild filenames include a "combo-hash" of everything that, if + changed, must trigger a recompile. For mod and object files, this + includes a checksum of: *source code, compiler*. For object files, + this also includes a checksum of: *compiler flags, modules on which + we depend*. - Before compiling a file, we calculate the combo hashes and see if the output files already exists. + Before compiling a file, we calculate the combo hashes and see if the + output files already exists. - Returns a compilation result, regardless of whether it was compiled or prebuilt. + Returns a compilation result, regardless of whether it was compiled or + prebuilt. """ with Timer() as timer: analysed_file, mp_common_args = arg config = mp_common_args.config - compiler = config.tool_box[Category.FORTRAN_COMPILER] + compiler = config.tool_box.get_tool(Category.FORTRAN_COMPILER, + config.mpi) if not isinstance(compiler, FortranCompiler): raise RuntimeError(f"Unexpected tool '{compiler.name}' of type " f"'{type(compiler)}' instead of " f"FortranCompiler") - flags = Flags(mp_common_args.flags.flags_for_path(path=analysed_file.fpath, config=config)) + flags = Flags(mp_common_args.flags.flags_for_path( + path=analysed_file.fpath, config=config)) mod_combo_hash = _get_mod_combo_hash(analysed_file, compiler=compiler) obj_combo_hash = _get_obj_combo_hash(analysed_file, @@ -241,14 +276,18 @@ def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ compiler=compiler, flags=flags) # calculate the incremental/prebuild artefact filenames - obj_file_prebuild = mp_common_args.config.prebuild_folder / f'{analysed_file.fpath.stem}.{obj_combo_hash:x}.o' + obj_file_prebuild = ( + mp_common_args.config.prebuild_folder / + f'{analysed_file.fpath.stem}.{obj_combo_hash:x}.o') mod_file_prebuilds = [ - mp_common_args.config.prebuild_folder / f'{mod_def}.{mod_combo_hash:x}.mod' + (mp_common_args.config.prebuild_folder / + f'{mod_def}.{mod_combo_hash:x}.mod') for mod_def in analysed_file.module_defs ] # have we got all the prebuilt artefacts we need to avoid a recompile? 
- prebuilds_exist = list(map(lambda f: f.exists(), [obj_file_prebuild] + mod_file_prebuilds)) + prebuilds_exist = list(map(lambda f: f.exists(), + [obj_file_prebuild] + mod_file_prebuilds)) if not all(prebuilds_exist): # compile try: @@ -257,28 +296,34 @@ def process_file(arg: Tuple[AnalysedFortran, MpCommonArgs]) \ output_fpath=obj_file_prebuild, mp_common_args=mp_common_args) except Exception as err: - return Exception(f"Error compiling {analysed_file.fpath}:\n{err}"), None + return Exception(f"Error compiling {analysed_file.fpath}:\n" + f"{err}"), None # copy the mod files to the prebuild folder as artefacts for reuse - # note: perhaps we could sometimes avoid these copies because mods can change less frequently than obj + # note: perhaps we could sometimes avoid these copies because mods + # can change less frequently than obj for mod_def in analysed_file.module_defs: shutil.copy2( mp_common_args.config.build_output / f'{mod_def}.mod', - mp_common_args.config.prebuild_folder / f'{mod_def}.{mod_combo_hash:x}.mod', + (mp_common_args.config.prebuild_folder / + f'{mod_def}.{mod_combo_hash:x}.mod'), ) else: - log_or_dot(logger, f'CompileFortran using prebuild: {analysed_file.fpath}') + log_or_dot(logger, + f'CompileFortran using prebuild: {analysed_file.fpath}') # copy the prebuilt mod files from the prebuild folder for mod_def in analysed_file.module_defs: shutil.copy2( - mp_common_args.config.prebuild_folder / f'{mod_def}.{mod_combo_hash:x}.mod', + (mp_common_args.config.prebuild_folder + / f'{mod_def}.{mod_combo_hash:x}.mod'), mp_common_args.config.build_output / f'{mod_def}.mod', ) # return the results - compiled_file = CompiledFile(input_fpath=analysed_file.fpath, output_fpath=obj_file_prebuild) + compiled_file = CompiledFile(input_fpath=analysed_file.fpath, + output_fpath=obj_file_prebuild) artefacts = [obj_file_prebuild] + mod_file_prebuilds metric_name = "compile fortran" @@ -298,7 +343,8 @@ def _get_obj_combo_hash(analysed_file, mp_common_args: MpCommonArgs, # get a combo hash of things which matter to the object file we define # todo: don't just silently use 0 for a missing dep hash mod_deps_hashes = { - mod_dep: mp_common_args.mod_hashes.get(mod_dep, 0) for mod_dep in analysed_file.module_deps} + mod_dep: mp_common_args.mod_hashes.get(mod_dep, 0) + for mod_dep in analysed_file.module_deps} try: obj_combo_hash = sum([ analysed_file.file_hash, @@ -306,8 +352,9 @@ def _get_obj_combo_hash(analysed_file, mp_common_args: MpCommonArgs, sum(mod_deps_hashes.values()), compiler.get_hash(), ]) - except TypeError: - raise ValueError("could not generate combo hash for object file") + except TypeError as err: + raise ValueError("Could not generate combo hash " + "for object file") from err return obj_combo_hash @@ -318,8 +365,9 @@ def _get_mod_combo_hash(analysed_file, compiler: Compiler): analysed_file.file_hash, compiler.get_hash(), ]) - except TypeError: - raise ValueError("could not generate combo hash for mod files") + except TypeError as err: + raise ValueError("Could not generate combo " + "hash for mod files") from err return mod_combo_hash @@ -340,11 +388,13 @@ def compile_file(analysed_file, flags, output_fpath, mp_common_args): compiler = config.tool_box[Category.FORTRAN_COMPILER] compiler.compile_file(input_file=analysed_file, output_file=output_fpath, + openmp=config.openmp, add_flags=flags, syntax_only=mp_common_args.syntax_only) -def get_mod_hashes(analysed_files: Set[AnalysedFortran], config) -> Dict[str, int]: +def get_mod_hashes(analysed_files: Set[AnalysedFortran], + config: 
BuildConfig) -> Dict[str, int]: """ Get the hash of every module file defined in the list of analysed files. diff --git a/source/fab/steps/link.py b/source/fab/steps/link.py index 5c6d15ce..6a14cf64 100644 --- a/source/fab/steps/link.py +++ b/source/fab/steps/link.py @@ -22,8 +22,9 @@ class DefaultLinkerSource(ArtefactsGetter): """ A source getter specifically for linking. - Looks for the default output from archiving objects, falls back to default compiler output. - This allows a link step to work with or without a preceding object archive step. + Looks for the default output from archiving objects, falls back to + default compiler output. This allows a link step to work with or without + a preceding object archive step. """ def __call__(self, artefact_store): @@ -36,15 +37,18 @@ def link_exe(config, flags=None, source: Optional[ArtefactsGetter] = None): """ Link object files into an executable for every build target. - Expects one or more build targets from its artefact getter, of the form Dict[name, object_files]. + Expects one or more build targets from its artefact getter, of the form + Dict[name, object_files]. - The default artefact getter, :py:const:`~fab.steps.link_exe.DefaultLinkerSource`, looks for any output - from an :class:`~fab.steps.archive_objects.ArchiveObjects` step, and falls back to using output from - compiler steps. + The default artefact getter, + :py:const:`~fab.steps.link_exe.DefaultLinkerSource`, looks for any output + from an :class:`~fab.steps.archive_objects.ArchiveObjects` step, and + falls back to using output from compiler steps. :param config: - The :class:`fab.build_config.BuildConfig` object where we can read settings - such as the project workspace folder or the multiprocessing flag. + The :class:`fab.build_config.BuildConfig` object where we can read + settings such as the project workspace folder or the multiprocessing + flag. :param flags: A list of flags to pass to the linker. :param source: @@ -52,7 +56,8 @@ def link_exe(config, flags=None, source: Optional[ArtefactsGetter] = None): output from compiler steps, which typically is the expected behaviour. """ - linker = config.tool_box[Category.LINKER] + linker = config.tool_box.get_tool(Category.LINKER, mpi=config.mpi, + openmp=config.openmp) logger.info(f'Linker is {linker.name}') flags = flags or [] @@ -61,25 +66,29 @@ def link_exe(config, flags=None, source: Optional[ArtefactsGetter] = None): target_objects = source_getter(config.artefact_store) for root, objects in target_objects.items(): exe_path = config.project_workspace / f'{root}' - linker.link(objects, exe_path, flags) + linker.link(objects, exe_path, openmp=config.openmp, add_libs=flags) config.artefact_store.add(ArtefactSet.EXECUTABLES, exe_path) -# todo: the bit about Dict[None, object_files] seems too obscure - try to rethink this. +# todo: the bit about Dict[None, object_files] seems too obscure - try to +# rethink this. @step def link_shared_object(config, output_fpath: str, flags=None, source: Optional[ArtefactsGetter] = None): """ Produce a shared object (*.so*) file from the given build target. - Expects a *single build target* from its artefact getter, of the form Dict[None, object_files]. - We can assume the list of object files is the entire project source, compiled. + Expects a *single build target* from its artefact getter, of the form + Dict[None, object_files]. We can assume the list of object files is the + entire project source, compiled. 
- Params are as for :class:`~fab.steps.link_exe.LinkerBase`, with the addition of: + Params are as for :class:`~fab.steps.link_exe.LinkerBase`, with the + addition of: :param config: - The :class:`fab.build_config.BuildConfig` object where we can read settings - such as the project workspace folder or the multiprocessing flag. + The :class:`fab.build_config.BuildConfig` object where we can read + settings such as the project workspace folder or the multiprocessing + flag. :param output_fpath: File path of the shared object to create. :param flags: @@ -100,10 +109,11 @@ def link_shared_object(config, output_fpath: str, flags=None, if f not in flags: flags.append(f) - # We expect a single build target containing the whole codebase, with no name (as it's not a root symbol). + # We expect a single build target containing the whole codebase, with no + # name (as it's not a root symbol). target_objects = source_getter(config.artefact_store) assert list(target_objects.keys()) == [None] objects = target_objects[None] out_name = Template(output_fpath).substitute(output=config.build_output) - linker.link(objects, out_name, add_libs=flags) + linker.link(objects, out_name, openmp=config.openmp, add_libs=flags) diff --git a/source/fab/tools/__init__.py b/source/fab/tools/__init__.py index f30cf7fa..ed5850c5 100644 --- a/source/fab/tools/__init__.py +++ b/source/fab/tools/__init__.py @@ -11,7 +11,8 @@ from fab.tools.category import Category from fab.tools.compiler import (CCompiler, Compiler, FortranCompiler, Gcc, Gfortran, GnuVersionHandling, Icc, Ifort, - IntelVersionHandling) + IntelVersionHandling, MpiGcc, MpiGfortran, + MpiIcc, MpiIfort) from fab.tools.flags import Flags from fab.tools.linker import Linker from fab.tools.psyclone import Psyclone @@ -42,6 +43,10 @@ "Ifort", "IntelVersionHandling", "Linker", + "MpiGcc", + "MpiGfortran", + "MpiIcc", + "MpiIfort", "Preprocessor", "Psyclone", "Rsync", diff --git a/source/fab/tools/compiler.py b/source/fab/tools/compiler.py index 13e458ae..5c2dfea5 100644 --- a/source/fab/tools/compiler.py +++ b/source/fab/tools/compiler.py @@ -11,6 +11,7 @@ import os import re from pathlib import Path +import warnings from typing import List, Optional, Tuple, Union import zlib @@ -30,11 +31,13 @@ class Compiler(CompilerSuiteTool): :param exec_name: name of the executable to start. :param suite: name of the compiler suite this tool belongs to. :param category: the Category (C_COMPILER or FORTRAN_COMPILER). + :param mpi: whether the compiler or linker support MPI. :param compile_flag: the compilation flag to use when only requesting compilation (not linking). :param output_flag: the compilation flag to use to indicate the name of the output file - :param omp_flag: the flag to use to enable OpenMP + :param openmp_flag: the flag to use to enable OpenMP. If no flag is + specified, it is assumed that the compiler does not support OpenMP. 
''' # pylint: disable=too-many-arguments @@ -42,14 +45,15 @@ def __init__(self, name: str, exec_name: Union[str, Path], suite: str, category: Category, + mpi: bool = False, compile_flag: Optional[str] = None, output_flag: Optional[str] = None, - omp_flag: Optional[str] = None): - super().__init__(name, exec_name, suite, category) + openmp_flag: Optional[str] = None): + super().__init__(name, exec_name, suite, mpi=mpi, category=category) self._version: Union[Tuple[int, ...], None] = None self._compile_flag = compile_flag if compile_flag else "-c" self._output_flag = output_flag if output_flag else "-o" - self._omp_flag = omp_flag + self._openmp_flag = openmp_flag if openmp_flag else "" self.flags.extend(os.getenv("FFLAGS", "").split()) def get_hash(self) -> int: @@ -58,7 +62,21 @@ def get_hash(self) -> int: return (zlib.crc32(self.name.encode()) + zlib.crc32(self.get_version_string().encode())) - def compile_file(self, input_file: Path, output_file: Path, + @property + def openmp(self) -> bool: + ''':returns: if the compiler supports openmp or not + ''' + return self._openmp_flag != "" + + @property + def openmp_flag(self) -> str: + ''':returns: The flag to enable OpenMP for this compiler. + ''' + return self._openmp_flag + + def compile_file(self, input_file: Path, + output_file: Path, + openmp: bool, add_flags: Union[None, List[str]] = None): '''Compiles a file. It will add the flag for compilation-only automatically, as well as the output directives. The current working @@ -68,12 +86,20 @@ def compile_file(self, input_file: Path, output_file: Path, them to have different checksums depending on where they live. :param input_file: the path of the input file. - :param outpout_file: the path of the output file. + :param output_file: the path of the output file. + :param opemmp: whether OpenMP should be used or not. :param add_flags: additional compiler flags. ''' params: List[Union[Path, str]] = [self._compile_flag] + if openmp: + params.append(self._openmp_flag) if add_flags: + if self._openmp_flag in add_flags: + warnings.warn( + f"OpenMP flag '{self._openmp_flag}' explicitly provided. " + f"OpenMP should be enabled in the BuildConfiguration " + f"instead.") params += add_flags params.extend([input_file.name, @@ -191,18 +217,24 @@ class CCompiler(Compiler): :param name: name of the compiler. :param exec_name: name of the executable to start. :param suite: name of the compiler suite. + :param mpi: whether the compiler or linker support MPI. :param compile_flag: the compilation flag to use when only requesting compilation (not linking). :param output_flag: the compilation flag to use to indicate the name of the output file - :param omp_flag: the flag to use to enable OpenMP + :param openmp_flag: the flag to use to enable OpenMP ''' # pylint: disable=too-many-arguments def __init__(self, name: str, exec_name: str, suite: str, - compile_flag=None, output_flag=None, omp_flag=None): - super().__init__(name, exec_name, suite, Category.C_COMPILER, - compile_flag, output_flag, omp_flag) + mpi: bool = False, + compile_flag: Optional[str] = None, + output_flag: Optional[str] = None, + openmp_flag: Optional[str] = None): + super().__init__(name, exec_name, suite, + category=Category.C_COMPILER, mpi=mpi, + compile_flag=compile_flag, output_flag=output_flag, + openmp_flag=openmp_flag) # ============================================================================ @@ -214,27 +246,36 @@ class FortranCompiler(Compiler): :param name: name of the compiler. :param exec_name: name of the executable to start. 
:param suite: name of the compiler suite. - :param module_folder_flag: the compiler flag to indicate where to - store created module files. - :param syntax_only_flag: flag to indicate to only do a syntax check. - The side effect is that the module files are created. + :param mpi: whether the compiler or linker support MPI. :param compile_flag: the compilation flag to use when only requesting compilation (not linking). :param output_flag: the compilation flag to use to indicate the name of the output file - :param omp_flag: the flag to use to enable OpenMP + :param module_folder_flag: the compiler flag to indicate where to + store created module files. + :param openmp_flag: the flag to use to enable OpenMP + :param syntax_only_flag: flag to indicate to only do a syntax check. + The side effect is that the module files are created. ''' # pylint: disable=too-many-arguments def __init__(self, name: str, exec_name: str, suite: str, - module_folder_flag: str, syntax_only_flag=None, - compile_flag=None, output_flag=None, omp_flag=None): - - super().__init__(name, exec_name, suite, Category.FORTRAN_COMPILER, - compile_flag, output_flag, omp_flag) - self._module_folder_flag = module_folder_flag - self._module_output_path = "" + mpi: bool = False, + compile_flag: Optional[str] = None, + output_flag: Optional[str] = None, + openmp_flag: Optional[str] = None, + module_folder_flag: Optional[str] = None, + syntax_only_flag: Optional[str] = None, + ): + + super().__init__(name=name, exec_name=exec_name, suite=suite, + category=Category.FORTRAN_COMPILER, + mpi=mpi, compile_flag=compile_flag, + output_flag=output_flag, openmp_flag=openmp_flag) + self._module_folder_flag = (module_folder_flag if module_folder_flag + else "") self._syntax_only_flag = syntax_only_flag + self._module_output_path = "" @property def has_syntax_only(self) -> bool: @@ -248,7 +289,9 @@ def set_module_output_path(self, path: Path): ''' self._module_output_path = str(path) - def compile_file(self, input_file: Path, output_file: Path, + def compile_file(self, input_file: Path, + output_file: Path, + openmp: bool, add_flags: Union[None, List[str]] = None, syntax_only: bool = False): '''Compiles a file. @@ -274,7 +317,8 @@ def compile_file(self, input_file: Path, output_file: Path, if self._module_folder_flag and self._module_output_path: params.append(self._module_folder_flag) params.append(self._module_output_path) - super().compile_file(input_file, output_file, params) + super().compile_file(input_file, output_file, openmp=openmp, + add_flags=params) # ============================================================================ @@ -321,11 +365,26 @@ class Gcc(GnuVersionHandling, CCompiler): :param name: name of this compiler. :param exec_name: name of the executable. + :param mpi: whether the compiler supports MPI. ''' def __init__(self, name: str = "gcc", - exec_name: str = "gcc"): - super().__init__(name, exec_name, "gnu", omp_flag="-fopenmp") + exec_name: str = "gcc", + mpi: bool = False): + super().__init__(name, exec_name, suite="gnu", mpi=mpi, + openmp_flag="-fopenmp") + + +# ============================================================================ +class MpiGcc(Gcc): + '''Class for a simple wrapper around gcc that supports MPI. + It calls `mpicc`. 
+ ''' + + def __init__(self): + super().__init__(name="mpicc-gcc", + exec_name="mpicc", + mpi=True) # ============================================================================ @@ -334,16 +393,31 @@ class Gfortran(GnuVersionHandling, FortranCompiler): :param name: name of this compiler. :param exec_name: name of the executable. + :param mpi: whether the compiler supports MPI. ''' + def __init__(self, name: str = "gfortran", - exec_name: str = "gfortran"): - super().__init__(name, exec_name, "gnu", + exec_name: str = "gfortran", + mpi: bool = False): + super().__init__(name, exec_name, suite="gnu", mpi=mpi, + openmp_flag="-fopenmp", module_folder_flag="-J", - omp_flag="-fopenmp", syntax_only_flag="-fsyntax-only") +# ============================================================================ +class MpiGfortran(Gfortran): + '''Class for a simple wrapper around gfortran that supports MPI. + It calls `mpif90`. + ''' + + def __init__(self): + super().__init__(name="mpif90-gfortran", + exec_name="mpif90", + mpi=True) + + # ============================================================================ class IntelVersionHandling(): '''Mixin to handle version information from Intel compilers''' @@ -384,12 +458,26 @@ class Icc(IntelVersionHandling, CCompiler): :param name: name of this compiler. :param exec_name: name of the executable. + :param mpi: whether the compiler supports MPI. ''' def __init__(self, name: str = "icc", - exec_name: str = "icc"): - super().__init__(name, exec_name, "intel-classic", - omp_flag="-qopenmp") + exec_name: str = "icc", + mpi: bool = False): + super().__init__(name, exec_name, suite="intel-classic", mpi=mpi, + openmp_flag="-qopenmp") + + +# ============================================================================ +class MpiIcc(Icc): + '''Class for a simple wrapper around icc that supports MPI. + It calls `mpicc`. + ''' + + def __init__(self): + super().__init__(name="mpicc-icc", + exec_name="mpicc", + mpi=True) # ============================================================================ @@ -398,11 +486,26 @@ class Ifort(IntelVersionHandling, FortranCompiler): :param name: name of this compiler. :param exec_name: name of the executable. + :param mpi: whether the compiler supports MPI. ''' + def __init__(self, name: str = "ifort", - exec_name: str = "ifort"): - super().__init__(name, exec_name, "intel-classic", + exec_name: str = "ifort", + mpi: bool = False): + super().__init__(name, exec_name, suite="intel-classic", mpi=mpi, module_folder_flag="-module", - omp_flag="-qopenmp", + openmp_flag="-qopenmp", syntax_only_flag="-syntax-only") + + +# ============================================================================ +class MpiIfort(Ifort): + '''Class for a simple wrapper around ifort that supports MPI. + It calls `mpif90`. + ''' + + def __init__(self): + super().__init__(name="mpif90-ifort", + exec_name="mpif90", + mpi=True) diff --git a/source/fab/tools/linker.py b/source/fab/tools/linker.py index 06bb5cfa..02932a18 100644 --- a/source/fab/tools/linker.py +++ b/source/fab/tools/linker.py @@ -62,12 +62,14 @@ def check_available(self) -> bool: return super().check_available() def link(self, input_files: List[Path], output_file: Path, + openmp: bool, add_libs: Optional[List[str]] = None) -> str: '''Executes the linker with the specified input files, creating `output_file`. :param input_files: list of input files to link. :param output_file: output file. + :param openm: whether OpenMP is requested or not. :param add_libs: additional linker flags. 
:returns: the stdout of the link command @@ -75,6 +77,8 @@ def link(self, input_files: List[Path], output_file: Path, if self._compiler: # Create a copy: params = self._compiler.flags[:] + if openmp: + params.append(self._compiler.openmp_flag) else: params = [] # TODO: why are the .o files sorted? That shouldn't matter diff --git a/source/fab/tools/tool.py b/source/fab/tools/tool.py index af9b8bfb..9eaa42e1 100644 --- a/source/fab/tools/tool.py +++ b/source/fab/tools/tool.py @@ -181,13 +181,20 @@ class CompilerSuiteTool(Tool): :param exec_name: name of the executable to start. :param suite: name of the compiler suite. :param category: the Category to which this tool belongs. + :param mpi: whether the compiler or linker support MPI. ''' def __init__(self, name: str, exec_name: Union[str, Path], suite: str, - category: Category): + category: Category, mpi: bool = False): super().__init__(name, exec_name, category) self._suite = suite + self._mpi = mpi @property def suite(self) -> str: ''':returns: the compiler suite of this tool.''' return self._suite + + @property + def mpi(self) -> bool: + ''':returns: whether this tool supports MPI or not.''' + return self._mpi diff --git a/source/fab/tools/tool_box.py b/source/fab/tools/tool_box.py index 7704feeb..99395cb2 100644 --- a/source/fab/tools/tool_box.py +++ b/source/fab/tools/tool_box.py @@ -8,7 +8,7 @@ ''' import warnings -from typing import Dict +from typing import Dict, Optional from fab.tools.category import Category from fab.tools.tool import Tool @@ -46,19 +46,34 @@ def add_tool(self, tool: Tool, f"'{tool}'.") self._all_tools[tool.category] = tool - def get_tool(self, category: Category) -> Tool: + def get_tool(self, category: Category, mpi: Optional[bool] = None, + openmp: Optional[bool] = None) -> Tool: '''Returns the tool for the specified category. :param category: the name of the category in which to look for the tool. + :param mpi: if no compiler or linker is explicitly specified in this + tool box, use the MPI and OpenMP setting to find an appropriate + default from the tool repository. + :param mpi: if no compiler or linker is explicitly specified in this + tool box, use the MPI and OpenMP setting to find an appropriate + default from the tool repository. :raises KeyError: if the category is not known. ''' if category in self._all_tools: + # TODO: Should we test if the compiler has MPI support if + # required? The original LFRic setup compiled files without + # MPI support (and used an mpi wrapper at link time), so for + # now we don't raise an exception here to ease porting - but + # we probably should raise one tbh. return self._all_tools[category] # No tool was specified for this category, get the default tool - # from the ToolRepository: + # from the ToolRepository, and add it, so we don't need to look + # it up again later. tr = ToolRepository() - return tr.get_default(category) + tool = tr.get_default(category, mpi=mpi, openmp=openmp) + self._all_tools[category] = tool + return tool diff --git a/source/fab/tools/tool_repository.py b/source/fab/tools/tool_repository.py index 36aaa514..70479d55 100644 --- a/source/fab/tools/tool_repository.py +++ b/source/fab/tools/tool_repository.py @@ -12,7 +12,7 @@ from __future__ import annotations import logging -from typing import Any, Type +from typing import Any, Optional, Type from fab.tools.tool import Tool from fab.tools.category import Category @@ -43,6 +43,7 @@ def __init__(self): # time the instance is requested (since we overwrite __new__). 
But # we only want to initialise the instance once, so let the constructor # not do anything if the singleton already exists: + # pylint: disable=too-many-locals if ToolRepository._singleton: return @@ -59,9 +60,11 @@ def __init__(self): # We get circular dependencies if imported at top of the file: # pylint: disable=import-outside-toplevel from fab.tools import (Ar, Cpp, CppFortran, Gcc, Gfortran, - Icc, Ifort, Psyclone, Rsync) + Icc, Ifort, MpiGcc, MpiGfortran, + MpiIcc, MpiIfort, Psyclone, Rsync) for cls in [Gcc, Icc, Gfortran, Ifort, Cpp, CppFortran, + MpiGcc, MpiGfortran, MpiIcc, MpiIfort, Fcm, Git, Subversion, Ar, Psyclone, Rsync]: self.add_tool(cls) @@ -117,26 +120,70 @@ def set_default_compiler_suite(self, suite: str): ''' for category in [Category.FORTRAN_COMPILER, Category.C_COMPILER, Category.LINKER]: - all_members = [tool for tool in self[category] - if tool.suite == suite] - if len(all_members) == 0: + # Now sort the tools in this category to have all tools with the + # right suite at the front. We use the stable sorted function with + # the key being tool.suite != suite --> all tools with the right + # suite use False as key, all other tools True. Since False < True + # this results in all suite tools to be at the front of the list + self[category] = sorted(self[category], + key=lambda x: x.suite != suite) + if len(self[category]) > 0 and self[category][0].suite != suite: raise RuntimeError(f"Cannot find '{category}' " f"in the suite '{suite}'.") - tool = all_members[0] - if tool != self[category][0]: - self[category].remove(tool) - self[category].insert(0, tool) - def get_default(self, category: Category): - '''Returns the default tool for a given category, which is just - the first tool in the category. + def get_default(self, category: Category, + mpi: Optional[bool] = None, + openmp: Optional[bool] = None): + '''Returns the default tool for a given category. For most tools + that will be the first entry in the list of tools. The exception + are compilers and linker: in this case it must be specified if + MPI support is required or not. And the default return will be + the first tool that either supports MPI or not. :param category: the category for which to return the default tool. + :param mpi: if a compiler or linker is required that supports MPI. + :param open: if a compiler or linker is required that supports OpenMP. :raises KeyError: if the category does not exist. + :raises RuntimeError: if no compiler/linker is found with the + requested level of MPI support (yes or no). ''' if not isinstance(category, Category): raise RuntimeError(f"Invalid category type " f"'{type(category).__name__}'.") - return self[category][0] + + # If not a compiler or linker, return the first tool + if not category.is_compiler and category != Category.LINKER: + return self[category][0] + + if not isinstance(mpi, bool): + raise RuntimeError(f"Invalid or missing mpi specification " + f"for '{category}'.") + + if not isinstance(openmp, bool): + raise RuntimeError(f"Invalid or missing openmp specification " + f"for '{category}'.") + + for tool in self[category]: + # If OpenMP is request, but the tool does not support openmp, + # ignore it. + if openmp and not tool.openmp: + continue + # If the tool supports/does not support MPI, return it. + if mpi == tool.mpi: + return tool + + # Don't bother returning an MPI enabled tool if no-MPI is requested - + # that seems to be an unlikely scenario. 
+ if mpi: + if openmp: + raise RuntimeError(f"Could not find '{category}' that " + f"supports MPI and OpenMP.") + raise RuntimeError(f"Could not find '{category}' that " + f"supports MPI.") + + if openmp: + raise RuntimeError(f"Could not find '{category}' that " + f"supports OpenMP.") + raise RuntimeError(f"Could not find any '{category}'.") diff --git a/tests/conftest.py b/tests/conftest.py index 55d948fd..835cd294 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -23,6 +23,7 @@ def fixture_mock_c_compiler(): mock_compiler._version = (1, 2, 3) mock_compiler._name = "mock_c_compiler" mock_compiler._exec_name = "mock_c_compiler.exe" + mock_compiler._openmp_flag = "-fopenmp" return mock_compiler @@ -32,7 +33,7 @@ def fixture_mock_fortran_compiler(): mock_compiler = FortranCompiler("mock_fortran_compiler", "mock_exec", "suite", module_folder_flag="", syntax_only_flag=None, compile_flag=None, - output_flag=None, omp_flag=None) + output_flag=None, openmp_flag=None) mock_compiler.run = mock.Mock() mock_compiler._name = "mock_fortran_compiler" mock_compiler._exec_name = "mock_fortran_compiler.exe" diff --git a/tests/system_tests/incremental_fortran/test_incremental_fortran.py b/tests/system_tests/incremental_fortran/test_incremental_fortran.py index bc4c39eb..acde2066 100644 --- a/tests/system_tests/incremental_fortran/test_incremental_fortran.py +++ b/tests/system_tests/incremental_fortran/test_incremental_fortran.py @@ -38,13 +38,15 @@ class TestIncremental(): def config(self, tmp_path): # tmp_path is a pytest fixture which differs per test, per run logging.getLogger('fab').setLevel(logging.WARNING) - with BuildConfig(project_label=PROJECT_LABEL, - tool_box=ToolBox(), fab_workspace=tmp_path, + with BuildConfig(project_label=PROJECT_LABEL, tool_box=ToolBox(), + fab_workspace=tmp_path, multiprocessing=False) as grab_config: - grab_folder(grab_config, Path(__file__).parent / 'project-source', dst_label='src') + grab_folder(grab_config, Path(__file__).parent / 'project-source', + dst_label='src') build_config = BuildConfig(project_label=PROJECT_LABEL, - tool_box=ToolBox(), fab_workspace=tmp_path, + tool_box=ToolBox(), + fab_workspace=tmp_path, multiprocessing=False) return build_config @@ -244,8 +246,7 @@ class TestCleanupPrebuilds(): @pytest.mark.parametrize("kwargs,expect", in_out) def test_clean(self, tmp_path, kwargs, expect): - with BuildConfig(project_label=PROJECT_LABEL, - tool_box=ToolBox(), + with BuildConfig(project_label=PROJECT_LABEL, tool_box=ToolBox(), fab_workspace=tmp_path, multiprocessing=False) as config: remaining = self._prune(config, kwargs=kwargs) @@ -255,8 +256,8 @@ def test_prune_unused(self, tmp_path): # pruning everything not current current_prebuilds = ArtefactSet.CURRENT_PREBUILDS - with BuildConfig(project_label=PROJECT_LABEL, - tool_box=ToolBox(), fab_workspace=tmp_path, + with BuildConfig(project_label=PROJECT_LABEL, tool_box=ToolBox(), + fab_workspace=tmp_path, multiprocessing=False) as config: config._artefact_store = {current_prebuilds: { tmp_path / PROJECT_LABEL / BUILD_OUTPUT / PREBUILD / 'a.123.foo', diff --git a/tests/unit_tests/parse/c/test_c_analyser.py b/tests/unit_tests/parse/c/test_c_analyser.py index 934c8641..b4f84c94 100644 --- a/tests/unit_tests/parse/c/test_c_analyser.py +++ b/tests/unit_tests/parse/c/test_c_analyser.py @@ -16,7 +16,8 @@ def test_simple_result(tmp_path): c_analyser = CAnalyser() - c_analyser._config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) + c_analyser._config = BuildConfig('proj', ToolBox(), mpi=False, + 
openmp=False, fab_workspace=tmp_path) with mock.patch('fab.parse.AnalysedFile.save'): fpath = Path(__file__).parent / "test_c_analyser.c" diff --git a/tests/unit_tests/steps/test_analyse.py b/tests/unit_tests/steps/test_analyse.py index 79d0ef50..2cec86df 100644 --- a/tests/unit_tests/steps/test_analyse.py +++ b/tests/unit_tests/steps/test_analyse.py @@ -6,8 +6,8 @@ from fab.build_config import BuildConfig from fab.dep_tree import AnalysedDependent from fab.parse.fortran import AnalysedFortran, FortranParserWorkaround -from fab.steps.analyse import _add_manual_results, _add_unreferenced_deps, _gen_file_deps, _gen_symbol_table, \ - _parse_files +from fab.steps.analyse import (_add_manual_results, _add_unreferenced_deps, + _gen_file_deps, _gen_symbol_table, _parse_files) from fab.tools import ToolBox from fab.util import HashedFile @@ -16,8 +16,10 @@ class Test_gen_symbol_table(object): @pytest.fixture def analysed_files(self): - return [AnalysedDependent(fpath=Path('foo.c'), symbol_defs=['foo_1', 'foo_2'], file_hash=0), - AnalysedDependent(fpath=Path('bar.c'), symbol_defs=['bar_1', 'bar_2'], file_hash=0)] + return [AnalysedDependent(fpath=Path('foo.c'), + symbol_defs=['foo_1', 'foo_2'], file_hash=0), + AnalysedDependent(fpath=Path('bar.c'), + symbol_defs=['bar_1', 'bar_2'], file_hash=0)] def test_vanilla(self, analysed_files): result = _gen_symbol_table(analysed_files=analysed_files) @@ -58,12 +60,14 @@ def test_vanilla(self): analysed_files = [ mock.Mock( - spec=AnalysedDependent, fpath=my_file, symbol_deps={'my_func', 'dep1_mod', 'dep2'}, file_deps=set()), + spec=AnalysedDependent, fpath=my_file, + symbol_deps={'my_func', 'dep1_mod', 'dep2'}, file_deps=set()), ] _gen_file_deps(analysed_files=analysed_files, symbols=symbols) - assert analysed_files[0].file_deps == {symbols['dep1_mod'], symbols['dep2']} + assert analysed_files[0].file_deps == {symbols['dep1_mod'], + symbols['dep2']} # todo: this is fortran-ey, move it? 
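
The MPI/OpenMP-aware tool selection added earlier in this patch (ToolBox.get_tool
falling back to ToolRepository.get_default) can be exercised on its own. The sketch
below is illustrative only; it assumes the tools registered by the default
ToolRepository constructor, where plain gfortran is listed before the
mpif90-gfortran wrapper:

    from fab.tools import Category, ToolBox, ToolRepository

    tool_box = ToolBox()

    # Nothing was added to this tool box, so it falls back to the
    # ToolRepository and returns the first Fortran compiler whose MPI
    # support matches the request - with the default registration order
    # that is the mpif90-gfortran wrapper, not plain gfortran.
    fc = tool_box.get_tool(Category.FORTRAN_COMPILER, mpi=True, openmp=False)
    assert fc.mpi

    # Asking the repository directly yields the same instance. For
    # compilers and linkers, mpi and openmp must be booleans here,
    # otherwise get_default raises a RuntimeError.
    tr = ToolRepository()
    assert fc is tr.get_default(Category.FORTRAN_COMPILER,
                                mpi=True, openmp=False)
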
@@ -86,19 +90,26 @@ def test_vanilla(self): Path('root_dep.f90'): AnalysedFortran(fpath=Path(), file_hash=0), } - # we want to force this symbol into the build (because it's not used via modules) + # we want to force this symbol into the build (because it's not used + # via modules) unreferenced_deps = ['util'] # the stuff to add to the build tree will be found in here all_analysed_files = { - # root.f90 and root_util.f90 would also be in here but the test doesn't need them - Path('util.f90'): AnalysedFortran(fpath=Path('util.f90'), file_deps={Path('util_dep.f90')}, file_hash=0), - Path('util_dep.f90'): AnalysedFortran(fpath=Path('util_dep.f90'), file_hash=0), + # root.f90 and root_util.f90 would also be in here but the test + # doesn't need them + Path('util.f90'): AnalysedFortran(fpath=Path('util.f90'), + file_deps={Path('util_dep.f90')}, + file_hash=0), + Path('util_dep.f90'): AnalysedFortran(fpath=Path('util_dep.f90'), + file_hash=0), } _add_unreferenced_deps( unreferenced_deps=unreferenced_deps, - symbol_table=symbol_table, all_analysed_files=all_analysed_files, build_tree=build_tree) + symbol_table=symbol_table, + all_analysed_files=all_analysed_files, + build_tree=build_tree) assert Path('util.f90') in build_tree assert Path('util_dep.f90') in build_tree @@ -111,33 +122,46 @@ def test_vanilla(self): class Test_parse_files(object): - # todo: test the correct artefacts are marked as current for the cleanup step + # todo: test the correct artefacts are marked as current for the + # cleanup step # todo: this method should be tested a bit more thoroughly def test_exceptions(self, tmp_path): # make sure parse exceptions do not stop the build - with mock.patch('fab.steps.run_mp', return_value=[(Exception('foo'), None)]), \ + with mock.patch('fab.steps.run_mp', + return_value=[(Exception('foo'), None)]), \ pytest.warns(UserWarning, match="deprecated 'DEPENDS ON:'"): - # The warning "deprecated 'DEPENDS ON:' comment found in fortran code" - # is in "def _parse_files" in "source/steps/analyse.py" + # The warning "deprecated 'DEPENDS ON:' comment found in fortran + # code" is in "def _parse_files" in "source/steps/analyse.py" config = BuildConfig('proj', ToolBox(), fab_workspace=tmp_path) - # the exception should be suppressed (and logged) and this step should run to completion - _parse_files(config, files=[], fortran_analyser=mock.Mock(), c_analyser=mock.Mock()) + # the exception should be suppressed (and logged) and this step + # should run to completion + _parse_files(config, files=[], fortran_analyser=mock.Mock(), + c_analyser=mock.Mock()) -class Test_add_manual_results(object): - # test user-specified analysis results, for when fparser fails to parse a valid file. +class TestAddManualResults: + '''test user-specified analysis results, for when fparser fails to parse a + valid file. 
+ ''' def test_vanilla(self): # test normal usage of manual analysis results - workaround = FortranParserWorkaround(fpath=Path('foo.f'), symbol_defs={'foo', }) + workaround = FortranParserWorkaround(fpath=Path('foo.f'), + symbol_defs={'foo', }) analysed_files = set() - with mock.patch('fab.parse.fortran.file_checksum', return_value=HashedFile(None, 123)), \ - pytest.warns(UserWarning, match="SPECIAL MEASURE: injecting user-defined analysis results"): - # This warning "UserWarning: SPECIAL MEASURE: injecting user-defined analysis results" - # is in "def _add_manual_results" in "source/steps/analyse.py" - _add_manual_results(special_measure_analysis_results=[workaround], analysed_files=analysed_files) - - assert analysed_files == {AnalysedFortran(fpath=Path('foo.f'), file_hash=123, symbol_defs={'foo', })} + with mock.patch('fab.parse.fortran.file_checksum', + return_value=HashedFile(None, 123)), \ + pytest.warns(UserWarning, match="SPECIAL MEASURE: injecting user-" + "defined analysis results"): + # This warning "UserWarning: SPECIAL MEASURE: injecting + # user-defined analysis results" is in "def _add_manual_results" + # in "source/steps/analyse.py" + _add_manual_results(special_measure_analysis_results=[workaround], + analysed_files=analysed_files) + + assert analysed_files == {AnalysedFortran(fpath=Path('foo.f'), + file_hash=123, + symbol_defs={'foo', })} diff --git a/tests/unit_tests/steps/test_archive_objects.py b/tests/unit_tests/steps/test_archive_objects.py index d366f422..f5b2683e 100644 --- a/tests/unit_tests/steps/test_archive_objects.py +++ b/tests/unit_tests/steps/test_archive_objects.py @@ -51,7 +51,8 @@ def test_for_exes(self): # ensure the correct artefacts were created assert config.artefact_store[ArtefactSet.OBJECT_ARCHIVES] == { - target: set([str(config.build_output / f'{target}.a')]) for target in targets} + target: set([str(config.build_output / f'{target}.a')]) + for target in targets} def test_for_library(self): '''As used when building an object archive or archiving before linking @@ -65,12 +66,15 @@ def test_for_library(self): mock_result = mock.Mock(returncode=0, return_value=123) with mock.patch('fab.tools.tool.subprocess.run', return_value=mock_result) as mock_run_command, \ - pytest.warns(UserWarning, match="_metric_send_conn not set, cannot send metrics"): - archive_objects(config=config, output_fpath=config.build_output / 'mylib.a') + pytest.warns(UserWarning, match="_metric_send_conn not set, " + "cannot send metrics"): + archive_objects(config=config, + output_fpath=config.build_output / 'mylib.a') # ensure the correct command line calls were made mock_run_command.assert_called_once_with([ - 'ar', 'cr', str(config.build_output / 'mylib.a'), 'util1.o', 'util2.o'], + 'ar', 'cr', str(config.build_output / 'mylib.a'), + 'util1.o', 'util2.o'], capture_output=True, env=None, cwd=None, check=False) # ensure the correct artefacts were created @@ -83,7 +87,7 @@ def test_incorrect_tool(self): config = BuildConfig('proj', ToolBox()) tool_box = config.tool_box - cc = tool_box[Category.C_COMPILER] + cc = tool_box.get_tool(Category.C_COMPILER, config.mpi, config.openmp) # And set its category to C_COMPILER cc._category = Category.AR # So overwrite the C compiler with the re-categories Fortran compiler diff --git a/tests/unit_tests/steps/test_compile_c.py b/tests/unit_tests/steps/test_compile_c.py index 8ec687a2..8e8c8845 100644 --- a/tests/unit_tests/steps/test_compile_c.py +++ b/tests/unit_tests/steps/test_compile_c.py @@ -100,7 +100,7 @@ def 
test_exception_handling(self, content): compiler = config.tool_box[Category.C_COMPILER] # mock the run command to raise an exception with pytest.raises(RuntimeError): - with mock.patch.object(compiler, "run", side_effect=Exception): + with mock.patch.object(compiler, "run", side_effect=RuntimeError): with mock.patch('fab.steps.compile_c.send_metric') as mock_send_metric: with mock.patch('pathlib.Path.mkdir'): compile_c(config=config) diff --git a/tests/unit_tests/steps/test_link.py b/tests/unit_tests/steps/test_link.py index f015bb27..a675f54c 100644 --- a/tests/unit_tests/steps/test_link.py +++ b/tests/unit_tests/steps/test_link.py @@ -22,9 +22,12 @@ def test_run(self, tool_box): config = SimpleNamespace( project_workspace=Path('workspace'), artefact_store=ArtefactStore(), - tool_box=tool_box + tool_box=tool_box, + mpi=False, + openmp=False, ) - config.artefact_store[ArtefactSet.OBJECT_FILES] = {'foo': {'foo.o', 'bar.o'}} + config.artefact_store[ArtefactSet.OBJECT_FILES] = \ + {'foo': {'foo.o', 'bar.o'}} with mock.patch('os.getenv', return_value='-L/foo1/lib -L/foo2/lib'): # We need to create a linker here to pick up the env var: @@ -35,8 +38,9 @@ def test_run(self, tool_box): mock_result = mock.Mock(returncode=0, stdout="abc\ndef".encode()) with mock.patch('fab.tools.tool.subprocess.run', return_value=mock_result) as tool_run, \ - pytest.warns(UserWarning, match="_metric_send_conn not " - "set, cannot send metrics"): + pytest.warns(UserWarning, + match="_metric_send_conn not " + "set, cannot send metrics"): link_exe(config, flags=['-fooflag', '-barflag']) tool_run.assert_called_with( diff --git a/tests/unit_tests/steps/test_link_shared_object.py b/tests/unit_tests/steps/test_link_shared_object.py index 224dda19..700a3de3 100644 --- a/tests/unit_tests/steps/test_link_shared_object.py +++ b/tests/unit_tests/steps/test_link_shared_object.py @@ -26,6 +26,7 @@ def test_run(tool_box): project_workspace=Path('workspace'), build_output=Path("workspace"), artefact_store=ArtefactStore(), + openmp=False, tool_box=tool_box ) config.artefact_store[ArtefactSet.OBJECT_FILES] = \ diff --git a/tests/unit_tests/steps/test_preprocess.py b/tests/unit_tests/steps/test_preprocess.py index 32e7e09f..38376503 100644 --- a/tests/unit_tests/steps/test_preprocess.py +++ b/tests/unit_tests/steps/test_preprocess.py @@ -50,7 +50,7 @@ def source_getter(artefact_store): cpp._category = Category.FORTRAN_PREPROCESSOR # Now overwrite the Fortran preprocessor with the re-categorised # C preprocessor: - tool_box.add_tool(cpp) + tool_box.add_tool(cpp, silent_replace=True) with pytest.raises(RuntimeError) as err: preprocess_fortran(config=config) diff --git a/tests/unit_tests/test_config.py b/tests/unit_tests/test_config.py index 12357c37..201aa0bb 100644 --- a/tests/unit_tests/test_config.py +++ b/tests/unit_tests/test_config.py @@ -8,8 +8,9 @@ class TestAddFlags: def test_run(self): - add_flags = AddFlags(match="$source/foo/*", flags=['-I', '$relative/include']) - config = BuildConfig('proj', ToolBox(), + add_flags = AddFlags(match="$source/foo/*", + flags=['-I', '$relative/include']) + config = BuildConfig('proj', ToolBox(), mpi=False, openmp=False, fab_workspace=Path("/fab_workspace")) # anything in $source/foo should get the include folder @@ -18,7 +19,8 @@ def test_run(self): fpath=Path(f"/fab_workspace/proj/{SOURCE_ROOT}/foo/bar.c"), input_flags=my_flags, config=config) - assert my_flags == ['-foo', '-I', f'/fab_workspace/proj/{SOURCE_ROOT}/foo/include'] + assert my_flags == ['-foo', '-I', + 
f'/fab_workspace/proj/{SOURCE_ROOT}/foo/include'] # anything in $source/bar should NOT get the include folder my_flags = ["-foo"] diff --git a/tests/unit_tests/tools/test_compiler.py b/tests/unit_tests/tools/test_compiler.py index 28d41f2f..b9b7c808 100644 --- a/tests/unit_tests/tools/test_compiler.py +++ b/tests/unit_tests/tools/test_compiler.py @@ -14,35 +14,68 @@ import pytest -from fab.tools import (Category, CCompiler, FortranCompiler, Gcc, Gfortran, - Icc, Ifort) +from fab.tools import (Category, CCompiler, FortranCompiler, + Gcc, Gfortran, Icc, Ifort, MpiGcc, MpiGfortran, + MpiIcc, MpiIfort) def test_compiler(): '''Test the compiler constructor.''' - cc = CCompiler("gcc", "gcc", "gnu") + cc = CCompiler("gcc", "gcc", "gnu", openmp_flag="-fopenmp") assert cc.category == Category.C_COMPILER assert cc._compile_flag == "-c" assert cc._output_flag == "-o" assert cc.flags == [] assert cc.suite == "gnu" + assert not cc.mpi + assert cc.openmp_flag == "-fopenmp" with pytest.raises(NotImplementedError) as err: cc.parse_version_output(Category.FORTRAN_COMPILER, "NOT NEEDED") assert ("The method `parse_version_output` must be provided using a mixin." in str(err.value)) - fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J") + fc = FortranCompiler("gfortran", "gfortran", "gnu", openmp_flag="-fopenmp", + module_folder_flag="-J") assert fc._compile_flag == "-c" assert fc._output_flag == "-o" assert fc.category == Category.FORTRAN_COMPILER assert fc.suite == "gnu" assert fc.flags == [] + assert not fc.mpi + assert fc.openmp_flag == "-fopenmp" with pytest.raises(NotImplementedError) as err: fc.parse_version_output(Category.FORTRAN_COMPILER, "NOT NEEDED") assert ("The method `parse_version_output` must be provided using a mixin." in str(err.value)) +def test_compiler_openmp(): + '''Test that the openmp flag is correctly reflected in the test if + a compiler supports OpenMP or not.''' + cc = CCompiler("gcc", "gcc", "gnu", openmp_flag="-fopenmp") + assert cc.openmp_flag == "-fopenmp" + assert cc.openmp + cc = CCompiler("gcc", "gcc", "gnu", openmp_flag=None) + assert cc.openmp_flag == "" + assert not cc.openmp + cc = CCompiler("gcc", "gcc", "gnu") + assert cc.openmp_flag == "" + assert not cc.openmp + + fc = FortranCompiler("gfortran", "gfortran", "gnu", openmp_flag="-fopenmp", + module_folder_flag="-J") + assert fc.openmp_flag == "-fopenmp" + assert fc.openmp + fc = FortranCompiler("gfortran", "gfortran", "gnu", openmp_flag=None, + module_folder_flag="-J") + assert fc.openmp_flag == "" + assert not fc.openmp + fc = FortranCompiler("gfortran", "gfortran", "gnu", + module_folder_flag="-J") + assert fc.openmp_flag == "" + assert not fc.openmp + + def test_compiler_check_available(): '''Check if check_available works as expected. The compiler class uses internally get_version to test if a compiler works or not. 
Check the @@ -98,7 +131,8 @@ def test_compiler_hash_invalid_version(): with mock.patch.object(cc, "run", mock.Mock(return_value='foo v1')): with pytest.raises(RuntimeError) as err: cc.get_hash() - assert "Unexpected version output format for compiler 'gcc'" in str(err.value) + assert ("Unexpected version output format for compiler 'gcc'" + in str(err.value)) def test_compiler_with_env_fflags(): @@ -112,25 +146,55 @@ def test_compiler_with_env_fflags(): def test_compiler_syntax_only(): '''Tests handling of syntax only flags.''' - fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J") + fc = FortranCompiler("gfortran", "gfortran", "gnu", + openmp_flag="-fopenmp", module_folder_flag="-J") + # Empty since no flag is defined assert not fc.has_syntax_only - fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J", - syntax_only_flag=None) + + fc = FortranCompiler("gfortran", "gfortran", "gnu", openmp_flag="-fopenmp", + module_folder_flag="-J", syntax_only_flag=None) + # Empty since no flag is defined assert not fc.has_syntax_only - fc = FortranCompiler("gfortran", "gfortran", "gnu", "-J", + fc = FortranCompiler("gfortran", "gfortran", "gnu", + openmp_flag="-fopenmp", + module_folder_flag="-J", syntax_only_flag="-fsyntax-only") - fc.set_module_output_path("/tmp") assert fc.has_syntax_only assert fc._syntax_only_flag == "-fsyntax-only" + + +def test_compiler_without_openmp(): + '''Tests that the openmp flag is not used when openmp is not enabled. ''' + fc = FortranCompiler("gfortran", "gfortran", "gnu", + openmp_flag="-fopenmp", + module_folder_flag="-J", + syntax_only_flag="-fsyntax-only") + fc.set_module_output_path("/tmp") fc.run = mock.Mock() - fc.compile_file(Path("a.f90"), "a.o", syntax_only=True) + fc.compile_file(Path("a.f90"), "a.o", openmp=False, syntax_only=True) fc.run.assert_called_with(cwd=Path('.'), additional_parameters=['-c', '-fsyntax-only', "-J", '/tmp', 'a.f90', '-o', 'a.o', ]) +def test_compiler_with_openmp(): + '''Tests that the openmp flag is used as expected if openmp is enabled. 
+ ''' + fc = FortranCompiler("gfortran", "gfortran", "gnu", + openmp_flag="-fopenmp", + module_folder_flag="-J", + syntax_only_flag="-fsyntax-only") + fc.set_module_output_path("/tmp") + fc.run = mock.Mock() + fc.compile_file(Path("a.f90"), "a.o", openmp=True, syntax_only=False) + fc.run.assert_called_with(cwd=Path('.'), + additional_parameters=['-c', '-fopenmp', + "-J", '/tmp', 'a.f90', + '-o', 'a.o', ]) + + def test_compiler_module_output(): '''Tests handling of module output_flags.''' fc = FortranCompiler("gfortran", "gfortran", suite="gnu", @@ -138,7 +202,7 @@ def test_compiler_module_output(): fc.set_module_output_path("/module_out") assert fc._module_output_path == "/module_out" fc.run = mock.MagicMock() - fc.compile_file(Path("a.f90"), "a.o", syntax_only=True) + fc.compile_file(Path("a.f90"), "a.o", openmp=False, syntax_only=True) fc.run.assert_called_with(cwd=PosixPath('.'), additional_parameters=['-c', '-J', '/module_out', 'a.f90', '-o', 'a.o']) @@ -146,26 +210,32 @@ def test_compiler_module_output(): def test_compiler_with_add_args(): '''Tests that additional arguments are handled as expected.''' - fc = FortranCompiler("gfortran", "gfortran", "gnu", + fc = FortranCompiler("gfortran", "gfortran", suite="gnu", + openmp_flag="-fopenmp", module_folder_flag="-J") fc.set_module_output_path("/module_out") assert fc._module_output_path == "/module_out" fc.run = mock.MagicMock() with pytest.warns(UserWarning, match="Removing managed flag"): fc.compile_file(Path("a.f90"), "a.o", add_flags=["-J/b", "-O3"], - syntax_only=True) + openmp=False, syntax_only=True) # Notice that "-J/b" has been removed fc.run.assert_called_with(cwd=PosixPath('.'), additional_parameters=['-c', "-O3", '-J', '/module_out', 'a.f90', '-o', 'a.o']) + with pytest.warns(UserWarning, + match="explicitly provided. OpenMP should be enabled in " + "the BuildConfiguration"): + fc.compile_file(Path("a.f90"), "a.o", + add_flags=["-fopenmp", "-O3"], + openmp=True, syntax_only=True) def test_get_version_string(): '''Tests the get_version_string() method. ''' full_output = 'GNU Fortran (gcc) 6.1.0' - c = Gfortran() with mock.patch.object(c, "run", mock.Mock(return_value=full_output)): assert c.get_version_string() == "6.1.0" @@ -328,11 +398,22 @@ def test_gcc(): assert gcc.name == "gcc" assert isinstance(gcc, CCompiler) assert gcc.category == Category.C_COMPILER + assert not gcc.mpi + +def test_mpi_gcc(): + '''Tests the MPI enables gcc class.''' + mpi_gcc = MpiGcc() + assert mpi_gcc.name == "mpicc-gcc" + assert isinstance(mpi_gcc, CCompiler) + assert mpi_gcc.category == Category.C_COMPILER + assert mpi_gcc.mpi -def test_gcc_get_version(): + +@pytest.mark.parametrize("compiler", [Gcc, MpiGcc]) +def test_gcc_get_version(compiler): '''Tests the gcc class get_version method.''' - gcc = Gcc() + gcc = compiler() full_output = dedent(""" gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-20) Copyright (C) 2018 Free Software Foundation, Inc. @@ -341,9 +422,10 @@ def test_gcc_get_version(): assert gcc.get_version() == (8, 5, 0) -def test_gcc_get_version_with_icc_string(): +@pytest.mark.parametrize("compiler", [Gcc, MpiGcc]) +def test_gcc_get_version_with_icc_string(compiler): '''Tests the gcc class with an icc version output.''' - gcc = Gcc() + gcc = compiler() full_output = dedent(""" icc (ICC) 2021.10.0 20230609 Copyright (C) 1985-2023 Intel Corporation. All rights reserved. 
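
The MpiGcc, MpiGfortran, MpiIcc and MpiIfort classes above all follow the same
recipe: subclass the serial compiler, point exec_name at the MPI wrapper script
and pass mpi=True. A site-specific wrapper can be written the same way. The
sketch below is hypothetical (the class name, the ftn executable and the
Cray-style flags are made up for illustration) and only assumes the
FortranCompiler constructor shown earlier in this patch:

    from fab.tools import FortranCompiler

    class MpiCrayFtn(FortranCompiler):
        '''Hypothetical MPI-enabled Cray Fortran wrapper; the name,
        executable and flags are illustrative only.'''
        def __init__(self):
            super().__init__(name="mpi-crayftn", exec_name="ftn",
                             suite="cray", mpi=True,
                             openmp_flag="-homp",
                             module_folder_flag="-J")

    ftn = MpiCrayFtn()
    assert ftn.mpi       # reported through the new CompilerSuiteTool.mpi
    assert ftn.openmp    # True because an openmp_flag was supplied
    # Provided the executable actually exists on the system, the instance
    # can then be added to a ToolBox (tool_box.add_tool(ftn)) or registered
    # in the ToolRepository, just like the wrappers defined in this patch.
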
@@ -362,6 +444,16 @@ def test_gfortran(): assert gfortran.name == "gfortran" assert isinstance(gfortran, FortranCompiler) assert gfortran.category == Category.FORTRAN_COMPILER + assert not gfortran.mpi + + +def test_mpi_gfortran(): + '''Tests the MPI enabled gfortran class.''' + mpi_gfortran = MpiGfortran() + assert mpi_gfortran.name == "mpif90-gfortran" + assert isinstance(mpi_gfortran, FortranCompiler) + assert mpi_gfortran.category == Category.FORTRAN_COMPILER + assert mpi_gfortran.mpi # Possibly overkill to cover so many gfortran versions but I had to go @@ -369,7 +461,8 @@ def test_gfortran(): # Note: different sources, e.g conda, change the output slightly... -def test_gfortran_get_version_4(): +@pytest.mark.parametrize("compiler", [Gfortran, MpiGfortran]) +def test_gfortran_get_version_4(compiler): '''Test gfortran 4.8.5 version detection.''' full_output = dedent(""" GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-44) @@ -381,12 +474,13 @@ def test_gfortran_get_version_4(): For more information about these matters, see the file named COPYING """) - gfortran = Gfortran() + gfortran = compiler() with mock.patch.object(gfortran, "run", mock.Mock(return_value=full_output)): assert gfortran.get_version() == (4, 8, 5) -def test_gfortran_get_version_6(): +@pytest.mark.parametrize("compiler", [Gfortran, MpiGfortran]) +def test_gfortran_get_version_6(compiler): '''Test gfortran 6.1.0 version detection.''' full_output = dedent(""" GNU Fortran (GCC) 6.1.0 @@ -395,12 +489,13 @@ def test_gfortran_get_version_6(): warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. """) - gfortran = Gfortran() + gfortran = compiler() with mock.patch.object(gfortran, "run", mock.Mock(return_value=full_output)): assert gfortran.get_version() == (6, 1, 0) -def test_gfortran_get_version_8(): +@pytest.mark.parametrize("compiler", [Gfortran, MpiGfortran]) +def test_gfortran_get_version_8(compiler): '''Test gfortran 8.5.0 version detection.''' full_output = dedent(""" GNU Fortran (conda-forge gcc 8.5.0-16) 8.5.0 @@ -409,12 +504,13 @@ def test_gfortran_get_version_8(): warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. """) - gfortran = Gfortran() + gfortran = compiler() with mock.patch.object(gfortran, "run", mock.Mock(return_value=full_output)): assert gfortran.get_version() == (8, 5, 0) -def test_gfortran_get_version_10(): +@pytest.mark.parametrize("compiler", [Gfortran, MpiGfortran]) +def test_gfortran_get_version_10(compiler): '''Test gfortran 10.4.0 version detection.''' full_output = dedent(""" GNU Fortran (conda-forge gcc 10.4.0-16) 10.4.0 @@ -423,12 +519,13 @@ def test_gfortran_get_version_10(): warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. """) - gfortran = Gfortran() + gfortran = compiler() with mock.patch.object(gfortran, "run", mock.Mock(return_value=full_output)): assert gfortran.get_version() == (10, 4, 0) -def test_gfortran_get_version_12(): +@pytest.mark.parametrize("compiler", [Gfortran, MpiGfortran]) +def test_gfortran_get_version_12(compiler): '''Test gfortran 12.1.0 version detection.''' full_output = dedent(""" GNU Fortran (conda-forge gcc 12.1.0-16) 12.1.0 @@ -437,19 +534,20 @@ def test_gfortran_get_version_12(): warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
""") - gfortran = Gfortran() + gfortran = compiler() with mock.patch.object(gfortran, "run", mock.Mock(return_value=full_output)): assert gfortran.get_version() == (12, 1, 0) -def test_gfortran_get_version_with_ifort_string(): +@pytest.mark.parametrize("compiler", [Gfortran, MpiGfortran]) +def test_gfortran_get_version_with_ifort_string(compiler): '''Tests the gfortran class with an ifort version output.''' full_output = dedent(""" ifort (IFORT) 14.0.3 20140422 Copyright (C) 1985-2014 Intel Corporation. All rights reserved. """) - gfortran = Gfortran() + gfortran = compiler() with mock.patch.object(gfortran, "run", mock.Mock(return_value=full_output)): with pytest.raises(RuntimeError) as err: gfortran.get_version() @@ -463,27 +561,39 @@ def test_icc(): assert icc.name == "icc" assert isinstance(icc, CCompiler) assert icc.category == Category.C_COMPILER + assert not icc.mpi + +def test_mpi_icc(): + '''Tests the MPI enabled icc class.''' + mpi_icc = MpiIcc() + assert mpi_icc.name == "mpicc-icc" + assert isinstance(mpi_icc, CCompiler) + assert mpi_icc.category == Category.C_COMPILER + assert mpi_icc.mpi -def test_icc_get_version(): + +@pytest.mark.parametrize("compiler", [Icc, MpiIcc]) +def test_icc_get_version(compiler): '''Tests the icc class get_version method.''' full_output = dedent(""" icc (ICC) 2021.10.0 20230609 Copyright (C) 1985-2023 Intel Corporation. All rights reserved. """) - icc = Icc() + icc = compiler() with mock.patch.object(icc, "run", mock.Mock(return_value=full_output)): assert icc.get_version() == (2021, 10, 0) -def test_icc_get_version_with_gcc_string(): +@pytest.mark.parametrize("compiler", [Icc, MpiIcc]) +def test_icc_get_version_with_gcc_string(compiler): '''Tests the icc class with a GCC version output.''' full_output = dedent(""" gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-20) Copyright (C) 2018 Free Software Foundation, Inc. """) - icc = Icc() + icc = compiler() with mock.patch.object(icc, "run", mock.Mock(return_value=full_output)): with pytest.raises(RuntimeError) as err: icc.get_version() @@ -497,75 +607,91 @@ def test_ifort(): assert ifort.name == "ifort" assert isinstance(ifort, FortranCompiler) assert ifort.category == Category.FORTRAN_COMPILER + assert not ifort.mpi + + +def test_mpi_ifort(): + '''Tests the MPI enabled ifort class.''' + mpi_ifort = MpiIfort() + assert mpi_ifort.name == "mpif90-ifort" + assert isinstance(mpi_ifort, FortranCompiler) + assert mpi_ifort.category == Category.FORTRAN_COMPILER + assert mpi_ifort.mpi -def test_ifort_get_version_14(): +@pytest.mark.parametrize("compiler", [Ifort, MpiIfort]) +def test_ifort_get_version_14(compiler): '''Test ifort 14.0.3 version detection.''' full_output = dedent(""" ifort (IFORT) 14.0.3 20140422 Copyright (C) 1985-2014 Intel Corporation. All rights reserved. """) - ifort = Ifort() + ifort = compiler() with mock.patch.object(ifort, "run", mock.Mock(return_value=full_output)): assert ifort.get_version() == (14, 0, 3) -def test_ifort_get_version_15(): +@pytest.mark.parametrize("compiler", [Ifort, MpiIfort]) +def test_ifort_get_version_15(compiler): '''Test ifort 15.0.2 version detection.''' full_output = dedent(""" ifort (IFORT) 15.0.2 20150121 Copyright (C) 1985-2015 Intel Corporation. All rights reserved. 
""") - ifort = Ifort() + ifort = compiler() with mock.patch.object(ifort, "run", mock.Mock(return_value=full_output)): assert ifort.get_version() == (15, 0, 2) -def test_ifort_get_version_17(): +@pytest.mark.parametrize("compiler", [Ifort, MpiIfort]) +def test_ifort_get_version_17(compiler): '''Test ifort 17.0.7 version detection.''' full_output = dedent(""" ifort (IFORT) 17.0.7 20180403 Copyright (C) 1985-2018 Intel Corporation. All rights reserved. """) - ifort = Ifort() + ifort = compiler() with mock.patch.object(ifort, "run", mock.Mock(return_value=full_output)): assert ifort.get_version() == (17, 0, 7) -def test_ifort_get_version_19(): +@pytest.mark.parametrize("compiler", [Ifort, MpiIfort]) +def test_ifort_get_version_19(compiler): '''Test ifort 19.0.0.117 version detection.''' full_output = dedent(""" ifort (IFORT) 19.0.0.117 20180804 Copyright (C) 1985-2018 Intel Corporation. All rights reserved. """) - ifort = Ifort() + ifort = compiler() with mock.patch.object(ifort, "run", mock.Mock(return_value=full_output)): assert ifort.get_version() == (19, 0, 0, 117) -def test_ifort_get_version_with_icc_string(): +@pytest.mark.parametrize("compiler", [Ifort, MpiIfort]) +def test_ifort_get_version_with_icc_string(compiler): '''Tests the ifort class with an icc version output.''' full_output = dedent(""" icc (ICC) 2021.10.0 20230609 Copyright (C) 1985-2023 Intel Corporation. All rights reserved. """) - ifort = Ifort() + ifort = compiler() with mock.patch.object(ifort, "run", mock.Mock(return_value=full_output)): with pytest.raises(RuntimeError) as err: ifort.get_version() assert "Unexpected version output format for compiler" in str(err.value) +@pytest.mark.parametrize("compiler", [Ifort, MpiIfort]) @pytest.mark.parametrize("version", ["5.15f.2", ".0.5.1", "0.5.1.", "0.5..1"]) -def test_ifort_get_version_invalid_version(version): +def test_ifort_get_version_invalid_version(compiler, version): '''Tests the icc class with an icc version string that contains an invalid version number.''' full_output = dedent(f""" @@ -573,7 +699,7 @@ def test_ifort_get_version_invalid_version(version): Copyright (C) 1985-2023 Intel Corporation. All rights reserved. 
""") - icc = Icc() + icc = compiler() with mock.patch.object(icc, "run", mock.Mock(return_value=full_output)): with pytest.raises(RuntimeError) as err: icc.get_version() @@ -589,8 +715,13 @@ def __init__(self): super().__init__(name="mpif90-intel", exec_name="mpif90") + @property + def mpi(self): + return True + mpif90 = MpiF90() assert mpif90.suite == "intel-classic" assert mpif90.category == Category.FORTRAN_COMPILER assert mpif90.name == "mpif90-intel" assert mpif90.exec_name == "mpif90" + assert mpif90.mpi diff --git a/tests/unit_tests/tools/test_linker.py b/tests/unit_tests/tools/test_linker.py index 927cd008..772cd7ec 100644 --- a/tests/unit_tests/tools/test_linker.py +++ b/tests/unit_tests/tools/test_linker.py @@ -75,30 +75,38 @@ def test_linker_check_available(mock_c_compiler): ["ld", "--version"], capture_output=True, env=None, cwd=None, check=False) - # Third test: assume the tool does not exist, run will raise - # runtime error: + # Third test: assume the tool does not exist, check_available + # will return False (and not raise an exception) + linker._is_available = None with mock.patch("fab.tools.tool.Tool.run", side_effect=RuntimeError("")) as tool_run: - linker.check_available() + assert linker.check_available() is False def test_linker_c(mock_c_compiler): - '''Test the link command line.''' + '''Test the link command line when no additional libraries are + specified.''' linker = Linker(compiler=mock_c_compiler) mock_result = mock.Mock(returncode=0) with mock.patch('fab.tools.tool.subprocess.run', return_value=mock_result) as tool_run: - linker.link([Path("a.o")], Path("a.out")) + linker.link([Path("a.o")], Path("a.out"), openmp=False) tool_run.assert_called_with( ["mock_c_compiler.exe", 'a.o', '-o', 'a.out'], capture_output=True, env=None, cwd=None, check=False) + +def test_linker_c_with_libraries(mock_c_compiler): + '''Test the link command line when additional libraries are specified.''' + linker = Linker(compiler=mock_c_compiler) with mock.patch.object(linker, "run") as link_run: - linker.link([Path("a.o")], Path("a.out"), add_libs=["-L", "/tmp"]) - link_run.assert_called_with(['a.o', '-L', '/tmp', '-o', 'a.out']) + linker.link([Path("a.o")], Path("a.out"), add_libs=["-L", "/tmp"], + openmp=True) + link_run.assert_called_with(['-fopenmp', 'a.o', '-L', '/tmp', + '-o', 'a.out']) -def test_linker_add_compiler_flag(mock_c_compiler): +def test_compiler_linker_add_compiler_flag(mock_c_compiler): '''Test that a flag added to the compiler will be automatically added to the link line (even if the flags are modified after creating the linker ... 
in case that the user specifies additional @@ -109,19 +117,22 @@ def test_linker_add_compiler_flag(mock_c_compiler): mock_result = mock.Mock(returncode=0) with mock.patch('fab.tools.tool.subprocess.run', return_value=mock_result) as tool_run: - linker.link([Path("a.o")], Path("a.out")) + linker.link([Path("a.o")], Path("a.out"), openmp=False) tool_run.assert_called_with( ['mock_c_compiler.exe', '-my-flag', 'a.o', '-o', 'a.out'], capture_output=True, env=None, cwd=None, check=False) - # Make also sure the code works if a linker is created without - # a compiler: + +def test_linker_add_compiler_flag(): + '''Make sure linker flags work if a linker is created without + a compiler: + ''' linker = Linker("no-compiler", "no-compiler.exe", "suite") linker.flags.append("-some-other-flag") mock_result = mock.Mock(returncode=0) with mock.patch('fab.tools.tool.subprocess.run', return_value=mock_result) as tool_run: - linker.link([Path("a.o")], Path("a.out")) + linker.link([Path("a.o")], Path("a.out"), openmp=False) tool_run.assert_called_with( ['no-compiler.exe', '-some-other-flag', 'a.o', '-o', 'a.out'], capture_output=True, env=None, cwd=None, check=False) diff --git a/tests/unit_tests/tools/test_tool_box.py b/tests/unit_tests/tools/test_tool_box.py index 5ac55ac4..29bedf30 100644 --- a/tests/unit_tests/tools/test_tool_box.py +++ b/tests/unit_tests/tools/test_tool_box.py @@ -24,15 +24,17 @@ def test_tool_box_get_tool(): '''Tests get_tool.''' tb = ToolBox() # No tool is defined, so the default Fortran compiler must be returned: - default_compiler = tb.get_tool(Category.FORTRAN_COMPILER) + default_compiler = tb.get_tool(Category.FORTRAN_COMPILER, + mpi=False, openmp=False) tr = ToolRepository() - assert default_compiler is tr.get_default(Category.FORTRAN_COMPILER) + assert default_compiler is tr.get_default(Category.FORTRAN_COMPILER, + mpi=False, openmp=False) # Check that dictionary-like access works as expected: assert tb[Category.FORTRAN_COMPILER] == default_compiler # Now add gfortran as Fortran compiler to the tool box tr_gfortran = tr.get_tool(Category.FORTRAN_COMPILER, "gfortran") - tb.add_tool(tr_gfortran) + tb.add_tool(tr_gfortran, silent_replace=True) gfortran = tb.get_tool(Category.FORTRAN_COMPILER) assert gfortran is tr_gfortran diff --git a/tests/unit_tests/tools/test_tool_repository.py b/tests/unit_tests/tools/test_tool_repository.py index 4a315150..8369668e 100644 --- a/tests/unit_tests/tools/test_tool_repository.py +++ b/tests/unit_tests/tools/test_tool_repository.py @@ -7,10 +7,11 @@ '''This module tests the ToolRepository. 
''' +from unittest import mock import pytest - -from fab.tools import Category, Gcc, Gfortran, Ifort, Linker, ToolRepository +from fab.tools import (Ar, Category, FortranCompiler, Gcc, Gfortran, Ifort, + Linker, ToolRepository) def test_tool_repository_get_singleton_new(): @@ -57,38 +58,108 @@ def test_tool_repository_get_tool_error(): def test_tool_repository_get_default(): '''Tests get_default.''' tr = ToolRepository() - gfortran = tr.get_default(Category.FORTRAN_COMPILER) + gfortran = tr.get_default(Category.FORTRAN_COMPILER, mpi=False, + openmp=False) assert isinstance(gfortran, Gfortran) - gcc_linker = tr.get_default(Category.LINKER) + gcc_linker = tr.get_default(Category.LINKER, mpi=False, openmp=False) assert isinstance(gcc_linker, Linker) assert gcc_linker.name == "linker-gcc" - gcc = tr.get_default(Category.C_COMPILER) + gcc = tr.get_default(Category.C_COMPILER, mpi=False, openmp=False) assert isinstance(gcc, Gcc) + # Test a non-compiler + ar = tr.get_default(Category.AR) + assert isinstance(ar, Ar) + -def test_tool_repository_get_default_error(): - '''Tests error handling in get_default.''' +def test_tool_repository_get_default_error_invalid_category(): + '''Tests error handling in get_default, the category + must be a Category, not e.g. a string.''' tr = ToolRepository() with pytest.raises(RuntimeError) as err: - tr.get_default("unknown-category") + tr.get_default("unknown-category-type") assert "Invalid category type 'str'." in str(err.value) +def test_tool_repository_get_default_error_missing_mpi(): + '''Tests error handling in get_default when the optional MPI + parameter is missing (which is required for a compiler).''' + tr = ToolRepository() + with pytest.raises(RuntimeError) as err: + tr.get_default(Category.FORTRAN_COMPILER, openmp=True) + assert ("Invalid or missing mpi specification for 'FORTRAN_COMPILER'" + in str(err.value)) + with pytest.raises(RuntimeError) as err: + tr.get_default(Category.FORTRAN_COMPILER, mpi="123") + assert ("Invalid or missing mpi specification for 'FORTRAN_COMPILER'" + in str(err.value)) + + +def test_tool_repository_get_default_error_missing_openmp(): + '''Tests error handling in get_default when the optional openmp + parameter is missing (which is required for a compiler).''' + tr = ToolRepository() + with pytest.raises(RuntimeError) as err: + tr.get_default(Category.FORTRAN_COMPILER, mpi=True) + assert ("Invalid or missing openmp specification for 'FORTRAN_COMPILER'" + in str(err.value)) + with pytest.raises(RuntimeError) as err: + tr.get_default(Category.FORTRAN_COMPILER, mpi=True, openmp="123") + assert ("Invalid or missing openmp specification for 'FORTRAN_COMPILER'" + in str(err.value)) + + +@pytest.mark.parametrize("mpi, openmp, message", + [(False, False, "any 'FORTRAN_COMPILER'."), + (False, True, + "'FORTRAN_COMPILER' that supports OpenMP"), + (True, False, + "'FORTRAN_COMPILER' that supports MPI"), + (True, True, "'FORTRAN_COMPILER' that supports MPI " + "and OpenMP.")]) +def test_tool_repository_get_default_error_missing_compiler(mpi, openmp, + message): + '''Tests error handling in get_default when there is no compiler + that fulfils the requirements with regards to OpenMP and MPI.''' + tr = ToolRepository() + with mock.patch.dict(tr, {Category.FORTRAN_COMPILER: []}), \ + pytest.raises(RuntimeError) as err: + tr.get_default(Category.FORTRAN_COMPILER, mpi=mpi, openmp=openmp) + + assert f"Could not find {message}" in str(err.value) + + +def test_tool_repository_get_default_error_missing_openmp_compiler(): + '''Tests error handling 
in get_default when there is a compiler, but it + does not support OpenMP (which triggers additional tests in the + ToolRepository.''' + tr = ToolRepository() + fc = FortranCompiler("gfortran", "gfortran", "gnu", openmp_flag=None, + module_folder_flag="-J") + + with mock.patch.dict(tr, {Category.FORTRAN_COMPILER: [fc]}), \ + pytest.raises(RuntimeError) as err: + tr.get_default(Category.FORTRAN_COMPILER, mpi=False, openmp=True) + + assert ("Could not find 'FORTRAN_COMPILER' that supports OpenMP." + in str(err.value)) + + def test_tool_repository_default_compiler_suite(): '''Tests the setting of default suite for compiler and linker.''' tr = ToolRepository() tr.set_default_compiler_suite("gnu") for cat in [Category.C_COMPILER, Category.FORTRAN_COMPILER, Category.LINKER]: - def_tool = tr.get_default(cat) + def_tool = tr.get_default(cat, mpi=False, openmp=False) assert def_tool.suite == "gnu" tr.set_default_compiler_suite("intel-classic") for cat in [Category.C_COMPILER, Category.FORTRAN_COMPILER, Category.LINKER]: - def_tool = tr.get_default(cat) + def_tool = tr.get_default(cat, mpi=False, openmp=False) assert def_tool.suite == "intel-classic" with pytest.raises(RuntimeError) as err: tr.set_default_compiler_suite("does-not-exist") From 30488bfecafe0d6e49b45005110cefc77336b807 Mon Sep 17 00:00:00 2001 From: Matthew Hambley Date: Tue, 19 Nov 2024 14:36:48 +0000 Subject: [PATCH 2/2] Recovering anything of value from the old tests (#339) * Skip test dependent on clang module if it is not installed. * Inherit slightly more extensive testing of C pragma injector. * Migrated some real-life testing of Subversion interface. * Updated comment. * Just remove old tests. * Reverted mistaken clean-up. * Removed unused import. --- source/fab/tools/versioning.py | 62 +-- .../TestCases/CompiletimeDependency/Makefile | 33 -- .../TestCases/CompiletimeDependency/bisto.h | 6 - .../CompiletimeDependency/hillfort.F90 | 20 - .../TestCases/CompiletimeDependency/looper.c | 17 - .../TestCases/CompiletimeDependency/oxo.h | 6 - .../CompiletimeDependency/support_mod.f90 | 21 - .../CompiletimeDependency/with-beef.mk | 56 --- .../CompiletimeDependency/without-beef.mk | 55 --- tests-old/TestCases/FortranSourceTree/ReadMe | 8 - .../TestCases/FortranSourceTree/fpp_one.f90 | 23 -- .../TestCases/FortranSourceTree/fpp_two.f90 | 23 -- .../TestCases/FortranSourceTree/link1.f90 | 19 - .../TestCases/FortranSourceTree/link2.f90 | 19 - .../TestCases/FortranSourceTree/program.F90 | 13 - tests-old/TestCases/FortranSubmodule/Makefile | 47 --- .../TestCases/FortranSubmodule/class_impl.f90 | 28 -- .../TestCases/FortranSubmodule/class_mod.f90 | 59 --- .../FortranSubmodule/simple_impl.f90 | 18 - .../TestCases/FortranSubmodule/simple_mod.f90 | 13 - tests-old/TestCases/FortranSubmodule/test.f90 | 22 - tests-old/TestCases/Makefile | 13 - tests-old/TestCases/MinimalC/Makefile | 39 -- tests-old/TestCases/MinimalC/program.c | 11 - tests-old/TestCases/PSyclone/Makefile | 56 --- .../TestCases/PSyclone/algorithm_mod.x90 | 21 - .../PSyclone/kernels/my_kernel_mod.f90 | 49 --- .../TestCases/PSyclone/model/argument_mod.f90 | 21 - .../PSyclone/model/constants_mod.f90 | 11 - .../TestCases/PSyclone/model/field_mod.f90 | 32 -- .../PSyclone/model/functionspace_mod.f90 | 78 ---- .../TestCases/PSyclone/model/kernel_mod.f90 | 11 - .../TestCases/PSyclone/model/operator_mod.f90 | 23 -- tests-old/TestCases/PSyclone/optimisation.py | 44 -- tests-old/TestCases/SimpleLFRic/Makefile | 143 ------- .../TestCases/SimpleLFRic/algorithm_mod.x90 | 24 -- 
.../SimpleLFRic/kernels/my_kernel_mod.f90 | 47 --- .../SimpleLFRic/model/argument_mod.f90 | 21 - .../SimpleLFRic/model/constants_mod.f90 | 11 - .../TestCases/SimpleLFRic/model/field_mod.f90 | 59 --- .../SimpleLFRic/model/functionspace_mod.f90 | 78 ---- .../SimpleLFRic/model/kernel_mod.f90 | 11 - .../TestCases/SimpleLFRic/model/mesh_mod.f90 | 22 - .../SimpleLFRic/model/operator_mod.f90 | 23 -- .../SimpleLFRic/my_kernel_test_mod.pf | 27 -- .../TestCases/SimpleLFRic/optimisation.py | 44 -- tests-old/TestCases/SimpleLFRic/thing.f90 | 27 -- tests-old/TestCases/SimpleLFRic/util.c | 9 - tests-old/TestCases/SimpleLFRic/util.h | 8 - .../TestCases/SimpleLFRic/util_mod.template | 29 -- tests-old/TestCases/pFUnit/Build.txt | 5 - tests-old/TestCases/pFUnit/Makefile | 58 --- tests-old/TestCases/pFUnit/stuff_mod.f90 | 17 - tests-old/TestCases/pFUnit/stuff_test.pf | 22 - .../system-tests/GitRepository/expected/aleph | 1 - .../GitRepository/expected/beis/veis | 2 - tests-old/system-tests/GitRepository/repo.tar | Bin 51200 -> 0 bytes tests-old/system-tests/GitRepository_test.py | 17 - .../SubversionRepository/expected/trunk/alpha | 0 .../expected/trunk/beta/gamma | 0 .../SubversionRepository/repo.tar | Bin 92160 -> 0 bytes .../system-tests/SubversionRepository_test.py | 21 - tests-old/system-tests/common.py | 384 ------------------ tests-old/unit-tests/repository_test.py | 262 ------------ tests-old/unit-tests/tasks/__init__.py | 0 tests-old/unit-tests/tasks/c_test.py | 64 --- tests-old/unit-tests/tasks/common_test.py | 58 --- tests/unit_tests/parse/c/test_c_analyser.py | 4 +- .../steps/test_c_pragma_injector.py | 59 ++- tests/unit_tests/tools/test_versioning.py | 160 ++++++-- 70 files changed, 206 insertions(+), 2488 deletions(-) delete mode 100644 tests-old/TestCases/CompiletimeDependency/Makefile delete mode 100644 tests-old/TestCases/CompiletimeDependency/bisto.h delete mode 100644 tests-old/TestCases/CompiletimeDependency/hillfort.F90 delete mode 100644 tests-old/TestCases/CompiletimeDependency/looper.c delete mode 100644 tests-old/TestCases/CompiletimeDependency/oxo.h delete mode 100644 tests-old/TestCases/CompiletimeDependency/support_mod.f90 delete mode 100644 tests-old/TestCases/CompiletimeDependency/with-beef.mk delete mode 100644 tests-old/TestCases/CompiletimeDependency/without-beef.mk delete mode 100644 tests-old/TestCases/FortranSourceTree/ReadMe delete mode 100644 tests-old/TestCases/FortranSourceTree/fpp_one.f90 delete mode 100644 tests-old/TestCases/FortranSourceTree/fpp_two.f90 delete mode 100644 tests-old/TestCases/FortranSourceTree/link1.f90 delete mode 100644 tests-old/TestCases/FortranSourceTree/link2.f90 delete mode 100644 tests-old/TestCases/FortranSourceTree/program.F90 delete mode 100644 tests-old/TestCases/FortranSubmodule/Makefile delete mode 100644 tests-old/TestCases/FortranSubmodule/class_impl.f90 delete mode 100644 tests-old/TestCases/FortranSubmodule/class_mod.f90 delete mode 100644 tests-old/TestCases/FortranSubmodule/simple_impl.f90 delete mode 100644 tests-old/TestCases/FortranSubmodule/simple_mod.f90 delete mode 100644 tests-old/TestCases/FortranSubmodule/test.f90 delete mode 100644 tests-old/TestCases/Makefile delete mode 100644 tests-old/TestCases/MinimalC/Makefile delete mode 100644 tests-old/TestCases/MinimalC/program.c delete mode 100644 tests-old/TestCases/PSyclone/Makefile delete mode 100644 tests-old/TestCases/PSyclone/algorithm_mod.x90 delete mode 100644 tests-old/TestCases/PSyclone/kernels/my_kernel_mod.f90 delete mode 100644 
tests-old/TestCases/PSyclone/model/argument_mod.f90 delete mode 100644 tests-old/TestCases/PSyclone/model/constants_mod.f90 delete mode 100644 tests-old/TestCases/PSyclone/model/field_mod.f90 delete mode 100644 tests-old/TestCases/PSyclone/model/functionspace_mod.f90 delete mode 100644 tests-old/TestCases/PSyclone/model/kernel_mod.f90 delete mode 100644 tests-old/TestCases/PSyclone/model/operator_mod.f90 delete mode 100644 tests-old/TestCases/PSyclone/optimisation.py delete mode 100644 tests-old/TestCases/SimpleLFRic/Makefile delete mode 100644 tests-old/TestCases/SimpleLFRic/algorithm_mod.x90 delete mode 100644 tests-old/TestCases/SimpleLFRic/kernels/my_kernel_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/argument_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/constants_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/field_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/functionspace_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/kernel_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/mesh_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/model/operator_mod.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/my_kernel_test_mod.pf delete mode 100644 tests-old/TestCases/SimpleLFRic/optimisation.py delete mode 100644 tests-old/TestCases/SimpleLFRic/thing.f90 delete mode 100644 tests-old/TestCases/SimpleLFRic/util.c delete mode 100644 tests-old/TestCases/SimpleLFRic/util.h delete mode 100644 tests-old/TestCases/SimpleLFRic/util_mod.template delete mode 100644 tests-old/TestCases/pFUnit/Build.txt delete mode 100644 tests-old/TestCases/pFUnit/Makefile delete mode 100644 tests-old/TestCases/pFUnit/stuff_mod.f90 delete mode 100644 tests-old/TestCases/pFUnit/stuff_test.pf delete mode 100644 tests-old/system-tests/GitRepository/expected/aleph delete mode 100644 tests-old/system-tests/GitRepository/expected/beis/veis delete mode 100644 tests-old/system-tests/GitRepository/repo.tar delete mode 100644 tests-old/system-tests/GitRepository_test.py delete mode 100644 tests-old/system-tests/SubversionRepository/expected/trunk/alpha delete mode 100644 tests-old/system-tests/SubversionRepository/expected/trunk/beta/gamma delete mode 100644 tests-old/system-tests/SubversionRepository/repo.tar delete mode 100644 tests-old/system-tests/SubversionRepository_test.py delete mode 100644 tests-old/system-tests/common.py delete mode 100644 tests-old/unit-tests/repository_test.py delete mode 100644 tests-old/unit-tests/tasks/__init__.py delete mode 100644 tests-old/unit-tests/tasks/c_test.py delete mode 100644 tests-old/unit-tests/tasks/common_test.py diff --git a/source/fab/tools/versioning.py b/source/fab/tools/versioning.py index 0ed6ae96..01c11264 100644 --- a/source/fab/tools/versioning.py +++ b/source/fab/tools/versioning.py @@ -3,11 +3,10 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution ############################################################################## - -"""This file contains the base class for versioning tools like git and -subversion. It also contains derived classes Git, Subversion, and Fcm. """ - +Versioning tools such as Subversion and Git. +""" +from abc import ABC from pathlib import Path from typing import Dict, List, Optional, Union @@ -15,25 +14,29 @@ from fab.tools.tool import Tool -class Versioning(Tool): - '''This is the base class for versioning tools like git and svn. 
-
-    :param name: the name of the tool.
-    :param exec_name: the name of the executable of this tool.
-    :param category: the category to which this tool belongs).
-    '''
-
+class Versioning(Tool, ABC):
+    """
+    Base class for versioning tools like Git and Subversion.
+    """
     def __init__(self, name: str,
                  exec_name: Union[str, Path],
                  category: Category):
+        """
+        Constructor.
+
+        :param name: Display name of this tool.
+        :param exec_name: Executable for this tool.
+        :param category: Tool belongs to this category.
+        """
         super().__init__(name, exec_name, category,
                          availablility_option="help")


 # =============================================================================
 class Git(Versioning):
-    '''This is the base class for git.
-    '''
+    """
+    Interface to the Git version control system.
+    """

     def __init__(self):
         super().__init__("git", "git",
@@ -111,20 +114,23 @@ def merge(self, dst: Union[str, Path],

 # =============================================================================
 class Subversion(Versioning):
-    '''This is the base class for subversion. Note that this is also the
-    base class for FCM, so it allows overwriting name, exec_name and
-    category, but will default to use svn.
-
-    :param name: name of the tool, defaults to subversion.
-    :param exec_name: name of the executable, defaults to "svn".
-    :param category: the category, FCM or SUBVERSION (the latter is
-        the default)
-    '''
-
+    """
+    Interface to the Subversion version control system.
+    """
     def __init__(self, name: Optional[str] = None,
                  exec_name: Optional[Union[str, Path]] = None,
                  category: Category = Category.SUBVERSION):
-        name = name or "subversion"
+        """
+        Constructor.
+
+        This class is extended by the FCM interface, which is why name and
+        executable are mutable.
+
+        :param name: Tool name, defaults to "Subversion".
+        :param exec_name: Tool executable, defaults to "svn".
+        :param category: Tool category, defaults to SUBVERSION.
+        """
+        name = name or "Subversion"
         exec_name = exec_name or "svn"
         super().__init__(name, exec_name, category=category)

@@ -166,7 +172,9 @@ def export(self, src: Union[str, Path],
         :param dst: destination path.
         :param revision: revision to export.
         '''
-        self.execute(['export', '--force'], revision, [str(src), str(dst)])
+        self.execute(['export', '--force'],
+                     revision,
+                     [str(src), str(dst)])

     def checkout(self, src: Union[str, Path],
                  dst: Union[str, Path],
@@ -214,4 +222,4 @@ class Fcm(Subversion):
     '''

     def __init__(self):
-        super().__init__("fcm", "fcm", Category.FCM)
+        super().__init__("FCM", "fcm", Category.FCM)
diff --git a/tests-old/TestCases/CompiletimeDependency/Makefile b/tests-old/TestCases/CompiletimeDependency/Makefile
deleted file mode 100644
index 41b2f272..00000000
--- a/tests-old/TestCases/CompiletimeDependency/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-###############################################################################
-# The best way to show how a test case should be built is to build it.
-# While we don't have a build system we will use someone elses.
-#
-# This test builds the same set of source with and without the BEEF macro set.
-#
-# This will cause the resulting executables to behave differently.
-###############################################################################
-# The Fortran compiler has to be gfortran as we use the -J argument to redirect
-# module file output.
-#
-export FC = gfortran
-export CC ?= gcc
-
-# By default gmake sets FC to "f77" we need to detect that and force it to our
-# default.
If it is not set then we still have a default but we allow the user -# to override it. -# -ifeq "x$(shell command -v $(FC))" "x" -$(error Could not find gfortran on PATH) -endif - -all: with-beef without-beef - -with-beef: - $(MAKE) -f with-beef.mk - -without-beef: - $(MAKE) -f without-beef.mk - -clean: - $(MAKE) -f with-beef.mk clean - $(MAKE) -f without-beef.mk clean diff --git a/tests-old/TestCases/CompiletimeDependency/bisto.h b/tests-old/TestCases/CompiletimeDependency/bisto.h deleted file mode 100644 index bcddc2a7..00000000 --- a/tests-old/TestCases/CompiletimeDependency/bisto.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef BISTO_H -#define BISTO_H - -#define LIMIT 5 - -#endif diff --git a/tests-old/TestCases/CompiletimeDependency/hillfort.F90 b/tests-old/TestCases/CompiletimeDependency/hillfort.F90 deleted file mode 100644 index 27907780..00000000 --- a/tests-old/TestCases/CompiletimeDependency/hillfort.F90 +++ /dev/null @@ -1,20 +0,0 @@ -program hillfort - - use iso_fortran_env, only : input_unit, output_unit -#ifdef BEEF - use support_mod, only : characters_in_number -#endif - - implicit none - - integer :: input = 50 - - write(output_unit, '("Input is ", I0)') input - -#ifdef BEEF - write(output_unit, & - '("Number is ", I0, " characters long")') characters_in_number(input) -#endif - write(output_unit, '("Halving the number gives ", I0)') input / 2 - -end program hillfort diff --git a/tests-old/TestCases/CompiletimeDependency/looper.c b/tests-old/TestCases/CompiletimeDependency/looper.c deleted file mode 100644 index 857bac43..00000000 --- a/tests-old/TestCases/CompiletimeDependency/looper.c +++ /dev/null @@ -1,17 +0,0 @@ -#include - -#ifdef BEEF -#include "oxo.h" -#else -#include "bisto.h" -#endif - -int main(int argc, char **argv) { - int counter; - - for (counter=0; counter $@ - -$(DIR)hillfort.expected: | $(DIR) - printf "Input is 50\nNumber is 2 characters long\nHalving the number gives 25\n" > $@ - -test-looper: $(DIR)looper.out $(DIR)looper.expected - diff $^ - -$(DIR)looper.out: $(DIR)looper - ./$< > $@ - -$(DIR)looper.expected: | $(DIR) - printf "Test string\nTest string\nTest string\n" > $@ - -$(DIR)hillfort: $(DIR)support_mod.o $(DIR)hillfort.o - @echo Linking $@ - $(FC) -o $@ $(FFLAGS) $^ - -$(DIR)looper: $(DIR)looper.o - @echo Linking $@ - $(CC) -o $@ $(CFLAGS) $^ - -$(DIR)%.o: %.c | $(DIR) - @echo Compiling $@ - $(CC) -o $@ $(CFLAGS) -c $< - -$(DIR)%.o $(DIR)%.mod: %.f90 | $(DIR) - @echo Compiling $@ - $(FC) -o $(DIR)$*.o $(FFLAGS) -J$(DIR) -c $< - -$(DIR)%.o $(DIR)%.mod: %.F90 | $(DIR) - @echo Compiling $@ - $(FC) -o $(DIR)$*.o $(FFLAGS) -J$(DIR) -c $< - -$(DIR)hillfort.o: hillfort.F90 $(DIR)support_mod.mod -$(DIR)looper.o: looper.c oxo.h -$(DIR)support_mod.o $(DIR)support_mod.mod: support_mod.f90 - -$(DIR): - mkdir -p $@ - -clean: - -rm -r $(DIR) diff --git a/tests-old/TestCases/CompiletimeDependency/without-beef.mk b/tests-old/TestCases/CompiletimeDependency/without-beef.mk deleted file mode 100644 index 09eb3f0b..00000000 --- a/tests-old/TestCases/CompiletimeDependency/without-beef.mk +++ /dev/null @@ -1,55 +0,0 @@ -$(info Building without beef) - -export CFLAGS = -export FFLAGS = - -DIR = nobeef/ - -all: test-hillfort test-looper - -test-hillfort: $(DIR)hillfort.out $(DIR)hillfort.expected - diff $^ - -$(DIR)hillfort.out: $(DIR)hillfort - ./$< > $@ - -$(DIR)hillfort.expected: | $(DIR) - printf "Input is 50\nHalving the number gives 25\n" > $@ - -test-looper: $(DIR)looper.out $(DIR)looper.expected - diff $^ - -$(DIR)looper.out: $(DIR)looper - ./$< > $@ - 
-$(DIR)looper.expected: | $(DIR) - printf "Test string\nTest string\nTest string\nTest string\nTest string\n" > $@ - -$(DIR)hillfort: $(DIR)hillfort.o - @echo Linking $@ - $(FC) -o $@ $(FFLAGS) $^ - -$(DIR)looper: $(DIR)looper.o - @echo Linking $@ - $(CC) -o $@ $(CFLAGS) $^ - -$(DIR)%.o: %.c | $(DIR) - @echo Compiling $@ - $(CC) -o $@ $(CFLAGS) -c $< - -$(DIR)%.o $(DIR)%.mod: %.f90 | $(DIR) - @echo Compiling $@ - $(FC) -o $(DIR)$*.o $(FFLAGS) -c $< - -$(DIR)%.o $(DIR)%.mod: %.F90 | $(DIR) - @echo Compiling $@ - $(FC) -o $(DIR)$*.o $(FFLAGS) -c $< - -$(DIR)hillfort.o: hillfort.F90 -$(DIR)looper.o: looper.c bisto.h - -$(DIR): - mkdir -p $(DIR) - -clean: - -rm -r $(DIR) diff --git a/tests-old/TestCases/FortranSourceTree/ReadMe b/tests-old/TestCases/FortranSourceTree/ReadMe deleted file mode 100644 index dda01562..00000000 --- a/tests-old/TestCases/FortranSourceTree/ReadMe +++ /dev/null @@ -1,8 +0,0 @@ -Although this directory holds source it is not intended to be compiled. - -Instead it demonstrates the various aspects of a Fortran source tree. It is -intended to exercise corner cases for tools like the database explorer. To -that end there are multiple files containing identically named (but -different) modules. Obviously these cannot be linked as there are ambiguous -symbol names. But the source analysis stage has to be able to deal with -things like this. diff --git a/tests-old/TestCases/FortranSourceTree/fpp_one.f90 b/tests-old/TestCases/FortranSourceTree/fpp_one.f90 deleted file mode 100644 index 574ba1bf..00000000 --- a/tests-old/TestCases/FortranSourceTree/fpp_one.f90 +++ /dev/null @@ -1,23 +0,0 @@ -#if CHOOSE == ONE -module fpp_mod - - use nosuch_mod, only : nonexistant - - implicit none - - public fpp_choice - -contains - - function fpp_choice() - - implicit none - - character(3) :: fpp_choice - - fpp_choice = "ONE" - - end function fpp_choice - -end module fpp_mod -#endif diff --git a/tests-old/TestCases/FortranSourceTree/fpp_two.f90 b/tests-old/TestCases/FortranSourceTree/fpp_two.f90 deleted file mode 100644 index 0b792779..00000000 --- a/tests-old/TestCases/FortranSourceTree/fpp_two.f90 +++ /dev/null @@ -1,23 +0,0 @@ -#if CHOOSE == TWO -module fpp_mod - - implicit none - - public fpp_choice - -contains - - function fpp_choice() - - use unfound_mod, only : not_there - - implicit none - - character(3) :: fpp_choice - - fpp_choice = "TWO" - - end function fpp_choice - -end module fpp_mod -#endif diff --git a/tests-old/TestCases/FortranSourceTree/link1.f90 b/tests-old/TestCases/FortranSourceTree/link1.f90 deleted file mode 100644 index 00c8887e..00000000 --- a/tests-old/TestCases/FortranSourceTree/link1.f90 +++ /dev/null @@ -1,19 +0,0 @@ -module link_mod - - implicit none - - public link_choice - -contains - - function link_choice() - - implicit none - - integer :: link_choice - - link_choice = 1 - - end function link_choice - -end module link_mod diff --git a/tests-old/TestCases/FortranSourceTree/link2.f90 b/tests-old/TestCases/FortranSourceTree/link2.f90 deleted file mode 100644 index b7e86d5f..00000000 --- a/tests-old/TestCases/FortranSourceTree/link2.f90 +++ /dev/null @@ -1,19 +0,0 @@ -module link_mod - - implicit none - - public link_choice - -contains - - function link_choice() - - implicit none - - integer :: link_choice - - link_choice = 2 - - end function link_choice - -end module link_mod diff --git a/tests-old/TestCases/FortranSourceTree/program.F90 b/tests-old/TestCases/FortranSourceTree/program.F90 deleted file mode 100644 index 2e5caafe..00000000 --- 
a/tests-old/TestCases/FortranSourceTree/program.F90 +++ /dev/null @@ -1,13 +0,0 @@ -program thingumy - - use iso_fortran_env, only : output_unit - use link_mod, only : link_choice - use fpp_mod, only : fpp_choice - - implicit none - - write(output_unit, '("Someone made a decission")') - write(output_unit, '("By linking choice ", I0)') link_choice() - write(output_unit, '("By setting preprocessor variable CHOOSE to ", A)') fpp_choice() - -end program thingumy diff --git a/tests-old/TestCases/FortranSubmodule/Makefile b/tests-old/TestCases/FortranSubmodule/Makefile deleted file mode 100644 index def8a21e..00000000 --- a/tests-old/TestCases/FortranSubmodule/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -# The best way to show how a test case should be built is to build it. -# While we don't have a build system we will use someone elses. -# -.SUFFIXES: -.SUFFIXES: .f90 .o .mod - -# By default gmake sets FC to "f77" we need to detect that and force it to our -# default. If it is not set then we still have a default but we allow the user -# to override it. -# -ifeq "$(origin FC)" "default" -export FC = ifort -else -export FC ?= ifort -endif - -OBJECTS = simple_impl.o simple_mod.o class_mod.o class_impl.o test.o - -verify: test.out test.expected - diff $^ - -test.out: test - ./$< >$@ - -test.expected: - printf "Doubler in submodule 14\n\nInitial value 12\nAfter submodule method 29\n" >$@ - -test: $(OBJECTS) - @echo Linking $@ - $(FC) -o $@ $(OBJECTS) - -.f90.o: - @echo Building $@ - $(FC) -o $@ -c $< - -.f90.mod: - @echo Building $@ - $(FC) -o $*.o -c $< - -simple_mod.o simple_mod.mod: simple_mod.f90 -simple_impl.o: simple_impl.f90 simple_mod.mod -class_mod.o class_mod.mod: class_mod.f90 -class_impl.o: class_impl.f90 class_mod.mod -test.o: test.f90 simple_mod.mod class_mod.mod - -clean: - rm *.o *.mod *.smod test test.out test.expected diff --git a/tests-old/TestCases/FortranSubmodule/class_impl.f90 b/tests-old/TestCases/FortranSubmodule/class_impl.f90 deleted file mode 100644 index cd5c2fc6..00000000 --- a/tests-old/TestCases/FortranSubmodule/class_impl.f90 +++ /dev/null @@ -1,28 +0,0 @@ -submodule(class_mod) class_impl - - implicit none - -contains - - module function bar_initialiser( starter ) result(instance) - implicit none - integer, intent(in) :: starter - type(bar_type) :: instance - instance%stuff = starter - end function bar_initialiser - - - module subroutine bar_mangle(this, factor) - implicit none - class(bar_type), intent(inout) :: this - integer, intent(in) :: factor - this%stuff = ieor(this%stuff, factor) - end subroutine bar_mangle - - - module procedure bar_howmuch ! 
Alternative syntax - implicit none - bar_howmuch = this%stuff - end procedure bar_howmuch - -end submodule class_impl diff --git a/tests-old/TestCases/FortranSubmodule/class_mod.f90 b/tests-old/TestCases/FortranSubmodule/class_mod.f90 deleted file mode 100644 index 25d6616c..00000000 --- a/tests-old/TestCases/FortranSubmodule/class_mod.f90 +++ /dev/null @@ -1,59 +0,0 @@ -module class_mod - - implicit none - - type, abstract :: foo_type - private - integer :: stuff - contains - private - procedure(mangle_if), public, deferred :: mangle - procedure(how_much_if), public, deferred :: how_much - end type foo_type - - interface - subroutine mangle_if(this, factor) - import foo_type - implicit none - class(foo_type), intent(inout) :: this - integer, intent(in) :: factor - end subroutine mangle_if - function how_much_if(this) - import foo_type - implicit none - class(foo_type), intent(inout) :: this - integer :: how_much_if - end function how_much_if - end interface - - type, extends(foo_type) :: bar_type - private - contains - private - procedure, public :: mangle => bar_mangle - procedure, public :: how_much => bar_howmuch - end type bar_type - - interface bar_type - procedure bar_initialiser - end interface bar_type - - interface - module function bar_initialiser(starter) result(instance) - implicit none - integer,intent(in) :: starter - type(bar_type) :: instance - end function bar_initialiser - module subroutine bar_mangle(this, factor) - implicit none - class(bar_type), intent(inout) :: this - integer, intent(in) :: factor - end subroutine bar_mangle - module function bar_howmuch(this) - implicit none - class(bar_type), intent(inout) :: this - integer :: bar_howmuch - end function bar_howmuch - end interface - -end module class_mod diff --git a/tests-old/TestCases/FortranSubmodule/simple_impl.f90 b/tests-old/TestCases/FortranSubmodule/simple_impl.f90 deleted file mode 100644 index 74b63601..00000000 --- a/tests-old/TestCases/FortranSubmodule/simple_impl.f90 +++ /dev/null @@ -1,18 +0,0 @@ -submodule(simple_mod) simple_impl - - implicit none - -contains - - module function returnerer(thing) - - implicit none - - integer, intent(in) :: thing - integer :: returnerer - - returnerer = 2 * thing - - end function returnerer - -end submodule simple_impl diff --git a/tests-old/TestCases/FortranSubmodule/simple_mod.f90 b/tests-old/TestCases/FortranSubmodule/simple_mod.f90 deleted file mode 100644 index 8bd6b4d9..00000000 --- a/tests-old/TestCases/FortranSubmodule/simple_mod.f90 +++ /dev/null @@ -1,13 +0,0 @@ -module simple_mod - - implicit none - - interface - module function returnerer(thing) - implicit none - integer, intent(in) :: thing - integer :: returnerer - end function returnerer - end interface - -end module simple_mod diff --git a/tests-old/TestCases/FortranSubmodule/test.f90 b/tests-old/TestCases/FortranSubmodule/test.f90 deleted file mode 100644 index 3ac9f4c2..00000000 --- a/tests-old/TestCases/FortranSubmodule/test.f90 +++ /dev/null @@ -1,22 +0,0 @@ -program test - - use iso_fortran_env, only : output_unit - - use class_mod, only : bar_type - use simple_mod, only : returnerer - - implicit none - - type(bar_type) :: thing - - thing = bar_type(12) - - write(output_unit, '("Doubler in submodule ", I0)') returnerer(7) - write(output_unit, '()') - - write(output_unit, '("Initial value ", I0)') thing%how_much() - call thing%mangle(17) - write(output_unit, & - '("After submodule method ", I0)') thing%how_much() - -end program test diff --git a/tests-old/TestCases/Makefile 
b/tests-old/TestCases/Makefile deleted file mode 100644 index 506623c4..00000000 --- a/tests-old/TestCases/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) - -TEST_DIRS := $(shell find $(THIS_DIR) -type d -mindepth 1 -maxdepth 1 -print) - -run: $(addprefix run/, $(TEST_DIRS)) - -run/%: - $(MAKE) -C $* - -clean: $(addprefix clean/, $(TEST_DIRS)) - -clean/%: - $(MAKE) -C $* clean diff --git a/tests-old/TestCases/MinimalC/Makefile b/tests-old/TestCases/MinimalC/Makefile deleted file mode 100644 index 95242a70..00000000 --- a/tests-old/TestCases/MinimalC/Makefile +++ /dev/null @@ -1,39 +0,0 @@ -# The best way to show how a test case should be built is to build it. -# While we don't have a build system we will use someone elses. -# -.SUFFIXES: -.SUFFIXES: .c .o - -# By default gmake sets FC to "cc" we need to detect that and force it to our -# default. If it is not set then we still have a default but we allow the user -# to override it. -# -ifeq "$(origin CC)" "default" -export CC = gcc -else -export CC ?= gcc -endif - -OBJECTS = program.o - -verify: test.out test.expected - diff $^ - -test.out: test - ./$< >$@ - -test.expected: - printf "Hello world!\n" >$@ - -test: $(OBJECTS) - @echo Linking $@ - $(CC) -o $@ $(OBJECTS) - -.c.o: - @echo Compiling $@ - $(CC) -o $@ -c $< - -program.o: program.c - -clean: - rm *.o test test.out test.expected diff --git a/tests-old/TestCases/MinimalC/program.c b/tests-old/TestCases/MinimalC/program.c deleted file mode 100644 index ccec439e..00000000 --- a/tests-old/TestCases/MinimalC/program.c +++ /dev/null @@ -1,11 +0,0 @@ -/***************************************************************************** - * It's that simple example program again. - *****************************************************************************/ - -#include - -int main(int argc, char **argv) -{ - printf("Hello world!\n"); - return 0; -} diff --git a/tests-old/TestCases/PSyclone/Makefile b/tests-old/TestCases/PSyclone/Makefile deleted file mode 100644 index ca4e7069..00000000 --- a/tests-old/TestCases/PSyclone/Makefile +++ /dev/null @@ -1,56 +0,0 @@ -# The best way to show how a test case should be built is to build it. -# While we don't have a build system we will use someone elses. -# -# This test assumes that the PSyclone code generation tool is available on the -# executino path. i.e. That the directory containing it appears in the PATH -# environment variable. -# -.SUFFIXES: -.SUFFIXES: .x90 .f90 .F90 .o .mod - -# The compiler has to be ifort as we use the "-module" argument to redirect -# module file storage. 
-# -export FC = ifort - -PSYCLONE = psyclone - -ifeq "x$(shell command -v $(PSYCLONE))" "x" -$(error Could not find the PSyclone script on PATH) -endif - -DIRECTORIES = kernels model - -objects: kernels/my_kernel_mod.o algorithm_mod.o algorithm_mod_psy.o - -%.o %.mod: %.f90 - @echo Compiling $< - $(FC) -o $*.o -module $(dir $@) $(addprefix -I,$(DIRECTORIES)) -c $< - -%_psy.f90 %.f90: %.x90 optimisation.py - @echo Psyclone $< - $(PSYCLONE) -oalg $*.f90 -opsy $*_psy.f90 -d kernels \ - -s $(realpath optimisation.py) \ - -api dynamo0.3 -l -nodm $< - -algorithm_mod.f90 \ -algorithm_mod_psy.f90: algorithm_mod.x90 \ - kernels/my_kernel_mod.f90 optimisation.py -algorithm_mod.o \ -algorithm_mod.mod: algorithm_mod.f90 algorithm_mod_psy.mod \ - model/field_mod.mod kernels/my_kernel_mod.mod -algorithm_mod_psy.o \ -algorithm_mod_psy.mod: algorithm_mod_psy.f90 \ - model/field_mod.mod model/operator_mod.mod \ - kernels/my_kernel_mod.mod -kernels/my_kernel_mod.o \ -kernels/my_kernel_mod.mod: kernels/my_kernel_mod.f90 \ - model/argument_mod.mod model/constants_mod.mod \ - model/functionspace_mod.mod model/kernel_mod.mod \ - -model/field_mod.o \ -model/field_mod.mod: model/field_mod.f90 model/functionspace_mod.mod - -clean: - -rm -r *.o *.mod kernels/*.o kernels/*.mod model/*.o model/*.mod - -rm *.pyc algorithm_mod.f90 algorithm_mod_psy.f90 diff --git a/tests-old/TestCases/PSyclone/algorithm_mod.x90 b/tests-old/TestCases/PSyclone/algorithm_mod.x90 deleted file mode 100644 index cfac16bb..00000000 --- a/tests-old/TestCases/PSyclone/algorithm_mod.x90 +++ /dev/null @@ -1,21 +0,0 @@ -module algorithm_mod - - use field_mod, only : field_type - use my_kernel_mod, only : my_kernel_type - - implicit none - -contains - - subroutine algorithm() - - implicit none - - type(field_type) :: field - - field = field_type() - call invoke( name='a_test', my_kernel_type( field ) ) - - end subroutine algorithm - -end module algorithm_mod diff --git a/tests-old/TestCases/PSyclone/kernels/my_kernel_mod.f90 b/tests-old/TestCases/PSyclone/kernels/my_kernel_mod.f90 deleted file mode 100644 index 035de698..00000000 --- a/tests-old/TestCases/PSyclone/kernels/my_kernel_mod.f90 +++ /dev/null @@ -1,49 +0,0 @@ -module my_kernel_mod - - use argument_mod, only : arg_type, cells, gh_field, gh_write - use functionspace_mod, only : w3 - use kernel_mod, only : kernel_type - - implicit none - - private - - type, public, extends(kernel_type) :: my_kernel_type - private - type(arg_type) :: meta_args(1) = (/ & - arg_type( gh_field, gh_write, w3 ) & - /) - integer :: iterates_over = cells - contains - procedure, nopass :: my_kernel_code - end type - - public :: my_kernel_code - -contains - - subroutine my_kernel_code( nlayers, field_1_w3, ndf_w3, undf_w3, map_w3 ) - - use constants_mod, only : r_def - - implicit none - - integer, intent(in) :: nlayers - integer, intent(in) :: ndf_w3 - integer, intent(in) :: undf_w3 - real(kind=r_def), intent(out) :: field_1_w3(undf_w3) - integer, intent(in) :: map_w3(ndf_w3) - - integer :: d, k - real :: v(ndf_w3) - - call random_number(v) - do k=0, nlayers - do d=0, ndf_w3 - field_1_w3(map_w3(d)) = v(d) + k - end do - end do - - end subroutine my_kernel_code - -end module my_kernel_mod diff --git a/tests-old/TestCases/PSyclone/model/argument_mod.f90 b/tests-old/TestCases/PSyclone/model/argument_mod.f90 deleted file mode 100644 index 2cf7638c..00000000 --- a/tests-old/TestCases/PSyclone/model/argument_mod.f90 +++ /dev/null @@ -1,21 +0,0 @@ -module argument_mod - - implicit none - - private - - integer, public, 
parameter :: gh_field = 507 - integer, public, parameter :: gh_write = 65 - integer, public, parameter :: cells = 396 - - type, public :: arg_type - integer :: arg_type - integer :: arg_intent - integer :: wspace = -1 - integer :: from_wspace = -1 - integer :: stencil_map = -1 - integer :: mesh_arg = -1 - end type arg_type - -end module argument_mod - diff --git a/tests-old/TestCases/PSyclone/model/constants_mod.f90 b/tests-old/TestCases/PSyclone/model/constants_mod.f90 deleted file mode 100644 index bc5170e2..00000000 --- a/tests-old/TestCases/PSyclone/model/constants_mod.f90 +++ /dev/null @@ -1,11 +0,0 @@ -module constants_mod - - use iso_fortran_env, only : real64 - - implicit none - - private - - integer, public, parameter :: r_def = real64 - -end module constants_mod diff --git a/tests-old/TestCases/PSyclone/model/field_mod.f90 b/tests-old/TestCases/PSyclone/model/field_mod.f90 deleted file mode 100644 index 5fb9b872..00000000 --- a/tests-old/TestCases/PSyclone/model/field_mod.f90 +++ /dev/null @@ -1,32 +0,0 @@ -module field_mod - - use constants_mod, only : r_def - use functionspace_mod, only : functionspace_type - - implicit none - - private - - type, public :: field_type - private - contains - private - procedure, public :: get_proxy - end type field_type - - type, public :: field_proxy_type - private - real(r_def), public :: data(10) - type(functionspace_type), public :: vspace - end type field_proxy_type - -contains - - function get_proxy(this) - implicit none - class(field_type), intent(inout) :: this - type(field_proxy_type) :: get_proxy - get_proxy%vspace = functionspace_type() - end function get_proxy - -end module field_mod diff --git a/tests-old/TestCases/PSyclone/model/functionspace_mod.f90 b/tests-old/TestCases/PSyclone/model/functionspace_mod.f90 deleted file mode 100644 index 56c9b016..00000000 --- a/tests-old/TestCases/PSyclone/model/functionspace_mod.f90 +++ /dev/null @@ -1,78 +0,0 @@ -module functionspace_mod - - implicit none - - private - - integer, public, parameter :: W0 = 173 - integer, public, parameter :: W1 = 194 - integer, public, parameter :: W2 = 889 - integer, public, parameter :: W2V = 857 - integer, public, parameter :: W2H = 884 - integer, public, parameter :: W2broken = 211 - integer, public, parameter :: W2trace = 213 - integer, public, parameter :: W3 = 424 - integer, public, parameter :: Wtheta = 274 - integer, public, parameter :: Wchi = 869 - - type, public :: functionspace_type - private - integer, pointer :: dofmap(:, :) - contains - private - procedure, public :: get_ncell - procedure, public :: get_ndf - procedure, public :: get_nlayers - procedure, public :: get_undf - procedure, public :: get_whole_dofmap - ! There should be a finaliser but for testing it's too much work. 
- end type functionspace_type - - interface functionspace_type - procedure functionspace_initialise - end interface - -contains - - function functionspace_initialise() result(instance) - implicit none - type(functionspace_type) :: instance - allocate( instance%dofmap(2, 1) ) - end function functionspace_initialise - - function get_ncell(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_ncell - get_ncell = 1 - end function get_ncell - - function get_ndf(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_ndf - get_ndf = 1 - end function get_ndf - - function get_undf(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_undf - get_undf = 1 - end function get_undf - - function get_nlayers(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_nlayers - get_nlayers = 1 - end function get_nlayers - - function get_whole_dofmap(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer, pointer :: get_whole_dofmap(:, :) - get_whole_dofmap => this%dofmap - end function get_whole_dofmap - -end module functionspace_mod diff --git a/tests-old/TestCases/PSyclone/model/kernel_mod.f90 b/tests-old/TestCases/PSyclone/model/kernel_mod.f90 deleted file mode 100644 index 821c1ec3..00000000 --- a/tests-old/TestCases/PSyclone/model/kernel_mod.f90 +++ /dev/null @@ -1,11 +0,0 @@ -module kernel_mod - - implicit none - - private - - type, public, abstract :: kernel_type - private - end type - -end module kernel_mod diff --git a/tests-old/TestCases/PSyclone/model/operator_mod.f90 b/tests-old/TestCases/PSyclone/model/operator_mod.f90 deleted file mode 100644 index a06a63ca..00000000 --- a/tests-old/TestCases/PSyclone/model/operator_mod.f90 +++ /dev/null @@ -1,23 +0,0 @@ -module operator_mod - - implicit none - - private - - type, public :: operator_type - private - end type operator_type - - type, public :: operator_proxy_type - private - end type operator_proxy_type - - type, public :: columnwise_operator_type - private - end type columnwise_operator_type - - type, public :: columnwise_operator_proxy_type - private - end type columnwise_operator_proxy_type - -end module operator_mod diff --git a/tests-old/TestCases/PSyclone/optimisation.py b/tests-old/TestCases/PSyclone/optimisation.py deleted file mode 100644 index d995e883..00000000 --- a/tests-old/TestCases/PSyclone/optimisation.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -''' -PSyclone transformation script for the Dynamo0p3 API to apply -colouring and OpenMP. -''' -from __future__ import absolute_import, print_function - -from psyclone.transformations import Dynamo0p3ColourTrans, \ - Dynamo0p3OMPLoopTrans, \ - OMPParallelTrans -from psyclone.dynamo0p3 import DISCONTINUOUS_FUNCTION_SPACES - - -def trans(psy): - ''' - Applies PSyclone colouring and OpenMP transformations. 
- ''' - ctrans = Dynamo0p3ColourTrans() - otrans = Dynamo0p3OMPLoopTrans() - oregtrans = OMPParallelTrans() - - # Loop over all of the Invokes in the PSy object - for invoke in psy.invokes.invoke_list: - - print("Transforming invoke '{0}' ...".format(invoke.name)) - schedule = invoke.schedule - - # Colour loops over cells unless they are on discontinuous - # spaces (W3, WTHETA and W2V) or over dofs - for loop in schedule.loops(): - if loop.iteration_space == "cells" \ - and loop.field_space.orig_name \ - not in DISCONTINUOUS_FUNCTION_SPACES: - schedule, _ = ctrans.apply(loop) - - # Add OpenMP to loops over colours. - for loop in schedule.loops(): - if loop.loop_type != "colours": - schedule, _ = oregtrans.apply(loop) - schedule, _ = otrans.apply(loop, reprod=True) - - schedule.view() - - return psy diff --git a/tests-old/TestCases/SimpleLFRic/Makefile b/tests-old/TestCases/SimpleLFRic/Makefile deleted file mode 100644 index 78fa0630..00000000 --- a/tests-old/TestCases/SimpleLFRic/Makefile +++ /dev/null @@ -1,143 +0,0 @@ -# The best way to show how a test case should be built is to build it. -# While we don't have a build system we will use someone elses. -# -# This test expects the MPICH compiler wrapper scripts to be available on -# PATH. Furthermore it expects them to be wrapping Intel Fortran. -# -# It also expects the PSyclone source generator and pFUnit processor to be -# available on PATH. -# -# As recommended by the pFUnit developers it exptects a macro PFUNIT to -# contain the full path to the pFUnit install directory. -# -.SUFFIXES: -.SUFFIXES: .pf .f90 .F90 .o .mod - -APP_OBJECTS = util.o util_mod.o \ - algorithm_mod.o algorithm_mod_psy.o \ - model/field_mod.o model/functionspace_mod.o model/mesh_mod.o \ - model/kernel_mod.o kernels/my_kernel_mod.o \ - thing.o -TEST_OBJECTS = kernels/my_kernel_mod.o my_kernel_test_mod.o driver.o - -FC = mpif90 -FFLAGS = -Ikernels -Imodel -I$(PFUNIT)/mod -debug full -traceback -CC = gcc -CFLAGS = -std=c99 -g -LFLAGS = -qopenmp -debug full -traceback -PSYCLONE = psyclone -PFPROC = pFUnitParser.py - -# We use the "-module" argument to redirect module file creation so the -# compiler wrapped by MPI must be ifort. 
-# -ifneq "$(firstword $(shell $(FC) -show))" "ifort" -$(error Please build with MPICH for Intel Fortran) -endif - -ifeq "x$(shell command -v $(PSYCLONE))" "x" -$(error Could not find the PSyclone script on PATH) -endif - -ifeq "x$(shell command -v $(PFPROC))" "x" -$(error Could not find the pFUnit processor on PATH) -endif - -verify: test-thing test-test - -test-thing: thing.out thing.expected - diff $^ - -thing.out: thing - ./$< >$@ - -thing.expected: - printf "Some hash: 7\n" >$@ - for iter in 1 2 3 4 5 6 7 8 9 10; do printf "Field data: 1.0000\n"; done >>$@ - -test-test: test.out test.expected - diff $^ - -test.out: test - ./$< | tail -n 2 >$@ - -test.expected: - printf " OK\n (1 test)\n" >$@ - -thing: $(APP_OBJECTS) - @echo Linking $@ - $(FC) -o $@ -g $(APP_OBJECTS) -lstdc++ - -test: $(TEST_OBJECTS) - @echo Linking $@ - $(FC) $(LFLAGS) -o $@ -L$(PFUNIT)/lib $(TEST_OBJECTS) -l pfunit - -%.o: %.c - @echo Compiling $@ - $(CC) -o $@ $(CFLAGS) -c $< - -%.o %.mod: %.f90 - @echo Compiling $@ - $(FC) -o $*.o $(FFLAGS) -module $(dir $@) -c $< - -%.o %.mod: %.F90 - @echo Compiling $@ - $(FC) -o $*.o $(FFLAGS) -module $(dir $@) -c $< - -%.f90 %_psy.F90: %.x90 optimisation.py - @echo Generating $@ - $(PSYCLONE) -oalg $*.f90 -opsy $*_psy.F90 -d kernels \ - -s $(realpath optimisation.py) -api dynamo0.3 -l -dm $< - -util_mod.f90: util_mod.template # Really should be util.c - @echo Generating $@ - # This cat is in lue of a generator - cat util_mod.template > $@ - -.pf.F90: - @echo Generating $@ - $(PFPROC) $< $@ - -driver.o: $(PFUNIT)/include/driver.F90 testSuites.inc - @echo Compiling $@ - $(FC) -o $@ -I$(PFUNIT)/mod -c $< - -testSuites.inc: - @echo Creating $@ - echo ADD_TEST_SUITE\(my_kernel_test_mod_suite\) > $@ - -my_kernel_test_mod.o \ -my_kernel_test_mod.mod: my_kernel_test_mod.F90 \ - kernels/my_kernel_mod.mod model/constants_mod.mod -kernel_test_mod.F90: kernel_test_mod.pf - -algorithm_mod.o \ -algorithm_mod.mod: algorithm_mod.f90 algorithm_mod_psy.mod \ - model/field_mod.mod kernels/my_kernel_mod.mod -algorithm_mod.f90: algorithm_mod.x90 kernels/my_kernel_mod.f90 optimisation.py -algorithm_mod_psy.o \ -algorithm_mod_psy.mod: algorithm_mod_psy.F90 \ - model/field_mod.mod model/operator_mod.mod \ - kernels/my_kernel_mod.mod -algorithm_mod_psy.F90: algorithm_mod.x90 \ - kernels/my_kernel_mod.f90 optimisation.py -kernels/my_kernel_mod.o \ -kernels/my_kernel_mod.mod: kernels/my_kernel_mod.f90 model/mesh_mod.mod\ - model/argument_mod.mod model/constants_mod.mod \ - model/functionspace_mod.mod model/kernel_mod.mod - -model/field_mod.o: model/field_mod.f90 \ - model/constants_mod.mod model/functionspace_mod.mod \ - model/mesh_mod.mod - -util_mod.o: util_mod.f90 model/constants_mod.mod - -thing.o: thing.f90 algorithm_mod.mod algorithm_mod_psy.mod util_mod.mod \ - model/field_mod.mod - -clean: - -rm *.o *.mod *.pyc kernels/*.o kernels/*.mod model/*.o model/*.mod - -rm testSuites.inc - -rm algorithm_mod.f90 algorithm_mod_psy.F90 my_kernel_test_mod.F90 - -rm util_mod.f90 - -rm test test.out test.expected thing thing.out thing.expected diff --git a/tests-old/TestCases/SimpleLFRic/algorithm_mod.x90 b/tests-old/TestCases/SimpleLFRic/algorithm_mod.x90 deleted file mode 100644 index d37dbf98..00000000 --- a/tests-old/TestCases/SimpleLFRic/algorithm_mod.x90 +++ /dev/null @@ -1,24 +0,0 @@ -module algorithm_mod - - use field_mod, only : field_type - use my_kernel_mod, only : my_kernel_type - use util_mod, only : hash - - implicit none - - private - public :: algorithm - -contains - - subroutine 
algorithm(field) - - implicit none - - class(field_type), intent(inout) :: field - - call invoke( my_kernel_type(field) ) - - end subroutine algorithm - -end module algorithm_mod diff --git a/tests-old/TestCases/SimpleLFRic/kernels/my_kernel_mod.f90 b/tests-old/TestCases/SimpleLFRic/kernels/my_kernel_mod.f90 deleted file mode 100644 index d6e3dfc2..00000000 --- a/tests-old/TestCases/SimpleLFRic/kernels/my_kernel_mod.f90 +++ /dev/null @@ -1,47 +0,0 @@ -module my_kernel_mod - - use argument_mod, only : arg_type, cells, gh_field, gh_write - use functionspace_mod, only : w3 - use kernel_mod, only : kernel_type - - implicit none - - private - - type, public, extends(kernel_type) :: my_kernel_type - private - type(arg_type) :: meta_args(1) = (/ & - arg_type( gh_field, gh_write, w3 ) & - /) - integer :: iterates_over = cells - contains - procedure, nopass :: my_kernel_code - end type - - public :: my_kernel_code - -contains - - subroutine my_kernel_code( nlayers, field_1_w3, ndf_w3, undf_w3, map_w3 ) - - use constants_mod, only : r_def - - implicit none - - integer, intent(in) :: nlayers - integer, intent(in) :: ndf_w3 - integer, intent(in) :: undf_w3 - real(kind=r_def), intent(out) :: field_1_w3(undf_w3) - integer, intent(in) :: map_w3(ndf_w3) - - integer :: d, k - - do k=0, nlayers - do d=0, ndf_w3 - field_1_w3(map_w3(d)) = d + k - end do - end do - - end subroutine my_kernel_code - -end module my_kernel_mod diff --git a/tests-old/TestCases/SimpleLFRic/model/argument_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/argument_mod.f90 deleted file mode 100644 index 2cf7638c..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/argument_mod.f90 +++ /dev/null @@ -1,21 +0,0 @@ -module argument_mod - - implicit none - - private - - integer, public, parameter :: gh_field = 507 - integer, public, parameter :: gh_write = 65 - integer, public, parameter :: cells = 396 - - type, public :: arg_type - integer :: arg_type - integer :: arg_intent - integer :: wspace = -1 - integer :: from_wspace = -1 - integer :: stencil_map = -1 - integer :: mesh_arg = -1 - end type arg_type - -end module argument_mod - diff --git a/tests-old/TestCases/SimpleLFRic/model/constants_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/constants_mod.f90 deleted file mode 100644 index bc5170e2..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/constants_mod.f90 +++ /dev/null @@ -1,11 +0,0 @@ -module constants_mod - - use iso_fortran_env, only : real64 - - implicit none - - private - - integer, public, parameter :: r_def = real64 - -end module constants_mod diff --git a/tests-old/TestCases/SimpleLFRic/model/field_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/field_mod.f90 deleted file mode 100644 index 9675d36c..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/field_mod.f90 +++ /dev/null @@ -1,59 +0,0 @@ -module field_mod - - use constants_mod, only : r_def - use functionspace_mod, only : functionspace_type - use mesh_mod, only : mesh_type - - implicit none - - private - - type, public :: field_type - private - type(mesh_type), pointer :: mesh - contains - private - procedure, public :: get_mesh - procedure, public :: get_proxy - ! 
There should be a finalising but I can't be bothered - end type field_type - - interface field_type - procedure :: field_initialiser - end interface field_type - - type, public :: field_proxy_type - private - real(r_def), public :: data(10) - type(functionspace_type), public :: vspace - contains - procedure set_dirty - end type field_proxy_type - -contains - - function field_initialiser() result(instance) - implicit none - type(field_type) :: instance - allocate( instance%mesh ) - end function field_initialiser - - function get_mesh(this) - implicit none - class(field_type), intent(inout) :: this - type(mesh_type), pointer :: get_mesh - get_mesh => this%mesh - end function get_mesh - - function get_proxy(this) - implicit none - class(field_type), intent(inout) :: this - type(field_proxy_type) :: get_proxy - get_proxy%vspace = functionspace_type() - end function get_proxy - - subroutine set_dirty(this) - implicit none - class(field_Proxy_type), intent(inout) :: this - end subroutine set_dirty -end module field_mod diff --git a/tests-old/TestCases/SimpleLFRic/model/functionspace_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/functionspace_mod.f90 deleted file mode 100644 index 56c9b016..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/functionspace_mod.f90 +++ /dev/null @@ -1,78 +0,0 @@ -module functionspace_mod - - implicit none - - private - - integer, public, parameter :: W0 = 173 - integer, public, parameter :: W1 = 194 - integer, public, parameter :: W2 = 889 - integer, public, parameter :: W2V = 857 - integer, public, parameter :: W2H = 884 - integer, public, parameter :: W2broken = 211 - integer, public, parameter :: W2trace = 213 - integer, public, parameter :: W3 = 424 - integer, public, parameter :: Wtheta = 274 - integer, public, parameter :: Wchi = 869 - - type, public :: functionspace_type - private - integer, pointer :: dofmap(:, :) - contains - private - procedure, public :: get_ncell - procedure, public :: get_ndf - procedure, public :: get_nlayers - procedure, public :: get_undf - procedure, public :: get_whole_dofmap - ! There should be a finaliser but for testing it's too much work. 
- end type functionspace_type - - interface functionspace_type - procedure functionspace_initialise - end interface - -contains - - function functionspace_initialise() result(instance) - implicit none - type(functionspace_type) :: instance - allocate( instance%dofmap(2, 1) ) - end function functionspace_initialise - - function get_ncell(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_ncell - get_ncell = 1 - end function get_ncell - - function get_ndf(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_ndf - get_ndf = 1 - end function get_ndf - - function get_undf(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_undf - get_undf = 1 - end function get_undf - - function get_nlayers(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer :: get_nlayers - get_nlayers = 1 - end function get_nlayers - - function get_whole_dofmap(this) - implicit none - class(functionspace_type), intent(inout) :: this - integer, pointer :: get_whole_dofmap(:, :) - get_whole_dofmap => this%dofmap - end function get_whole_dofmap - -end module functionspace_mod diff --git a/tests-old/TestCases/SimpleLFRic/model/kernel_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/kernel_mod.f90 deleted file mode 100644 index 821c1ec3..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/kernel_mod.f90 +++ /dev/null @@ -1,11 +0,0 @@ -module kernel_mod - - implicit none - - private - - type, public, abstract :: kernel_type - private - end type - -end module kernel_mod diff --git a/tests-old/TestCases/SimpleLFRic/model/mesh_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/mesh_mod.f90 deleted file mode 100644 index 3e8ccd91..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/mesh_mod.f90 +++ /dev/null @@ -1,22 +0,0 @@ -module mesh_mod - - implicit none - - private - - type, public :: mesh_type - private - contains - procedure get_last_edge_cell - end type mesh_type - -contains - - function get_last_edge_cell(this) - implicit none - class(mesh_type), intent(inout) :: this - integer :: get_last_edge_cell - get_last_edge_cell = 1 - end function get_last_edge_cell - -end module mesh_mod diff --git a/tests-old/TestCases/SimpleLFRic/model/operator_mod.f90 b/tests-old/TestCases/SimpleLFRic/model/operator_mod.f90 deleted file mode 100644 index a06a63ca..00000000 --- a/tests-old/TestCases/SimpleLFRic/model/operator_mod.f90 +++ /dev/null @@ -1,23 +0,0 @@ -module operator_mod - - implicit none - - private - - type, public :: operator_type - private - end type operator_type - - type, public :: operator_proxy_type - private - end type operator_proxy_type - - type, public :: columnwise_operator_type - private - end type columnwise_operator_type - - type, public :: columnwise_operator_proxy_type - private - end type columnwise_operator_proxy_type - -end module operator_mod diff --git a/tests-old/TestCases/SimpleLFRic/my_kernel_test_mod.pf b/tests-old/TestCases/SimpleLFRic/my_kernel_test_mod.pf deleted file mode 100644 index 832926b1..00000000 --- a/tests-old/TestCases/SimpleLFRic/my_kernel_test_mod.pf +++ /dev/null @@ -1,27 +0,0 @@ -module my_kernel_test_mod - - use pFUnit_mod - use constants_mod, only : r_def - use my_kernel_mod, only : my_kernel_code - - implicit none - -contains - - @test - subroutine test_my_kernel - - implicit none - - real(r_def) :: dblock(27) - real(r_def) :: expected(27) = (/4,5,6,7,8,9,10,11,12, & - 0,0,0,0,0,0,0,0,0, & - 0,0,0,0,0,0,0,0,0/) - integer :: dofs(9) = 
(/1,2,3,4,5,6,7,8,9/) - - call my_kernel_code( 3, dblock, 9, 27, dofs) - @assertEqual(expected, dblock) - - end subroutine test_my_kernel - -end module my_kernel_test_mod diff --git a/tests-old/TestCases/SimpleLFRic/optimisation.py b/tests-old/TestCases/SimpleLFRic/optimisation.py deleted file mode 100644 index d995e883..00000000 --- a/tests-old/TestCases/SimpleLFRic/optimisation.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -''' -PSyclone transformation script for the Dynamo0p3 API to apply -colouring and OpenMP. -''' -from __future__ import absolute_import, print_function - -from psyclone.transformations import Dynamo0p3ColourTrans, \ - Dynamo0p3OMPLoopTrans, \ - OMPParallelTrans -from psyclone.dynamo0p3 import DISCONTINUOUS_FUNCTION_SPACES - - -def trans(psy): - ''' - Applies PSyclone colouring and OpenMP transformations. - ''' - ctrans = Dynamo0p3ColourTrans() - otrans = Dynamo0p3OMPLoopTrans() - oregtrans = OMPParallelTrans() - - # Loop over all of the Invokes in the PSy object - for invoke in psy.invokes.invoke_list: - - print("Transforming invoke '{0}' ...".format(invoke.name)) - schedule = invoke.schedule - - # Colour loops over cells unless they are on discontinuous - # spaces (W3, WTHETA and W2V) or over dofs - for loop in schedule.loops(): - if loop.iteration_space == "cells" \ - and loop.field_space.orig_name \ - not in DISCONTINUOUS_FUNCTION_SPACES: - schedule, _ = ctrans.apply(loop) - - # Add OpenMP to loops over colours. - for loop in schedule.loops(): - if loop.loop_type != "colours": - schedule, _ = oregtrans.apply(loop) - schedule, _ = otrans.apply(loop, reprod=True) - - schedule.view() - - return psy diff --git a/tests-old/TestCases/SimpleLFRic/thing.f90 b/tests-old/TestCases/SimpleLFRic/thing.f90 deleted file mode 100644 index b5dd42a0..00000000 --- a/tests-old/TestCases/SimpleLFRic/thing.f90 +++ /dev/null @@ -1,27 +0,0 @@ -program thing - - use iso_fortran_env, only : output_unit - - use algorithm_mod, only : algorithm - use field_mod, only : field_type, field_proxy_type - use util_mod, only : hash - - implicit none - - type(field_type) :: field - - real, target :: something(4) - real, pointer :: some_pointer(:) => null() - - type(field_proxy_type) :: accessor - - call random_number(something) - some_pointer => something - write(output_unit, '("Some hash: ", I0)') hash(some_pointer) - - accessor = field%get_proxy() - accessor%data = 1.0 - call algorithm(field) - write(output_unit, '("Field data: ", F17.4)') accessor%data - -end program thing diff --git a/tests-old/TestCases/SimpleLFRic/util.c b/tests-old/TestCases/SimpleLFRic/util.c deleted file mode 100644 index 9deefc1b..00000000 --- a/tests-old/TestCases/SimpleLFRic/util.c +++ /dev/null @@ -1,9 +0,0 @@ -#include "util.h" - -int8_t eor_hash(void *block, int length) { - int8_t hash = 0xff; - for (unsigned int index = 0; index < length; ++index) { - hash = hash ^ ((int8_t *)block)[index]; - } - return hash; -} diff --git a/tests-old/TestCases/SimpleLFRic/util.h b/tests-old/TestCases/SimpleLFRic/util.h deleted file mode 100644 index c254a41d..00000000 --- a/tests-old/TestCases/SimpleLFRic/util.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef UTIL_H -#define UTIL_H - -#include - -extern int8_t eor_hash(void *block, int length); - -#endif diff --git a/tests-old/TestCases/SimpleLFRic/util_mod.template b/tests-old/TestCases/SimpleLFRic/util_mod.template deleted file mode 100644 index b5100ca0..00000000 --- a/tests-old/TestCases/SimpleLFRic/util_mod.template +++ /dev/null @@ -1,29 +0,0 @@ -module util_mod - - use 
iso_c_binding, only : c_int, c_int8_t, c_loc, c_ptr - - implicit none - - private - public :: hash - - interface - function eor_hash(block, length) bind(c, name='eor_hash') - import c_int, c_int8_t, c_ptr - implicit none - integer(c_int8_t) eor_hash - type(c_ptr), intent(in) :: block - integer(c_int), intent(in), value :: length - end function eor_hash - end interface - -contains - - function hash(block) - implicit none - real, pointer :: block(:) - integer :: hash - hash = eor_hash(c_loc(block), size(block, 1)) - end function hash - -end module util_mod diff --git a/tests-old/TestCases/pFUnit/Build.txt b/tests-old/TestCases/pFUnit/Build.txt deleted file mode 100644 index 52d84952..00000000 --- a/tests-old/TestCases/pFUnit/Build.txt +++ /dev/null @@ -1,5 +0,0 @@ -stuff_test.o (Compile) - - stuff_test.F90 (pFUnit) - - stuff_test.pf -module.o (Compile) - - module.f90 diff --git a/tests-old/TestCases/pFUnit/Makefile b/tests-old/TestCases/pFUnit/Makefile deleted file mode 100644 index c1d304a6..00000000 --- a/tests-old/TestCases/pFUnit/Makefile +++ /dev/null @@ -1,58 +0,0 @@ -# The best way to show how a test case should be built is to build it. -# While we don't have a build system we will use someone elses. -# -# This test simulates using the pFUnit framework for unit testing. -# As distributed it expects version 3.x.x of the framework. The -# processing script has changed name with version 4.x.x so you would -# need to change the PFRPOC macro to be "pfproc". -# -# The test assumes that the processing script is visible on the execution -# path. i.e. That it is included in the PATH environment variable. -# -.SUFFIXES: -.SUFFIXES: .pf .f90 .F90 .o .mod - -# By default gmake sets FC to "f77" we need to detect that and force it to our -# default. If it is not set then we still have a default but we allow the user -# to override it. 
-# -ifeq "$(origin FC)" "default" -export FC = ifort -else -export FC ?= ifort -endif - -PFPROC ?= pFUnitParser.py - -ifeq "x$(shell command -v $(PFPROC))" "x" -$(error Could not find the pFUnit processor on PATH) -endif - -objects: stuff_mod.o stuff_test.o - -.f90.o: - @echo Compiling $@ - $(FC) -o $@ -c $< - -.f90.mod: - @echo Compiling $@ - $(FC) -o $*.o -c $< - -.F90.o: - @echo Compiling $@ - $(FC) -o $@ -I$(PFUNIT)/mod -c $< - -.F90.mod: - @echo Compiling $@ - $(FC) -o $*.o -I$(PFUNIT)/mod -c $< - -.pf.F90: - @echo Processing $@ - $(PFPROC) $< $@ - -stuff_mod.o stuff_mod.mod: stuff_mod.f90 -stuff_test.o stuff_test_mod.mod: stuff_test.F90 stuff_mod.mod -stuff_test.F90: stuff_test.pf - -clean: - -rm *.o *.mod stuff_test.F90 diff --git a/tests-old/TestCases/pFUnit/stuff_mod.f90 b/tests-old/TestCases/pFUnit/stuff_mod.f90 deleted file mode 100644 index cbee2cca..00000000 --- a/tests-old/TestCases/pFUnit/stuff_mod.f90 +++ /dev/null @@ -1,17 +0,0 @@ -module stuff_mod - - implicit none - -contains - - function number() - - implicit none - - integer :: number - - number = 42 - - end function number - -end module stuff_mod diff --git a/tests-old/TestCases/pFUnit/stuff_test.pf b/tests-old/TestCases/pFUnit/stuff_test.pf deleted file mode 100644 index e9300aa7..00000000 --- a/tests-old/TestCases/pFUnit/stuff_test.pf +++ /dev/null @@ -1,22 +0,0 @@ -module stuff_test_mod - - use pFUnit_mod - use stuff_mod, only : number - - implicit none - -contains - - @test - subroutine test_number_okay() - - implicit none - - integer :: result - - result = number() - @assertEqual( 42, result ) - - end subroutine test_number_okay - -end module stuff_test_mod diff --git a/tests-old/system-tests/GitRepository/expected/aleph b/tests-old/system-tests/GitRepository/expected/aleph deleted file mode 100644 index 3c3670a7..00000000 --- a/tests-old/system-tests/GitRepository/expected/aleph +++ /dev/null @@ -1 +0,0 @@ -File the first. diff --git a/tests-old/system-tests/GitRepository/expected/beis/veis b/tests-old/system-tests/GitRepository/expected/beis/veis deleted file mode 100644 index 44a7476d..00000000 --- a/tests-old/system-tests/GitRepository/expected/beis/veis +++ /dev/null @@ -1,2 +0,0 @@ -File the second. 
- diff --git a/tests-old/system-tests/GitRepository/repo.tar b/tests-old/system-tests/GitRepository/repo.tar deleted file mode 100644 index 32b262709fc428936199bf031023718ca96eef16..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51200 zcmeHwYm6jWc2=(~F;oJ=7VyK1k?ut2Zaq5RnU&R5y;VIm)!oyz+tc0duJ(-GkDADg z$f}&qjL38Lh=*;-lK0_7i)CY3wzXOw286V- zkPHZr_`Y**WJYGaYpQB`Oq6!EGb8Tf+;h+SoO?5Hdw%i4+5VOAw=h3{p;W5WDy2I9 zEtMDMOZ+$fyHKvy7v{_5@_e;&p@e_t%NNxA*@C41< z{a$`{p9kFkMqqo*Z8tn4Lf|GJZU0Nts;AuN zvhRvAL20vqG9Tt)^Qj2^rr+&CKXm*p)pf(r-hwW$pSqrE3^2loh*T@^yUNCh`L5mW z#ElCw(FU}Ko?+YXcO1-$A$Qu*w(@*0|DhZB3U+za50%}D+(5O%u;2E!Fr1zv@Oweq zj@)=u)ov+{V0%$WaKp&&C7>-RHJq^>a)?gH-+|ggQ={!%Wfm4Min`ALi$wm7T#G4D&4CvxCr8`8->K z|F#7)VNti`K(kgRa>GdFpDQan9Kupx{2~zNHpd@kUz9Id**C10nWZBrG3|AR}^8aN94)EFp>WUXn*(k$(JuCjxze%JM) zoN%SEj^C2c6_$vMH{XCyC1 zEJs;!Q9bbZ1ZWa0G+YQNiYVK0l&$W!w>;O$`>j?UXzsLuRlFm*jL70XT=OX=#dvRf z=p;gx3s4Q)QEkVy6`pUoo*USmoNDy-R6WQ6J8<(OfG}3vx9bP4g8YrNz-&iaEZ)EK z=CT%I&k#X*YkgyL^})u?JNGu%uU)^rp*~U}+UGqr9TuNFnkme!Jen=cWs76WSW=J5 z#oqKRX4?V?D*r*KtS4CvZ9%G)E%nhy%582#wCZOoEAp7Q+Mt6HYpW*B7B^H*zt?Hw zi@OK5LZDd0sAZ{T3^OA$PCuCdXxdDinNp!2L|8}PYk#2f&3Hxdh>A}hs;S3wW&VHB znVuCmUu32P)Riw*)U|`*m=b+g&b&N=ggzBE-*=FN{BMO_-=hnd-yt@;ws%T;;1Ky= zufo{Z@xR#rUs$MBQu)8{rk&yo$p3yAhz;0tgH9%cI41;Y&~8Ro^#-8L&_jqJwxmk z!7Vk@><2L1qRv2-XJrUmMQubgAQL^?^Fz1kdkzi6mLGKOh!PJf$rg-=`RORXANBgN zmFeIcm4k)=a#xhtGgmcS*TXPF2*bA5)IpQ2_A?mEoVB3t!rBW}7cnSwfGyauBfO2p zdf@w!@>>{&LVm{&o{DS_T&Pp5KBUM7hDGZFM7)HFdi%jGgNP$rtsnH=U@&BQ%M2c2 z-f6ANy;ge*dNFVdLm8pCbwz53usGg-8r#qvsF|!mdQN4z*jdQjH&*Yx_2WV~YeRQJ zD;ZErK%$6uDwBahKO90WFLS>jV5y!x3>+o5OX$Sx+>}g)xj=w=VpUXSN+9{Wl6k4z z4oDnhIc-;254^D7>p{)~jd~rUB?@RU=}gD#nJzr?ELDJdz@K#ko6lWo*mRFTuT9${ zzZ42=hUqdyMi$80^?FCn=zAe;!N0YsEOx5Yi5e;?X?~B^eHH29?h-Yymf8!&V!AdGN0FP=hSpO zn99Eo=g0H&FhZhe0AoN<>jz49NDnfrT5uv=XGvvWKMwv$GV5SBFpz97ECx&yiZsyn z)s8z2wAi6A7ua6Hn<4lv!P7!`+qSm_D$$&*$&BXHj*IRdI5k2RC2Z4pn*n7t16O!c z{8#oL_3(Un&loG9+Z#r+prwm!u>ni32@4D+Ot>vJTn?iXx;~j(7GcTsma*WVtVwi~ z*;paR2;8RYH3uMn$9=9DqXW6AK$cG1p*;3{#B+tmEy&J}6IyK6Y164UgJ==Q#}WXh zwGM=Mok4-()!Pl!2&ov%4Z7$`U|RhEBL?JIs1zGQ7GD8k*zZIT*JjmtM2l?;v1ej^ zghr9-=mMF#1NjHvo*=;rraijThSc4*AuVlACFrJzZ+3ie;!IQiou#S(02ss58?YIv zXLhHrC8&&HcS3};{YXX`st4H_7>q*AnsS!am%e08UB7(|Y&|vg(2}QC4(8$*cjF0! 
z%hZ(FDG)5hUrrtwgfFRw7LjT_){hhDlUII(K^|IkI$H8cZ@Q7Bm{G3|-8qo7wglkN zdi?k?W_pSL1E2sOVG^KC7;ycz$_mT?AJ_rSn8)}|p2fcOuu(1;*YVX0;L+}=Z1IZv zQp~?AFTwU&ZpK=DN&F?MBQS42fP(^D8Xb8ILCN^d2mtp3Wf953{|C&}_OO?bG@4$4 z>39i8mk==SVLF7Pu9V5}S6)DPYP7r``rc;m=@#H|F`aB~EKI+A|IY2DrFZY+-BdPi z2>e;`3n3K<(5aV-@(gH2%kkcK@9xw4?p^j4lyiH0j60kozXHO}uk@qV<@|~R-RHP7 z!_`wbPl^82B78-agx|A%e&vN51b*;x*LSSCTkM-Cmrow`JK!xbK?-w_#1JgZ4kJ*u*5M zg^=CZu?L}{|Aa9JVMKA)Y6mdyvuc;f)*Vc(|#$(C-!D5#Iov=QwgegNr4>FImZ5e(vi^e0?RADtKr(1v!0 z@m|>3u8j?W_GmwAJAIHlg)l_5*&>y&nHfvqW+5|dn@+bkO$+x|h|EUMhj*IM9 zuziyM5&s<}uuf3~9LE23)B;Y#|4LQl|I_-PvnQCN@;`X4-5RJJI%7;HG#L1T$$_wy z&KV$p0^UbrVnjl(R0dcF2ahg=#+YG`OfCeXUic9*xoB%LD`T15;SkA8ZsOtbGPx(C zl?ndWOFBi~Csc>~)&_kGS)Jd0+$v;xurW%&+R4E;>tA z>_}B0`{Z;n1}uert3Xa512jXB|KU*63`rJj7@>Ic`sy{cQgqyBMX!&jRAuFh(5Jr|8i02g5;g8j@# zkY^9Dub?elXOF20;Jd38PQ#@ijhs2GRP?5=J2F-a>K&Ic;EqqXm+rX@042tVZx}~B zMZo~Vxu|X8@c_UAwa3;WtY2unZ6scrtV?3x3s54E!P=#$-En|1P|8C@1;-F#F@9hX zqn3J%!X47VZ6D#-cxr7yw-jpeVPA8`!0%&pB&Y(VLBaOGM@K-_%^}1bO@|zW>&N;e zxbNh|7N^;5H`<*xsmNFN{SG!YL>2@AmtnRq3TAUUmd#OQZ6EYe0uTcLRgPpiBCI_G z@i|Nj>tVcrO%Xvx7*^uj0vZAYl3CZ~07B3Cd=rrbYym?12zBNeKE`Lh%fkT&@|IUs z{zH|G3I8L8{co*puHL$}yv9Z69X0(>sD%88+G!#OKPzDv!(50OBP9Fi`kfn@ z>x^nGsa0g`z&t`UQZq45G!oheD@*ABBI1ZUGjtAuLvdF(WQ-xnE=<_yjqSNUY9SE< zS3Z^>3~7}m1sh@nX`G`C$cpxn^Q;}ld^9W-V=#!(l#Joar;N{tNyTZ2d`HSR_>JH> z@fhSxU4bW=fTc`Ei7+yJxCa9Y^Ja*exYl4O5x4@xzZpZ{Moot%X$cF4IZghu$EY&_ zWs{W%w8@H)-kFFo987^IJF3$S*mKG24flSyx%qtRtQ}J`pHK4%`>)pzw@=ynJJkNG z%$I5t_TPM+=f6__@9b${WdA|;t?pa?dw}~#l5ri_bK@m|RHb->Y64xrtBdZ(0miq>#p0Wddbg5j(OG?f&?X&^hrQZcp3v>A>1wZ%qA77rsgCP8&c^ubj% zgP1}Q2vuEu0O1Pd2C%dir&ZvyK$yahH5Ogeo&@2Nbjy~Z#nf#)lVs&+9Z->wrwE>N zZJdaDK-(e?DjUB(v!xPD{4$g9!5=)(sJp1}?`Nva@? zt*3E|MDYlI>N5hBtV!2)8=#dvwcc63YpEI0=&)g+@lpI2hDg06(~N^;5G)RwinD-m}e4Vvs94?s&31ylNvXAZ`%-DT}4%{hZY zj5#HC92P$U1y9X17~z5B1xUD(XeL!o4#=vkJq8>BS162+PK$)=TN9%Jw4<;~W(d5{ zV=ccIdtJ;DvYwO-Xi7KHp5>YXyY(LW2tLs!E|Z6`ji&~aqw-AahS=7l=}5o^IBZ=` zvYUqcEDR9aP*YQYng0n#!;69aQ~FF>HWT`v#%{HryAY+oPA*JE7iD=ex zd72f|tesm%*`RpSfvcVWxGP54Va*ZcyQN=R$uX&?0GD&zqDN?xyrFgz-4PE~WCv^6 z?>@MHbMwyKjrBXXH`newxV?chI3keS%cabe7WmS`%C;?)?;wkx-KXCaCA}^lv84ED z`O&VX7zAyaHZW{urI7`h#BMQy=1>=>zoK4%CGm2;ifBnJB~OC}g)lELT}FD=C= z3wmB*VqPOqk3`{B<_w|T#6OJf%(T@dR9fP_2sjA3b0EQVicb*(3Hg5(sef77A@YBr zggZaR^MCWzh1CB$7pgd_{BOW6M12s7>z5MA;QO?KH;3P7{Nq~?83WLLeKo}a;XkB|i*K&BOLHqV` z?QJ2kasdbm=D@XU58|lH*R5#}7!P-}6DLK1<9Y*KsC^_*v?~MiMz47210`U>h;W$o z>b23b|E_@a@ISK(5-+-eW1$$bcKk5RU!6+bxr6p=o3x=Vgf2dvx5Ft3`W725xOI?52HwV zPGaUwImRskgMpj{#Dp@L!uaEzRkp+!0K)=$J@}v(vx0+kVy%c{RhA@{_UO0_g9#X0 zJ`)F4R`(u#*Msa@W}j!&VH52;>+VV8ePEVLC%_Wo;IJH6qUo&etT|ni*kQesZD%u+ z1bRU4$);m-gLx0=DkL}*N2cRYG91=%*ie#wIs-sw9_il1%`q91*5f0LC}%@)lCt~( z9e5hfAQ+@jhZhl9vl(LzCpwBvj59lN04y#+v*wT>ve3m)_T1c@x~tQONf>k{13^XB z!ZNN%kw3osG?T?3j(JfZR2uYLo(cmoW;KOwg4U?S{Fo&a&dc>voFl2ix)>@yriR% zNRK9XWh_G)3#!=R;f8L&)tQ_XM)Qvh0DMuUB zln>S8X-TJY@nd;G5M&QoOO(lu#)_RO#)rh!8m)OQr?*Gr&5#8Koj`?tf#-)J;u-^i zGzhd(KE{dcswdhU|CLrv%}jw`8hZS4ibaxjf8E?I7#EiC>WA#tZ+u$2vV`Klag1Cmzz}o zUWB~HqhT6$J;QN`_F(AuptQ+FB!sRsy+P34+Qt!Plv-7A zZD)@Ee_4GQccA!c4XH@DgvbHI7N$97jcCRi=F;g<%84|S&6f*-0+)nk#pQHT=R|`V z5tsMEj~}4#$S4UkDPzIXb1!Hbyh*guh;`qh{a+>1s~NVaQ`b&JN}( z3eyppXpL0pgoBdbxYw-RG<^V)be1ssY4}9d+zgSI)+TJh!cW>Is+i8 zE?iv$CsqMLYqOYgN4$At$Qi>zYIF#AY?<}?4IWa*Jcu0dTqqLh=Gii5&2YChN_6A` z3fW{!F48=zm}H79XBl}NI2qakWAi)F8y?66s$ zK+X^sPvZT_zA!BGl2=W^mwqqNy|Pbto}JZydvuSni&)tR4slNs55RCh0iuEi{j%Q^ zv*AuILqvK9QEL&8Eupw%SKwTar;XT&t&1fHjv6ya0xw!;g7vsef}Ui<;z||Gu2Q0* zg@^%+J20`N{4U~5ATZ_`taDf~&HIs59+3jT$OHMBFHo<;TWube9y7nn=eV$u2ao6C 
z7A#`ZkrM`cwP!mL(z4~TL|>9SV`P|_FC;+cOBcaL2ti<{ir2CTa`-C`~g%$1+0Y=}Yk#t{;nE+PHsC2_`>igFsQv=Y168Z@yq#p(uz zaCD@c7Mv`4Sh}0TnT^2VjUY4+S&AyATCC+6o6T?sfD;*2D?zXbTV7d^(ej#q##i5~?e1!OnRciw)!XCIQh-qJF zBIBtTQ~{sbA;C&;^PM)bWmJ-kFgB-7(!M?x5^tb@!4h1M4QR_4d7|e~#w&2!I9(yv z*~KSH86mk=MGlW)N>d~U+MnXZ zGd_~NhyG`{RC*K%DgNSrcYS6nzO*qi|7)mKwi)k$Mr@NDWREs{gul3D6Qd3PQ*Y{* ze6KHS8#Nps_R+3}@qqldAt0_vo4BPJFxhOcA&Y`_^gk4VahstE$Ye6BIV^$tYfb#L zq*CEMlGK2!kR!bGjAyo;YcdZdz@%*CIYoFk6vZ?x=#|7mLOVS;StqjJ$Ryrl-eOrN zFv3}JcO3W$`SQ%0rr~Jf4Jzm^s{LIB(M=+bQ$A9e9s4s+G&ZSQ_Gw{gLRx4)0$2B< zs)Tnz;eKdwX^H&A0uh5?^U?B>z*KevqY+yu$Y6jJ@<7xQnUJSzV-?@b0 zKD9~4KT_>c1-smC5s83-wBR{{LKPVm$u4IG7j-F3X`S z8ydhNT5z_C7s;AX?vcwE4Q>;cud^bZ5q6BJgVBR_)gU}3l!@TF|uHnSkJh}Op$d{15Ip)oM z&BICKk9Fh&3vi3eLd*>JGMNtG3+b=>Sn*|%qAlgeIfzh#46n{ar=fTqggjZc)5LwS z+A4#IgJ$p|&1H8X4!lVMGo~eI@;Ffe;g7Rw@gW7=4ZvERv5ViO+KxJJ3~-@Q7{XXp zh|~QYKqLf&O*X}&)YD)LnEAHa@_nETQD1D|+MT!GMpe_>_utyQv3~3Na<&{lx_{@v zy)`_jWbWTwEiY%Qnao6tsaJV;lNULu$#@n@XbghCs&e z0T@!sd+0euLmki-SwNB%LcVl~w>{&;|8C$FV)B>`_fN`)IC;JMcL?;-Hm;xerwHgF zFpmJvjPj@hKc*{Od7uT(Sw4OWFwKt@>_WAai;aTYWB4YUfY3-+2O&3sQy}Vf zjiF73(l1@w3qu(0m}fRb|MN1nGy3gEDzQWKKVMJee{m!{{r#_V(f_0Ucb|)>KZ9Ah z!3X{RW3@iH|Fu#*wg1jwCw32s-(7*Q>x5DtN)e}1n)qPoAbR0$VkOfTQNIU6(I|21V08m38W%^Jg4{0d8HKupOSh(f4QO=OUVE(2u|}8j zS|a%Y1T!n6KM>1asAg~<{phE15GEZJq*IC0rGm4 zKYPNF{nxjQp3&|f$N!Jzf3;GA|CjFnIqyGD>7RiJI6nW^%9YgqKl{zc3}gI%MsdKC z>_4uAO8NgBC}3>=8|5?L{u9Z6UJ07+|EIA3NAm`b2N?OkR;wWoP^nem*x_q=0l)dd z=lrXbD@bJ06-()cX17zWVLe%rBLG=#3xycZev6ylkdOv3-xz@?SB`Z|K^{q-T2P;SO4eL`Tk$}KY#8UzxJ&^`PG$+fBMGX zJJAZ#PM-t=WAeYb_{zwCh=f%Bf0B(oV&BRAU$|Vh7oBRk)+$$>@_fB%JB?=LauWf( zaN-QW84|G4vWU;Fv$Z-4LBAO8z)ygobskN?_Ft^dZ? z|L^br%y!3>8RYJ{^1Y*nezZDo#WgvFvkD(!mA?x=gVpS|6FKba{nE7v0*nZmk|tTLfT($ z)h;iz7OI!)W&B%l?DC?s=q?@)v_s{8ZBqUt{*U}``ulIk!}Cd65AFZ0q5VH|`|WT4 zo3GFTX#MnSzjKm7(>|XW3<&@8UbHiWnEZcW{byxivi`fgfb%~o|DWNen4wYnFTcYP zU%#j8f75Euv&a0aF@x~GzRBmz06L8S%P966zyGzeP(}VP<^MC=lHtG)ap#u%n3rtF zzomeiaBvd`ZV(IkbD=nj%iHvDT^ufy!Z%)Po|$U!gqQg;94h^gVj#spih&dZDF#vu vq!>sskYXUkK#GAB11Sbl45S!HF_2;)#XyRI6ay&+QVgURNHOpVVBr4&FNt_7 diff --git a/tests-old/system-tests/GitRepository_test.py b/tests-old/system-tests/GitRepository_test.py deleted file mode 100644 index f2267bfc..00000000 --- a/tests-old/system-tests/GitRepository_test.py +++ /dev/null @@ -1,17 +0,0 @@ -############################################################################## -# (c) Crown copyright Met Office. All rights reserved. -# For further details please refer to the file COPYRIGHT -# which you should have received as part of this distribution -############################################################################## -from pathlib import Path -from common import CompareFileTrees, RunGrab - -TEST_PATH = Path('system-tests') / Path(__file__).name.split('_test.py')[0] - - -def test_grab(): - # TODO: I can't test with the Git protocol as for some reason the - # Git daemon isn't installed. 
-    command = RunGrab(TEST_PATH, 'git', 'file')
-    comparison = CompareFileTrees(command)
-    comparison.run()
diff --git a/tests-old/system-tests/SubversionRepository/expected/trunk/alpha b/tests-old/system-tests/SubversionRepository/expected/trunk/alpha
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests-old/system-tests/SubversionRepository/expected/trunk/beta/gamma b/tests-old/system-tests/SubversionRepository/expected/trunk/beta/gamma
deleted file mode 100644
index e69de29b..00000000
diff --git a/tests-old/system-tests/SubversionRepository/repo.tar b/tests-old/system-tests/SubversionRepository/repo.tar
deleted file mode 100644
index 9aec198a9edd1535cb2b78964593882d0fc5561b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 92160
zc>*#wEom{j4OX-mDd}QLfsIVDEP|>2LoZ@EA(Uby5O9jN6a(;OBH;*mf1aC2T7ID+ z5@R-zjjx#pGI|0d27I}M9>rYSTSY(adSgjmnC!3GCHPFoH?CMJ_L11&O%ReyN<7TU zcURJf-Gk{i7+80hCu@#O0#lb_5vQ}nh7d;*%d{a7B$lvH0_;Hg!!`OVv#4|f?Kd`l26K;C<>%}U6v9mXQ*gWKHR-P+tS z*9XB<)O+EI6WmL{xFdhm%MNaEQ36E3>@v)Z{~zohA|V%eAht-TQ}Tdp9MV~OMIpo^ z#f?}IplxYlYkvwH0<#&R$h=g41dgx?uF;hBTnwtRCMDSz+%5|YqaF_RfQWmt+Dn4u zYAV{&5^N6cYe6v^Mtja;C@jk#pzIQ07@am|I#!DE%+O~HG>C`X&mp#*%o=-r3?8!1 z5Vb|ur79OwLq(>nQ$XmHq*O->jvT{kOf@F<-VYCa%xTp`!QTD^vpoQRB1$gBQYJGt zG0Q3*7SU;4rZR!Forz-2eU^GcD-9t|`qP1u$u3ytGnBNOZ0AeL+D*rMeS)3InlnCz z^t0T>kh>Ui7enr1c>P=q(2tV5-FrOjV`_$?W~g$$heJ7zy}d-ehN{6P61>)-#6IwM z(f8w4c^XGrSF0@?3UAtPpkVKrBFjfHk`|AmRmQUUuK9%m)Q&y46U_2QUh$KS;D7`7XOHroG<%nwn>O#m zFiwW01cSxpEVs<#$lKl7-Q2ial>Npmt8^?-TuJ3Bc6kyV$}lhyCq6oN@H+xPM<$w@ z74=gs@@Rz9$#@E%hBrFIRzvneJjatBK34CMes6syuv4WU4SR&JVFc5g?MUiIfuNACbq zjnyMH*lJap87w-JO$?SEX|~uE$3CM>JdANVhX;(GNF)pOZGANNfDWb=SgY{4k5&*a z6Z0@&Ku0~^@gy9eDb;nqv{$m>NyZpd=_wX9a8N3hR&L-TWhlt^yhEr=q8HhAUfRGc zo~hPNR4Gm=`uQ8jTlil`Kd4(`^V5hg>}=lA-=LG>k|o3-pFH@+eN+YiQpObkbu3fK zOXMY<098Wq_nQQ4R(|mTCvelOz*X=jfTKHP*?v?muly311a%5wmp#-HBT|JfjMKry z^rLXdhz1+4L50XwVr27oU=m>Vz46!|-7Exs^AV<(PJP+jGRw>6@!Qn@ zMsQbHgS;Qu(TOZuCr5DNHSi`d?NgN$g9|2KWN4cqe$4m}FuL>##OC`eu%z}7`a%0M z8ICRUhMbDH7BoVK%eN%%ce8*9PiU4mqbLM|-{dv`F$`553YwG@-~3Z4CjNA3*;Ie- zr)%nx$gco8);j+De=+C(nX3`Im@jL;x#>UzD%}P8UTo1Nkv5K-fha1+l2%tnkVs%jqUqe_rHD-912et#u%X7cYjr&?T;%3)A>yn%M zE_cr5&N-)YAXO2DY~I@(ZkL#mpuD&FYth?0_073jdv(sul*wCGDSGe- zxtGKrWD|oEw~%!Ty^UOwf_1FirV>QO0+M*fq@kXqIu?`^gg9-EF*VaD+z&dqL7;~O zm{;1*GH3tW8qJHb`<(@Qn*CqLWslW*tKF>P$!c|}Qf;;x`TegBeV6+05&M5(`9ExD zTVW;!04V%yIUq^V`OC-x(HVZ`^FVYNXUGJx$W@akX(|Ah`$y>HCIv&z2`?!q)P0}D;u;xu{G1AkgB2z~5}s#|0>&J<%~gmW9-mLOlZ zoCr(h(E46NL-2&}U$49f#qq2eW1`%`8%=s8BEQ4$Ik<~E#dz?YfNKy@+CW4HuM zFk}D66ZG@Hj1C~D*#GTnqc&&%w`$e={;x~e?V})P?EjtJjqSx3Kxb*=NX$P#kwGs& z50(wx9)CfdjwuZi_5Z#w(A8z zNJ9=F9DOpp4liz|pObQeZ6vVhMSpB7J8cy3w%n=7CL%b_Q3Yp*rdQ&`ORjZarSXN{ zk_~qQhQ#X_^*+F%Ndz1ZGxKMwjv?>-2iUV^ms26Zr)T1DCoNb*D93SJ98_jpmi9zZo2nEL0a-^Ec2 z`1Sro{6F-r#pO#w#3}wCem&>^skNH$|1>XcMj60Usu;#B4fnkv?1;n6S2I77o8p;I zJ{P}a_P^Ibn~%M?L~6LuwQ+-6jRyL_)Ec!)i~V4#xZzRy!tlFVb^iXxW+l)6xzH^* z7TB)yW{Lj<1~uqhVl^{PlzQnodrZ<{7nKT0*GHyyNDY9`YujNmM*hdsAOZkp@Z+Sy z%^*mFg9`~AHDS#cW`NcdsP9GSFbc58&N2Vu@-Q&N{}<2tGI8?w`S`z4MN6ps{@2Uf z_5^ay{|PGcA94)j7|1b@V<5*sj)5ElIR* z$3TvO90NH9at!1c$T5&(Ajd$CfgA%l267DK7|1b@V<5*sj)5ElIR TestParameters: - return self._parameters - - @abstractmethod - def description(self) -> str: - raise NotImplementedError("Abstract methods must be implemented.") - - @property - def debug_output(self) -> Optional[List[str]]: - return self._debug_output - - @debug_output.setter - def debug_output(self, additional_line: str): - if self._debug_output is None: - self._debug_output = [] - self._debug_output.append(additional_line) - - def set_up(self): - """ - Called prior to the run. - """ - pass - - def execute(self): - """ - Runs the command and changes state to reflect results. 
- """ - thread: subprocess.Popen = subprocess.Popen(self._command, - env=self._environment, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout: bytes - stderr: bytes - stdout, stderr = thread.communicate() - self.return_code = thread.returncode - self.standard_out = stdout.decode('utf-8') - self.standard_error = stderr.decode('utf-8') - - if self.return_code != 0: - self._debug_output = ['Running command failed:'] - command = ' '.join(self._command) - self._debug_output.append(f' command: {command}') - self._debug_output.append(' stdout: ' + self.standard_out) - self._debug_output.append(' stderr: ' + self.standard_error) - - def tear_down(self): - """ - Called following the run. - """ - pass - - -class EnterPython(RunCommand, metaclass=ABCMeta): - """ - Run a Python entry point. - """ - def __init__(self, - tag: str, - test_directory: Path, - module: str, - args: Sequence[str] = (), - working_dir=True): - parameters = TestParameters(test_directory, tag) - - script = f'import sys; import fab.{module}; ' \ - f'sys.exit(fab.{module}.entry())' - command = ['python3', '-c', script] - if working_dir: - command.extend(['-w', str(parameters.work_directory)]) - command.extend(args) - - system_path = os.environ.get('PATH') or '' - user_path: List[str] = system_path.split(':') - try: - while True: - user_path.remove('') - except ValueError: - pass # No empty entries to be removed. - user_path.append(os.path.dirname(sys.executable)) - - environment = {'PATH': ':'.join(user_path), - 'PYTHONPATH': 'source'} - - super().__init__(parameters, command, environment) - self._working_dir = working_dir - - -class RunExec(RunCommand): - """ - Run an executable produced by fab. - """ - def __init__(self, test_directory: Path): - parameters = TestParameters(test_directory, 'exec') - args: List[str] = [] - executable = test_directory / 'working' / 'fab_test' - command = [str(executable)] + list(args) - super().__init__(parameters, command, {}) - - def description(self) -> str: - return f"{self.test_parameters.test_directory.stem} - Executing" - - -class RunFab(EnterPython): - """ - Run Fab build tool against a source tree. - """ - def __init__(self, - test_directory: Path, - target: str, - fpp_flags: str = None, - fc_flags: str = None, - ld_flags: str = None): - args: List[str] = [] - - if fpp_flags: - # different config file name for fpp flag test - self.conf_file = test_directory/('stay_config.ini') - else: - self.conf_file = test_directory/'config.ini' - - args.append(str(test_directory)) - args.append(str(self.conf_file)) - - with open(self.conf_file, 'wt') as configfile: - configfile.write('[settings] \n' - 'target = {}\n' - 'exec-name = fab_test \n' - '[flags] \n'.format(target)) - if fpp_flags: - configfile.write('fpp-flags = {}\n'.format(fpp_flags)) - else: - configfile.write('fpp-flags = ' + '\n') - if fc_flags: - configfile.write('fc-flags = {}\n'.format(fc_flags)) - else: - configfile.write('fc-flags = ' + '\n') - if ld_flags: - configfile.write('ld-flags = {}\n'.format(ld_flags)) - else: - configfile.write('ld-flags = ' + '\n') - super().__init__('fab', test_directory, 'builder', args) - - def description(self) -> str: - return f"{self.test_parameters.test_directory.stem} - Building" - - def set_up(self): - """ - Ensure there's no working directory left over from previous runs. - """ - if self.test_parameters.work_directory.is_dir(): - shutil.rmtree(self.test_parameters.work_directory) - - def tear_down(self): - """ - Clean up config files following the run. 
- """ - self.conf_file.unlink() - - -class RunDump(EnterPython): - """ - Run Fab dump tool against working directory. - """ - def __init__(self, test_directory: Path): - super().__init__('dump', test_directory, 'dumper') - - def description(self) -> str: - return f"{self.test_parameters.test_directory.stem} - Dumping" - - def teardown(self): - if self.test_parameters.work_directory.is_dir(): - shutil.rmtree(str(self.test_parameters.work_directory)) - - def tear_down(self): - """ - Tidy up now we're finished with the working directroy. - """ - shutil.rmtree(self.test_parameters.work_directory) - - -class RunGrab(EnterPython): - """ - Run Fab grab tool against a repository. - """ - def __init__(self, test_directory: Path, repo: str, protocol: str): - self._scheme = f"{repo}+{protocol}" - self._repo_path = test_directory.absolute() / "repo" - self._server: Optional[subprocess.Popen] = None - - if protocol == 'http': - # TODO: This scheme is included for completeness. Currently there - # is no obvious way to test this without an Apache server - # which is way too much to consider at the moment. - # repo_url = f'http://localhost/repo' - message = "Unable to test Fetch over HTTP protocol." - raise NotImplementedError(message) - - repo_url = f'{self._scheme}://' - if protocol == 'file': - repo_url += f'//{self._repo_path}' - # HTTP would be included here as well if we were able to test it. - elif protocol in ['git', 'svn']: - repo_url += 'localhost/' - else: - message = f"Unrecognised URL scheme '{self._scheme}'" - raise Exception(message) - - super().__init__('grab', - test_directory, - 'grabber', - [str(test_directory / 'working'), repo_url], - working_dir=False) - - def description(self) -> str: - name = self.test_parameters.test_directory.stem - return f"{name} - Grabbing with {self._scheme}" - - def set_up(self): - if self._repo_path.is_dir(): - shutil.rmtree(self._repo_path) - archiver = TarFile(self._repo_path.with_suffix('.tar')) - archiver.extractall(self._repo_path.parent) - - if self.test_parameters.work_directory.is_dir(): - shutil.rmtree(self.test_parameters.work_directory) - - if self._scheme.endswith('+git'): - # TODO: We would start the daemon here - raise NotImplementedError("Git protocol not supported") - elif self._scheme.endswith('+svn'): - command: List[str] = ['svnserve', '--root', str(self._repo_path), - '-X', '--foreground'] - self._server = subprocess.Popen(command) - - def tear_down(self): - shutil.rmtree(self.test_parameters.work_directory) - - if self._scheme.endswith('+git'): - # TODO: We would kill the daemon here - raise NotImplementedError("Git protocol not supported") - elif self._scheme.endswith('+svn'): - self._server.wait(timeout=1) - if self._server.returncode != 0: - message = f"Trouble with svnserve: {self._server.stderr}" - self.debug_output = message - - if self._repo_path.is_dir(): - shutil.rmtree(self._repo_path) - - -class CheckTask(ABC): - """ - Abstract parent of all checking test cases. - """ - def __init__(self, task: RunCommand, name: str): - self._name = name - self._task = task - - @property - def task(self): - return self._task - - def run(self): - self._task.set_up() - self._task.execute() - # - # We print this out for debug purposes. If a test fails this output - # should be visible. 
- # - if self._task.debug_output is not None: - print('\n'.join(self._task.debug_output)) - self.check() - self.task.tear_down() - - @abstractmethod - def check(self): - raise NotImplementedError("Abstract methods must be implemented.") - - -class CompareConsoleWithFile(CheckTask): - """ - Checks console output against expected result. - - The expected result is held in a file "expected.[.].txt. - Where "tag" comes from the task and "suffix" is specified. - """ - def __init__(self, task: RunCommand, expectation_suffix=None): - super().__init__(task, name=task.description()) - leaf_name = f'expected.{task.test_parameters.tag}' - if expectation_suffix is not None: - leaf_name = leaf_name + '.' + expectation_suffix - leaf_name = leaf_name + '.txt' - path = task.test_parameters.test_directory / leaf_name - self._expected = path.read_text() - - def check(self): - assert self.task.return_code == 0 - lines = self.task.standard_out - assert lines == self._expected - - -class CompareFileTrees(CheckTask): - """ - Checks filetree against expected result. - - The test tree is the tasks working directory and the expected result - is in "expected". - """ - def __init__(self, task: RunCommand): - super().__init__(task, name=task.description()) - self._expected = task.test_parameters.test_directory / 'expected' - - def check(self): - first = self.task.test_parameters.work_directory - second = self._expected - tree_comparison = filecmp.dircmp(first, second) - assert len(tree_comparison.left_only) == 0 - assert len(tree_comparison.right_only) == 0 - _, mismatch, errors = filecmp.cmpfiles(first, second, - tree_comparison.common_files, - shallow=False) - assert len(mismatch) == 0 - assert len(errors) == 0 diff --git a/tests-old/unit-tests/repository_test.py b/tests-old/unit-tests/repository_test.py deleted file mode 100644 index d9f878a3..00000000 --- a/tests-old/unit-tests/repository_test.py +++ /dev/null @@ -1,262 +0,0 @@ -############################################################################## -# (c) Crown copyright Met Office. All rights reserved. -# For further details please refer to the file COPYRIGHT -# which you should have received as part of this distribution -############################################################################## -""" -Exercise the 'repository' module. -""" -import filecmp -from pathlib import Path -from subprocess import run, Popen -import shutil -import signal -import time -from typing import List, Tuple - -from pytest import fixture, mark, raises # type: ignore -from _pytest.tmpdir import TempPathFactory # type: ignore - -from fab import FabException -from fab.repository import repository_from_url, GitRepo, SubversionRepo - - -def _tree_compare(first: Path, second: Path) -> None: - """ - Compare two file trees to ensure they are identical. - """ - tree_comparison = filecmp.dircmp(str(first), str(second)) - assert len(tree_comparison.left_only) == 0 \ - and len(tree_comparison.right_only) == 0 - _, mismatch, errors = filecmp.cmpfiles(str(first), str(second), - tree_comparison.common_files, - shallow=False) - assert len(mismatch) == 0 and len(errors) == 0 - - -class TestSubversion: - """ - Tests of the Subversion repository interface. - """ - @fixture(scope='class') - def repo(self, tmp_path_factory: TempPathFactory) -> Tuple[Path, Path]: - """ - Set up a repository and return its path along with the path of the - original file tree. 
- """ - repo_path = tmp_path_factory.mktemp('repo', numbered=True) - command = ['svnadmin', 'create', str(repo_path)] - assert run(command).returncode == 0 - tree_path = tmp_path_factory.mktemp('tree', numbered=True) - (tree_path / 'alpha').write_text("First file") - (tree_path / 'beta').mkdir() - (tree_path / 'beta' / 'gamma').write_text("Second file") - command = ['svn', 'import', '-m', "Initial import", - str(tree_path), f'file://{repo_path}/trunk'] - assert run(command).returncode == 0 - return repo_path, tree_path - - def test_extract_from_file(self, repo: Tuple[Path, Path], tmp_path: Path): - """ - Checks that a source tree can be extracted from a Subversion - repository stored on disc. - """ - test_unit = SubversionRepo(f'file://{repo[0]}/trunk') - test_unit.extract(tmp_path) - _tree_compare(repo[1], tmp_path) - assert not (tmp_path / '.svn').exists() - - def test_extract_from_svn(self, repo: Tuple[Path, Path], tmp_path: Path): - """ - Checks that a source tree can be extracted from a Subversion - repository accessed through its own protocol. - """ - command: List[str] = ['svnserve', '-r', str(repo[0]), '-X'] - process = Popen(command) - - test_unit = SubversionRepo('svn://localhost/trunk') - # - # It seems there can be a delay between the server starting and the - # listen socket opening. Thus we have a number of retries. - # - # TODO: Is there a better solution such that we don't try to connect - # until the socket is open? - # - for retry in range(3, 0, -1): - try: - test_unit.extract(tmp_path) - except FabException as ex: - if range == 0: - raise ex - time.sleep(1.0) - else: - break - _tree_compare(repo[1], tmp_path) - assert not (tmp_path / '.svn').exists() - - process.wait(timeout=1) - assert process.returncode == 0 - - @mark.skip(reason="Too hard to test at the moment.") - def test_extract_from_http(self, repo: Tuple[Path, Path], tmp_path: Path): - """ - Checks that a source tree can be extracted from a Subversion - repository accessed through HTTP. - - TODO: This is hard to test without a full Apache installation. For the - moment we forgo the test on the basis that it's too hard. - """ - pass - - -class TestGit: - """ - Tests of the Git repository interface. - """ - @fixture(scope='class') - def repo(self, tmp_path_factory: TempPathFactory) -> Tuple[Path, Path]: - """ - Set up a repository and return its path along with the path of the - original file tree. - """ - tree_path = tmp_path_factory.mktemp('tree', numbered=True) - (tree_path / 'alpha').write_text("First file") - (tree_path / 'beta').mkdir() - (tree_path / 'beta' / 'gamma').write_text("Second file") - - repo_path = tmp_path_factory.mktemp('repo', numbered=True) - command = ['git', 'init', str(repo_path)] - assert run(command).returncode == 0 - # - # We have to configure this information or the forthcoming commands - # will fail. 
- # - command = ['git', 'config', 'user.name', 'Testing Tester Tests'] - assert run(command, cwd=str(repo_path)).returncode == 0 - command = ['git', 'config', 'user.email', 'tester@example.com'] - assert run(command, cwd=str(repo_path)).returncode == 0 - - for file_object in tree_path.glob('*'): - if file_object.is_dir(): - shutil.copytree(str(file_object), - str(repo_path / file_object.name)) - else: - shutil.copy(str(file_object), - str(repo_path / file_object.name)) - command = ['git', 'add', '-A'] - assert run(command, cwd=str(repo_path)).returncode == 0 - command = ['git', 'commit', '-m', "Initial import"] - assert run(command, cwd=str(repo_path)).returncode == 0 - return repo_path.absolute(), tree_path.absolute() - - def test_extract_from_file(self, repo: Tuple[Path, Path], tmp_path: Path): - """ - Tests that a source tree can be extracted from a local repository. - """ - test_unit = GitRepo(f'file://{repo[0]}') - test_unit.extract(tmp_path) - _tree_compare(repo[1], tmp_path) - assert not (tmp_path / '.git').exists() - - def test_missing_repo(self, tmp_path: Path): - """ - Tests that an error is returned if the repository is not there. - """ - fake_repo = tmp_path / "nonsuch.repo" - fake_repo.mkdir() - test_unit = GitRepo(f'file://{fake_repo}') - with raises(FabException) as ex: - test_unit.extract(tmp_path / 'working') - expected = "Fault exporting tree from Git repository:" - assert str(ex.value).startswith(expected) - - @mark.skip(reason="The daemon doesn't seem to be installed.") - def test_extract_from_git(self, repo: Tuple[Path, Path], tmp_path: Path): - """ - Checks that a source tree can be extracted from a Git repository - accessed through its own protocol. - """ - command: List[str] = ['git', 'daemon', '--reuseaddr', - '--base-path='+str(repo[0].parent), - str(repo[0])] - process = Popen(command) - - test_unit = GitRepo('git://localhost/'+repo[0].name) - test_unit.extract(tmp_path) - _tree_compare(repo[1], tmp_path) - assert not (tmp_path / '.git').exists() - - process.send_signal(signal.SIGTERM) - process.wait(timeout=2) - assert process.returncode == -15 - - @mark.skip(reason="Too hard to test at the moment.") - def test_extract_from_http(self, repo: Tuple[Path, Path], tmp_path: Path): - """ - Checks that a source tree can be extracted from a Git repository - accessed through HTTP. - - TODO: This is hard to test without a full Apache installation. For the - moment we forgo the test on the basis that it's too hard. - """ - pass - - -class TestRepoFromURL: - """ - Tests that a URL can be converted into the correct Repository object. 
- """ - @fixture(scope='class', - params=[ - {'access_url': 'git://example.com/git', - 'repo_class': GitRepo, - 'repo_url': 'git://example.com/git'}, - {'access_url': 'git+file:///tmp/git', - 'repo_class': GitRepo, - 'repo_url': 'file:///tmp/git'}, - {'access_url': 'git+git://example.com/git', - 'repo_class': GitRepo, - 'repo_url': 'git://example.com/git'}, - {'access_url': 'git+http://example.com/git', - 'repo_class': GitRepo, - 'repo_url': 'http://example.com/git'}, - {'access_url': 'svn://example.com/svn', - 'repo_class': SubversionRepo, - 'repo_url': 'svn://example.com/svn'}, - {'access_url': 'svn+file:///tmp/svn', - 'repo_class': SubversionRepo, - 'repo_url': 'file:///tmp/svn'}, - {'access_url': 'svn+http://example.com/svn', - 'repo_class': SubversionRepo, - 'repo_url': 'http://example.com/svn'}, - {'access_url': 'svn+svn://example.com/svn', - 'repo_class': SubversionRepo, - 'repo_url': 'svn://example.com/svn'}, - {'access_url': 'file:///tmp/repo', - 'repo_class': FabException, - 'exception': "Unrecognised repository scheme: file+file"}, - {'access_url': 'http://example.com/repo', - 'repo_class': FabException, - 'exception': "Unrecognised repository scheme: http+http"}, - {'access_url': 'foo+file:///tmp/foo', - 'repo_class': FabException, - 'exception': "Unrecognised repository scheme: foo+file"} - ]) - def cases(self, request): - """ - Generates a set of test cases. - """ - yield request.param - - def test_action(self, cases): - """ - Checks that each URL creates an appropriate Repository object. - """ - if issubclass(cases['repo_class'], Exception): - with raises(cases['repo_class']) as ex: - _ = repository_from_url(cases['access_url']) - assert ex.value.args[0] == cases['exception'] - else: - repo = repository_from_url(cases['access_url']) - assert isinstance(repo, cases['repo_class']) - assert repo.url == cases['repo_url'] diff --git a/tests-old/unit-tests/tasks/__init__.py b/tests-old/unit-tests/tasks/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests-old/unit-tests/tasks/c_test.py b/tests-old/unit-tests/tasks/c_test.py deleted file mode 100644 index 3d188af6..00000000 --- a/tests-old/unit-tests/tasks/c_test.py +++ /dev/null @@ -1,64 +0,0 @@ -############################################################################## -# (c) Crown copyright Met Office. All rights reserved. 
-# For further details please refer to the file COPYRIGHT -# which you should have received as part of this distribution -############################################################################## -from pathlib import Path -from textwrap import dedent - - -class TestCPragmaInjector: - def test_run(self, tmp_path): - workspace = tmp_path / 'working' - workspace.mkdir() - - test_file: Path = tmp_path / 'test.c' - test_file.write_text( - dedent(''' - #include "user_include.h" - Unrelated text - #include 'another_user_include.h' - #include - More unrelated text - #include - ''')) - test_artifact = Artifact(test_file, - CSource, - HeadersAnalysed) - test_artifact.add_dependency('foo') - - # Run the Injector - injector = CPragmaInjector(workspace) - artifacts_out = injector.run([test_artifact]) - - assert len(artifacts_out) == 1 - assert artifacts_out[0].location == workspace / 'test.c' - assert artifacts_out[0].filetype is CSource - assert artifacts_out[0].state is Modified - assert artifacts_out[0].depends_on == ['foo'] - assert artifacts_out[0].defines == [] - - new_file = workspace / 'test.c' - assert new_file.exists() - with new_file.open('r') as fh: - new_text = fh.read() - - expected_text = ( - dedent(''' - #pragma FAB UsrIncludeStart - #include "user_include.h" - #pragma FAB UsrIncludeEnd - Unrelated text - #pragma FAB UsrIncludeStart - #include 'another_user_include.h' - #pragma FAB UsrIncludeEnd - #pragma FAB SysIncludeStart - #include - #pragma FAB SysIncludeEnd - More unrelated text - #pragma FAB SysIncludeStart - #include - #pragma FAB SysIncludeEnd - ''')) - - assert new_text == expected_text diff --git a/tests-old/unit-tests/tasks/common_test.py b/tests-old/unit-tests/tasks/common_test.py deleted file mode 100644 index 1ab27b9e..00000000 --- a/tests-old/unit-tests/tasks/common_test.py +++ /dev/null @@ -1,58 +0,0 @@ -############################################################################## -# (c) Crown copyright Met Office. All rights reserved. 
-# For further details please refer to the file COPYRIGHT -# which you should have received as part of this distribution -############################################################################## -from pathlib import Path -from textwrap import dedent - -from fab.tasks.common import Linker, HeaderAnalyser -from fab.artifact import \ - Artifact, \ - New, \ - Unknown, \ - Executable, \ - Linked, \ - HeadersAnalysed - - -class TestLinker: - def test_run(self, mocker, tmp_path: Path): - # Instantiate Linker - workspace = Path(tmp_path) - linker = Linker('foo', - ['--bar', '--baz'], - workspace, - 'qux') - - # Create artifacts (object files for linking) - file1 = '/path/to/file.1' - file2 = '/path/to/file.2' - artifacts = [Artifact(Path(file1), - Unknown, - New), - Artifact(Path(file2), - Unknown, - New)] - - # Monkeypatch the subprocess call out and run linker - patched_run = mocker.patch('subprocess.run') - artifacts_out = linker.run(artifacts) - - # Check that the subprocess call contained the command - # that we would expect based on the above - expected_command = ['foo', - '-o', - str(workspace / 'qux'), - file1, - file2, - '--bar', - '--baz'] - patched_run.assert_called_once_with(expected_command, - check=True) - assert len(artifacts_out) == 1 - assert artifacts_out[0].location == workspace / 'qux' - assert artifacts_out[0].filetype is Executable - assert artifacts_out[0].state is Linked - assert artifacts_out[0].depends_on == [] - assert artifacts_out[0].defines == [] diff --git a/tests/unit_tests/parse/c/test_c_analyser.py b/tests/unit_tests/parse/c/test_c_analyser.py index b4f84c94..c288baf9 100644 --- a/tests/unit_tests/parse/c/test_c_analyser.py +++ b/tests/unit_tests/parse/c/test_c_analyser.py @@ -7,12 +7,14 @@ from unittest import mock from unittest.mock import Mock -import clang # type: ignore +from pytest import importorskip from fab.build_config import BuildConfig from fab.parse.c import CAnalyser, AnalysedC from fab.tools import ToolBox +clang = importorskip('clang') + def test_simple_result(tmp_path): c_analyser = CAnalyser() diff --git a/tests/unit_tests/steps/test_c_pragma_injector.py b/tests/unit_tests/steps/test_c_pragma_injector.py index e75e9f19..666bae1b 100644 --- a/tests/unit_tests/steps/test_c_pragma_injector.py +++ b/tests/unit_tests/steps/test_c_pragma_injector.py @@ -1,4 +1,5 @@ -import sys +from sys import version_info as python_version +from textwrap import dedent from unittest import mock from unittest.mock import mock_open @@ -9,32 +10,52 @@ class Test_inject_pragmas(object): - @pytest.mark.skipif(sys.version_info < (3, 8), reason="requires python3.8 or higher for mock_open iteration") + @pytest.mark.skipif(python_version < (3, 8), + reason="Requires python version 3.8 or higher for " + "mock_open iteration") def test_vanilla(self): - input = [ - '', - '// hi there, ignore me', - '', - '#include ', - '', - '#include "bar.h"', - '', - ] - data = "\n".join(input) + source = dedent( + """ + // C++ style comment, ignore this. 
+ #include "user_include.h" + #include "second_user_include.h" + Unrelated text + /* Including C style comment */ + #include 'another_user_include.h' + #include + More unrelated text + #include + #include "final_user_include.h" + """ + ) - with mock.patch('fab.steps.c_pragma_injector.open', mock_open(read_data=data)): + with mock.patch('fab.steps.c_pragma_injector.open', + mock_open(read_data=source)): result = inject_pragmas(fpath="foo") output = list(result) assert output == [ '\n', - '// hi there, ignore me\n', - '\n', + '// C++ style comment, ignore this.\n', + '#pragma FAB UsrIncludeStart\n', + '#include "user_include.h"\n', + '#pragma FAB UsrIncludeEnd\n', + '#pragma FAB UsrIncludeStart\n', + '#include "second_user_include.h"\n', + '#pragma FAB UsrIncludeEnd\n', + 'Unrelated text\n', + '/* Including C style comment */\n', + '#pragma FAB UsrIncludeStart\n', + "#include 'another_user_include.h'\n", + '#pragma FAB UsrIncludeEnd\n', '#pragma FAB SysIncludeStart\n', - '#include \n', + '#include \n', + '#pragma FAB SysIncludeEnd\n', + "More unrelated text\n", + '#pragma FAB SysIncludeStart\n', + '#include \n', '#pragma FAB SysIncludeEnd\n', - '\n', '#pragma FAB UsrIncludeStart\n', - '#include "bar.h"\n', - '#pragma FAB UsrIncludeEnd\n', + '#include "final_user_include.h"\n', + '#pragma FAB UsrIncludeEnd\n' ] diff --git a/tests/unit_tests/tools/test_versioning.py b/tests/unit_tests/tools/test_versioning.py index a3b21896..fb825000 100644 --- a/tests/unit_tests/tools/test_versioning.py +++ b/tests/unit_tests/tools/test_versioning.py @@ -3,28 +3,26 @@ # For further details please refer to the file COPYRIGHT # which you should have received as part of this distribution ############################################################################## - -'''Tests the compiler implementation. -''' - +""" +Tests version control interfaces. +""" +from filecmp import cmpfiles, dircmp +from pathlib import Path +from shutil import which from unittest import mock +from subprocess import Popen, run +from time import sleep +from typing import List, Tuple -import pytest +from pytest import TempPathFactory, fixture, mark, raises -from fab.tools import Category, Fcm, Git, Subversion, Versioning +from fab.tools import Category, Fcm, Git, Subversion class TestGit: - '''Contains all git related tests.''' - - def test_versioning_constructor(self): - '''Test the versioning constructor.''' - versioning = Versioning("versioning", "versioning.exe", Category.GIT) - assert versioning.category == Category.GIT - assert versioning.name == "versioning" - assert versioning.flags == [] - assert versioning.exec_name == "versioning.exe" - + """ + Tests of the Git repository interface. 
+ """ def test_git_constructor(self): '''Test the git constructor.''' git = Git() @@ -116,7 +114,7 @@ def test_git_fetch(self): with mock.patch.object(git, "run", side_effect=RuntimeError("ERR")) as run: - with pytest.raises(RuntimeError) as err: + with raises(RuntimeError) as err: git.fetch("/src", "/dst", revision="revision") assert "ERR" in str(err.value) run.assert_called_once_with(['fetch', "/src", "revision"], cwd="/dst", @@ -143,7 +141,7 @@ def test_git_checkout(self): with mock.patch.object(git, "run", side_effect=RuntimeError("ERR")) as run: - with pytest.raises(RuntimeError) as err: + with raises(RuntimeError) as err: git.checkout("/src", "/dst", revision="revision") assert "ERR" in str(err.value) run.assert_called_with(['fetch', "/src", "revision"], cwd="/dst", @@ -173,7 +171,7 @@ def raise_1st_time(): with mock.patch.object(git, "run", side_effect=raise_1st_time()) as run: - with pytest.raises(RuntimeError) as err: + with raises(RuntimeError) as err: git.merge("/dst", revision="revision") assert "Error merging revision. Merge aborted." in str(err.value) run.assert_any_call(['merge', "FETCH_HEAD"], cwd="/dst", @@ -184,7 +182,7 @@ def raise_1st_time(): # Test behaviour if both merge and merge --abort fail with mock.patch.object(git, "run", side_effect=RuntimeError("ERR")) as run: - with pytest.raises(RuntimeError) as err: + with raises(RuntimeError) as err: git.merge("/dst", revision="revision") assert "ERR" in str(err.value) run.assert_called_with(['merge', "--abort"], cwd="/dst", @@ -192,22 +190,27 @@ def raise_1st_time(): # ============================================================================ -class TestSvn: - '''Contains all svn related tests.''' - +class TestSubversion: + """ + Tests the Subversion interface. + """ def test_svn_constructor(self): - '''Test the git constructor.''' + """ + Test the git constructor. + """ svn = Subversion() assert svn.category == Category.SUBVERSION assert svn.flags == [] - assert svn.name == "subversion" + assert svn.name == "Subversion" assert svn.exec_name == "svn" def test_svn_export(self): - '''Check export svn functionality. The tests here will actually - mock the git results, so they will work even if subversion is not - installed. The system_tests will test an actual check out etc. ''' + """ + Ensures an export from repository works. + Subversion is mocked here to allow testing without the executable. + Testing with happens below in TestSubversionReal. + """ svn = Subversion() mock_result = mock.Mock(returncode=0) with mock.patch('fab.tools.tool.subprocess.run', @@ -282,14 +285,107 @@ def test_svn_merge(self): env=None, cwd="/dst", capture_output=True, check=False) +def _tree_compare(first: Path, second: Path) -> None: + """ + Compare two file trees to ensure they are identical. + """ + tree_comparison = dircmp(str(first), str(second)) + assert len(tree_comparison.left_only) == 0 \ + and len(tree_comparison.right_only) == 0 + _, mismatch, errors = cmpfiles(str(first), str(second), + tree_comparison.common_files, + shallow=False) + assert len(mismatch) == 0 and len(errors) == 0 + + +@mark.skipif(which('svn') is None, + reason="No Subversion executable found on path.") +class TestSubversionReal: + """ + Tests the Subversion interface against a real executable. + """ + @fixture(scope='class') + def repo(self, tmp_path_factory: TempPathFactory) -> Tuple[Path, Path]: + """ + Set up a repository and return its path along with the path of the + original file tree. 
+ """ + repo_path = tmp_path_factory.mktemp('repo', numbered=True) + command = ['svnadmin', 'create', str(repo_path)] + assert run(command).returncode == 0 + tree_path = tmp_path_factory.mktemp('tree', numbered=True) + (tree_path / 'alpha').write_text("First file") + (tree_path / 'beta').mkdir() + (tree_path / 'beta' / 'gamma').write_text("Second file") + command = ['svn', 'import', '-m', "Initial import", + str(tree_path), f'file://{repo_path}/trunk'] + assert run(command).returncode == 0 + return repo_path, tree_path + + def test_extract_from_file(self, repo: Tuple[Path, Path], tmp_path: Path): + """ + Checks that a source tree can be extracted from a Subversion + repository stored on disc. + """ + test_unit = Subversion() + test_unit.export(f'file://{repo[0]}/trunk', tmp_path) + _tree_compare(repo[1], tmp_path) + assert not (tmp_path / '.svn').exists() + + def test_extract_from_svn(self, repo: Tuple[Path, Path], tmp_path: Path): + """ + Checks that a source tree can be extracted from a Subversion + repository accessed through its own protocol. + """ + command: List[str] = ['svnserve', '-r', str(repo[0]), '-X'] + process = Popen(command) + + test_unit = Subversion() + # + # It seems there can be a delay between the server starting and the + # listen socket opening. Thus we have a number of retries. + # + # TODO: Is there a better solution such that we don't try to connect + # until the socket is open? + # + for retry in range(3, 0, -1): + try: + test_unit.export('svn://localhost/trunk', tmp_path) + except Exception as ex: + if range == 0: + raise ex + sleep(1.0) + else: + break + _tree_compare(repo[1], tmp_path) + assert not (tmp_path / '.svn').exists() + + process.wait(timeout=1) + assert process.returncode == 0 + + @mark.skip(reason="Too hard to test at the moment.") + def test_extract_from_http(self, repo: Tuple[Path, Path], tmp_path: Path): + """ + Checks that a source tree can be extracted from a Subversion + repository accessed through HTTP. + + TODO: This is hard to test without a full Apache installation. For the + moment we forgo the test on the basis that it's too hard. + """ + pass + + # ============================================================================ class TestFcm: - '''Contains all FCM related tests.''' - + """ + Tests the FCM interface task. + """ def test_fcm_constructor(self): - '''Test the fcb constructor.''' + """ + Tests this constructor. + """ fcm = Fcm() assert fcm.category == Category.FCM assert fcm.flags == [] - assert fcm.name == "fcm" + assert fcm.name == "FCM" assert fcm.exec_name == "fcm"
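
Note on the retry loop in test_extract_from_svn above: both the removed
system test and the new TestSubversionReal test work around the delay
between svnserve starting and its listen socket opening by retrying the
export. One possible answer to the TODO is to poll the port before
attempting the export. The sketch below is illustrative only, is not part
of this patch, and assumes svnserve is listening on the default Subversion
port (3690) on localhost.

    import socket
    import time


    def wait_for_port(host: str = "localhost", port: int = 3690,
                      timeout: float = 5.0) -> None:
        # Block until a TCP server accepts connections, or give up after
        # `timeout` seconds.
        deadline = time.monotonic() + timeout
        while True:
            try:
                with socket.create_connection((host, port), timeout=0.5):
                    return  # Server is listening; safe to run the export.
            except OSError:
                if time.monotonic() > deadline:
                    raise TimeoutError(f"{host}:{port} not reachable")
                time.sleep(0.1)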