From b89a788154381d770e6740f392c8259de9fa4578 Mon Sep 17 00:00:00 2001 From: Paul Madden <136389411+maddenp-noaa@users.noreply.github.com> Date: Wed, 21 Feb 2024 13:28:46 -0700 Subject: [PATCH] UW-506 sfc_climo_gen driver (#410) --- Makefile | 6 +- docs/sections/user_guide/api/index.rst | 1 + .../sections/user_guide/api/sfc_climo_gen.rst | 5 + docs/sections/user_guide/index.rst | 2 +- .../{uw_yaml => yaml}/field_table.rst | 0 .../user_guide/{uw_yaml => yaml}/fv3.rst | 0 .../user_guide/{uw_yaml => yaml}/index.rst | 0 .../user_guide/{uw_yaml => yaml}/rocoto.rst | 0 format | 15 + recipe/run_test.sh | 1 - src/pyproject.toml | 1 + src/uwtools/api/config.py | 3 + src/uwtools/api/fv3.py | 5 +- src/uwtools/api/logging.py | 3 + src/uwtools/api/rocoto.py | 3 + src/uwtools/api/sfc_climo_gen.py | 42 ++ src/uwtools/api/template.py | 3 + src/uwtools/cli.py | 69 ++- src/uwtools/config/tools.py | 2 - src/uwtools/config/validator.py | 26 +- src/uwtools/drivers/driver.py | 10 +- src/uwtools/drivers/fv3.py | 85 +--- src/uwtools/drivers/sfc_climo_gen.py | 162 ++++++ src/uwtools/resources/jsonschema/__init__.py | 0 .../resources/jsonschema/execution.jsonschema | 34 ++ .../jsonschema/files-to-stage.jsonschema | 7 + .../resources/{ => jsonschema}/fv3.jsonschema | 72 +-- .../resources/jsonschema/namelist.jsonschema | 16 + .../{ => jsonschema}/platform.jsonschema | 0 .../{ => jsonschema}/rocoto.jsonschema | 0 .../jsonschema/sfc-climo-gen.jsonschema | 137 +++++ .../{ => jsonschema}/workflow.jsonschema | 0 src/uwtools/resources/rocoto/__init__.py | 0 .../{ => rocoto}/schema_with_metatasks.rng | 0 src/uwtools/rocoto.py | 8 +- src/uwtools/tests/api/test_fv3.py | 29 +- src/uwtools/tests/api/test_sfc_climo_gen.py | 39 ++ src/uwtools/tests/config/formats/test_nml.py | 2 +- src/uwtools/tests/config/test_validator.py | 4 +- src/uwtools/tests/drivers/test_driver.py | 10 +- src/uwtools/tests/drivers/test_fv3.py | 469 +++--------------- .../tests/drivers/test_schema_platform.py | 23 - 
.../tests/drivers/test_sfc_climo_gen.py | 196 ++++++++ src/uwtools/tests/support.py | 10 +- src/uwtools/tests/test_cli.py | 38 ++ src/uwtools/tests/test_rocoto.py | 85 +--- src/uwtools/tests/test_schemas.py | 465 +++++++++++++++++ src/uwtools/tests/utils/test_file.py | 4 +- src/uwtools/tests/utils/test_tasks.py | 40 ++ src/uwtools/utils/file.py | 2 +- src/uwtools/utils/tasks.py | 49 ++ 51 files changed, 1496 insertions(+), 687 deletions(-) create mode 100644 docs/sections/user_guide/api/sfc_climo_gen.rst rename docs/sections/user_guide/{uw_yaml => yaml}/field_table.rst (100%) rename docs/sections/user_guide/{uw_yaml => yaml}/fv3.rst (100%) rename docs/sections/user_guide/{uw_yaml => yaml}/index.rst (100%) rename docs/sections/user_guide/{uw_yaml => yaml}/rocoto.rst (100%) create mode 100755 format create mode 100644 src/uwtools/api/sfc_climo_gen.py create mode 100644 src/uwtools/drivers/sfc_climo_gen.py create mode 100644 src/uwtools/resources/jsonschema/__init__.py create mode 100644 src/uwtools/resources/jsonschema/execution.jsonschema create mode 100644 src/uwtools/resources/jsonschema/files-to-stage.jsonschema rename src/uwtools/resources/{ => jsonschema}/fv3.jsonschema (74%) create mode 100644 src/uwtools/resources/jsonschema/namelist.jsonschema rename src/uwtools/resources/{ => jsonschema}/platform.jsonschema (100%) rename src/uwtools/resources/{ => jsonschema}/rocoto.jsonschema (100%) create mode 100644 src/uwtools/resources/jsonschema/sfc-climo-gen.jsonschema rename src/uwtools/resources/{ => jsonschema}/workflow.jsonschema (100%) create mode 100644 src/uwtools/resources/rocoto/__init__.py rename src/uwtools/resources/{ => rocoto}/schema_with_metatasks.rng (100%) create mode 100644 src/uwtools/tests/api/test_sfc_climo_gen.py delete mode 100644 src/uwtools/tests/drivers/test_schema_platform.py create mode 100644 src/uwtools/tests/drivers/test_sfc_climo_gen.py create mode 100644 src/uwtools/tests/test_schemas.py create mode 100644 
src/uwtools/tests/utils/test_tasks.py create mode 100644 src/uwtools/utils/tasks.py diff --git a/Makefile b/Makefile index efc76c1f1..89738bf49 100644 --- a/Makefile +++ b/Makefile @@ -28,11 +28,7 @@ env: package conda create -y -n $(call spec,buildnum,-) $(CHANNELS) $(call spec,build,=) format: - @echo "=> Running formatters" - black src - isort src - cd src && docformatter . || test $$? -eq 3 - for a in $$(find src -type f -name "*.jsonschema"); do b=$$(jq -S . $$a) && echo "$$b" >$$a || exit 1; done + @./format lint: recipe/run_test.sh lint diff --git a/docs/sections/user_guide/api/index.rst b/docs/sections/user_guide/api/index.rst index 2eb583e9b..fb19f7ba2 100644 --- a/docs/sections/user_guide/api/index.rst +++ b/docs/sections/user_guide/api/index.rst @@ -6,4 +6,5 @@ API fv3 logging rocoto + sfc_climo_gen template diff --git a/docs/sections/user_guide/api/sfc_climo_gen.rst b/docs/sections/user_guide/api/sfc_climo_gen.rst new file mode 100644 index 000000000..e94a611d2 --- /dev/null +++ b/docs/sections/user_guide/api/sfc_climo_gen.rst @@ -0,0 +1,5 @@ +``uwtools.api.sfc_climo_gen`` +============================= + +.. 
automodule:: uwtools.api.sfc_climo_gen + :members: diff --git a/docs/sections/user_guide/index.rst b/docs/sections/user_guide/index.rst index 1e5d39734..a385a1801 100644 --- a/docs/sections/user_guide/index.rst +++ b/docs/sections/user_guide/index.rst @@ -7,4 +7,4 @@ User Guide installation cli/index api/index - uw_yaml/index + yaml/index diff --git a/docs/sections/user_guide/uw_yaml/field_table.rst b/docs/sections/user_guide/yaml/field_table.rst similarity index 100% rename from docs/sections/user_guide/uw_yaml/field_table.rst rename to docs/sections/user_guide/yaml/field_table.rst diff --git a/docs/sections/user_guide/uw_yaml/fv3.rst b/docs/sections/user_guide/yaml/fv3.rst similarity index 100% rename from docs/sections/user_guide/uw_yaml/fv3.rst rename to docs/sections/user_guide/yaml/fv3.rst diff --git a/docs/sections/user_guide/uw_yaml/index.rst b/docs/sections/user_guide/yaml/index.rst similarity index 100% rename from docs/sections/user_guide/uw_yaml/index.rst rename to docs/sections/user_guide/yaml/index.rst diff --git a/docs/sections/user_guide/uw_yaml/rocoto.rst b/docs/sections/user_guide/yaml/rocoto.rst similarity index 100% rename from docs/sections/user_guide/uw_yaml/rocoto.rst rename to docs/sections/user_guide/yaml/rocoto.rst diff --git a/format b/format new file mode 100755 index 000000000..082223844 --- /dev/null +++ b/format @@ -0,0 +1,15 @@ +#!/bin/bash -eu + +echo "=> Running black" +black src + +echo "=> Running isort" +isort -q src + +echo "=> Running docformatter" +(cd src && docformatter . || test $$? -eq 3) + +echo "=> Running jq" +for a in $(find src -type f -name "*.jsonschema"); do + b=$(jq -S . 
$a) && echo "$b" >$a || (echo " in $a"; false) +done diff --git a/recipe/run_test.sh b/recipe/run_test.sh index dff445c8e..00de2b162 100755 --- a/recipe/run_test.sh +++ b/recipe/run_test.sh @@ -56,7 +56,6 @@ unittest() { } test "${CONDA_BUILD:-}" = 1 && cd ../test_files || cd $(dirname $0)/../src -msg Running in $PWD if [[ -n "${1:-}" ]]; then # Run single specified code-quality tool. $1 diff --git a/src/pyproject.toml b/src/pyproject.toml index 4e6036746..e3c26057f 100644 --- a/src/pyproject.toml +++ b/src/pyproject.toml @@ -28,6 +28,7 @@ warn_return_any = true [tool.pylint.messages_control] disable = [ "consider-using-f-string", + "duplicate-code", "invalid-name", "missing-module-docstring", "too-few-public-methods", diff --git a/src/uwtools/api/config.py b/src/uwtools/api/config.py index c86ca27ca..18357d459 100644 --- a/src/uwtools/api/config.py +++ b/src/uwtools/api/config.py @@ -1,3 +1,6 @@ +""" +API access to uwtools configuration management tools. +""" import os from pathlib import Path from typing import List, Optional, Union diff --git a/src/uwtools/api/fv3.py b/src/uwtools/api/fv3.py index 65150b483..dbeb194be 100644 --- a/src/uwtools/api/fv3.py +++ b/src/uwtools/api/fv3.py @@ -1,3 +1,6 @@ +""" +API access to the uwtools FV3 driver. +""" import datetime as dt from pathlib import Path from typing import Dict @@ -21,7 +24,7 @@ def execute( Otherwise, the forecast will be run directly on the current system. :param task: The task to execute - :param config_file: Path to UW YAML config file + :param config_file: Path to YAML config file :param cycle: The cycle to run :param batch: Submit run to the batch system :param dry_run: Do not run forecast, just report what would have been done diff --git a/src/uwtools/api/logging.py b/src/uwtools/api/logging.py index 23354cc54..aa42286bd 100644 --- a/src/uwtools/api/logging.py +++ b/src/uwtools/api/logging.py @@ -1,3 +1,6 @@ +""" +API access to uwtools logging logic. 
+""" import logging from uwtools.logging import setup_logging as _setup_logging diff --git a/src/uwtools/api/rocoto.py b/src/uwtools/api/rocoto.py index af1e47b06..e321f1f9f 100644 --- a/src/uwtools/api/rocoto.py +++ b/src/uwtools/api/rocoto.py @@ -1,3 +1,6 @@ +""" +API access to uwtools Rocoto support. +""" from pathlib import Path from typing import Optional, Union diff --git a/src/uwtools/api/sfc_climo_gen.py b/src/uwtools/api/sfc_climo_gen.py new file mode 100644 index 000000000..3dcce7c41 --- /dev/null +++ b/src/uwtools/api/sfc_climo_gen.py @@ -0,0 +1,42 @@ +""" +API access to the uwtools sfc_climo_gen driver. +""" +from pathlib import Path +from typing import Dict + +import iotaa + +from uwtools.drivers.sfc_climo_gen import SfcClimoGen + + +def execute( + task: str, + config_file: Path, + batch: bool = False, + dry_run: bool = False, +) -> bool: + """ + Execute an sfc_climo_gen task. + + If ``batch`` is specified, a runscript will be written and submitted to the batch system. + Otherwise, the forecast will be run directly on the current system. + + :param task: The task to execute + :param config_file: Path to YAML config file + :param batch: Submit run to the batch system + :param dry_run: Do not run forecast, just report what would have been done + :return: True if task completes without raising an exception + """ + obj = SfcClimoGen(config_file=config_file, batch=batch, dry_run=dry_run) + getattr(obj, task)() + return True + + +def tasks() -> Dict[str, str]: + """ + Returns a mapping from task names to their one-line descriptions. + """ + return { + task: getattr(SfcClimoGen, task).__doc__.strip().split("\n")[0] + for task in iotaa.tasknames(SfcClimoGen) + } diff --git a/src/uwtools/api/template.py b/src/uwtools/api/template.py index ca3cdc7a8..3524cc6b9 100644 --- a/src/uwtools/api/template.py +++ b/src/uwtools/api/template.py @@ -1,3 +1,6 @@ +""" +API access to uwtools templating logic. 
+""" from pathlib import Path from typing import Dict, Optional, Union diff --git a/src/uwtools/cli.py b/src/uwtools/cli.py index 6f8c4dd0a..6f88bf902 100644 --- a/src/uwtools/cli.py +++ b/src/uwtools/cli.py @@ -16,6 +16,7 @@ import uwtools.api.config import uwtools.api.fv3 import uwtools.api.rocoto +import uwtools.api.sfc_climo_gen import uwtools.api.template import uwtools.config.jinja2 import uwtools.rocoto @@ -51,6 +52,7 @@ def main() -> None: STR.config: _dispatch_config, STR.fv3: _dispatch_fv3, STR.rocoto: _dispatch_rocoto, + STR.sfcclimogen: _dispatch_sfc_climo_gen, STR.template: _dispatch_template, } sys.exit(0 if modes[args[STR.mode]](args) else 1) @@ -327,6 +329,58 @@ def _dispatch_rocoto_validate(args: Args) -> bool: return uwtools.api.rocoto.validate(xml_file=args[STR.infile]) +# Mode sfc_climo_gen + + +def _add_subparser_sfc_climo_gen(subparsers: Subparsers) -> ModeChecks: + """ + Subparser for mode: sfc_climo_gen + + :param subparsers: Parent parser's subparsers, to add this subparser to. + """ + parser = _add_subparser(subparsers, STR.sfcclimogen, "Execute sfc_climo_gen tasks") + _basic_setup(parser) + subparsers = _add_subparsers(parser, STR.action, STR.task.upper()) + return { + task: _add_subparser_sfc_climo_gen_task(subparsers, task, helpmsg) + for task, helpmsg in uwtools.api.sfc_climo_gen.tasks().items() + } + + +def _add_subparser_sfc_climo_gen_task( + subparsers: Subparsers, task: str, helpmsg: str +) -> ActionChecks: + """ + Subparser for mode: sfc_climo_gen + + :param subparsers: Parent parser's subparsers, to add this subparser to. + :param task: The task to add a subparser for. + :param helpmsg: Help message for task. 
+ """ + parser = _add_subparser(subparsers, task, helpmsg.rstrip(".")) + required = parser.add_argument_group(TITLE_REQ_ARG) + _add_arg_config_file(required) + optional = _basic_setup(parser) + _add_arg_batch(optional) + _add_arg_dry_run(optional) + checks = _add_args_verbosity(optional) + return checks + + +def _dispatch_sfc_climo_gen(args: Args) -> bool: + """ + Dispatch logic for sfc_climo_gen mode. + + :param args: Parsed command-line args. + """ + return uwtools.api.sfc_climo_gen.execute( + task=args[STR.action], + config_file=args[STR.cfgfile], + batch=args[STR.batch], + dry_run=args[STR.dryrun], + ) + + # Mode template @@ -442,7 +496,7 @@ def _add_arg_config_file(group: Group) -> None: help="Path to config file", metavar="PATH", required=True, - type=str, + type=Path, ) @@ -491,7 +545,7 @@ def _add_arg_file_path(group: Group, switch: str, helpmsg: str, required: bool = help=helpmsg, metavar="PATH", required=required, - type=str, + type=Path, ) @@ -502,7 +556,7 @@ def _add_arg_input_file(group: Group, required: bool = False) -> None: help="Path to input file (defaults to stdin)", metavar="PATH", required=required, - type=str, + type=Path, ) @@ -532,7 +586,7 @@ def _add_arg_output_file(group: Group, required: bool = False) -> None: help="Path to output file (defaults to stdout)", metavar="PATH", required=required, - type=str, + type=Path, ) @@ -561,7 +615,7 @@ def _add_arg_schema_file(group: Group) -> None: help="Path to schema file to use for validation", metavar="PATH", required=True, - type=str, + type=Path, ) @@ -571,6 +625,7 @@ def _add_arg_supplemental_files(group: Group) -> None: help="Additional files to supplement primary input", metavar="PATH", nargs="*", + type=Path, ) @@ -580,7 +635,7 @@ def _add_arg_values_file(group: Group, required: bool = False) -> None: help="Path to file providing override or interpolation values", metavar="PATH", required=required, - type=str, + type=Path, ) @@ -742,6 +797,7 @@ def _parse_args(raw_args: List[str]) -> 
Tuple[Args, Checks]: STR.config: _add_subparser_config(subparsers), STR.fv3: _add_subparser_fv3(subparsers), STR.rocoto: _add_subparser_rocoto(subparsers), + STR.sfcclimogen: _add_subparser_sfc_climo_gen(subparsers), STR.template: _add_subparser_template(subparsers), } return vars(parser.parse_args(raw_args)), checks @@ -790,6 +846,7 @@ class STR: rocoto: str = "rocoto" run: str = "run" schemafile: str = "schema_file" + sfcclimogen: str = "sfc_climo_gen" suppfiles: str = "supplemental_files" task: str = "task" tasks: str = "tasks" diff --git a/src/uwtools/config/tools.py b/src/uwtools/config/tools.py index 48dd4dba7..07802e9c4 100644 --- a/src/uwtools/config/tools.py +++ b/src/uwtools/config/tools.py @@ -281,8 +281,6 @@ def _validate_format_supplemental( # Import-time code -# pylint: disable=duplicate-code - # The following statements dynamically interpolate values into functions' docstrings, which will not # work if the docstrings are inlined in the functions. They must remain separate statements to avoid # hardcoding values into them. diff --git a/src/uwtools/config/validator.py b/src/uwtools/config/validator.py index a7a77f597..6bc67d4af 100644 --- a/src/uwtools/config/validator.py +++ b/src/uwtools/config/validator.py @@ -7,9 +7,12 @@ from typing import List, Optional, Union import jsonschema +from referencing import Registry, Resource +from referencing.jsonschema import DRAFT202012 from uwtools.config.formats.yaml import YAMLConfig from uwtools.logging import log +from uwtools.utils.file import resource_path # Public functions @@ -18,7 +21,7 @@ def validate_yaml( schema_file: Path, config: Union[dict, YAMLConfig, Optional[Path]] = None ) -> bool: """ - Check whether the given config conforms to the given JSON Schema spec. + Report any errors arising from validation of the given config against the given JSON Schema. :param schema_file: The JSON Schema file to use for validation. :param config: The config to validate. 
@@ -27,12 +30,10 @@ def validate_yaml( with open(schema_file, "r", encoding="utf-8") as f: schema = json.load(f) cfgobj = _prep_config(config) - # Collect and report on schema-validation errors. errors = _validation_errors(cfgobj.data, schema) log_method = log.error if errors else log.info - log_method( - "%s UW schema-validation error%s found", len(errors), "" if len(errors) == 1 else "s" - ) + log_msg = "%s UW schema-validation error%s found" + log_method(log_msg, len(errors), "" if len(errors) == 1 else "s") for error in errors: for line in str(error).split("\n"): log.error(line) @@ -57,6 +58,19 @@ def _prep_config(config: Union[dict, YAMLConfig, Optional[Path]]) -> YAMLConfig: def _validation_errors(config: Union[dict, list], schema: dict) -> List[str]: """ Identify schema-validation errors. + + :param config: A config to validate. + :param schema: JSON Schema to validate the config against. + :return: Any validation errors. """ - validator = jsonschema.Draft202012Validator(schema) + + # See https://github.com/python-jsonschema/referencing/issues/61 about typing issues. + + def retrieve(uri: str) -> Resource: + name = uri.split(":")[-1] + with open(resource_path(f"jsonschema/{name}.jsonschema"), "r", encoding="utf-8") as f: + return Resource(contents=json.load(f), specification=DRAFT202012) # type: ignore + + registry = Registry(retrieve=retrieve) # type: ignore + validator = jsonschema.Draft202012Validator(schema, registry=registry) return list(validator.iter_errors(config)) diff --git a/src/uwtools/drivers/driver.py b/src/uwtools/drivers/driver.py index 1f77964dd..0367a90a2 100644 --- a/src/uwtools/drivers/driver.py +++ b/src/uwtools/drivers/driver.py @@ -13,6 +13,7 @@ from uwtools.exceptions import UWConfigError from uwtools.logging import log from uwtools.scheduler import JobScheduler +from uwtools.utils.file import resource_path class Driver(ABC): @@ -133,13 +134,16 @@ def _validate(self) -> None: Perform all necessary schema validation. 
""" - def _validate_one(self, schema_file: Path) -> None: + def _validate_one(self, schema_name: str) -> None: """ Validate the config. - :param schema_file: The schema file to validate the config against. + :param schema_name: Name of uwtools schema to validate the config against. :raises: UWConfigError if config fails validation. """ - log.info("Validating config per %s", schema_file) + + log.info("Validating config per schema %s", schema_name) + schema_file = resource_path("jsonschema") / f"{schema_name}.jsonschema" + log.debug("Using schema file: %s", schema_file) if not validator.validate_yaml(config=self._config, schema_file=schema_file): raise UWConfigError("YAML validation errors") diff --git a/src/uwtools/drivers/fv3.py b/src/uwtools/drivers/fv3.py index f37cd5a84..c06d10797 100644 --- a/src/uwtools/drivers/fv3.py +++ b/src/uwtools/drivers/fv3.py @@ -9,15 +9,15 @@ from shutil import copy from typing import Any, Dict -from iotaa import asset, dryrun, external, task, tasks +from iotaa import asset, dryrun, task, tasks from uwtools.config.formats.fieldtable import FieldTableConfig from uwtools.config.formats.nml import NMLConfig from uwtools.config.formats.yaml import YAMLConfig from uwtools.drivers.driver import Driver from uwtools.logging import log -from uwtools.utils.file import resource_pathobj from uwtools.utils.processing import execute +from uwtools.utils.tasks import filecopy, symlink class FV3(Driver): @@ -29,7 +29,7 @@ def __init__( self, config_file: Path, cycle: datetime, dry_run: bool = False, batch: bool = False ): """ - The FV3 driver. + The driver. :param config_file: Path to config file. :param cycle: The forecast cycle. @@ -48,9 +48,9 @@ def __init__( @tasks def boundary_files(self): """ - The FV3 lateral boundary-condition files. + Lateral boundary-condition files. 
""" - yield self._taskname("lateral boundary condition files") + yield self._taskname("lateral boundary-condition files") lbcs = self._driver_config["lateral_boundary_conditions"] offset = abs(lbcs["offset"]) endhour = self._driver_config["length"] + offset + 1 @@ -63,12 +63,12 @@ def boundary_files(self): self._rundir / "INPUT" / f"gfs_bndy.tile{n}.{(boundary_hour - offset):03d}.nc" ) symlinks[target] = linkname - yield [self._symlink(target=t, linkname=l) for t, l in symlinks.items()] + yield [symlink(target=t, linkname=l) for t, l in symlinks.items()] @task def diag_table(self): """ - The FV3 diag_table file. + The diag_table file. """ fn = "diag_table" yield self._taskname(fn) @@ -84,7 +84,7 @@ def diag_table(self): @task def field_table(self): """ - The FV3 field_table file. + The field_table file. """ fn = "field_table" yield self._taskname(fn) @@ -100,29 +100,29 @@ def field_table(self): @tasks def files_copied(self): """ - Files copied for FV3 run. + Files copied for run. """ yield self._taskname("files copied") yield [ - self._filecopy(src=Path(src), dst=self._rundir / dst) + filecopy(src=Path(src), dst=self._rundir / dst) for dst, src in self._driver_config.get("files_to_copy", {}).items() ] @tasks def files_linked(self): """ - Files linked for FV3 run. + Files linked for run. """ yield self._taskname("files linked") yield [ - self._symlink(target=Path(target), linkname=self._rundir / linkname) + symlink(target=Path(target), linkname=self._rundir / linkname) for linkname, target in self._driver_config.get("files_to_link", {}).items() ] @task def model_configure(self): """ - The FV3 model_configure file. + The model_configure file. """ fn = "model_configure" yield self._taskname(fn) @@ -138,7 +138,7 @@ def model_configure(self): @task def namelist_file(self): """ - The FV3 namelist file. + The namelist file. 
""" fn = "input.nml" yield self._taskname(fn) @@ -154,7 +154,7 @@ def namelist_file(self): @tasks def provisioned_run_directory(self): """ - The run directory provisioned with all required content. + Run directory provisioned with all required content. """ yield self._taskname("provisioned run directory") yield [ @@ -172,7 +172,7 @@ def provisioned_run_directory(self): @task def restart_directory(self): """ - The FV3 RESTART directory. + The RESTART directory. """ yield self._taskname("RESTART directory") path = self._rundir / "RESTART" @@ -183,7 +183,7 @@ def restart_directory(self): @tasks def run(self): """ - FV3 run execution. + A run. """ yield self._taskname("run") yield (self._run_via_batch_submission() if self._batch else self._run_via_local_execution()) @@ -191,10 +191,10 @@ def run(self): @task def runscript(self): """ - A runscript suitable for submission to the scheduler. + The runscript. """ - yield self._taskname("runscript") path = self._runscript_path + yield self._taskname(path.name) yield asset(path, path.is_file) yield None envvars = { @@ -217,33 +217,10 @@ def runscript(self): # Private workflow tasks - @external - def _file(self, path: Path): - """ - An existing file. - - :param path: Path to the file. - """ - yield "File %s" % path - yield asset(path, path.is_file) - - @task - def _filecopy(self, src: Path, dst: Path): - """ - A copy of an existing file. - - :param src: Path to the source file. - :param dst: Path to the destination file to create. - """ - yield "Copy %s -> %s" % (src, dst) - yield asset(dst, dst.is_file) - yield self._file(src) - copy(src, dst) - @task def _run_via_batch_submission(self): """ - FV3 run Execution via the batch system. + A run executed via the batch system. """ yield self._taskname("run via batch submission") path = Path("%s.submit" % self._runscript_path) @@ -254,7 +231,7 @@ def _run_via_batch_submission(self): @task def _run_via_local_execution(self): """ - FV3 run execution directly on the local system. 
+ A run executed directly on the local system. """ yield self._taskname("run via local execution") path = self._rundir / "done" @@ -263,20 +240,6 @@ def _run_via_local_execution(self): cmd = "{x} >{x}.out 2>&1".format(x=self._runscript_path) execute(cmd=cmd, cwd=self._rundir, log_output=True) - @task - def _symlink(self, target: Path, linkname: Path): - """ - A symbolic link. - - :param target: The existing file or directory. - :param linkname: The symlink to create. - """ - yield "Link %s -> %s" % (linkname, target) - yield asset(linkname, linkname.exists) - yield self._file(target) - linkname.parent.mkdir(parents=True, exist_ok=True) - os.symlink(src=target, dst=linkname) - # Private helper methods @property @@ -290,7 +253,7 @@ def _driver_config(self) -> Dict[str, Any]: @property def _resources(self) -> Dict[str, Any]: """ - Returns configuration data for the FV3 runscript. + Returns configuration data for the runscript. """ return { "account": self._config["platform"]["account"], @@ -318,5 +281,5 @@ def _validate(self) -> None: """ Perform all necessary schema validation. """ - for schema_file in ("fv3.jsonschema", "platform.jsonschema"): - self._validate_one(resource_pathobj(schema_file)) + for schema_name in ("fv3", "platform"): + self._validate_one(schema_name=schema_name) diff --git a/src/uwtools/drivers/sfc_climo_gen.py b/src/uwtools/drivers/sfc_climo_gen.py new file mode 100644 index 000000000..91f302cad --- /dev/null +++ b/src/uwtools/drivers/sfc_climo_gen.py @@ -0,0 +1,162 @@ +""" +A driver for sfc_climo_gen. +""" + +import os +import stat +from pathlib import Path +from typing import Any, Dict + +from iotaa import asset, dryrun, task, tasks + +from uwtools.config.formats.nml import NMLConfig +from uwtools.drivers.driver import Driver +from uwtools.utils.processing import execute +from uwtools.utils.tasks import file + + +class SfcClimoGen(Driver): + """ + A driver for sfc_climo_gen. 
+ """ + + def __init__(self, config_file: Path, dry_run: bool = False, batch: bool = False): + """ + The driver. + + :param config_file: Path to config file. + :param dry_run: Run in dry-run mode? + :param batch: Run component via the batch system? + """ + super().__init__(config_file=config_file, dry_run=dry_run, batch=batch) + if self._dry_run: + dryrun() + self._rundir = Path(self._driver_config["run_dir"]) + + # Workflow tasks + + @task + def namelist_file(self): + """ + The namelist file. + """ + fn = "fort.41" + yield self._taskname(f"namelist file {fn}") + path = self._rundir / fn + yield asset(path, path.is_file) + vals = self._driver_config["namelist"]["update_values"]["config"] + input_paths = [Path(v) for k, v in vals.items() if k.startswith("input_")] + input_paths += [Path(vals["mosaic_file_mdl"])] + input_paths += [Path(vals["orog_dir_mdl"]) / fn for fn in vals["orog_files_mdl"]] + yield [file(input_path) for input_path in input_paths] + self._create_user_updated_config( + config_class=NMLConfig, + config_values=self._driver_config.get("namelist", {}), + path=path, + ) + + @tasks + def provisioned_run_directory(self): + """ + Run directory provisioned with all required content. + """ + yield self._taskname("provisioned run directory") + yield [ + self.namelist_file(), + self.runscript(), + ] + + @tasks + def run(self): + """ + A run. + """ + yield self._taskname("run") + yield (self._run_via_batch_submission() if self._batch else self._run_via_local_execution()) + + @task + def runscript(self): + """ + The runscript. + """ + path = self._runscript_path + yield self._taskname(path.name) + yield asset(path, path.is_file) + yield None + envcmds = self._driver_config.get("execution", {}).get("envcmds", []) + execution = [self._runcmd, "test $? 
-eq 0 && touch %s/done" % self._rundir] + scheduler = self._scheduler if self._batch else None + path.parent.mkdir(parents=True, exist_ok=True) + rs = self._runscript(envcmds=envcmds, execution=execution, scheduler=scheduler) + with open(path, "w", encoding="utf-8") as f: + print(rs, file=f) + os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC) + + # Private workflow tasks + + @task + def _run_via_batch_submission(self): + """ + A run executed via the batch system. + """ + yield self._taskname("run via batch submission") + path = Path("%s.submit" % self._runscript_path) + yield asset(path, path.is_file) + yield self.provisioned_run_directory() + self._scheduler.submit_job(runscript=self._runscript_path, submit_file=path) + + @task + def _run_via_local_execution(self): + """ + A run executed directly on the local system. + """ + yield self._taskname("run via local execution") + path = self._rundir / "done" + yield asset(path, path.is_file) + yield self.provisioned_run_directory() + cmd = "{x} >{x}.out 2>&1".format(x=self._runscript_path) + execute(cmd=cmd, cwd=self._rundir, log_output=True) + + # Private helper methods + + @property + def _driver_config(self) -> Dict[str, Any]: + """ + Returns the config block specific to this driver. + """ + driver_config: Dict[str, Any] = self._config["sfc_climo_gen"] + return driver_config + + @property + def _resources(self) -> Dict[str, Any]: + """ + Returns configuration data for the runscript. + """ + return { + "account": self._config["platform"]["account"], + "rundir": self._rundir, + "scheduler": self._config["platform"]["scheduler"], + **self._driver_config.get("execution", {}).get("batchargs", {}), + } + + @property + def _runscript_path(self) -> Path: + """ + Returns the path to the runscript. + """ + return self._rundir / "runscript" + + def _taskname(self, suffix: str) -> str: + """ + Returns a common tag for graph-task log messages. + + :param suffix: Log-string suffix. 
+ """ + return "sfc_climo_gen %s" % suffix + + def _validate(self) -> None: + """ + Perform all necessary schema validation. + """ + for schema_name in ("sfc-climo-gen", "platform"): + self._validate_one(schema_name=schema_name) diff --git a/src/uwtools/resources/jsonschema/__init__.py b/src/uwtools/resources/jsonschema/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/uwtools/resources/jsonschema/execution.jsonschema b/src/uwtools/resources/jsonschema/execution.jsonschema new file mode 100644 index 000000000..5589543ff --- /dev/null +++ b/src/uwtools/resources/jsonschema/execution.jsonschema @@ -0,0 +1,34 @@ +{ + "additionalProperties": false, + "properties": { + "batchargs": { + "type": "object" + }, + "envcmds": { + "items": { + "type": "string" + }, + "type": "array" + }, + "executable": { + "type": "string" + }, + "mpiargs": { + "items": { + "type": "string" + }, + "type": "array" + }, + "mpicmd": { + "type": "string" + }, + "threads": { + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "executable" + ], + "type": "object" +} diff --git a/src/uwtools/resources/jsonschema/files-to-stage.jsonschema b/src/uwtools/resources/jsonschema/files-to-stage.jsonschema new file mode 100644 index 000000000..7c83c61f3 --- /dev/null +++ b/src/uwtools/resources/jsonschema/files-to-stage.jsonschema @@ -0,0 +1,7 @@ +{ + "additionalProperties": { + "type": "string" + }, + "minProperties": 1, + "type": "object" +} diff --git a/src/uwtools/resources/fv3.jsonschema b/src/uwtools/resources/jsonschema/fv3.jsonschema similarity index 74% rename from src/uwtools/resources/fv3.jsonschema rename to src/uwtools/resources/jsonschema/fv3.jsonschema index 51a88af04..fd6a5762b 100644 --- a/src/uwtools/resources/fv3.jsonschema +++ b/src/uwtools/resources/jsonschema/fv3.jsonschema @@ -1,15 +1,4 @@ { - "$defs": { - "filesToStage": { - "minProperties": 1, - "patternProperties": { - "^.*$": { - "type": "string" - } - }, - "type": "object" - } - }, 
"properties": { "fv3": { "additionalProperties": false, @@ -25,38 +14,7 @@ "type": "string" }, "execution": { - "additionalProperties": false, - "properties": { - "batchargs": { - "type": "object" - }, - "envcmds": { - "items": { - "type": "string" - }, - "type": "array" - }, - "executable": { - "type": "string" - }, - "mpiargs": { - "items": { - "type": "string" - }, - "type": "array" - }, - "mpicmd": { - "type": "string" - }, - "threads": { - "minimum": 0, - "type": "integer" - } - }, - "required": [ - "executable" - ], - "type": "object" + "$ref": "urn:uwtools:execution" }, "field_table": { "additionalProperties": false, @@ -141,10 +99,10 @@ "type": "object" }, "files_to_copy": { - "$ref": "#/$defs/filesToStage" + "$ref": "urn:uwtools:files-to-stage" }, "files_to_link": { - "$ref": "#/$defs/filesToStage" + "$ref": "urn:uwtools:files-to-stage" }, "lateral_boundary_conditions": { "additionalProperties": false, @@ -225,26 +183,7 @@ "type": "string" }, "update_values": { - "minProperties": 1, - "patternProperties": { - "^.*$": { - "minProperties": 1, - "patternProperties": { - "^.*$": { - "minProperties": 1, - "type": [ - "array", - "boolean", - "number", - "string" - ] - }, - "type": "object" - }, - "type": "object" - } - }, - "type": "object" + "$ref": "urn:uwtools:namelist" } }, "type": "object" @@ -261,9 +200,6 @@ "run_dir" ], "type": "object" - }, - "user": { - "type": "object" } }, "type": "object" diff --git a/src/uwtools/resources/jsonschema/namelist.jsonschema b/src/uwtools/resources/jsonschema/namelist.jsonschema new file mode 100644 index 000000000..0cea81894 --- /dev/null +++ b/src/uwtools/resources/jsonschema/namelist.jsonschema @@ -0,0 +1,16 @@ +{ + "additionalProperties": { + "additionalProperties": { + "type": [ + "array", + "boolean", + "number", + "string" + ] + }, + "minProperties": 1, + "type": "object" + }, + "minProperties": 1, + "type": "object" +} diff --git a/src/uwtools/resources/platform.jsonschema 
b/src/uwtools/resources/jsonschema/platform.jsonschema similarity index 100% rename from src/uwtools/resources/platform.jsonschema rename to src/uwtools/resources/jsonschema/platform.jsonschema diff --git a/src/uwtools/resources/rocoto.jsonschema b/src/uwtools/resources/jsonschema/rocoto.jsonschema similarity index 100% rename from src/uwtools/resources/rocoto.jsonschema rename to src/uwtools/resources/jsonschema/rocoto.jsonschema diff --git a/src/uwtools/resources/jsonschema/sfc-climo-gen.jsonschema b/src/uwtools/resources/jsonschema/sfc-climo-gen.jsonschema new file mode 100644 index 000000000..c5ed23199 --- /dev/null +++ b/src/uwtools/resources/jsonschema/sfc-climo-gen.jsonschema @@ -0,0 +1,137 @@ +{ + "properties": { + "sfc_climo_gen": { + "additionalProperties": false, + "properties": { + "execution": { + "$ref": "urn:uwtools:execution" + }, + "namelist": { + "additionalProperties": false, + "anyOf": [ + { + "required": [ + "base_file" + ] + }, + { + "required": [ + "update_values" + ] + } + ], + "properties": { + "base_file": { + "type": "string" + }, + "update_values": { + "properties": { + "config": { + "additionalProperties": { + "type": [ + "array", + "boolean", + "number", + "string" + ] + }, + "properties": { + "fract_vegsoil_type": { + "type": "boolean" + }, + "halo": { + "type": "integer" + }, + "input_facsf_file": { + "type": "string" + }, + "input_leaf_area_index_file": { + "type": "string" + }, + "input_maximum_snow_albedo_file": { + "type": "string" + }, + "input_slope_type_file": { + "type": "string" + }, + "input_snowfree_albedo_file": { + "type": "string" + }, + "input_soil_color_file": { + "type": "string" + }, + "input_soil_type_file": { + "type": "string" + }, + "input_substrate_temperature_file": { + "type": "string" + }, + "input_vegetation_greenness_file": { + "type": "string" + }, + "input_vegetation_type_file": { + "type": "string" + }, + "leaf_area_index_method": { + "enum": [ + "bilinear", + "conservative" + ] + }, + 
"maximum_snow_albedo_method": { + "enum": [ + "bilinear", + "conservative" + ] + }, + "mosaic_file_mdl": { + "type": "string" + }, + "orog_dir_mdl": { + "type": "string" + }, + "orog_files_mdl": { + "items": { + "type": "string" + }, + "maxContains": 6, + "minContains": 1, + "type": "array" + }, + "snowfree_albedo_method": { + "enum": [ + "bilinear", + "conservative" + ] + }, + "vegetation_greenness_method": { + "enum": [ + "bilinear", + "conservative" + ] + } + }, + "type": "object" + } + }, + "required": [ + "config" + ], + "type": "object" + } + }, + "type": "object" + }, + "run_dir": { + "type": "string" + } + }, + "required": [ + "execution", + "run_dir" + ], + "type": "object" + } + }, + "type": "object" +} diff --git a/src/uwtools/resources/workflow.jsonschema b/src/uwtools/resources/jsonschema/workflow.jsonschema similarity index 100% rename from src/uwtools/resources/workflow.jsonschema rename to src/uwtools/resources/jsonschema/workflow.jsonschema diff --git a/src/uwtools/resources/rocoto/__init__.py b/src/uwtools/resources/rocoto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/uwtools/resources/schema_with_metatasks.rng b/src/uwtools/resources/rocoto/schema_with_metatasks.rng similarity index 100% rename from src/uwtools/resources/schema_with_metatasks.rng rename to src/uwtools/resources/rocoto/schema_with_metatasks.rng diff --git a/src/uwtools/rocoto.py b/src/uwtools/rocoto.py index 89f6ecdbe..8796b0b46 100644 --- a/src/uwtools/rocoto.py +++ b/src/uwtools/rocoto.py @@ -15,7 +15,7 @@ from uwtools.config.validator import validate_yaml from uwtools.exceptions import UWConfigError, UWError from uwtools.logging import log -from uwtools.utils.file import readable, resource_pathobj, writable +from uwtools.utils.file import readable, resource_path, writable def realize_rocoto_xml( @@ -57,7 +57,7 @@ def validate_rocoto_xml_string(xml: str) -> bool: :return: Did the XML conform to the schema? 
""" tree = etree.fromstring(xml.encode("utf-8")) - with open(resource_pathobj("schema_with_metatasks.rng"), "r", encoding="utf-8") as f: + with open(resource_path("rocoto/schema_with_metatasks.rng"), "r", encoding="utf-8") as f: schema = etree.RelaxNG(etree.parse(f)) valid: bool = schema.validate(tree) nerr = len(schema.error_log) @@ -76,7 +76,7 @@ def validate_rocoto_xml_string(xml: str) -> bool: class _RocotoXML: """ - Generate a Rocoto XML document from a UW YAML config. + Generate a Rocoto XML document from a YAML config. """ def __init__(self, config: Union[dict, YAMLConfig, Optional[Path]] = None) -> None: @@ -346,7 +346,7 @@ def _config_validate(self, config: Union[dict, YAMLConfig, Optional[Path]]) -> N :param config: YAMLConfig object or path to YAML file (None => read stdin). :raises: UWConfigError if config fails validation. """ - schema_file = resource_pathobj("rocoto.jsonschema") + schema_file = resource_path("jsonschema/rocoto.jsonschema") ok = validate_yaml(schema_file=schema_file, config=config) if not ok: raise UWConfigError("YAML validation errors") diff --git a/src/uwtools/tests/api/test_fv3.py b/src/uwtools/tests/api/test_fv3.py index d89c80919..55b4df52b 100644 --- a/src/uwtools/tests/api/test_fv3.py +++ b/src/uwtools/tests/api/test_fv3.py @@ -1,4 +1,4 @@ -# pylint: disable=missing-function-docstring,protected-access +# pylint: disable=missing-function-docstring import datetime as dt from unittest.mock import patch @@ -8,21 +8,6 @@ from uwtools.api import fv3 -@external -def t1(): - "@external t1" - - -@task -def t2(): - "@task t2" - - -@tasks -def t3(): - "@tasks t3" - - def test_execute(): args: dict = { "config_file": "config.yaml", @@ -37,6 +22,18 @@ def test_execute(): def test_tasks(): + @external + def t1(): + "@external t1" + + @task + def t2(): + "@task t2" + + @tasks + def t3(): + "@tasks t3" + with patch.object(fv3, "FV3") as FV3: FV3.t1 = t1 FV3.t2 = t2 diff --git a/src/uwtools/tests/api/test_sfc_climo_gen.py 
b/src/uwtools/tests/api/test_sfc_climo_gen.py new file mode 100644 index 000000000..7cbc5342c --- /dev/null +++ b/src/uwtools/tests/api/test_sfc_climo_gen.py @@ -0,0 +1,39 @@ +# pylint: disable=missing-function-docstring + +from unittest.mock import patch + +from iotaa import external, task, tasks + +from uwtools.api import sfc_climo_gen + + +def test_execute(): + args: dict = { + "config_file": "config.yaml", + "batch": False, + "dry_run": True, + } + with patch.object(sfc_climo_gen, "SfcClimoGen") as SfcClimoGen: + assert sfc_climo_gen.execute(**args, task="foo") is True + SfcClimoGen.assert_called_once_with(**args) + SfcClimoGen().foo.assert_called_once_with() + + +def test_tasks(): + @external + def t1(): + "@external t1" + + @task + def t2(): + "@task t2" + + @tasks + def t3(): + "@tasks t3" + + with patch.object(sfc_climo_gen, "SfcClimoGen") as SfcClimoGen: + SfcClimoGen.t1 = t1 + SfcClimoGen.t2 = t2 + SfcClimoGen.t3 = t3 + assert sfc_climo_gen.tasks() == {"t2": "@task t2", "t3": "@tasks t3", "t1": "@external t1"} diff --git a/src/uwtools/tests/config/formats/test_nml.py b/src/uwtools/tests/config/formats/test_nml.py index e15f6f7e4..ee44fb718 100644 --- a/src/uwtools/tests/config/formats/test_nml.py +++ b/src/uwtools/tests/config/formats/test_nml.py @@ -1,4 +1,4 @@ -# pylint: disable=duplicate-code,missing-function-docstring,redefined-outer-name +# pylint: disable=missing-function-docstring,redefined-outer-name """ Tests for uwtools.config.formats.nml module. 
""" diff --git a/src/uwtools/tests/config/test_validator.py b/src/uwtools/tests/config/test_validator.py index 4ba496b5a..6e6890bf2 100644 --- a/src/uwtools/tests/config/test_validator.py +++ b/src/uwtools/tests/config/test_validator.py @@ -13,7 +13,7 @@ from uwtools.config import validator from uwtools.config.formats.yaml import YAMLConfig from uwtools.logging import log -from uwtools.utils.file import resource_pathobj +from uwtools.utils.file import resource_path # Fixtures @@ -46,7 +46,7 @@ def prep_config_dict(): @fixture def rocoto_assets(): - schema_file = resource_pathobj("rocoto.jsonschema") + schema_file = resource_path("jsonschema/rocoto.jsonschema") kwargs = {"schema_file": schema_file, "config_file": "/not/used"} config = { "workflow": { diff --git a/src/uwtools/tests/drivers/test_driver.py b/src/uwtools/tests/drivers/test_driver.py index 637595edd..27eefea2f 100644 --- a/src/uwtools/tests/drivers/test_driver.py +++ b/src/uwtools/tests/drivers/test_driver.py @@ -154,11 +154,13 @@ def test_Driver__scheduler(driver_good): JobScheduler.get_scheduler.assert_called_with(driver_good._resources) -def test_driver__validate_one_no(driver_bad, schema): - with raises(UWConfigError) as e: - driver_bad._validate_one(schema) +def test_Driver__validate_one_no(driver_bad, schema): + with patch.object(driver, "resource_path", return_value=schema.parent): + with raises(UWConfigError) as e: + driver_bad._validate_one(schema.stem) assert str(e.value) == "YAML validation errors" def test_Driver__validate_one_ok(driver_good, schema): - driver_good._validate_one(schema) + with patch.object(driver, "resource_path", return_value=schema.parent): + driver_good._validate_one(schema.stem) diff --git a/src/uwtools/tests/drivers/test_fv3.py b/src/uwtools/tests/drivers/test_fv3.py index eefc90b4d..9a0da5d07 100644 --- a/src/uwtools/tests/drivers/test_fv3.py +++ b/src/uwtools/tests/drivers/test_fv3.py @@ -3,7 +3,6 @@ FV3 driver tests. 
""" import datetime as dt -from functools import partial from pathlib import Path from unittest.mock import DEFAULT as D from unittest.mock import PropertyMock, patch @@ -13,7 +12,7 @@ from pytest import fixture from uwtools.drivers import fv3 -from uwtools.tests.support import logged, validator, with_del, with_set +from uwtools.tests.support import logged # Fixtures @@ -45,64 +44,64 @@ def config(tmp_path): @fixture def config_file(config, tmp_path): - path = tmp_path / "fv3.yaml" + path = tmp_path / "config.yaml" with open(path, "w", encoding="utf-8") as f: yaml.dump(config, f) return path @fixture -def fv3obj(config_file, cycle): +def driverobj(config_file, cycle): return fv3.FV3(config_file=config_file, cycle=cycle, batch=True) # Driver tests -def test_FV3(fv3obj): - assert isinstance(fv3obj, fv3.FV3) +def test_FV3(driverobj): + assert isinstance(driverobj, fv3.FV3) def test_FV3_dry_run(config_file, cycle): with patch.object(fv3, "dryrun") as dryrun: - fv3obj = fv3.FV3(config_file=config_file, cycle=cycle, batch=True, dry_run=True) - assert fv3obj._dry_run is True + driverobj = fv3.FV3(config_file=config_file, cycle=cycle, batch=True, dry_run=True) + assert driverobj._dry_run is True dryrun.assert_called_once_with() -def test_FV3_boundary_files(fv3obj): +def test_FV3_boundary_files(driverobj): ns = (0, 1) - links = [fv3obj._rundir / "INPUT" / f"gfs_bndy.tile7.{n:03d}.nc" for n in ns] + links = [driverobj._rundir / "INPUT" / f"gfs_bndy.tile7.{n:03d}.nc" for n in ns] assert not any(link.is_file() for link in links) for n in ns: - (fv3obj._rundir / f"f{n}").touch() - fv3obj.boundary_files() + (driverobj._rundir / f"f{n}").touch() + driverobj.boundary_files() assert all(link.is_symlink() for link in links) -def test_FV3_diag_table(fv3obj): - src = fv3obj._rundir / "diag_table.in" +def test_FV3_diag_table(driverobj): + src = driverobj._rundir / "diag_table.in" src.touch() - fv3obj._driver_config["diag_table"] = src - dst = fv3obj._rundir / "diag_table" + 
driverobj._driver_config["diag_table"] = src + dst = driverobj._rundir / "diag_table" assert not dst.is_file() - fv3obj.diag_table() + driverobj.diag_table() assert dst.is_file() -def test_FV3_diag_table_warn(caplog, fv3obj): - fv3obj.diag_table() +def test_FV3_diag_table_warn(caplog, driverobj): + driverobj.diag_table() assert logged(caplog, "No 'diag_table' defined in config") -def test_FV3_field_table(fv3obj): - src = fv3obj._rundir / "field_table.in" +def test_FV3_field_table(driverobj): + src = driverobj._rundir / "field_table.in" with open(src, "w", encoding="utf-8") as f: yaml.dump({}, f) - dst = fv3obj._rundir / "field_table" + dst = driverobj._rundir / "field_table" assert not dst.is_file() - fv3obj._driver_config["field_table"] = {"base_file": src} - fv3obj.field_table() + driverobj._driver_config["field_table"] = {"base_file": src} + driverobj.field_table() assert dst.is_file() @@ -115,44 +114,44 @@ def test_FV3_files_copied(config, cycle, key, task, test, tmp_path): atm_cfg_dst, sfc_cfg_dst = [x % "{{ cycle.strftime('%H') }}" for x in [atm, sfc]] atm_cfg_src, sfc_cfg_src = [str(tmp_path / (x + ".in")) for x in [atm_cfg_dst, sfc_cfg_dst]] config["fv3"].update({key: {atm_cfg_dst: atm_cfg_src, sfc_cfg_dst: sfc_cfg_src}}) - path = tmp_path / "fv3.yaml" + path = tmp_path / "config.yaml" with open(path, "w", encoding="utf-8") as f: yaml.dump(config, f) - fv3obj = fv3.FV3(config_file=path, cycle=cycle, batch=True) + driverobj = fv3.FV3(config_file=path, cycle=cycle, batch=True) atm_dst, sfc_dst = [tmp_path / (x % cycle.strftime("%H")) for x in [atm, sfc]] assert not any(dst.is_file() for dst in [atm_dst, sfc_dst]) atm_src, sfc_src = [Path(str(x) + ".in") for x in [atm_dst, sfc_dst]] for src in (atm_src, sfc_src): src.touch() - getattr(fv3obj, task)() + getattr(driverobj, task)() assert all(getattr(dst, test)() for dst in [atm_dst, sfc_dst]) -def test_FV3_model_configure(fv3obj): - src = fv3obj._rundir / "model_configure.in" +def 
test_FV3_model_configure(driverobj): + src = driverobj._rundir / "model_configure.in" with open(src, "w", encoding="utf-8") as f: yaml.dump({}, f) - dst = fv3obj._rundir / "model_configure" + dst = driverobj._rundir / "model_configure" assert not dst.is_file() - fv3obj._driver_config["model_configure"] = {"base_file": src} - fv3obj.model_configure() + driverobj._driver_config["model_configure"] = {"base_file": src} + driverobj.model_configure() assert dst.is_file() -def test_FV3_namelist_file(fv3obj): - src = fv3obj._rundir / "input.nml.in" +def test_FV3_namelist_file(driverobj): + src = driverobj._rundir / "input.nml.in" with open(src, "w", encoding="utf-8") as f: yaml.dump({}, f) - dst = fv3obj._rundir / "input.nml" + dst = driverobj._rundir / "input.nml" assert not dst.is_file() - fv3obj._driver_config["namelist_file"] = {"base_file": src} - fv3obj.namelist_file() + driverobj._driver_config["namelist_file"] = {"base_file": src} + driverobj.namelist_file() assert dst.is_file() -def test_FV3_provisioned_run_directory(fv3obj): +def test_FV3_provisioned_run_directory(driverobj): with patch.multiple( - fv3obj, + driverobj, boundary_files=D, diag_table=D, field_table=D, @@ -163,35 +162,35 @@ def test_FV3_provisioned_run_directory(fv3obj): restart_directory=D, runscript=D, ) as mocks: - fv3obj.provisioned_run_directory() + driverobj.provisioned_run_directory() for m in mocks: mocks[m].assert_called_once_with() -def test_FV3_restart_directory(fv3obj): - path = fv3obj._rundir / "RESTART" +def test_FV3_restart_directory(driverobj): + path = driverobj._rundir / "RESTART" assert not path.is_dir() - fv3obj.restart_directory() + driverobj.restart_directory() assert path.is_dir() -def test_FV3_run_batch(fv3obj): - with patch.object(fv3obj, "_run_via_batch_submission") as func: - fv3obj.run() +def test_FV3_run_batch(driverobj): + with patch.object(driverobj, "_run_via_batch_submission") as func: + driverobj.run() func.assert_called_once_with() -def test_FV3_run_local(fv3obj): - 
fv3obj._batch = False - with patch.object(fv3obj, "_run_via_local_execution") as func: - fv3obj.run() +def test_FV3_run_local(driverobj): + driverobj._batch = False + with patch.object(driverobj, "_run_via_local_execution") as func: + driverobj.run() func.assert_called_once_with() -def test_FV3_runscript(fv3obj): - dst = fv3obj._rundir / "runscript" +def test_FV3_runscript(driverobj): + dst = driverobj._rundir / "runscript" assert not dst.is_file() - fv3obj._driver_config["execution"].update( + driverobj._driver_config["execution"].update( { "batchargs": {"walltime": "01:10:00"}, "envcmds": ["cmd1", "cmd2"], @@ -199,8 +198,8 @@ def test_FV3_runscript(fv3obj): "threads": 8, } ) - fv3obj._config["platform"] = {"account": "me", "scheduler": "slurm"} - fv3obj.runscript() + driverobj._config["platform"] = {"account": "me", "scheduler": "slurm"} + driverobj.runscript() with open(dst, "r", encoding="utf-8") as f: lines = f.read().split("\n") # Check directives: @@ -217,371 +216,57 @@ def test_FV3_runscript(fv3obj): assert "cmd2" in lines # Check execution: assert "runit fv3" in lines - assert "test $? -eq 0 && touch %s/done" % fv3obj._rundir + assert "test $? 
-eq 0 && touch %s/done" % driverobj._rundir -def test_FV3__run_via_batch_submission(fv3obj): - runscript = fv3obj._runscript_path - with patch.object(fv3obj, "provisioned_run_directory") as prd: +def test_FV3__run_via_batch_submission(driverobj): + runscript = driverobj._runscript_path + with patch.object(driverobj, "provisioned_run_directory") as prd: with patch.object(fv3.FV3, "_scheduler", new_callable=PropertyMock) as scheduler: - fv3obj._run_via_batch_submission() + driverobj._run_via_batch_submission() scheduler().submit_job.assert_called_once_with( runscript=runscript, submit_file=Path(f"{runscript}.submit") ) prd.assert_called_once_with() -def test_FV3__run_via_local_execution(fv3obj): - with patch.object(fv3obj, "provisioned_run_directory") as prd: +def test_FV3__run_via_local_execution(driverobj): + with patch.object(driverobj, "provisioned_run_directory") as prd: with patch.object(fv3, "execute") as execute: - fv3obj._run_via_local_execution() + driverobj._run_via_local_execution() execute.assert_called_once_with( - cmd="{x} >{x}.out 2>&1".format(x=fv3obj._runscript_path), - cwd=fv3obj._rundir, + cmd="{x} >{x}.out 2>&1".format(x=driverobj._runscript_path), + cwd=driverobj._rundir, log_output=True, ) prd.assert_called_once_with() -def test_FV3__driver_config(fv3obj): - assert fv3obj._driver_config == fv3obj._config["fv3"] +def test_FV3__driver_config(driverobj): + assert driverobj._driver_config == driverobj._config["fv3"] -def test_FV3__resources(fv3obj): +def test_FV3__resources(driverobj): account = "me" scheduler = "slurm" walltime = "01:10:00" - fv3obj._driver_config["execution"].update({"batchargs": {"walltime": walltime}}) - fv3obj._config["platform"] = {"account": account, "scheduler": scheduler} - assert fv3obj._resources == { + driverobj._driver_config["execution"].update({"batchargs": {"walltime": walltime}}) + driverobj._config["platform"] = {"account": account, "scheduler": scheduler} + assert driverobj._resources == { "account": account, - 
"rundir": fv3obj._rundir, + "rundir": driverobj._rundir, "scheduler": scheduler, "walltime": walltime, } -def test_FV3__runscript_path(fv3obj): - assert fv3obj._runscript_path == fv3obj._rundir / "runscript" +def test_FV3__runscript_path(driverobj): + assert driverobj._runscript_path == driverobj._rundir / "runscript" -def test_FV3__taskanme(fv3obj): - assert fv3obj._taskname("foo") == "20240201 18Z FV3 foo" +def test_FV3__taskanme(driverobj): + assert driverobj._taskname("foo") == "20240201 18Z FV3 foo" -def test_FV3__validate(fv3obj): - fv3obj._validate() - - -# Schema fixtures - - -@fixture -def field_table_vals(): - return ( - { - "foo": { - "longname": "foofoo", - "profile_type": {"name": "fixed", "surface_value": 1}, - "units": "cubits", - } - }, - { - "bar": { - "longname": "barbar", - "profile_type": {"name": "profile", "surface_value": 2, "top_value": 3}, - "units": "rods", - } - }, - ) - - -@fixture -def fcstprop(): - return partial(validator, "fv3.jsonschema", "properties", "fv3", "properties") - - -# Schema tests - - -def test_fv3_schema_filesToStage(): - errors = validator("fv3.jsonschema", "$defs", "filesToStage") - # The input must be an dict: - assert "is not of type 'object'" in errors([]) - # A str -> str dict is ok: - assert not errors({"file1": "/path/to/file1", "file2": "/path/to/file2"}) - # An empty dict is not allowed: - assert "does not have enough properties" in errors({}) - # Non-string values are not allowed: - assert "True is not of type 'string'" in errors({"file1": True}) - - -def test_fv3_schema_forecast(): - d = { - "domain": "regional", - "execution": {"executable": "fv3"}, - "lateral_boundary_conditions": {"interval_hours": 1, "offset": 0, "path": "/tmp/file"}, - "length": 3, - "run_dir": "/tmp", - } - errors = validator("fv3.jsonschema", "properties", "fv3") - # Basic correctness: - assert not errors(d) - # Some top-level keys are required: - for key in ("domain", "execution", "lateral_boundary_conditions", "length", "run_dir"): 
- assert f"'{key}' is a required property" in errors(with_del(d, key)) - # Some top-level keys are optional: - assert not errors( - { - **d, - "diag_table": "/path", - "field_table": {"base_file": "/path"}, - "files_to_copy": {"fn": "/path"}, - "files_to_link": {"fn": "/path"}, - "model_configure": {"base_file": "/path"}, - "namelist": {"base_file": "/path"}, - } - ) - # Additional top-level keys are not allowed: - assert "Additional properties are not allowed" in errors({**d, "foo": "bar"}) - - -def test_fv3_schema_forecast_diag_table(fcstprop): - errors = fcstprop("diag_table") - # String value is ok: - assert not errors("/path/to/file") - # Anything else is not: - assert "88 is not of type 'string'" in errors(88) - - -def test_fv3_schema_forecast_domain(fcstprop): - errors = fcstprop("domain") - # There is a fixed set of domain values: - assert "'foo' is not one of ['global', 'regional']" in errors("foo") - - -def test_fv3_schema_forecast_execution(fcstprop): - d = {"executable": "fv3"} - batchargs = {"batchargs": {"queue": "string", "walltime": "string"}} - mpiargs = {"mpiargs": ["--flag1", "--flag2"]} - threads = {"threads": 32} - errors = fcstprop("execution") - # Basic correctness: - assert not errors(d) - # batchargs may optionally be specified: - assert not errors({**d, **batchargs}) - # mpiargs may be optionally specified: - assert not errors({**d, **mpiargs}) - # threads may optionally be specified: - assert not errors({**d, **threads}) - # All properties are ok: - assert not errors({**d, **batchargs, **mpiargs, **threads}) - # Additional properties are not allowed: - assert "Additional properties are not allowed" in errors( - {**d, **mpiargs, **threads, "foo": "bar"} - ) - - -def test_fv3_schema_forecast_execution_batchargs(fcstprop): - errors = fcstprop("execution", "properties", "batchargs") - # Basic correctness, empty map is ok: - assert not errors({}) - # Managed properties are fine: - assert not errors({"queue": "string", "walltime": "string"}) - 
# But so are unknown ones: - assert not errors({"--foo": 88}) - # It just has to be a map: - assert "[] is not of type 'object'" in errors([]) - - -def test_fv3_schema_forecast_execution_executable(fcstprop): - errors = fcstprop("execution", "properties", "executable") - # String value is ok: - assert not errors("fv3.exe") - # Anything else is not: - assert "88 is not of type 'string'" in errors(88) - - -def test_fv3_schema_forecast_execution_mpiargs(fcstprop): - errors = fcstprop("execution", "properties", "mpiargs") - # Basic correctness: - assert not errors(["string1", "string2"]) - # mpiargs may be empty: - assert not errors([]) - # String values are expected: - assert "88 is not of type 'string'" in errors(["string1", 88]) - - -def test_fv3_schema_forecast_execution_threads(fcstprop): - errors = fcstprop("execution", "properties", "threads") - # threads must be non-negative, and an integer: - assert not errors(0) - assert not errors(4) - assert "-1 is less than the minimum of 0" in errors(-1) - assert "3.14 is not of type 'integer'" in errors(3.14) - - -def test_fv3_schema_forecast_field_table(fcstprop, field_table_vals): - val, _ = field_table_vals - base_file = {"base_file": "/some/path"} - update_values = {"update_values": val} - errors = fcstprop("field_table") - # Just base_file is ok: - assert not errors(base_file) - # Just update_values is ok: - assert not errors(update_values) - # A combination of base_file and update_values is ok: - assert not errors({**base_file, **update_values}) - # At least one is required: - assert "is not valid" in errors({}) - - -def test_fv3_schema_forecast_field_table_update_values(fcstprop, field_table_vals): - val1, val2 = field_table_vals - errors = fcstprop("field_table", "properties", "update_values") - # A "fixed" profile-type entry is ok: - assert not errors(val1) - # A "profile" profile-type entry is ok: - assert not errors(val2) - # A combination of two valid entries is ok: - assert not errors({**val1, **val2}) - # 
At least one entry is required: - assert "does not have enough properties" in errors({}) - # longname is required: - assert "'longname' is a required property" in errors(with_del(val1, "foo", "longname")) - # longname must be a string: - assert "88 is not of type 'string'" in errors(with_set(val1, 88, "foo", "longname")) - # units is required: - assert "'units' is a required property" in errors(with_del(val1, "foo", "units")) - # units must be a string: - assert "88 is not of type 'string'" in errors(with_set(val1, 88, "foo", "units")) - # profile_type is required: - assert "'profile_type' is a required property" in errors(with_del(val1, "foo", "profile_type")) - # profile_type name has to be "fixed" or "profile": - assert "'bogus' is not one of ['fixed', 'profile']" in errors( - with_set(val1, "bogus", "foo", "profile_type", "name") - ) - # surface_value is required: - assert "'surface_value' is a required property" in errors( - with_del(val1, "foo", "profile_type", "surface_value") - ) - # surface_value is numeric: - assert "'a string' is not of type 'number'" in errors( - with_set(val1, "a string", "foo", "profile_type", "surface_value") - ) - # top_value is required if name is "profile": - assert "'top_value' is a required property" in errors( - with_del(val2, "bar", "profile_type", "top_value") - ) - # top_value is numeric: - assert "'a string' is not of type 'number'" in errors( - with_set(val2, "a string", "bar", "profile_type", "top_value") - ) - - -def test_fv3_schema_forecast_files_to_copy(): - test_fv3_schema_filesToStage() - - -def test_fv3_schema_forecast_files_to_link(): - test_fv3_schema_filesToStage() - - -def test_fv3_schema_forecast_lateral_boundary_conditions(fcstprop): - d = { - "interval_hours": 1, - "offset": 0, - "path": "/some/path", - } - errors = fcstprop("lateral_boundary_conditions") - # Basic correctness: - assert not errors(d) - # All lateral_boundary_conditions items are required: - assert "'interval_hours' is a required property" in 
errors(with_del(d, "interval_hours")) - assert "'offset' is a required property" in errors(with_del(d, "offset")) - assert "'path' is a required property" in errors(with_del(d, "path")) - # interval_hours must be an integer of at least 1: - assert "0 is less than the minimum of 1" in errors(with_set(d, 0, "interval_hours")) - assert "'s' is not of type 'integer'" in errors(with_set(d, "s", "interval_hours")) - # offset must be an integer of at least 0: - assert "-1 is less than the minimum of 0" in errors(with_set(d, -1, "offset")) - assert "'s' is not of type 'integer'" in errors(with_set(d, "s", "offset")) - # path must be a string: - assert "88 is not of type 'string'" in errors(with_set(d, 88, "path")) - - -def test_fv3_schema_forecast_length(fcstprop): - errors = fcstprop("length") - # Positive int is ok: - assert not errors(6) - # Zero is not ok: - assert "0 is less than the minimum of 1" in errors(0) - # A negative number is not ok: - assert "-1 is less than the minimum of 1" in errors(-1) - # Something other than an int is not ok: - assert "'a string' is not of type 'integer'" in errors("a string") - - -def test_fv3_schema_forecast_model_configure(fcstprop): - base_file = {"base_file": "/some/path"} - update_values = {"update_values": {"foo": 88}} - errors = fcstprop("model_configure") - # Just base_file is ok: - assert not errors(base_file) - # But base_file must be a string: - assert "88 is not of type 'string'" in errors({"base_file": 88}) - # Just update_values is ok: - assert not errors(update_values) - # A combination of base_file and update_values is ok: - assert not errors({**base_file, **update_values}) - # At least one is required: - assert "is not valid" in errors({}) - - -def test_fv3_schema_forecast_model_configure_update_values(fcstprop): - errors = fcstprop("model_configure", "properties", "update_values") - # boolean, number, and string values are ok: - assert not errors({"bool": True, "int": 88, "float": 3.14, "string": "foo"}) - # Other 
types are not, e.g.: - assert "None is not of type 'boolean', 'number', 'string'" in errors({"null": None}) - # At least one entry is required: - assert "does not have enough properties" in errors({}) - - -def test_fv3_schema_forecast_namelist(fcstprop): - base_file = {"base_file": "/some/path"} - update_values = {"update_values": {"nml": {"var": "val"}}} - errors = fcstprop("namelist") - # Just base_file is ok: - assert not errors(base_file) - # base_file must be a string: - assert "88 is not of type 'string'" in errors({"base_file": 88}) - # Just update_values is ok: - assert not errors(update_values) - # A combination of base_file and update_values is ok: - assert not errors({**base_file, **update_values}) - # At least one is required: - assert "is not valid" in errors({}) - - -def test_fv3_schema_forecast_namelist_update_values(fcstprop): - errors = fcstprop("namelist", "properties", "update_values") - # array, boolean, number, and string values are ok: - assert not errors( - {"nml": {"array": [1, 2, 3], "bool": True, "int": 88, "float": 3.14, "string": "foo"}} - ) - # Other types are not, e.g.: - assert "None is not of type 'array', 'boolean', 'number', 'string'" in errors( - {"nml": {"null": None}} - ) - # At least one namelist entry is required: - assert "does not have enough properties" in errors({}) - # At least one val/var pair ir required: - assert "does not have enough properties" in errors({"nml": {}}) - - -def test_fv3_schema_forecast_run_dir(fcstprop): - errors = fcstprop("run_dir") - # Must be a string: - assert not errors("/some/path") - assert "88 is not of type 'string'" in errors(88) +def test_FV3__validate(driverobj): + driverobj._validate() diff --git a/src/uwtools/tests/drivers/test_schema_platform.py b/src/uwtools/tests/drivers/test_schema_platform.py deleted file mode 100644 index 8360885fc..000000000 --- a/src/uwtools/tests/drivers/test_schema_platform.py +++ /dev/null @@ -1,23 +0,0 @@ -# pylint: disable=missing-function-docstring -""" 
-Tests for the "platform" schema. -""" - -from uwtools.tests.support import validator, with_del, with_set - - -def test_fv3_schema_platform(): - d = {"account": "me", "scheduler": "slurm"} - errors = validator("platform.jsonschema", "properties", "platform") - # Basic correctness: - assert not errors(d) - # Extra top-level keys are forbidden: - assert "Additional properties are not allowed" in errors(with_set(d, "bar", "foo")) - # There is a fixed set of supported schedulers: - assert "'foo' is not one of ['lsf', 'pbs', 'slurm']" in errors(with_set(d, "foo", "scheduler")) - # account and scheduler are optional: - assert not errors({}) - # account is required if scheduler is specified: - assert "'account' is a dependency of 'scheduler'" in errors(with_del(d, "account")) - # scheduler is required if account is specified: - assert "'scheduler' is a dependency of 'account'" in errors(with_del(d, "scheduler")) diff --git a/src/uwtools/tests/drivers/test_sfc_climo_gen.py b/src/uwtools/tests/drivers/test_sfc_climo_gen.py new file mode 100644 index 000000000..911f6af6c --- /dev/null +++ b/src/uwtools/tests/drivers/test_sfc_climo_gen.py @@ -0,0 +1,196 @@ +# pylint: disable=missing-function-docstring,protected-access,redefined-outer-name +""" +sfc_climo_gen driver tests. 
+""" +from pathlib import Path +from unittest.mock import DEFAULT as D +from unittest.mock import PropertyMock, patch + +import f90nml # type: ignore +import yaml +from iotaa import asset, external +from pytest import fixture + +from uwtools.drivers import sfc_climo_gen + +config: dict = { + "sfc_climo_gen": { + "execution": { + "batchargs": { + "export": "NONE", + "nodes": 1, + "stdout": "/path/to/file", + "walltime": "00:02:00", + }, + "envcmds": ["cmd1", "cmd2"], + "executable": "/path/to/sfc_climo_gen", + "mpiargs": ["--export=ALL", "--ntasks $SLURM_CPUS_ON_NODE"], + "mpicmd": "srun", + }, + "namelist": { + "update_values": { + "config": { + "halo": 4, + "input_facsf_file": "/path/to/file", + "input_maximum_snow_albedo_file": "/path/to/file", + "input_slope_type_file": "/path/to/file", + "input_snowfree_albedo_file": "/path/to/file", + "input_soil_type_file": "/path/to/file", + "input_substrate_temperature_file": "/path/to/file", + "input_vegetation_greenness_file": "/path/to/file", + "input_vegetation_type_file": "/path/to/file", + "maximum_snow_albedo_method": "bilinear", + "mosaic_file_mdl": "/path/to/file", + "orog_dir_mdl": "/path/to/dir", + "orog_files_mdl": ["C403_oro_data.tile7.halo4.nc"], + "snowfree_albedo_method": "bilinear", + "vegetation_greenness_method": "bilinear", + } + } + }, + "run_dir": "/path/to/dir", + }, + "platform": { + "account": "me", + "scheduler": "slurm", + }, +} + + +@fixture +def config_file(tmp_path): + path = tmp_path / "config.yaml" + with open(path, "w", encoding="utf-8") as f: + yaml.dump(config, f) + return path + + +@fixture +def driverobj(config_file): + return sfc_climo_gen.SfcClimoGen(config_file=config_file, batch=True) + + +# Driver tests + + +def test_SfcClimoGen(driverobj): + assert isinstance(driverobj, sfc_climo_gen.SfcClimoGen) + + +def test_SfcClimoGen_dry_run(config_file): + with patch.object(sfc_climo_gen, "dryrun") as dryrun: + driverobj = sfc_climo_gen.SfcClimoGen(config_file=config_file, batch=True, 
dry_run=True) + assert driverobj._dry_run is True + dryrun.assert_called_once_with() + + +def test_SfcClimoGen_namelist_file(driverobj, tmp_path): + @external + def ready(x): + yield x + yield asset(x, lambda: True) + + driverobj._rundir = tmp_path + dst = driverobj._rundir / "fort.41" + assert not dst.is_file() + with patch.object(sfc_climo_gen, "file", new=ready): + driverobj.namelist_file() + assert dst.is_file() + assert isinstance(f90nml.read(dst), f90nml.Namelist) + + +def test_SfcClimoGen_provisioned_run_directory(driverobj): + with patch.multiple( + driverobj, + namelist_file=D, + runscript=D, + ) as mocks: + driverobj.provisioned_run_directory() + for m in mocks: + mocks[m].assert_called_once_with() + + +def test_SfcClimoGen_run_batch(driverobj): + with patch.object(driverobj, "_run_via_batch_submission") as func: + driverobj.run() + func.assert_called_once_with() + + +def test_SfcClimoGen_run_local(driverobj): + driverobj._batch = False + with patch.object(driverobj, "_run_via_local_execution") as func: + driverobj.run() + func.assert_called_once_with() + + +def test_SfcClimoGen_runscript(driverobj, tmp_path): + driverobj._rundir = tmp_path + dst = driverobj._rundir / "runscript" + assert not dst.is_file() + driverobj.runscript() + with open(dst, "r", encoding="utf-8") as f: + lines = f.read().split("\n") + # Check directives: + assert "#SBATCH --account=me" in lines + assert "#SBATCH --time=00:02:00" in lines + # Check environment commands: + assert "cmd1" in lines + assert "cmd2" in lines + # Check execution: + assert "srun --export=ALL --ntasks $SLURM_CPUS_ON_NODE /path/to/sfc_climo_gen" in lines + assert "test $? 
-eq 0 && touch %s/done" % driverobj._rundir + + +def test_SfcClimoGen__run_via_batch_submission(driverobj): + runscript = driverobj._runscript_path + with patch.object(driverobj, "provisioned_run_directory") as prd: + with patch.object( + sfc_climo_gen.SfcClimoGen, "_scheduler", new_callable=PropertyMock + ) as scheduler: + driverobj._run_via_batch_submission() + scheduler().submit_job.assert_called_once_with( + runscript=runscript, submit_file=Path(f"{runscript}.submit") + ) + prd.assert_called_once_with() + + +def test_SfcClimoGen__run_via_local_execution(driverobj): + with patch.object(driverobj, "provisioned_run_directory") as prd: + with patch.object(sfc_climo_gen, "execute") as execute: + driverobj._run_via_local_execution() + execute.assert_called_once_with( + cmd="{x} >{x}.out 2>&1".format(x=driverobj._runscript_path), + cwd=driverobj._rundir, + log_output=True, + ) + prd.assert_called_once_with() + + +def test_SfcClimoGen__driver_config(driverobj): + assert driverobj._driver_config == driverobj._config["sfc_climo_gen"] + + +def test_SfcClimoGen__resources(driverobj): + account = "me" + scheduler = "slurm" + walltime = "01:10:00" + driverobj._driver_config["execution"].update({"batchargs": {"walltime": walltime}}) + driverobj._config["platform"] = {"account": account, "scheduler": scheduler} + assert driverobj._resources == { + "account": account, + "rundir": driverobj._rundir, + "scheduler": scheduler, + "walltime": walltime, + } + + +def test_SfcClimoGen__runscript_path(driverobj): + assert driverobj._runscript_path == driverobj._rundir / "runscript" + + +def test_SfcClimoGen__taskanme(driverobj): + assert driverobj._taskname("foo") == "sfc_climo_gen foo" + + +def test_SfcClimoGen__validate(driverobj): + driverobj._validate() diff --git a/src/uwtools/tests/support.py b/src/uwtools/tests/support.py index 3e87a610c..ee5634558 100644 --- a/src/uwtools/tests/support.py +++ b/src/uwtools/tests/support.py @@ -10,7 +10,7 @@ from _pytest.logging import 
LogCaptureFixture from uwtools.config.validator import _validation_errors -from uwtools.utils.file import resource_pathobj +from uwtools.utils.file import resource_path def compare_files(path1: str, path2: str) -> bool: @@ -94,16 +94,18 @@ def regex_logged(caplog: LogCaptureFixture, msg: str) -> bool: return any(pattern.search(record.message) for record in caplog.records) -def validator(schema_fn: str, *args: Any) -> Callable: +def schema_validator(schema_name: str, *args: Any) -> Callable: """ Create a lambda that returns errors from validating a config input. - :param schema_fn: The schema filename, relative to package resources. + :param schema_name: The uwtools schema name. :param args: Keys leading to sub-schema to be used to validate eventual input. :returns: A lambda that, when called with an input to test, returns a string (possibly empty) containing the validation errors. """ - with open(resource_pathobj(schema_fn), "r", encoding="utf-8") as f: + with open( + resource_path("jsonschema") / f"{schema_name}.jsonschema", "r", encoding="utf-8" + ) as f: schema = yaml.safe_load(f) defs = schema.get("$defs", {}) for arg in args: diff --git a/src/uwtools/tests/test_cli.py b/src/uwtools/tests/test_cli.py index 2ae1f8bc3..b0a86ad3b 100644 --- a/src/uwtools/tests/test_cli.py +++ b/src/uwtools/tests/test_cli.py @@ -13,8 +13,10 @@ import uwtools.api.config import uwtools.api.fv3 import uwtools.api.rocoto +import uwtools.api.sfc_climo_gen import uwtools.api.template import uwtools.drivers.fv3 +import uwtools.drivers.sfc_climo_gen from uwtools import cli from uwtools.cli import STR from uwtools.exceptions import UWError @@ -69,6 +71,31 @@ def test__add_subparser_fv3(subparsers): ] +def test__add_subparser_rocoto(subparsers): + cli._add_subparser_rocoto(subparsers) + assert subparsers.choices[STR.rocoto] + + +def test__add_subparser_rocoto_realize(subparsers): + cli._add_subparser_rocoto_realize(subparsers) + assert subparsers.choices[STR.realize] + + +def 
test__add_subparser_rocoto_validate(subparsers): + cli._add_subparser_rocoto_validate(subparsers) + assert subparsers.choices[STR.validate] + + +def test__add_subparser_sfc_climo_gen(subparsers): + cli._add_subparser_sfc_climo_gen(subparsers) + assert actions(subparsers.choices[STR.sfcclimogen]) == [ + "namelist_file", + "provisioned_run_directory", + "run", + "runscript", + ] + + def test__add_subparser_template(subparsers): cli._add_subparser_template(subparsers) assert actions(subparsers.choices[STR.template]) == [STR.render, STR.translate] @@ -324,6 +351,17 @@ def test__dispatch_rocoto_validate_xml_no_optional(): validate.assert_called_once_with(xml_file=None) +def test__dispatch_sfc_climo_gen(): + args: dict = { + "batch": True, + "config_file": "config.yaml", + "dry_run": False, + } + with patch.object(uwtools.api.sfc_climo_gen, "execute") as execute: + cli._dispatch_sfc_climo_gen({**args, "action": "foo"}) + execute.assert_called_once_with(**{**args, "task": "foo"}) + + @pytest.mark.parametrize( "params", [(STR.render, "_dispatch_template_render"), (STR.translate, "_dispatch_template_translate")], diff --git a/src/uwtools/tests/test_rocoto.py b/src/uwtools/tests/test_rocoto.py index 9dacad4db..690c34b16 100644 --- a/src/uwtools/tests/test_rocoto.py +++ b/src/uwtools/tests/test_rocoto.py @@ -13,7 +13,7 @@ from uwtools import rocoto from uwtools.config.formats.yaml import YAMLConfig from uwtools.exceptions import UWConfigError, UWError -from uwtools.tests.support import fixture_path, validator +from uwtools.tests.support import fixture_path # Fixtures @@ -433,86 +433,3 @@ def test_dump(self, instance, tmp_path): path = tmp_path / "out.xml" instance.dump(path=path) assert rocoto.validate_rocoto_xml_file(path) - - -# Schema tests - - -def test_rocoto_schema_compoundTimeString(): - errors = validator("rocoto.jsonschema", "$defs", "compoundTimeString") - # Just a string is ok: - assert not errors("foo") - # An int value is ok: - assert not errors(20240103120000) - 
# A simple cycle string is ok: - assert not errors({"cyclestr": {"value": "@Y@m@d@H"}}) - # The "value" entry is required: - assert "is not valid" in errors({"cyclestr": {}}) - # Unknown properties are not allowed: - assert "is not valid" in errors({"cyclestr": {"foo": "bar"}}) - # An "offset" attribute may be provided: - assert not errors({"cyclestr": {"value": "@Y@m@d@H", "attrs": {"offset": "06:00:00"}}}) - # The "offset" value must be a valid time string: - assert "is not valid" in errors({"cyclestr": {"value": "@Y@m@d@H", "attrs": {"offset": "x"}}}) - - -def test_rocoto_schema_dependency_sh(): - errors = validator("rocoto.jsonschema", "$defs", "dependency") - # Basic spec: - assert not errors({"sh": {"command": "foo"}}) - # The "command" property is mandatory: - assert "command' is a required property" in errors({"sh": {}}) - # A _ suffix is allowed: - assert not errors({"sh_foo": {"command": "foo"}}) - # Optional attributes "runopt" and "shell" are supported: - assert not errors( - {"sh_foo": {"attrs": {"runopt": "-c", "shell": "/bin/bash"}, "command": "foo"}} - ) - # Other attributes are not allowed: - assert "Additional properties are not allowed ('color' was unexpected)" in errors( - {"sh_foo": {"attrs": {"color": "blue"}, "command": "foo"}} - ) - # The command is a compoundTimeString: - assert not errors({"sh": {"command": {"cyclestr": {"value": "foo-@Y@m@d@H"}}}}) - - -def test_rocoto_schema_metatask_attrs(): - errors = validator("rocoto.jsonschema", "$defs", "metatask", "properties", "attrs") - # Valid modes are "parallel" and "serial": - assert not errors({"mode": "parallel"}) - assert not errors({"mode": "serial"}) - assert "'foo' is not one of ['parallel', 'serial']" in errors({"mode": "foo"}) - # Positive int is ok for throttle: - assert not errors({"throttle": 88}) - assert not errors({"throttle": 0}) - assert "-1 is less than the minimum of 0" in errors({"throttle": -1}) - assert "'foo' is not of type 'integer'" in errors({"throttle": "foo"}) - - 
-def test_rocoto_schema_workflow_cycledef(): - errors = validator("rocoto.jsonschema", "properties", "workflow", "properties", "cycledef") - # Basic spec: - spec = "202311291200 202312011200 06:00:00" - assert not errors([{"spec": spec}]) - # Spec with step specified as seconds: - assert not errors([{"spec": "202311291200 202312011200 3600"}]) - # Basic spec with group attribute: - assert not errors([{"attrs": {"group": "g"}, "spec": spec}]) - # Spec with positive activation offset attribute: - assert not errors([{"attrs": {"activation_offset": "12:00:00"}, "spec": spec}]) - # Spec with negative activation offset attribute: - assert not errors([{"attrs": {"activation_offset": "-12:00:00"}, "spec": spec}]) - # Spec with activation offset specified as positive seconds: - assert not errors([{"attrs": {"activation_offset": 3600}, "spec": spec}]) - # Spec with activation offset specified as negative seconds: - assert not errors([{"attrs": {"activation_offset": -3600}, "spec": spec}]) - # Property spec is required: - assert "'spec' is a required property" in errors([{}]) - # Additional properties are not allowed: - assert "'foo' was unexpected" in errors([{"spec": spec, "foo": "bar"}]) - # Additional attributes are not allowed: - assert "'foo' was unexpected" in errors([{"attrs": {"foo": "bar"}, "spec": spec}]) - # Bad spec: - assert "'x 202312011200 06:00:00' is not valid" in errors([{"spec": "x 202312011200 06:00:00"}]) - # Spec with bad activation offset attribute: - assert "'foo' is not valid" in errors([{"attrs": {"activation_offset": "foo"}, "spec": spec}]) diff --git a/src/uwtools/tests/test_schemas.py b/src/uwtools/tests/test_schemas.py new file mode 100644 index 000000000..daa440092 --- /dev/null +++ b/src/uwtools/tests/test_schemas.py @@ -0,0 +1,465 @@ +# pylint: disable=missing-function-docstring,redefined-outer-name +""" +Granular tests of JSON Schema schemas. 
+""" + +from functools import partial + +from pytest import fixture + +from uwtools.tests.drivers import test_sfc_climo_gen +from uwtools.tests.support import schema_validator, with_del, with_set + +# execution + + +def test_execution(): + d = {"executable": "fv3"} + batchargs = {"batchargs": {"queue": "string", "walltime": "string"}} + mpiargs = {"mpiargs": ["--flag1", "--flag2"]} + threads = {"threads": 32} + errors = schema_validator("execution") + # Basic correctness: + assert not errors(d) + # batchargs may optionally be specified: + assert not errors({**d, **batchargs}) + # mpiargs may be optionally specified: + assert not errors({**d, **mpiargs}) + # threads may optionally be specified: + assert not errors({**d, **threads}) + # All properties are ok: + assert not errors({**d, **batchargs, **mpiargs, **threads}) + # Additional properties are not allowed: + assert "Additional properties are not allowed" in errors( + {**d, **mpiargs, **threads, "foo": "bar"} + ) + + +def test_execution_batchargs(): + errors = schema_validator("execution", "properties", "batchargs") + # Basic correctness, empty map is ok: + assert not errors({}) + # Managed properties are fine: + assert not errors({"queue": "string", "walltime": "string"}) + # But so are unknown ones: + assert not errors({"--foo": 88}) + # It just has to be a map: + assert "[] is not of type 'object'" in errors([]) + + +def test_execution_executable(): + errors = schema_validator("execution", "properties", "executable") + # String value is ok: + assert not errors("fv3.exe") + # Anything else is not: + assert "88 is not of type 'string'" in errors(88) + + +def test_execution_mpiargs(): + errors = schema_validator("execution", "properties", "mpiargs") + # Basic correctness: + assert not errors(["string1", "string2"]) + # mpiargs may be empty: + assert not errors([]) + # String values are expected: + assert "88 is not of type 'string'" in errors(["string1", 88]) + + +def test_execution_threads(): + errors = 
schema_validator("execution", "properties", "threads") + # threads must be non-negative, and an integer: + assert not errors(0) + assert not errors(4) + assert "-1 is less than the minimum of 0" in errors(-1) + assert "3.14 is not of type 'integer'" in errors(3.14) + + +# files-to-stage + + +def test_schema_files_to_stage(): + errors = schema_validator("files-to-stage") + # The input must be a dict: + assert "is not of type 'object'" in errors([]) + # A str -> str dict is ok: + assert not errors({"file1": "/path/to/file1", "file2": "/path/to/file2"}) + # An empty dict is not allowed: + assert "does not have enough properties" in errors({}) + # Non-string values are not allowed: + assert "True is not of type 'string'" in errors({"file1": True}) + + +# fv3 + + +@fixture +def fv3_field_table_vals(): + return ( + { + "foo": { + "longname": "foofoo", + "profile_type": {"name": "fixed", "surface_value": 1}, + "units": "cubits", + } + }, + { + "bar": { + "longname": "barbar", + "profile_type": {"name": "profile", "surface_value": 2, "top_value": 3}, + "units": "rods", + } + }, + ) + + +@fixture +def fv3_fcstprop(): + return partial(schema_validator, "fv3", "properties", "fv3", "properties") + + +def test_schema_fv3(): + d = { + "domain": "regional", + "execution": {"executable": "fv3"}, + "lateral_boundary_conditions": {"interval_hours": 1, "offset": 0, "path": "/tmp/file"}, + "length": 3, + "run_dir": "/tmp", + } + errors = schema_validator("fv3", "properties", "fv3") + # Basic correctness: + assert not errors(d) + # Some top-level keys are required: + for key in ("domain", "execution", "lateral_boundary_conditions", "length", "run_dir"): + assert f"'{key}' is a required property" in errors(with_del(d, key)) + # Some top-level keys are optional: + assert not errors( + { + **d, + "diag_table": "/path", + "field_table": {"base_file": "/path"}, + "files_to_copy": {"fn": "/path"}, + "files_to_link": {"fn": "/path"}, + "model_configure": {"base_file": "/path"}, + "namelist": 
{"base_file": "/path"}, + } + ) + # Additional top-level keys are not allowed: + assert "Additional properties are not allowed" in errors({**d, "foo": "bar"}) + + +def test_schema_fv3_diag_table(fv3_fcstprop): + errors = fv3_fcstprop("diag_table") + # String value is ok: + assert not errors("/path/to/file") + # Anything else is not: + assert "88 is not of type 'string'" in errors(88) + + +def test_schema_fv3_domain(fv3_fcstprop): + errors = fv3_fcstprop("domain") + # There is a fixed set of domain values: + assert "'foo' is not one of ['global', 'regional']" in errors("foo") + + +def test_schema_fv3_field_table(fv3_fcstprop, fv3_field_table_vals): + val, _ = fv3_field_table_vals + base_file = {"base_file": "/some/path"} + update_values = {"update_values": val} + errors = fv3_fcstprop("field_table") + # Just base_file is ok: + assert not errors(base_file) + # Just update_values is ok: + assert not errors(update_values) + # A combination of base_file and update_values is ok: + assert not errors({**base_file, **update_values}) + # At least one is required: + assert "is not valid" in errors({}) + + +def test_schema_fv3_field_table_update_values(fv3_fcstprop, fv3_field_table_vals): + val1, val2 = fv3_field_table_vals + errors = fv3_fcstprop("field_table", "properties", "update_values") + # A "fixed" profile-type entry is ok: + assert not errors(val1) + # A "profile" profile-type entry is ok: + assert not errors(val2) + # A combination of two valid entries is ok: + assert not errors({**val1, **val2}) + # At least one entry is required: + assert "does not have enough properties" in errors({}) + # longname is required: + assert "'longname' is a required property" in errors(with_del(val1, "foo", "longname")) + # longname must be a string: + assert "88 is not of type 'string'" in errors(with_set(val1, 88, "foo", "longname")) + # units is required: + assert "'units' is a required property" in errors(with_del(val1, "foo", "units")) + # units must be a string: + assert "88 is 
not of type 'string'" in errors(with_set(val1, 88, "foo", "units")) + # profile_type is required: + assert "'profile_type' is a required property" in errors(with_del(val1, "foo", "profile_type")) + # profile_type name has to be "fixed" or "profile": + assert "'bogus' is not one of ['fixed', 'profile']" in errors( + with_set(val1, "bogus", "foo", "profile_type", "name") + ) + # surface_value is required: + assert "'surface_value' is a required property" in errors( + with_del(val1, "foo", "profile_type", "surface_value") + ) + # surface_value is numeric: + assert "'a string' is not of type 'number'" in errors( + with_set(val1, "a string", "foo", "profile_type", "surface_value") + ) + # top_value is required if name is "profile": + assert "'top_value' is a required property" in errors( + with_del(val2, "bar", "profile_type", "top_value") + ) + # top_value is numeric: + assert "'a string' is not of type 'number'" in errors( + with_set(val2, "a string", "bar", "profile_type", "top_value") + ) + + +def test_schema_fv3_lateral_boundary_conditions(fv3_fcstprop): + d = { + "interval_hours": 1, + "offset": 0, + "path": "/some/path", + } + errors = fv3_fcstprop("lateral_boundary_conditions") + # Basic correctness: + assert not errors(d) + # All lateral_boundary_conditions items are required: + assert "'interval_hours' is a required property" in errors(with_del(d, "interval_hours")) + assert "'offset' is a required property" in errors(with_del(d, "offset")) + assert "'path' is a required property" in errors(with_del(d, "path")) + # interval_hours must be an integer of at least 1: + assert "0 is less than the minimum of 1" in errors(with_set(d, 0, "interval_hours")) + assert "'s' is not of type 'integer'" in errors(with_set(d, "s", "interval_hours")) + # offset must be an integer of at least 0: + assert "-1 is less than the minimum of 0" in errors(with_set(d, -1, "offset")) + assert "'s' is not of type 'integer'" in errors(with_set(d, "s", "offset")) + # path must be a string: 
+ assert "88 is not of type 'string'" in errors(with_set(d, 88, "path")) + + +def test_schema_fv3_length(fv3_fcstprop): + errors = fv3_fcstprop("length") + # Positive int is ok: + assert not errors(6) + # Zero is not ok: + assert "0 is less than the minimum of 1" in errors(0) + # A negative number is not ok: + assert "-1 is less than the minimum of 1" in errors(-1) + # Something other than an int is not ok: + assert "'a string' is not of type 'integer'" in errors("a string") + + +def test_schema_fv3_model_configure(fv3_fcstprop): + base_file = {"base_file": "/some/path"} + update_values = {"update_values": {"foo": 88}} + errors = fv3_fcstprop("model_configure") + # Just base_file is ok: + assert not errors(base_file) + # But base_file must be a string: + assert "88 is not of type 'string'" in errors({"base_file": 88}) + # Just update_values is ok: + assert not errors(update_values) + # A combination of base_file and update_values is ok: + assert not errors({**base_file, **update_values}) + # At least one is required: + assert "is not valid" in errors({}) + + +def test_schema_fv3_model_configure_update_values(fv3_fcstprop): + errors = fv3_fcstprop("model_configure", "properties", "update_values") + # boolean, number, and string values are ok: + assert not errors({"bool": True, "int": 88, "float": 3.14, "string": "foo"}) + # Other types are not, e.g.: + assert "None is not of type 'boolean', 'number', 'string'" in errors({"null": None}) + # At least one entry is required: + assert "does not have enough properties" in errors({}) + + +def test_schema_fv3_namelist(fv3_fcstprop): + base_file = {"base_file": "/some/path"} + update_values = {"update_values": {"nml": {"var": "val"}}} + errors = fv3_fcstprop("namelist") + # Just base_file is ok: + assert not errors(base_file) + # base_file must be a string: + assert "88 is not of type 'string'" in errors({"base_file": 88}) + # Just update_values is ok: + assert not errors(update_values) + # A combination of base_file and 
update_values is ok: + assert not errors({**base_file, **update_values}) + # At least one is required: + assert "is not valid" in errors({}) + + +def test_schema_fv3_namelist_update_values(fv3_fcstprop): + errors = fv3_fcstprop("namelist", "properties", "update_values") + # array, boolean, number, and string values are ok: + assert not errors( + {"nml": {"array": [1, 2, 3], "bool": True, "int": 88, "float": 3.14, "string": "foo"}} + ) + # Other types are not, e.g.: + assert "None is not of type 'array', 'boolean', 'number', 'string'" in errors( + {"nml": {"null": None}} + ) + # At least one namelist entry is required: + assert "does not have enough properties" in errors({}) + # At least one val/var pair is required: + assert "does not have enough properties" in errors({"nml": {}}) + + +def test_schema_fv3_run_dir(fv3_fcstprop): + errors = fv3_fcstprop("run_dir") + # Must be a string: + assert not errors("/some/path") + assert "88 is not of type 'string'" in errors(88) + + +# namelist + + +def test_schema_namelist(): + errors = schema_validator("namelist") + # Basic correctness (see also namelist_names_values test): + assert not errors( + { + "namelist": { + "array": [1, 2, 3], + "boolean": True, + "float": 3.14, + "integer": 88, + "string": "foo", + } + } + ) + # Other types at the name-value level are not allowed: + errormsg = "%s is not of type 'array', 'boolean', 'number', 'string'" + assert errormsg % "None" in errors({"namelist": {"nonetype": None}}) + assert errormsg % "{}" in errors({"namelist": {"dict": {}}}) + # Needs at least one namelist value: + assert "does not have enough properties" in errors({}) + # Needs at least one name-value value: + assert "does not have enough properties" in errors({"namelist": {}}) + # Name-value level must be a mapping: + assert "[] is not of type 'object'" in errors([]) + # Namelist level must be a mapping: + assert "[] is not of type 'object'" in errors({"namelist": []}) + + +# platform + + +def 
test_schema_platform(): + d = {"account": "me", "scheduler": "slurm"} + errors = schema_validator("platform", "properties", "platform") + # Basic correctness: + assert not errors(d) + # Extra top-level keys are forbidden: + assert "Additional properties are not allowed" in errors(with_set(d, "bar", "foo")) + # There is a fixed set of supported schedulers: + assert "'foo' is not one of ['lsf', 'pbs', 'slurm']" in errors(with_set(d, "foo", "scheduler")) + # account and scheduler are optional: + assert not errors({}) + # account is required if scheduler is specified: + assert "'account' is a dependency of 'scheduler'" in errors(with_del(d, "account")) + # scheduler is required if account is specified: + assert "'scheduler' is a dependency of 'account'" in errors(with_del(d, "scheduler")) + + +# rocoto + + +def test_schema_rocoto_compoundTimeString(): + errors = schema_validator("rocoto", "$defs", "compoundTimeString") + # Just a string is ok: + assert not errors("foo") + # An int value is ok: + assert not errors(20240103120000) + # A simple cycle string is ok: + assert not errors({"cyclestr": {"value": "@Y@m@d@H"}}) + # The "value" entry is required: + assert "is not valid" in errors({"cyclestr": {}}) + # Unknown properties are not allowed: + assert "is not valid" in errors({"cyclestr": {"foo": "bar"}}) + # An "offset" attribute may be provided: + assert not errors({"cyclestr": {"value": "@Y@m@d@H", "attrs": {"offset": "06:00:00"}}}) + # The "offset" value must be a valid time string: + assert "is not valid" in errors({"cyclestr": {"value": "@Y@m@d@H", "attrs": {"offset": "x"}}}) + + +def test_schema_rocoto_dependency_sh(): + errors = schema_validator("rocoto", "$defs", "dependency") + # Basic spec: + assert not errors({"sh": {"command": "foo"}}) + # The "command" property is mandatory: + assert "command' is a required property" in errors({"sh": {}}) + # A _ suffix is allowed: + assert not errors({"sh_foo": {"command": "foo"}}) + # Optional attributes "runopt" and 
"shell" are supported: + assert not errors( + {"sh_foo": {"attrs": {"runopt": "-c", "shell": "/bin/bash"}, "command": "foo"}} + ) + # Other attributes are not allowed: + assert "Additional properties are not allowed ('color' was unexpected)" in errors( + {"sh_foo": {"attrs": {"color": "blue"}, "command": "foo"}} + ) + # The command is a compoundTimeString: + assert not errors({"sh": {"command": {"cyclestr": {"value": "foo-@Y@m@d@H"}}}}) + + +def test_schema_rocoto_metatask_attrs(): + errors = schema_validator("rocoto", "$defs", "metatask", "properties", "attrs") + # Valid modes are "parallel" and "serial": + assert not errors({"mode": "parallel"}) + assert not errors({"mode": "serial"}) + assert "'foo' is not one of ['parallel', 'serial']" in errors({"mode": "foo"}) + # Positive int is ok for throttle: + assert not errors({"throttle": 88}) + assert not errors({"throttle": 0}) + assert "-1 is less than the minimum of 0" in errors({"throttle": -1}) + assert "'foo' is not of type 'integer'" in errors({"throttle": "foo"}) + + +def test_schema_rocoto_workflow_cycledef(): + errors = schema_validator("rocoto", "properties", "workflow", "properties", "cycledef") + # Basic spec: + spec = "202311291200 202312011200 06:00:00" + assert not errors([{"spec": spec}]) + # Spec with step specified as seconds: + assert not errors([{"spec": "202311291200 202312011200 3600"}]) + # Basic spec with group attribute: + assert not errors([{"attrs": {"group": "g"}, "spec": spec}]) + # Spec with positive activation offset attribute: + assert not errors([{"attrs": {"activation_offset": "12:00:00"}, "spec": spec}]) + # Spec with negative activation offset attribute: + assert not errors([{"attrs": {"activation_offset": "-12:00:00"}, "spec": spec}]) + # Spec with activation offset specified as positive seconds: + assert not errors([{"attrs": {"activation_offset": 3600}, "spec": spec}]) + # Spec with activation offset specified as negative seconds: + assert not errors([{"attrs": 
{"activation_offset": -3600}, "spec": spec}]) + # Property spec is required: + assert "'spec' is a required property" in errors([{}]) + # Additional properties are not allowed: + assert "'foo' was unexpected" in errors([{"spec": spec, "foo": "bar"}]) + # Additional attributes are not allowed: + assert "'foo' was unexpected" in errors([{"attrs": {"foo": "bar"}, "spec": spec}]) + # Bad spec: + assert "'x 202312011200 06:00:00' is not valid" in errors([{"spec": "x 202312011200 06:00:00"}]) + # Spec with bad activation offset attribute: + assert "'foo' is not valid" in errors([{"attrs": {"activation_offset": "foo"}, "spec": spec}]) + + +# sfc-climo-gen + + +def test_schema_sfc_climo_gen(): + errors = schema_validator("sfc-climo-gen", "properties", "sfc_climo_gen") + d = test_sfc_climo_gen.config["sfc_climo_gen"] + # Basic correctness: + assert not errors(d) + # Additional properties are not allowed: + assert "Additional properties are not allowed" in errors({**d, "foo": "bar"}) diff --git a/src/uwtools/tests/utils/test_file.py b/src/uwtools/tests/utils/test_file.py index a241ad79d..177c34f91 100644 --- a/src/uwtools/tests/utils/test_file.py +++ b/src/uwtools/tests/utils/test_file.py @@ -104,8 +104,8 @@ def test_readable_nofile(): assert hasattr(f, "read") -def test_resource_pathobj(): - assert file.resource_pathobj().is_dir() +def test_resource_path(): + assert file.resource_path().is_dir() def test_writable_file(tmp_path): diff --git a/src/uwtools/tests/utils/test_tasks.py b/src/uwtools/tests/utils/test_tasks.py new file mode 100644 index 000000000..4f888edcc --- /dev/null +++ b/src/uwtools/tests/utils/test_tasks.py @@ -0,0 +1,40 @@ +# pylint: disable=missing-function-docstring + +from iotaa import refs + +from uwtools.utils import tasks + + +def test_tasks_file_missing(tmp_path): + path = tmp_path / "file" + assert not path.is_file() + asset_id = refs(tasks.file(path=path)) + assert asset_id == path + assert not asset_id.is_file() + + +def 
test_tasks_file_present(tmp_path): + path = tmp_path / "file" + path.touch() + assert path.is_file() + asset_id = refs(tasks.file(path=path)) + assert asset_id == path + assert asset_id.is_file() + + +def test_tasks_filecopy(tmp_path): + src = tmp_path / "src" + dst = tmp_path / "dst" + src.touch() + assert not dst.is_file() + tasks.filecopy(src=src, dst=dst) + assert dst.is_file() + + +def test_tasks_symlink(tmp_path): + target = tmp_path / "target" + link = tmp_path / "link" + target.touch() + assert not link.is_file() + tasks.symlink(target=target, linkname=link) + assert link.is_symlink() diff --git a/src/uwtools/utils/file.py b/src/uwtools/utils/file.py index 6ef3ea281..0844c8e62 100644 --- a/src/uwtools/utils/file.py +++ b/src/uwtools/utils/file.py @@ -141,7 +141,7 @@ def readable( yield _stdinproxy() -def resource_pathobj(suffix: str = "") -> Path: +def resource_path(suffix: str = "") -> Path: """ Returns a pathlib Path object to a uwtools resource file. diff --git a/src/uwtools/utils/tasks.py b/src/uwtools/utils/tasks.py new file mode 100644 index 000000000..5ee18473d --- /dev/null +++ b/src/uwtools/utils/tasks.py @@ -0,0 +1,49 @@ +""" +Common iotaa tasks. +""" + +import os +from pathlib import Path +from shutil import copy + +from iotaa import asset, external, task + + +@external +def file(path: Path): + """ + An existing file. + + :param path: Path to the file. + """ + yield "File %s" % path + yield asset(path, path.is_file) + + +@task +def filecopy(src: Path, dst: Path): + """ + A copy of an existing file. + + :param src: Path to the source file. + :param dst: Path to the destination file to create. + """ + yield "Copy %s -> %s" % (src, dst) + yield asset(dst, dst.is_file) + yield file(src) + copy(src, dst) + + +@task +def symlink(target: Path, linkname: Path): + """ + A symbolic link. + + :param target: The existing file or directory. + :param linkname: The symlink to create. 
+ """ + yield "Link %s -> %s" % (linkname, target) + yield asset(linkname, linkname.exists) + yield file(target) + linkname.parent.mkdir(parents=True, exist_ok=True) + os.symlink(src=target, dst=linkname)