From 0dce3de116120d71672fb99a13108c60ef014c8f Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 31 Jul 2024 12:24:37 -0400
Subject: [PATCH 01/34] Implement infrastructure to generate workflow.toml and
 execute actions.

---
 .gitignore                         |   6 +-
 ARCHITECTURE.md                    |  44 ++++----
 hoomd_validation/init.py           |  30 +++---
 hoomd_validation/project.py        |  60 ++++++-----
 hoomd_validation/project_class.py  |  12 ---
 hoomd_validation/workflow.py       | 137 +++++++++++++++++++++++++++++
 hoomd_validation/workflow_class.py |  12 +++
 requirements.txt                   |   9 +-
 8 files changed, 238 insertions(+), 72 deletions(-)
 delete mode 100644 hoomd_validation/project_class.py
 create mode 100644 hoomd_validation/workflow.py
 create mode 100644 hoomd_validation/workflow_class.py

diff --git a/.gitignore b/.gitignore
index dff1e436..7b31d2c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,13 +1,9 @@
 workspace
-templates
+workflow.toml
 *.out
 *.svg
 hoomd_validation/__pycache__
-hoomd_validation/config.json
-signac.rc
 signac_project_document.json
 .signac_sp_cache.json.gz
 __pycache__
 .signac
-.bundles
-*.code-workspace

diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
index 5d82aad5..fdf115f6 100644
--- a/ARCHITECTURE.md
+++ b/ARCHITECTURE.md
@@ -6,32 +6,44 @@
 The code in this repository is designed to run validation tests using
 HOOMD-blue for longer periods of time than CI testing can handle. Users of
 this repository should be able to:

-1. Run a set of validation test workflows on a variety of hardware setups.
+1. Run a set of validation test workflows on the machine of their choice
+   (workstation and/or HPC).

-2. Choose specific validation workflows to run, and be able to select subsets of
-the operations in one workflow to run.
+2. Choose specific validation workflows to run and be able to select subsets of
+   the actions in one workflow to run.

-3. Visualize the validation test output and analysis using signac-dashboard.
+3. Visualize the test output and analysis using signac-dashboard.

 ## Implementation

 To minimize the amount of effort needed to execute all test workflows (1),
-Each validation test workflow is defined as a "subproject" of a single signac-flow
-project. All operations on a subproject are prefixed with the subprojet's name
-to allow for regex selection of operations at the command line (2). All operations
-in a subproject use a precondition or `select` argument to limit their operations
-only to the signac jobs specific to that subproject.
+each validation test workflow is defined as a "subproject" of a single row
+project. All actions on a subproject are prefixed with the subproject's name
+to allow for glob selection of actions at the command line (2). All actions
+in a subproject limit their actions to the signac jobs specific to that
+subproject.

-To further facilitate (2), all subprojects that require it will have an operation
-`_create_initial_state` as the first step in the workflow to prepare the
-initial conditions used for later steps. All subprojects will also suffix operation
+To further facilitate (2), all subprojects that require it will have an action
+`.create_initial_state` as the first step in the workflow to prepare the
+initial conditions used for later steps. All subprojects will also suffix action
 names with `_cpu` or `_gpu` according to the HOOMD device they execute on.

 Each subproject is defined in its own module file (e.g. `lj_fluid.py`). Each module
-must have a function `job_statepoints` that generates the statepoints needed for the job.
-Each statepoint must have a key `"subproject"` with its name matching the subproject.
-The subproject module file also includes all the flow operations for that subproject.
+must have a function `job_statepoints` that generates the state points needed for the
+job. Every state point must have a key `"subproject"` with its name matching the
+subproject. The subproject module file implements all the actions.

 To add a subproject, implement its module, then:

 1. Import the subproject module in `project.py`.
 2. Import the subproject module in `init.py` and add it to the list of subprojects.
+
+## Configuration
+
+`hoomd-validation` allows user configuration of many parameters (such as the wall
+time and the number of cores per job). Therefore, the row `workflow.toml` file must
+be dynamically generated, which the module `workflow.py` facilitates. Each subproject
+file (e.g. `lj_fluid.py`) adds actions to the global list of actions in `action.py`
+with parameters computed from the configuration file. The list of actions is used in
+two ways. First, `init.py` writes out the `workflow.toml` that corresponds to the
+current configuration. Second, `project.py` dispatches actions to the methods
+registered in `action.py`.
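For illustration, a minimal sketch of the file this design produces, assuming a
single registered action (the action name and product shown here are placeholders,
not part of this patch):

    # Sketch: generate workflow.toml from the registered actions.
    # Assumes the Workflow/Action classes added in workflow.py later in this patch.
    from workflow_class import ValidationWorkflow

    ValidationWorkflow.write_workflow(entrypoint='project.py')
    # workflow.toml then contains, roughly:
    #
    #   [workspace]
    #   path = "workspace"
    #   value_file = "signac_statepoint.json"
    #
    #   [default.action]
    #   command = "python -u project.py $ACTION_NAME {directories}"
    #
    #   [[actions]]
    #   name = "lj_fluid.create_initial_state"
    #   products = ["initial_state.gsd"]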
diff --git a/hoomd_validation/init.py b/hoomd_validation/init.py
index 5d13109e..f8d19346 100644
--- a/hoomd_validation/init.py
+++ b/hoomd_validation/init.py
@@ -1,27 +1,31 @@
 # Copyright (c) 2022-2024 The Regents of the University of Michigan.
 # Part of HOOMD-blue, released under the BSD 3-Clause License.

-"""Populate the signac project with jobs and job document parameters."""
+"""Populate the signac project with jobs and job document parameters.
+
+Also, write the row `workflow.toml`.
+"""
+
+import action

 # import subprojects
-import alj_2d
+# import alj_2d
 import config
-import hard_disk
-import hard_sphere
+# import hard_disk
+# import hard_sphere
 import lj_fluid
-import lj_union
-import patchy_particle_pressure
+# import lj_union
+# import patchy_particle_pressure
 import signac
-import simple_polygon
+# import simple_polygon

 subprojects = [
-    alj_2d,
+    # alj_2d,
     lj_fluid,
-    lj_union,
-    hard_disk,
-    hard_sphere,
-    simple_polygon,
-    patchy_particle_pressure,
+    # lj_union,
+    # hard_disk,
+    # hard_sphere,
+    # simple_polygon,
+    # patchy_particle_pressure,
 ]

 project = signac.init_project(path=config.project_root)

diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py
index 4b0c72a1..4e509688 100644
--- a/hoomd_validation/project.py
+++ b/hoomd_validation/project.py
@@ -4,29 +4,45 @@
 """Project workflow entry point."""

 # Define subproject flow operations
-import alj_2d
+# import alj_2d
 import config
-import flow
-import hard_disk
-import hard_sphere
-import lj_fluid
-import lj_union
-import patchy_particle_pressure
-import simple_polygon
-from project_class import Project
-
-# use srun on delta (mpiexec fails on multiple nodes)
-flow.environments.xsede.DeltaEnvironment.mpi_cmd = 'srun'
-
-__all__ = [
-    'alj_2d',
-    'lj_fluid',
-    'lj_union',
-    'hard_disk',
-    'hard_sphere',
-    'simple_polygon',
-    'patchy_particle_pressure',
+# import hard_disk
+# import hard_sphere
+# import lj_fluid
+# import lj_union
+# import patchy_particle_pressure
+# import simple_polygon
+from workflow_class import ValidationWorkflow
+
+import signac
+
+all_subprojects = [
+    # 'alj_2d',
+    # lj_fluid,
+    # 'lj_union',
+    # 'hard_disk',
+    # 'hard_sphere',
+    # 'simple_polygon',
+    # 'patchy_particle_pressure',
 ]

+
+def init(args):
+    """Initialize the workspace."""
+    if (config.project_root / 'workspace').exists():
+        message = 'The project is already initialized.'
+        raise RuntimeError(message)
+
+    project = signac.init_project(path=config.project_root)
+
+    # TODO: Add command line arguments to limit which projects are initialized.
+    # Will need to selectively remove the other projects' actions from the
+    # workflow.
+
+    # initialize jobs for validation test projects
+    for subproject in all_subprojects:
+        # add all the jobs to the project
+        for job_sp in subproject.job_statepoints():
+            project.open_job(job_sp).init()
+

 if __name__ == '__main__':
-    Project.get_project(config.project_root).main()
+    ValidationWorkflow.main(
+        entrypoint=config.project_root / 'hoomd_validation' / 'project.py',
+        init=init,
+        path=config.project_root,
+    )

diff --git a/hoomd_validation/project_class.py b/hoomd_validation/project_class.py
deleted file mode 100644
index faae160f..00000000
--- a/hoomd_validation/project_class.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (c) 2022-2024 The Regents of the University of Michigan.
-# Part of HOOMD-blue, released under the BSD 3-Clause License.
-
-"""FlowProject class for the validation test suite."""
-
-from flow import FlowProject
-
-
-class Project(FlowProject):
-    """Validation test projet."""
-
-    pass
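The TODO above could hook into the ``init_args`` mechanism provided by
``workflow.py`` (next in this patch). A hypothetical sketch of such a call; the
``--subprojects`` option is illustrative and not part of this patch:

    # Hypothetical: forward an extra option to the `init` subparser.
    ValidationWorkflow.main(
        entrypoint=config.project_root / 'hoomd_validation' / 'project.py',
        init=init,  # receives the parsed arguments, including args.subprojects
        init_args=['--subprojects'],
        path=config.project_root,
    )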
diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py
new file mode 100644
index 00000000..4b857016
--- /dev/null
+++ b/hoomd_validation/workflow.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2022-2024 The Regents of the University of Michigan.
+# Part of HOOMD-blue, released under the BSD 3-Clause License.
+
+"""Manage row actions from Python.
+
+* Subclass `Workflow` to create a new workflow.
+* Call ``YourWorkflow.add_action`` to add a new action to the workflow.
+* Call ``YourWorkflow.write_workflow`` to write ``workflow.toml`` with the
+  configuration of all actions.
+* Call ``YourWorkflow.main()`` in ``project.py`` to parse command line arguments and
+  dispatch the correct action method.
+"""
+
+import argparse
+from pathlib import Path
+
+import rtoml
+
+
+class Action:
+    """Represent a row action.
+
+    An `Action` consists of a method that implements the action and the row
+    configuration options for that action. The method is called by `__call__`. The
+    configuration is stored as a raw dictionary that maps directly to the ``[action]``
+    element of the row ``workflow.toml``.
+
+    The method must be a function that takes the argument(s) ``*jobs``.
+
+    Args:
+        method(callable): The method that implements this action. It must take the
+            argument ``*jobs``.
+        configuration(dict): Configuration options for the action to be written to
+            ``workflow.toml``.
+    """
+
+    def __init__(self, method, configuration):
+        if 'name' in configuration:
+            message = 'configuration must not contain "name"'
+            raise ValueError(message)
+
+        self._method = method
+        self._configuration = configuration
+
+    def __call__(self, *jobs):
+        """Call the `method` given on construction."""
+        self._method(*jobs)
+
+
+class Workflow:
+    """Represent a single workflow."""
+
+    _actions = {}
+
+    @classmethod
+    def add_action(cls, name, action):
+        """Add an action.
+
+        Args:
+            name(str): The action's name. Must be unique.
+            action(Action): The action itself.
+        """
+        if name in cls._actions:
+            message = f'Action {name} cannot be added twice.'
+            raise ValueError(message)
+
+        cls._actions[name] = action
+
+    @classmethod
+    def write_workflow(cls, entrypoint, path=None, default=None):
+        """Write the file ``workflow.toml``.
+
+        ``workflow.toml`` will include the signac workspace definition, the given
+        ``default`` mapping (when provided), and configurations for all added actions.
+
+        Note:
+            ``default.action.command`` will be automatically set based on the value of
+            `entrypoint`.
+
+        Args:
+            entrypoint(str): Name of the python file that calls the `main` entrypoint.
+            path(Path): Path to write ``workflow.toml``.
+            default(dict): The ``[default]`` mapping.
+        """
+        workflow = {
+            'workspace': {'path': 'workspace', 'value_file': 'signac_statepoint.json'}
+        }
+
+        workflow['default'] = {
+            'action': {
+                'command': f'python -u {entrypoint} $ACTION_NAME {{directories}}'
+            }
+        }
+
+        if default is not None:
+            workflow['default'].update(default)
+
+        workflow['actions'] = []
+        for name, action_item in cls._actions.items():
+            action = {'name': name}
+            action.update(action_item._configuration)
+            workflow['actions'].append(action)
+
+        if path is None:
+            path = Path('.')
+
+        with open(path / 'workflow.toml', 'w', encoding='utf-8') as workflow_file:
+            rtoml.dump(workflow, workflow_file)
+
+    @classmethod
+    def main(cls, init=None, init_args=None, **kwargs):
+        """Implement the main entrypoint for ``project.py``.
+
+        Valid commands are:
+
+        * ``python project.py init``
+        * ``python project.py action action_name directories``
+
+        ``init`` will call the user-provided method ``init``, then generate the file
+        ``workflow.toml``. When provided, items in the ``init_args`` list will be added
+        as options to the ``init`` subparser with ``add_argument``.
+
+        Args:
+            init(callable): User-provided initialization routine. Must take one
+                argument: ``args`` - the ``argparse`` parsed arguments.
+            init_args(list[str]): List of args to add to the ``init`` subparser.
+            **kwargs: Forwarded to `write_workflow`.
+        """
+        parser = argparse.ArgumentParser()
+        command = parser.add_subparsers(dest="command", required=True)
+        init_parser = command.add_parser('init')
+        if init_args is not None:
+            for arg in init_args:
+                init_parser.add_argument(arg)
+
+        args = parser.parse_args()
+
+        if args.command == 'init':
+            if init is not None:
+                init(args)
+
+            cls.write_workflow(**kwargs)
+        else:
+            message = f'Invalid command: {args.command}'
+            raise RuntimeError(message)

diff --git a/hoomd_validation/workflow_class.py b/hoomd_validation/workflow_class.py
new file mode 100644
index 00000000..756de9d0
--- /dev/null
+++ b/hoomd_validation/workflow_class.py
@@ -0,0 +1,12 @@
+# Copyright (c) 2022-2024 The Regents of the University of Michigan.
+# Part of HOOMD-blue, released under the BSD 3-Clause License.
+
+"""Workflow class for the validation test suite."""
+
+from workflow import Workflow
+
+
+class ValidationWorkflow(Workflow):
+    """Validation test workflow."""
+
+    pass

diff --git a/requirements.txt b/requirements.txt
index 52444d86..94b5f2d3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,8 @@
-signac >= 2.2.0
-signac-flow >= 0.25.1
-signac-dashboard
-matplotlib
 gsd
+matplotlib
 numpy
+rtoml
 scipy
+signac >= 2.2.0
+signac-flow >= 0.25.1
+signac-dashboard
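A usage sketch for the infrastructure added in this patch, with hypothetical names
(``example_project.py`` and ``example.hello`` are illustrative):

    # example_project.py -- a minimal project entry point.
    from workflow import Action, Workflow


    class ExampleWorkflow(Workflow):
        """A workflow with a single trivial action."""


    def hello(*jobs):
        """Print each signac job this action runs on."""
        for job in jobs:
            print('hello from', job)


    ExampleWorkflow.add_action('example.hello', Action(method=hello, configuration={}))

    if __name__ == '__main__':
        # `python example_project.py init` writes workflow.toml next to this file.
        # The generated file directs row to run
        # `python -u example_project.py example.hello <directories>`
        # for each group of eligible directories.
        ExampleWorkflow.main(entrypoint='example_project.py')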
From 6150d02c1a1a996ee60645008a9008df88ff41fe Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 31 Jul 2024 14:12:09 -0400
Subject: [PATCH 02/34] Working lj_fluid.create_initial_state.

---
 .gitignore                          |   11 +-
 hoomd_validation/.gitignore         |    2 +-
 hoomd_validation/config-sample.toml |   23 +
 hoomd_validation/config-sample.yaml |   35 -
 hoomd_validation/config_parser.py   |   42 +-
 hoomd_validation/lj_fluid.py        | 2774 +++++++++++++--------------
 hoomd_validation/project.py         |    4 +-
 hoomd_validation/workflow.py        |   18 +-
 8 files changed, 1429 insertions(+), 1480 deletions(-)
 create mode 100644 hoomd_validation/config-sample.toml
 delete mode 100644 hoomd_validation/config-sample.yaml

diff --git a/.gitignore b/.gitignore
index 7b31d2c5..8e3abc2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,10 @@
-workspace
-workflow.toml
+__pycache__
+.row
+.signac_sp_cache.json.gz
+.signac
 *.out
 *.svg
 hoomd_validation/__pycache__
 signac_project_document.json
-.signac_sp_cache.json.gz
-__pycache__
-.signac
+workflow.toml
+workspace

diff --git a/hoomd_validation/.gitignore b/hoomd_validation/.gitignore
index 5b6b0720..5b6c0960 100644
--- a/hoomd_validation/.gitignore
+++ b/hoomd_validation/.gitignore
@@ -1 +1 @@
-config.yaml
+config.toml

diff --git a/hoomd_validation/config-sample.toml b/hoomd_validation/config-sample.toml
new file mode 100644
index 00000000..28f8e791
--- /dev/null
+++ b/hoomd_validation/config-sample.toml
@@ -0,0 +1,23 @@
+## Number of replicate simulations to average over for ensemble comparisons.
+# replicates = 32
+
+## Maximum CPU cores to use in a single simulation.
+# max_cores_sim = 16
+
+## Maximum CPU cores to use in a single aggregate job submission.
+# max_cores_submission = 16
+
+## Maximum GPUs to use in a single aggregate job submission.
+# max_gpus_submission = 1
+
+## Maximum wall time (HH:MM:SS) that a submitted cluster job is allowed to run.
+# max_walltime = "24:00:00"
+
+## Wall time (HH:MM:SS) to use for short, non-restartable jobs.
+# short_walltime = "02:00:00"
+
+## Whether the HOOMD build has ENABLE_LLVM on.
+# enable_llvm = true
+
+## Whether the HOOMD build has ENABLE_GPU on.
+# enable_gpu = true
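A short sketch of how these settings surface in Python, assuming the sample has been
copied to ``hoomd_validation/config.toml`` (``ConfigFile`` is updated to read TOML in
``config_parser.py`` below):

    # Sketch: read the configuration with the defaults shown above.
    from config_parser import ConfigFile

    config = ConfigFile()  # falls back to the defaults when config.toml is absent
    print(config['max_cores_sim'])  # 16 unless overridden
    print(config['max_walltime'])   # '24:00:00' -- an HH:MM:SS string, not hours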
diff --git a/hoomd_validation/config-sample.yaml b/hoomd_validation/config-sample.yaml
deleted file mode 100644
index 2f2875b3..00000000
--- a/hoomd_validation/config-sample.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-## Define the executable to use.
-## * Use `sys.executable` by default.
-## * Use `python_exec` when set.
-## * When `singularity_container` is set, use:
-##   `singularity exec --nv {singularity_options} {singularity_container} python`
-## * When `singularity_container` and `python_exec` are set, use:
-##   `singularity exec --nv {singularity_options} {singularity_container} {python_exec}`
-# executable:
-    # python_exec: "python"
-    # singularity_container: "container.sif"
-    # singularity_options: ""
-
-## Number of replicate simulations to average over for ensemble comparisons.
-# replicates: 32
-
-## Maximum CPU cores to use in a single simulation.
-# max_cores_sim: 16
-
-## Maximum CPU cores to use in a single aggregate job submission.
-# max_cores_submission: 16
-
-## Maximum GPUs to use in a single aggregate job submission.
-# max_gpus_submission: 1
-
-## Maximum wall time (in hours) that a submitted cluster job is allowed to run.
-# max_walltime: 24
-
-## Wall time (in hours) To use for short, non-restartable jobs
-# short_walltime: 2
-
-## Whether the HOOMD build has ENABLE_LLVM on.
-# enable_llvm: true
-
-## Whether the HOOMD build has ENABLE_GPU on.
-# enable_gpu: true

diff --git a/hoomd_validation/config_parser.py b/hoomd_validation/config_parser.py
index c67e8c2e..b1e80009 100644
--- a/hoomd_validation/config_parser.py
+++ b/hoomd_validation/config_parser.py
@@ -4,10 +4,9 @@
 """Class for parsing config files."""

 import os
-import sys
 from pathlib import Path

-import yaml
+import rtoml


 class ConfigFile(dict):
@@ -20,48 +19,19 @@ class ConfigFile(dict):
     instance.
     """

-    DEFAULT_CONFIG_PATH = str(Path(__file__).parent / 'config.yaml')
+    DEFAULT_CONFIG_PATH = str(Path(__file__).parent / 'config.toml')

     def __init__(self, config_file_path=DEFAULT_CONFIG_PATH):
         if not os.path.exists(config_file_path):
             config = dict()
         else:
-            with open(config_file_path) as file:
-                config = yaml.safe_load(file)
+            with open(config_file_path, encoding='utf-8') as file:
+                config = rtoml.load(file)

-        self['executable'] = self._parse_executable_string(config)
         self['max_cores_sim'] = int(config.get('max_cores_sim', 16))
         self['max_cores_submission'] = int(config.get('max_cores_submission', 16))
         self['max_gpus_submission'] = int(config.get('max_gpus_submission', 1))
-        self['max_walltime'] = float(config.get('max_walltime', 24))
-        self['short_walltime'] = float(config.get('short_walltime', 2))
+        self['max_walltime'] = str(config.get('max_walltime', '24:00:00'))
+        self['short_walltime'] = str(config.get('short_walltime', '02:00:00'))
         self['replicates'] = int(config.get('replicates', 32))
-
         self['enable_llvm'] = bool(config.get('enable_llvm', True))
         self['enable_gpu'] = bool(config.get('enable_gpu', True))
-
-    @staticmethod
-    def _parse_executable_string(config_file):
-        """Search the config file and determine the executable.
-
-        Searches the executable section of the config file and builds the string
-        needed by flow's directives. If no config file is present, we use the
-        python executable used to run this code.
-        """
-        if 'executable' not in config_file:
-            return sys.executable
-
-        return_string = ''
-        executable_options = config_file['executable']
-        using_container = 'singularity_container' in executable_options
-        if using_container:
-            return_string += (
-                'singularity exec --nv '
-                + executable_options.get('singularity_options', '')
-                + ' '
-            )
-            return_string += executable_options['singularity_container'] + ' '
-
-        return_string += executable_options.get(
-            'python_exec', 'python' if using_container else sys.executable
-        )
-        return return_string
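The ``group`` and ``resources`` dictionaries that ``lj_fluid.py`` defines below pass
through ``Workflow.write_workflow`` unchanged, so they map directly onto tables in
the generated ``workflow.toml``. A hedged sketch with illustrative values:

    # Sketch: an action configuration and, roughly, the TOML it becomes.
    configuration = {
        'products': ['initial_state.gsd'],
        'launchers': ['mpi'],
        'group': {'maximum_size': 2, 'sort_by': ['/density']},
        'resources': {'processes': {'per_directory': 8}},
    }
    # In workflow.toml:
    #
    #   [[actions]]
    #   name = "lj_fluid.create_initial_state"
    #   products = ["initial_state.gsd"]
    #   launchers = ["mpi"]
    #   group = { maximum_size = 2, sort_by = ["/density"] }
    #   resources = { processes = { per_directory = 8 } }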
diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py
index 09836810..115424b6 100644
--- a/hoomd_validation/lj_fluid.py
+++ b/hoomd_validation/lj_fluid.py
@@ -8,11 +8,16 @@
 import math
 import os
 import pathlib
+import itertools
+import numpy
+
+import hoomd
 import util
 from config import CONFIG
 from flow import aggregator
-from project_class import Project
+from workflow_class import ValidationWorkflow
+from workflow import Action

 # Run parameters shared between simulations.
 # Step counts must be even and a multiple of the log quantity period.
@@ -27,7 +32,7 @@
 LJ_PARAMS = {'epsilon': 1.0, 'sigma': 1.0}

 NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim'])

-WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60
+WALLTIME_STOP_SECONDS = (int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10) * 60

 # Limit the number of long NVE runs to reduce the number of CPU hours needed.
 NUM_NVE_RUNS = 2

@@ -59,7 +64,7 @@ def job_statepoints():
     for idx in replicate_indices:
         yield (
             {
-                'subproject': 'lj_fluid',
+                'subproject': __name__,
                 'kT': param['kT'],
                 'density': param['density'],
                 'pressure': param['pressure'],
@@ -71,50 +76,23 @@ def job_statepoints():
     )


-def is_lj_fluid(job):
-    """Test if a given job is part of the lj_fluid subproject."""
-    return job.cached_statepoint['subproject'] == 'lj_fluid'
-
-
-def sort_key(job):
-    """Aggregator sort key."""
-    return (job.cached_statepoint['density'], job.cached_statepoint['num_particles'])
-
-
-partition_jobs_cpu_mpi = aggregator.groupsof(
-    num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS),
-    sort_by=sort_key,
-    select=is_lj_fluid,
-)
-
-partition_jobs_gpu = aggregator.groupsof(
-    num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']),
-    sort_by=sort_key,
-    select=is_lj_fluid,
-)
+_group = {
+    'sort_by': ["/density", "/num_particles"],
+    'include': [{'condition': ["/subproject", "==", __name__]}],
+}
+_resources_cpu = {'processes': {'per_directory': NUM_CPU_RANKS}}
+_group_cpu = _group | {
+    'maximum_size': min(
+        CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS
+    )
+}
+_resources_gpu = {'processes': {'per_directory': 1}, 'gpus_per_process': 1}
+_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']}


-@Project.post.isfile('lj_fluid_initial_state.gsd')
-@Project.operation(
-    directives=dict(
-        executable=CONFIG['executable'],
-        nranks=util.total_ranks_function(NUM_CPU_RANKS),
-        walltime=CONFIG['short_walltime'],
-    ),
-    aggregator=partition_jobs_cpu_mpi,
-)
-def lj_fluid_create_initial_state(*jobs):
+def create_initial_state(*jobs):
     """Create initial system configuration."""
-    import itertools
-
-    import hoomd
-    import numpy
-
     communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS)
     job = jobs[communicator.partition]

+    if job.isfile('initial_state.gsd'):
+        return
+
     if communicator.rank == 0:
-        print('starting lj_fluid_create_initial_state:', job)
+        print(f'starting {__name__}.create_initial_state:', job)

     sp = job.sp
     device = hoomd.device.CPU(
@@ -158,1370 +136,1372 @@ def lj_fluid_create_initial_state(*jobs):
     device.notice('Done.')

     hoomd.write.GSD.write(
-        state=sim.state, filename=job.fn('lj_fluid_initial_state.gsd'), mode='wb'
+        state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb'
     )

     if communicator.rank == 0:
-        print(f'completed lj_fluid_create_initial_state: {job}')
+        print(f'completed {__name__}.create_initial_state: {job}')
+
+
+ValidationWorkflow.add_action(
+    f'{__name__}.create_initial_state',
+    Action(
+        method=create_initial_state,
+        configuration={
+            'products': ['initial_state.gsd'],
+            'launchers': ['mpi'],
+            'group': _group_cpu,
+            'resources': _resources_cpu
+            | {'walltime': {'per_submission': CONFIG['short_walltime']}},
+        },
+    ),
+)

 #################################
 # MD ensemble simulations
 #################################


-def make_md_simulation(
-    job,
-    device,
-    initial_state,
-    method,
-    sim_mode,
-    extra_loggables=None,
-    period_multiplier=1,
-):
-    """Make an MD simulation.
-
-    Args:
-        job (`signac.job.Job`): Signac job object.
-
-        device (`hoomd.device.Device`): hoomd device object.
-
-        initial_state (str): Path to the gsd file to be used as an initial state
-            for the simulation.
-
-        method (`hoomd.md.methods.Method`): hoomd integration method.
-
-        sim_mode (str): String identifying the simulation mode.
-
-        extra_loggables (list): List of quantities to add to the gsd logger.
- - ThermodynamicQuantities is added by default, any more quantities should - be in this list. - - period_multiplier (int): Factor to multiply the GSD file periods by. - """ - import hoomd - from hoomd import md - - # pair force - if extra_loggables is None: - extra_loggables = [] - nlist = md.nlist.Cell(buffer=0.4) - lj = md.pair.LJ( - default_r_cut=job.cached_statepoint['r_cut'], - default_r_on=job.cached_statepoint['r_on'], - nlist=nlist, - ) - lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) - lj.mode = 'xplor' - - # integrator - integrator = md.Integrator(dt=0.001, methods=[method], forces=[lj]) - - # compute thermo - thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) - - # add gsd log quantities - logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) - logger.add( - thermo, - quantities=[ - 'pressure', - 'potential_energy', - 'kinetic_temperature', - 'kinetic_energy', - ], - ) - logger.add(integrator, quantities=['linear_momentum']) - for loggable in extra_loggables: - logger.add(loggable) - - # simulation - sim = util.make_simulation( - job=job, - device=device, - initial_state=initial_state, - integrator=integrator, - sim_mode=sim_mode, - logger=logger, - table_write_period=WRITE_PERIOD, - trajectory_write_period=LOG_PERIOD['trajectory'] * period_multiplier, - log_write_period=LOG_PERIOD['quantities'] * period_multiplier, - log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, - ) - sim.operations.add(thermo) - for loggable in extra_loggables: - # call attach explicitly so we can access sim state when computing the - # loggable quantity - if hasattr(loggable, 'attach'): - loggable.attach(sim) - - return sim - - -def run_md_sim(job, device, ensemble, thermostat, complete_filename): - """Run the MD simulation with the given ensemble and thermostat.""" - import hoomd - from custom_actions import ComputeDensity - from hoomd import md - - initial_state = job.fn('lj_fluid_initial_state.gsd') - - if ensemble == 'nvt': - if thermostat == 'langevin': - method = md.methods.Langevin( - hoomd.filter.All(), kT=job.cached_statepoint['kT'] - ) - method.gamma.default = 1.0 - elif thermostat == 'mttk': - method = md.methods.ConstantVolume(filter=hoomd.filter.All()) - method.thermostat = hoomd.md.methods.thermostats.MTTK( - kT=job.cached_statepoint['kT'], tau=0.25 - ) - elif thermostat == 'bussi': - method = md.methods.ConstantVolume(filter=hoomd.filter.All()) - method.thermostat = hoomd.md.methods.thermostats.Bussi( - kT=job.cached_statepoint['kT'] - ) - else: - raise ValueError(f'Unsupported thermostat {thermostat}') - elif ensemble == 'npt': - p = job.cached_statepoint['pressure'] - method = md.methods.ConstantPressure( - hoomd.filter.All(), S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' - ) - if thermostat == 'bussi': - method.thermostat = hoomd.md.methods.thermostats.Bussi( - kT=job.cached_statepoint['kT'] - ) - else: - raise ValueError(f'Unsupported thermostat {thermostat}') - - sim_mode = f'{ensemble}_{thermostat}_md' - - density_compute = ComputeDensity() - sim = make_md_simulation( - job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] - ) - - # thermalize momenta - sim.state.thermalize_particle_momenta( - hoomd.filter.All(), job.cached_statepoint['kT'] - ) - - # thermalize the thermostat (if applicable) - if ( - isinstance(method, (md.methods.ConstantPressure, md.methods.ConstantVolume)) - ) and hasattr(method.thermostat, 'thermalize_dof'): - sim.run(0) - method.thermostat.thermalize_dof() - - # equilibrate - 
device.notice('Equilibrating...') - sim.run(EQUILIBRATE_STEPS) - device.notice('Done.') - - # run - device.notice('Running...') - sim.run(RUN_STEPS) - - pathlib.Path(job.fn(complete_filename)).touch() - device.notice('Done.') - - -md_sampling_jobs = [] -md_job_definitions = [ - { - 'ensemble': 'nvt', - 'thermostat': 'langevin', - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, - }, - { - 'ensemble': 'nvt', - 'thermostat': 'mttk', - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, - }, - { - 'ensemble': 'nvt', - 'thermostat': 'bussi', - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, - }, - { - 'ensemble': 'npt', - 'thermostat': 'bussi', - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, - }, -] - -if CONFIG['enable_gpu']: - md_job_definitions.extend( - [ - { - 'ensemble': 'nvt', - 'thermostat': 'langevin', - 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, - }, - { - 'ensemble': 'nvt', - 'thermostat': 'mttk', - 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, - }, - { - 'ensemble': 'nvt', - 'thermostat': 'bussi', - 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, - }, - { - 'ensemble': 'npt', - 'thermostat': 'bussi', - 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, - }, - ] - ) - - -def add_md_sampling_job( - ensemble, thermostat, device_name, ranks_per_partition, aggregator -): - """Add a MD sampling job to the workflow.""" - sim_mode = f'{ensemble}_{thermostat}_md' - - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_fluid_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_fluid_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def md_sampling_operation(*jobs): - """Perform sampling simulation given the definition.""" - import hoomd - - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition - ) - job = jobs[communicator.partition] - - if communicator.rank == 0: - print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) - - if device_name == 'gpu': - device_cls = hoomd.device.GPU - elif device_name == 'cpu': - device_cls = hoomd.device.CPU - - device = device_cls( - communicator=communicator, - message_filename=util.get_message_filename( - job, f'{sim_mode}_{device_name}.log' - ), - ) - - run_md_sim( - job, - device, - ensemble, - thermostat, - complete_filename=f'{sim_mode}_{device_name}_complete', - ) - - if communicator.rank == 0: - print(f'completed lj_fluid_{sim_mode}_{device_name}: {job}') - - md_sampling_jobs.append(md_sampling_operation) - - -for definition in md_job_definitions: - add_md_sampling_job(**definition) - -################################# -# MC simulations -################################# - - -def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): - """Make an MC Simulation. - - Args: - job (`signac.job.Job`): Signac job object. - device (`hoomd.device.Device`): Device object. 
- initial_state (str): Path to the gsd file to be used as an initial state - for the simulation. - sim_mode (str): String defining the simulation mode. - extra_loggables (list): List of extra loggables to log to gsd files. - Patch energies are logged by default. - """ - import hoomd - import numpy - from custom_actions import ComputeDensity - from hoomd import hpmc - - if extra_loggables is None: - extra_loggables = [] - - # integrator - mc = hpmc.integrate.Sphere(nselect=1) - mc.shape['A'] = dict(diameter=0.0) - - # pair potential - epsilon = LJ_PARAMS['epsilon'] / job.cached_statepoint['kT'] # noqa F841 - sigma = LJ_PARAMS['sigma'] - r_on = job.cached_statepoint['r_on'] - r_cut = job.cached_statepoint['r_cut'] - - lennard_jones_mc = hoomd.hpmc.pair.LennardJones() - lennard_jones_mc.params[('A', 'A')] = dict( - epsilon=epsilon, sigma=sigma, r_cut=r_cut, r_on=r_on - ) - lennard_jones_mc.mode = 'xplor' - mc.pair_potentials = [lennard_jones_mc] - - # pair force to compute virial pressure - nlist = hoomd.md.nlist.Cell(buffer=0.4) - lj = hoomd.md.pair.LJ( - default_r_cut=job.cached_statepoint['r_cut'], - default_r_on=job.cached_statepoint['r_on'], - nlist=nlist, - ) - lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) - lj.mode = 'xplor' - - # compute the density - compute_density = ComputeDensity() - - logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) - logger.add(lennard_jones_mc, quantities=['energy']) - logger.add(mc, quantities=['translate_moves']) - logger.add(compute_density) - for loggable in extra_loggables: - logger.add(loggable) - - # make simulation - sim = util.make_simulation( - job=job, - device=device, - initial_state=initial_state, - integrator=mc, - sim_mode=sim_mode, - logger=logger, - table_write_period=WRITE_PERIOD, - trajectory_write_period=LOG_PERIOD['trajectory'], - log_write_period=LOG_PERIOD['quantities'], - log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, - ) - for loggable in extra_loggables: - # call attach method explicitly so we can access simulation state when - # computing the loggable quantity - if hasattr(loggable, 'attach'): - loggable.attach(sim) - - compute_density.attach(sim) - - def _compute_virial_pressure(): - virials = numpy.sum(lj.virials, 0) - w = 0 - if virials is not None: - w = virials[0] + virials[3] + virials[5] - V = sim.state.box.volume - return job.cached_statepoint['num_particles'] * job.cached_statepoint[ - 'kT' - ] / V + w / (3 * V) - - logger[('custom', 'virial_pressure')] = (_compute_virial_pressure, 'scalar') - - # move size tuner - mstuner = hpmc.tune.MoveSize.scale_solver( - moves=['d'], - target=0.2, - max_translation_move=0.5, - trigger=hoomd.trigger.And( - [ - hoomd.trigger.Periodic(100), - hoomd.trigger.Before(RANDOMIZE_STEPS | EQUILIBRATE_STEPS // 2), - ] - ), - ) - sim.operations.add(mstuner) - sim.operations.computes.append(lj) - - return sim - - -def run_nvt_mc_sim(job, device, complete_filename): - """Run MC sim in NVT.""" - import hoomd - - # simulation - sim_mode = 'nvt_mc' - restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') - if job.isfile(restart_filename): - initial_state = job.fn(restart_filename) - restart = True - else: - initial_state = job.fn('lj_fluid_initial_state.gsd') - restart = False - - sim = make_mc_simulation(job, device, initial_state, sim_mode) - - if not restart: - # equilibrate - device.notice('Equilibrating...') - sim.run(EQUILIBRATE_STEPS // 2) - sim.run(EQUILIBRATE_STEPS // 2) - device.notice('Done.') - - # Print 
acceptance ratio as measured during the 2nd half of the - # equilibration. - translate_moves = sim.operations.integrator.translate_moves - translate_acceptance = translate_moves[0] / sum(translate_moves) - device.notice(f'Translate move acceptance: {translate_acceptance}') - device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') - - # save move size to a file - if device.communicator.rank == 0: - name = util.get_job_filename(sim_mode, device, 'move_size', 'json') - with open(job.fn(name), 'w') as f: - json.dump(dict(d_A=sim.operations.integrator.d['A']), f) - else: - device.notice('Restarting...') - # read move size from the file - name = util.get_job_filename(sim_mode, device, 'move_size', 'json') - with open(job.fn(name)) as f: - data = json.load(f) - - sim.operations.integrator.d['A'] = data['d_A'] - device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') - - # run - device.notice('Running...') - util.run_up_to_walltime( - sim=sim, - end_step=TOTAL_STEPS, - steps=RESTART_STEPS, - walltime_stop=WALLTIME_STOP_SECONDS, - ) - - hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') - - if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() - device.notice('Done.') - else: - device.notice( - 'Ending run early due to walltime limits at:' - f'{device.communicator.walltime}' - ) - - -def run_npt_mc_sim(job, device, complete_filename): - """Run MC sim in NPT.""" - import hoomd - from hoomd import hpmc - - # device - sim_mode = 'npt_mc' - restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') - if job.isfile(restart_filename): - initial_state = job.fn(restart_filename) - restart = True - else: - initial_state = job.fn('lj_fluid_initial_state.gsd') - restart = False - - # box updates - boxmc = hpmc.update.BoxMC( - betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], - trigger=hoomd.trigger.Periodic(1), - ) - boxmc.volume = dict(weight=1.0, mode='ln', delta=0.01) - - # simulation - sim = make_mc_simulation( - job, device, initial_state, sim_mode, extra_loggables=[boxmc] - ) - - sim.operations.add(boxmc) - - boxmc_tuner = hpmc.tune.BoxMCMoveSize.scale_solver( - trigger=hoomd.trigger.And( - [ - hoomd.trigger.Periodic(400), - hoomd.trigger.Before(RANDOMIZE_STEPS + EQUILIBRATE_STEPS // 2), - ] - ), - boxmc=boxmc, - moves=['volume'], - target=0.5, - ) - sim.operations.add(boxmc_tuner) - - if not restart: - # equilibrate - device.notice('Equilibrating...') - sim.run(EQUILIBRATE_STEPS // 2) - sim.run(EQUILIBRATE_STEPS // 2) - device.notice('Done.') - - # Print acceptance ratio as measured during the 2nd half of the - # equilibration. 
- translate_moves = sim.operations.integrator.translate_moves - translate_acceptance = translate_moves[0] / sum(translate_moves) - device.notice(f'Translate move acceptance: {translate_acceptance}') - device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') - - volume_moves = boxmc.volume_moves - volume_acceptance = volume_moves[0] / sum(volume_moves) - device.notice(f'Volume move acceptance: {volume_acceptance}') - device.notice(f'Volume move size: {boxmc.volume["delta"]}') - - # save move sizes to a file - if device.communicator.rank == 0: - name = util.get_job_filename(sim_mode, device, 'move_size', 'json') - with open(job.fn(name), 'w') as f: - json.dump( - dict( - d_A=sim.operations.integrator.d['A'], - volume_delta=boxmc.volume['delta'], - ), - f, - ) - else: - device.notice('Restarting...') - # read move size from the file - name = util.get_job_filename(sim_mode, device, 'move_size', 'json') - with open(job.fn(name)) as f: - data = json.load(f) - - sim.operations.integrator.d['A'] = data['d_A'] - device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') - boxmc.volume = dict(weight=1.0, mode='ln', delta=data['volume_delta']) - device.notice(f'Restored volume move size: {boxmc.volume["delta"]}') - - # run - device.notice('Running...') - util.run_up_to_walltime( - sim=sim, - end_step=TOTAL_STEPS, - steps=RESTART_STEPS, - walltime_stop=WALLTIME_STOP_SECONDS, - ) - - hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') - - if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() - device.notice('Done.') - else: - device.notice( - 'Ending run early due to walltime limits at:' - f'{device.communicator.walltime}' - ) - - -mc_sampling_jobs = [] -mc_job_definitions = [ - { - 'mode': 'nvt', - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, - }, - { - 'mode': 'npt', - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, - }, -] - - -def add_mc_sampling_job(mode, device_name, ranks_per_partition, aggregator): - """Add a MC sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_fluid_create_initial_state) - @Project.post.isfile(f'{mode}_mc_{device_name}_complete') - @Project.operation( - name=f'lj_fluid_{mode}_mc_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def sampling_operation(*jobs): - """Perform sampling simulation given the definition.""" - import hoomd - - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition - ) - job = jobs[communicator.partition] - - if communicator.rank == 0: - print(f'starting lj_fluid_{mode}_mc_{device_name}:', job) - - if device_name == 'gpu': - device_cls = hoomd.device.GPU - elif device_name == 'cpu': - device_cls = hoomd.device.CPU - - device = device_cls( - communicator=communicator, - message_filename=util.get_message_filename( - job, f'{mode}_mc_{device_name}.log' - ), - ) - - globals().get(f'run_{mode}_mc_sim')( - job, device, complete_filename=f'{mode}_mc_{device_name}_complete' - ) - - if communicator.rank == 0: - print(f'completed lj_fluid_{mode}_mc_{device_name}: {job}') - - mc_sampling_jobs.append(sampling_operation) - - -for definition in 
mc_job_definitions: - add_mc_sampling_job(**definition) - - -@Project.pre(is_lj_fluid) -@Project.pre.after(*md_sampling_jobs) -@Project.pre.after(*mc_sampling_jobs) -@Project.post.true('lj_fluid_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def lj_fluid_analyze(job): - """Analyze the output of all simulation modes.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - - matplotlib.style.use('fivethirtyeight') - - print('starting lj_fluid_analyze:', job) - - sim_modes = [ - 'nvt_langevin_md_cpu', - 'nvt_mttk_md_cpu', - 'nvt_bussi_md_cpu', - 'npt_bussi_md_cpu', - ] - - if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): - sim_modes.extend( - [ - 'nvt_langevin_md_gpu', - 'nvt_mttk_md_gpu', - 'nvt_bussi_md_gpu', - 'npt_bussi_md_gpu', - ] - ) - - if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): - sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) - - util._sort_sim_modes(sim_modes) - - timesteps = {} - energies = {} - pressures = {} - densities = {} - linear_momentum = {} - - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - - if 'md' in sim_mode: - energies[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' - ] - else: - energies[sim_mode] = ( - log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] - * job.cached_statepoint['kT'] - ) - - energies[sim_mode] /= job.cached_statepoint['num_particles'] - - if 'md' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' - ] - else: - pressures[sim_mode] = log_traj['hoomd-data/custom/virial_pressure'] - - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] - - if 'md' in sim_mode and 'langevin' not in sim_mode: - momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] - linear_momentum[sim_mode] = [ - math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector - ] - else: - linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) - - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - potential_energy=float(numpy.mean(energies[mode])), - density=float(numpy.mean(densities[mode])), - ) - - # Plot results - fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') - ax = fig.add_subplot(2, 2, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 2, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - - ax = fig.add_subplot(2, 2, 3) - util.plot_timeseries( - ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 - ) - - ax = fig.add_subplot(2, 2, 4) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data={ - mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] - for mode, lm in linear_momentum.items() - }, - ylabel=r'$|\vec{p}| / N$', - max_points=500, - ) - - fig.suptitle( - f'$kT={job.cached_statepoint["kT"]}$, ' - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - 
f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') - - job.document['lj_fluid_analysis_complete'] = True - - -analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles', 'r_cut'], - sort_by='replicate_idx', - select=is_lj_fluid, -) - - -@Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_fluid_analysis_complete')) -@Project.post(lambda *jobs: util.true_all(*jobs, key='lj_fluid_compare_modes_complete')) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def lj_fluid_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - - matplotlib.style.use('fivethirtyeight') - - print('starting lj_fluid_compare_modes:', jobs[0]) - - sim_modes = [ - 'nvt_langevin_md_cpu', - 'nvt_mttk_md_cpu', - 'nvt_bussi_md_cpu', - 'npt_bussi_md_cpu', - ] - - if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')): - sim_modes.extend( - [ - 'nvt_langevin_md_gpu', - 'nvt_mttk_md_gpu', - 'nvt_bussi_md_gpu', - 'npt_bussi_md_gpu', - ] - ) - - if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')): - sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) - - util._sort_sim_modes(sim_modes) - - quantity_names = ['density', 'pressure', 'potential_energy'] - labels = { - 'density': r'$\frac{\rho_\mathrm{sample} - \rho}{\rho} \cdot 1000$', - 'pressure': r'$\frac{P_\mathrm{sample} - P}{P} \cdot 1000$', - 'potential_energy': r'$\frac{U_\mathrm{sample} - }{} \cdot 1000$', - } - - # grab the common statepoint parameters - kT = jobs[0].sp.kT - set_density = jobs[0].sp.density - set_pressure = jobs[0].sp.pressure - num_particles = jobs[0].sp.num_particles - - quantity_reference = dict( - density=set_density, pressure=set_pressure, potential_energy=None - ) - - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 3), layout='tight') - fig.suptitle( - f'$kT={kT}$, $\\rho={set_density}$, ' - f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, ' - f'$N={num_particles}$' - ) - - for i, quantity_name in enumerate(quantity_names): - ax = fig.add_subplot(3, 1, i + 1) - - # organize data from jobs - quantities = {mode: [] for mode in sim_modes} - for jb in jobs: - for mode in sim_modes: - quantities[mode].append(getattr(getattr(jb.doc, mode), quantity_name)) - - if quantity_reference[quantity_name] is not None: - reference = quantity_reference[quantity_name] - else: - avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} - reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - - avg_quantity, stderr_quantity = util.plot_vs_expected( - ax=ax, - values=quantities, - ylabel=labels[quantity_name], - expected=reference, - relative_scale=1000, - separate_nvt_npt=True, - ) - - if quantity_name == 'density': - if 'npt_mc_cpu' in avg_quantity: - print( - f'Average npt_mc_cpu density {num_particles}:', - avg_quantity['npt_mc_cpu'], - '+/-', - stderr_quantity['npt_mc_cpu'], - ) - print( - f'Average npt_md_cpu density {num_particles}:', - avg_quantity['npt_bussi_md_cpu'], - '+/-', - stderr_quantity['npt_bussi_md_cpu'], - ) - if quantity_name == 'pressure': - if 'nvt_mc_cpu' in avg_quantity: - print( - f'Average nvt_mc_cpu pressure {num_particles}:', - avg_quantity['nvt_mc_cpu'], - '+/-', - stderr_quantity['nvt_mc_cpu'], - ) - if 'npt_mc_cpu' in avg_quantity: - print( - 
f'Average npt_mc_cpu pressure {num_particles}:', - avg_quantity['npt_mc_cpu'], - '+/-', - stderr_quantity['npt_mc_cpu'], - ) - - filename = ( - f'lj_fluid_compare_kT{kT}_density{round(set_density, 2)}_' - f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_' - f'N{num_particles}.svg' - ) - - fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - - for job in jobs: - job.document['lj_fluid_compare_modes_complete'] = True - - -@Project.pre.after(*md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_fluid_distribution_analyze_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def lj_fluid_distribution_analyze(*jobs): - """Checks that MD follows the correct KE distribution.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - import scipy - - matplotlib.style.use('fivethirtyeight') - - print('starting lj_fluid_distribution_analyze:', jobs[0]) - - sim_modes = [ - 'nvt_langevin_md_cpu', - 'nvt_mttk_md_cpu', - 'nvt_bussi_md_cpu', - 'npt_bussi_md_cpu', - ] - - if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')): - sim_modes.extend( - [ - 'nvt_langevin_md_gpu', - 'nvt_mttk_md_gpu', - 'nvt_bussi_md_gpu', - 'npt_bussi_md_gpu', - ] - ) - - if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')): - sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) - - util._sort_sim_modes(sim_modes) - - # grab the common statepoint parameters - kT = jobs[0].sp.kT - set_density = jobs[0].sp.density - num_particles = jobs[0].sp.num_particles - - fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') - fig.suptitle( - f'$kT={kT}$, $\\rho={set_density}$, ' - f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, ' - f'$N={num_particles}$' - ) - - ke_means_expected = collections.defaultdict(list) - ke_sigmas_expected = collections.defaultdict(list) - ke_samples = collections.defaultdict(list) - potential_energy_samples = collections.defaultdict(list) - density_samples = collections.defaultdict(list) - pressure_samples = collections.defaultdict(list) - - for job in jobs: - for sim_mode in sim_modes: - if sim_mode.startswith('nvt_langevin'): - n_dof = num_particles * 3 - else: - n_dof = num_particles * 3 - 3 - - print('Reading' + job.fn(sim_mode + '_quantities.h5')) - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - - if 'md' in sim_mode: - ke = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' - ] - ke_means_expected[sim_mode].append(numpy.mean(ke) - 1 / 2 * n_dof * kT) - ke_sigmas_expected[sim_mode].append( - numpy.std(ke) - 1 / math.sqrt(2) * math.sqrt(n_dof) * kT - ) - - ke_samples[sim_mode].extend(ke) - else: - ke_samples[sim_mode].extend( - [ - 3 - / 2 - * job.cached_statepoint['num_particles'] - * job.cached_statepoint['kT'] - ] - ) - - if 'md' in sim_mode: - potential_energy_samples[sim_mode].extend( - list( - log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities' - '/potential_energy' - ] - ) - ) - else: - potential_energy_samples[sim_mode].extend( - list( - log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] - * job.cached_statepoint['kT'] - ) - ) - - if 'md' in sim_mode: - pressure_samples[sim_mode].extend( - list( - log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' - ] - ) - ) - else: - pressure_samples[sim_mode].extend( - list(log_traj['hoomd-data/custom/virial_pressure']) - ) - - density_samples[sim_mode].extend( - 
list(log_traj['hoomd-data/custom_actions/ComputeDensity/density']) - ) - - ax = fig.add_subplot(2, 2, 1) - util.plot_vs_expected(ax, ke_means_expected, '$ - 1/2 N_{dof} k T$') - - ax = fig.add_subplot(2, 2, 2) - # https://doi.org/10.1371/journal.pone.0202764 - util.plot_vs_expected( - ax, ke_sigmas_expected, r'$\Delta K - 1/\sqrt{2} \sqrt{N_{dof}} k T$' - ) - - ax = fig.add_subplot(2, 4, 5) - rv = scipy.stats.gamma( - 3 * job.cached_statepoint['num_particles'] / 2, - scale=job.cached_statepoint['kT'], - ) - util.plot_distribution(ax, ke_samples, 'K', expected=rv.pdf) - ax.legend(loc='upper right', fontsize='xx-small') - - ax = fig.add_subplot(2, 4, 6) - util.plot_distribution(ax, potential_energy_samples, 'U') - - ax = fig.add_subplot(2, 4, 7) - util.plot_distribution( - ax, density_samples, r'$\rho$', expected=job.cached_statepoint['density'] - ) - - ax = fig.add_subplot(2, 4, 8) - util.plot_distribution( - ax, pressure_samples, 'P', expected=job.cached_statepoint['pressure'] - ) - - filename = ( - f'lj_fluid_distribution_analyze_kT{kT}' - f'_density{round(set_density, 2)}_' - f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_' - f'N{num_particles}.svg' - ) - fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - - for job in jobs: - job.document['lj_fluid_distribution_analyze_complete'] = True - - -################################# -# MD conservation simulations -################################# - - -def run_nve_md_sim(job, device, run_length, complete_filename): - """Run the MD simulation in NVE.""" - import hoomd - - sim_mode = 'nve_md' - restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') - is_restarting = job.isfile(restart_filename) - - if is_restarting: - initial_state = job.fn(restart_filename) - else: - initial_state = job.fn('lj_fluid_initial_state.gsd') - - nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) - - sim = make_md_simulation( - job, device, initial_state, nve, sim_mode, period_multiplier=200 - ) - - if not is_restarting: - sim.state.thermalize_particle_momenta( - hoomd.filter.All(), job.cached_statepoint['kT'] - ) - - # Run for a long time to look for energy and momentum drift - device.notice('Running...') - - util.run_up_to_walltime( - sim=sim, - end_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length, - steps=500_000, - walltime_stop=WALLTIME_STOP_SECONDS, - ) - - if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length: - pathlib.Path(job.fn(complete_filename)).touch() - device.notice('Done.') - else: - device.notice( - 'Ending run early due to walltime limits at:' - f'{device.communicator.walltime}' - ) - - hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') - - -def is_lj_fluid_nve(job): - """Test if a given job should be run for NVE conservation.""" - return ( - job.cached_statepoint['subproject'] == 'lj_fluid' - and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS - ) - - -partition_jobs_cpu_mpi_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by=sort_key, - select=is_lj_fluid_nve, -) - -partition_jobs_gpu_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by=sort_key, - select=is_lj_fluid_nve, -) - -nve_md_sampling_jobs = [] -nve_md_job_definitions = [ - { - 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_nve, - 'run_length': 10_000_000, - }, -] - -if CONFIG['enable_gpu']: - 
nve_md_job_definitions.extend( - [ - { - 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu_nve, - 'run_length': 100_000_000, - }, - ] - ) - - -def add_nve_md_job(device_name, ranks_per_partition, aggregator, run_length): - """Add a MD NVE conservation job to the workflow.""" - sim_mode = 'nve_md' - - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_fluid_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_fluid_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def lj_fluid_nve_md_job(*jobs): - """Run NVE MD.""" - import hoomd - - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition - ) - job = jobs[communicator.partition] - - if communicator.rank == 0: - print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) - - if device_name == 'gpu': - device_cls = hoomd.device.GPU - elif device_name == 'cpu': - device_cls = hoomd.device.CPU - - device = device_cls( - communicator=communicator, - message_filename=util.get_message_filename( - job, f'{sim_mode}_{device_name}.log' - ), - ) - run_nve_md_sim( - job, - device, - run_length=run_length, - complete_filename=f'{sim_mode}_{device_name}_complete', - ) - - if communicator.rank == 0: - print(f'completed lj_fluid_{sim_mode}_{device_name} {job}') - - nve_md_sampling_jobs.append(lj_fluid_nve_md_job) - - -for definition in nve_md_job_definitions: - add_nve_md_job(**definition) - -nve_analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles', 'r_cut'], - sort_by='replicate_idx', - select=is_lj_fluid_nve, -) - - -@Project.pre.after(*nve_md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_fluid_conservation_analysis_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=nve_analysis_aggregator, -) -def lj_fluid_conservation_analyze(*jobs): - """Analyze the output of NVE simulations and inspect conservation.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - - matplotlib.style.use('fivethirtyeight') - - print('starting lj_fluid_conservation_analyze:', jobs[0]) - - sim_modes = ['nve_md_cpu'] - if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): - sim_modes.extend(['nve_md_gpu']) - - timesteps = [] - energies = [] - linear_momenta = [] - - for job in jobs: - job_timesteps = {} - job_energies = {} - job_linear_momentum = {} - - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - - job_timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - - job_energies[sim_mode] = ( - log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' - ] - + log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' - ] - ) - job_energies[sim_mode] = ( - job_energies[sim_mode] - job_energies[sim_mode][0] - ) / job.cached_statepoint['num_particles'] - - momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] - job_linear_momentum[sim_mode] = [ - math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) - / job.cached_statepoint['num_particles'] - for v in momentum_vector - ] - - timesteps.append(job_timesteps) - 
energies.append(job_energies) - linear_momenta.append(job_linear_momentum) - - # Plot results - def plot(*, ax, data, quantity_name, legend=False): - for i, job in enumerate(jobs): - for mode in sim_modes: - ax.plot( - timesteps[i][mode], - numpy.asarray(data[i][mode]), - label=f'{mode}_{job.cached_statepoint["replicate_idx"]}', - ) - ax.set_xlabel('time step') - ax.set_ylabel(quantity_name) - - if legend: - ax.legend() - - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.68 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - plot(ax=ax, data=energies, quantity_name=r'$E / N$', legend=True) - - ax = fig.add_subplot(2, 1, 2) - plot(ax=ax, data=linear_momenta, quantity_name=r'$\left| \vec{p} \right| / N$') - - fig.suptitle( - 'LJ conservation tests: ' - f'$kT={job.cached_statepoint["kT"]}$, ' - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$' - ) - filename = ( - f'lj_fluid_conservation_kT{job.cached_statepoint["kT"]}_' - f'density{round(job.cached_statepoint["density"], 2)}_' - f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_' - f'N{job.cached_statepoint["num_particles"]}.svg' - ) - - fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - - for job in jobs: - job.document['lj_fluid_conservation_analysis_complete'] = True +# def make_md_simulation( +# job, +# device, +# initial_state, +# method, +# sim_mode, +# extra_loggables=None, +# period_multiplier=1, +# ): +# """Make an MD simulation. + +# Args: +# job (`signac.job.Job`): Signac job object. + +# device (`hoomd.device.Device`): hoomd device object. + +# initial_state (str): Path to the gsd file to be used as an initial state +# for the simulation. + +# method (`hoomd.md.methods.Method`): hoomd integration method. + +# sim_mode (str): String identifying the simulation mode. + +# extra_loggables (list): List of quantities to add to the gsd logger. + +# ThermodynamicQuantities is added by default, any more quantities should +# be in this list. + +# period_multiplier (int): Factor to multiply the GSD file periods by. 
+# """ +# import hoomd +# from hoomd import md + +# # pair force +# if extra_loggables is None: +# extra_loggables = [] +# nlist = md.nlist.Cell(buffer=0.4) +# lj = md.pair.LJ( +# default_r_cut=job.cached_statepoint['r_cut'], +# default_r_on=job.cached_statepoint['r_on'], +# nlist=nlist, +# ) +# lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) +# lj.mode = 'xplor' + +# # integrator +# integrator = md.Integrator(dt=0.001, methods=[method], forces=[lj]) + +# # compute thermo +# thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + +# # add gsd log quantities +# logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) +# logger.add( +# thermo, +# quantities=[ +# 'pressure', +# 'potential_energy', +# 'kinetic_temperature', +# 'kinetic_energy', +# ], +# ) +# logger.add(integrator, quantities=['linear_momentum']) +# for loggable in extra_loggables: +# logger.add(loggable) + +# # simulation +# sim = util.make_simulation( +# job=job, +# device=device, +# initial_state=initial_state, +# integrator=integrator, +# sim_mode=sim_mode, +# logger=logger, +# table_write_period=WRITE_PERIOD, +# trajectory_write_period=LOG_PERIOD['trajectory'] * period_multiplier, +# log_write_period=LOG_PERIOD['quantities'] * period_multiplier, +# log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, +# ) +# sim.operations.add(thermo) +# for loggable in extra_loggables: +# # call attach explicitly so we can access sim state when computing the +# # loggable quantity +# if hasattr(loggable, 'attach'): +# loggable.attach(sim) + +# return sim + + +# def run_md_sim(job, device, ensemble, thermostat, complete_filename): +# """Run the MD simulation with the given ensemble and thermostat.""" +# import hoomd +# from custom_actions import ComputeDensity +# from hoomd import md + +# initial_state = job.fn('initial_state.gsd') + +# if ensemble == 'nvt': +# if thermostat == 'langevin': +# method = md.methods.Langevin( +# hoomd.filter.All(), kT=job.cached_statepoint['kT'] +# ) +# method.gamma.default = 1.0 +# elif thermostat == 'mttk': +# method = md.methods.ConstantVolume(filter=hoomd.filter.All()) +# method.thermostat = hoomd.md.methods.thermostats.MTTK( +# kT=job.cached_statepoint['kT'], tau=0.25 +# ) +# elif thermostat == 'bussi': +# method = md.methods.ConstantVolume(filter=hoomd.filter.All()) +# method.thermostat = hoomd.md.methods.thermostats.Bussi( +# kT=job.cached_statepoint['kT'] +# ) +# else: +# raise ValueError(f'Unsupported thermostat {thermostat}') +# elif ensemble == 'npt': +# p = job.cached_statepoint['pressure'] +# method = md.methods.ConstantPressure( +# hoomd.filter.All(), S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' +# ) +# if thermostat == 'bussi': +# method.thermostat = hoomd.md.methods.thermostats.Bussi( +# kT=job.cached_statepoint['kT'] +# ) +# else: +# raise ValueError(f'Unsupported thermostat {thermostat}') + +# sim_mode = f'{ensemble}_{thermostat}_md' + +# density_compute = ComputeDensity() +# sim = make_md_simulation( +# job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] +# ) + +# # thermalize momenta +# sim.state.thermalize_particle_momenta( +# hoomd.filter.All(), job.cached_statepoint['kT'] +# ) + +# # thermalize the thermostat (if applicable) +# if ( +# isinstance(method, (md.methods.ConstantPressure, md.methods.ConstantVolume)) +# ) and hasattr(method.thermostat, 'thermalize_dof'): +# sim.run(0) +# method.thermostat.thermalize_dof() + +# # equilibrate +# device.notice('Equilibrating...') +# sim.run(EQUILIBRATE_STEPS) +# 
device.notice('Done.') + +# # run +# device.notice('Running...') +# sim.run(RUN_STEPS) + +# pathlib.Path(job.fn(complete_filename)).touch() +# device.notice('Done.') + + +# md_sampling_jobs = [] +# md_job_definitions = [ +# { +# 'ensemble': 'nvt', +# 'thermostat': 'langevin', +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi, +# }, +# { +# 'ensemble': 'nvt', +# 'thermostat': 'mttk', +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi, +# }, +# { +# 'ensemble': 'nvt', +# 'thermostat': 'bussi', +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi, +# }, +# { +# 'ensemble': 'npt', +# 'thermostat': 'bussi', +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi, +# }, +# ] + +# if CONFIG['enable_gpu']: +# md_job_definitions.extend( +# [ +# { +# 'ensemble': 'nvt', +# 'thermostat': 'langevin', +# 'device_name': 'gpu', +# 'ranks_per_partition': 1, +# 'aggregator': partition_jobs_gpu, +# }, +# { +# 'ensemble': 'nvt', +# 'thermostat': 'mttk', +# 'device_name': 'gpu', +# 'ranks_per_partition': 1, +# 'aggregator': partition_jobs_gpu, +# }, +# { +# 'ensemble': 'nvt', +# 'thermostat': 'bussi', +# 'device_name': 'gpu', +# 'ranks_per_partition': 1, +# 'aggregator': partition_jobs_gpu, +# }, +# { +# 'ensemble': 'npt', +# 'thermostat': 'bussi', +# 'device_name': 'gpu', +# 'ranks_per_partition': 1, +# 'aggregator': partition_jobs_gpu, +# }, +# ] +# ) + + +# def add_md_sampling_job( +# ensemble, thermostat, device_name, ranks_per_partition, aggregator +# ): +# """Add a MD sampling job to the workflow.""" +# sim_mode = f'{ensemble}_{thermostat}_md' + +# directives = dict( +# walltime=CONFIG['max_walltime'], +# executable=CONFIG['executable'], +# nranks=util.total_ranks_function(ranks_per_partition), +# ) + +# if device_name == 'gpu': +# directives['ngpu'] = util.total_ranks_function(ranks_per_partition) + +# @Project.pre.after(lj_fluid_create_initial_state) +# @Project.post.isfile(f'{sim_mode}_{device_name}_complete') +# @Project.operation( +# name=f'lj_fluid_{sim_mode}_{device_name}', +# directives=directives, +# aggregator=aggregator, +# ) +# def md_sampling_operation(*jobs): +# """Perform sampling simulation given the definition.""" +# import hoomd + +# communicator = hoomd.communicator.Communicator( +# ranks_per_partition=ranks_per_partition +# ) +# job = jobs[communicator.partition] + +# if communicator.rank == 0: +# print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) + +# if device_name == 'gpu': +# device_cls = hoomd.device.GPU +# elif device_name == 'cpu': +# device_cls = hoomd.device.CPU + +# device = device_cls( +# communicator=communicator, +# message_filename=util.get_message_filename( +# job, f'{sim_mode}_{device_name}.log' +# ), +# ) + +# run_md_sim( +# job, +# device, +# ensemble, +# thermostat, +# complete_filename=f'{sim_mode}_{device_name}_complete', +# ) + +# if communicator.rank == 0: +# print(f'completed lj_fluid_{sim_mode}_{device_name}: {job}') + +# md_sampling_jobs.append(md_sampling_operation) + + +# for definition in md_job_definitions: +# add_md_sampling_job(**definition) + +# ################################# +# # MC simulations +# ################################# + + +# def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): +# """Make an MC Simulation. + +# Args: +# job (`signac.job.Job`): Signac job object. 
+# device (`hoomd.device.Device`): Device object. +# initial_state (str): Path to the gsd file to be used as an initial state +# for the simulation. +# sim_mode (str): String defining the simulation mode. +# extra_loggables (list): List of extra loggables to log to gsd files. +# Patch energies are logged by default. +# """ +# import hoomd +# import numpy +# from custom_actions import ComputeDensity +# from hoomd import hpmc + +# if extra_loggables is None: +# extra_loggables = [] + +# # integrator +# mc = hpmc.integrate.Sphere(nselect=1) +# mc.shape['A'] = dict(diameter=0.0) + +# # pair potential +# epsilon = LJ_PARAMS['epsilon'] / job.cached_statepoint['kT'] # noqa F841 +# sigma = LJ_PARAMS['sigma'] +# r_on = job.cached_statepoint['r_on'] +# r_cut = job.cached_statepoint['r_cut'] + +# lennard_jones_mc = hoomd.hpmc.pair.LennardJones() +# lennard_jones_mc.params[('A', 'A')] = dict( +# epsilon=epsilon, sigma=sigma, r_cut=r_cut, r_on=r_on +# ) +# lennard_jones_mc.mode = 'xplor' +# mc.pair_potentials = [lennard_jones_mc] + +# # pair force to compute virial pressure +# nlist = hoomd.md.nlist.Cell(buffer=0.4) +# lj = hoomd.md.pair.LJ( +# default_r_cut=job.cached_statepoint['r_cut'], +# default_r_on=job.cached_statepoint['r_on'], +# nlist=nlist, +# ) +# lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) +# lj.mode = 'xplor' + +# # compute the density +# compute_density = ComputeDensity() + +# logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) +# logger.add(lennard_jones_mc, quantities=['energy']) +# logger.add(mc, quantities=['translate_moves']) +# logger.add(compute_density) +# for loggable in extra_loggables: +# logger.add(loggable) + +# # make simulation +# sim = util.make_simulation( +# job=job, +# device=device, +# initial_state=initial_state, +# integrator=mc, +# sim_mode=sim_mode, +# logger=logger, +# table_write_period=WRITE_PERIOD, +# trajectory_write_period=LOG_PERIOD['trajectory'], +# log_write_period=LOG_PERIOD['quantities'], +# log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, +# ) +# for loggable in extra_loggables: +# # call attach method explicitly so we can access simulation state when +# # computing the loggable quantity +# if hasattr(loggable, 'attach'): +# loggable.attach(sim) + +# compute_density.attach(sim) + +# def _compute_virial_pressure(): +# virials = numpy.sum(lj.virials, 0) +# w = 0 +# if virials is not None: +# w = virials[0] + virials[3] + virials[5] +# V = sim.state.box.volume +# return job.cached_statepoint['num_particles'] * job.cached_statepoint[ +# 'kT' +# ] / V + w / (3 * V) + +# logger[('custom', 'virial_pressure')] = (_compute_virial_pressure, 'scalar') + +# # move size tuner +# mstuner = hpmc.tune.MoveSize.scale_solver( +# moves=['d'], +# target=0.2, +# max_translation_move=0.5, +# trigger=hoomd.trigger.And( +# [ +# hoomd.trigger.Periodic(100), +# hoomd.trigger.Before(RANDOMIZE_STEPS | EQUILIBRATE_STEPS // 2), +# ] +# ), +# ) +# sim.operations.add(mstuner) +# sim.operations.computes.append(lj) + +# return sim + + +# def run_nvt_mc_sim(job, device, complete_filename): +# """Run MC sim in NVT.""" +# import hoomd + +# # simulation +# sim_mode = 'nvt_mc' +# restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') +# if job.isfile(restart_filename): +# initial_state = job.fn(restart_filename) +# restart = True +# else: +# initial_state = job.fn('initial_state.gsd') +# restart = False + +# sim = make_mc_simulation(job, device, initial_state, sim_mode) + +# if not restart: +# # equilibrate +# 
device.notice('Equilibrating...') +# sim.run(EQUILIBRATE_STEPS // 2) +# sim.run(EQUILIBRATE_STEPS // 2) +# device.notice('Done.') + +# # Print acceptance ratio as measured during the 2nd half of the +# # equilibration. +# translate_moves = sim.operations.integrator.translate_moves +# translate_acceptance = translate_moves[0] / sum(translate_moves) +# device.notice(f'Translate move acceptance: {translate_acceptance}') +# device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') + +# # save move size to a file +# if device.communicator.rank == 0: +# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') +# with open(job.fn(name), 'w') as f: +# json.dump(dict(d_A=sim.operations.integrator.d['A']), f) +# else: +# device.notice('Restarting...') +# # read move size from the file +# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') +# with open(job.fn(name)) as f: +# data = json.load(f) + +# sim.operations.integrator.d['A'] = data['d_A'] +# device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') + +# # run +# device.notice('Running...') +# util.run_up_to_walltime( +# sim=sim, +# end_step=TOTAL_STEPS, +# steps=RESTART_STEPS, +# walltime_stop=WALLTIME_STOP_SECONDS, +# ) + +# hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') + +# if sim.timestep == TOTAL_STEPS: +# pathlib.Path(job.fn(complete_filename)).touch() +# device.notice('Done.') +# else: +# device.notice( +# 'Ending run early due to walltime limits at:' +# f'{device.communicator.walltime}' +# ) + + +# def run_npt_mc_sim(job, device, complete_filename): +# """Run MC sim in NPT.""" +# import hoomd +# from hoomd import hpmc + +# # device +# sim_mode = 'npt_mc' +# restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') +# if job.isfile(restart_filename): +# initial_state = job.fn(restart_filename) +# restart = True +# else: +# initial_state = job.fn('initial_state.gsd') +# restart = False + +# # box updates +# boxmc = hpmc.update.BoxMC( +# betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], +# trigger=hoomd.trigger.Periodic(1), +# ) +# boxmc.volume = dict(weight=1.0, mode='ln', delta=0.01) + +# # simulation +# sim = make_mc_simulation( +# job, device, initial_state, sim_mode, extra_loggables=[boxmc] +# ) + +# sim.operations.add(boxmc) + +# boxmc_tuner = hpmc.tune.BoxMCMoveSize.scale_solver( +# trigger=hoomd.trigger.And( +# [ +# hoomd.trigger.Periodic(400), +# hoomd.trigger.Before(RANDOMIZE_STEPS + EQUILIBRATE_STEPS // 2), +# ] +# ), +# boxmc=boxmc, +# moves=['volume'], +# target=0.5, +# ) +# sim.operations.add(boxmc_tuner) + +# if not restart: +# # equilibrate +# device.notice('Equilibrating...') +# sim.run(EQUILIBRATE_STEPS // 2) +# sim.run(EQUILIBRATE_STEPS // 2) +# device.notice('Done.') + +# # Print acceptance ratio as measured during the 2nd half of the +# # equilibration. 
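+# # (Both the trial move and box move tuners stop adjusting partway
+# # through equilibration, so the second half measures the final,
+# # fixed move sizes.)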
+# translate_moves = sim.operations.integrator.translate_moves +# translate_acceptance = translate_moves[0] / sum(translate_moves) +# device.notice(f'Translate move acceptance: {translate_acceptance}') +# device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') + +# volume_moves = boxmc.volume_moves +# volume_acceptance = volume_moves[0] / sum(volume_moves) +# device.notice(f'Volume move acceptance: {volume_acceptance}') +# device.notice(f'Volume move size: {boxmc.volume["delta"]}') + +# # save move sizes to a file +# if device.communicator.rank == 0: +# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') +# with open(job.fn(name), 'w') as f: +# json.dump( +# dict( +# d_A=sim.operations.integrator.d['A'], +# volume_delta=boxmc.volume['delta'], +# ), +# f, +# ) +# else: +# device.notice('Restarting...') +# # read move size from the file +# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') +# with open(job.fn(name)) as f: +# data = json.load(f) + +# sim.operations.integrator.d['A'] = data['d_A'] +# device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') +# boxmc.volume = dict(weight=1.0, mode='ln', delta=data['volume_delta']) +# device.notice(f'Restored volume move size: {boxmc.volume["delta"]}') + +# # run +# device.notice('Running...') +# util.run_up_to_walltime( +# sim=sim, +# end_step=TOTAL_STEPS, +# steps=RESTART_STEPS, +# walltime_stop=WALLTIME_STOP_SECONDS, +# ) + +# hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') + +# if sim.timestep == TOTAL_STEPS: +# pathlib.Path(job.fn(complete_filename)).touch() +# device.notice('Done.') +# else: +# device.notice( +# 'Ending run early due to walltime limits at:' +# f'{device.communicator.walltime}' +# ) + + +# mc_sampling_jobs = [] +# mc_job_definitions = [ +# { +# 'mode': 'nvt', +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi, +# }, +# { +# 'mode': 'npt', +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi, +# }, +# ] + + +# def add_mc_sampling_job(mode, device_name, ranks_per_partition, aggregator): +# """Add a MC sampling job to the workflow.""" +# directives = dict( +# walltime=CONFIG['max_walltime'], +# executable=CONFIG['executable'], +# nranks=util.total_ranks_function(ranks_per_partition), +# ) + +# if device_name == 'gpu': +# directives['ngpu'] = util.total_ranks_function(ranks_per_partition) + +# @Project.pre.after(lj_fluid_create_initial_state) +# @Project.post.isfile(f'{mode}_mc_{device_name}_complete') +# @Project.operation( +# name=f'lj_fluid_{mode}_mc_{device_name}', +# directives=directives, +# aggregator=aggregator, +# ) +# def sampling_operation(*jobs): +# """Perform sampling simulation given the definition.""" +# import hoomd + +# communicator = hoomd.communicator.Communicator( +# ranks_per_partition=ranks_per_partition +# ) +# job = jobs[communicator.partition] + +# if communicator.rank == 0: +# print(f'starting lj_fluid_{mode}_mc_{device_name}:', job) + +# if device_name == 'gpu': +# device_cls = hoomd.device.GPU +# elif device_name == 'cpu': +# device_cls = hoomd.device.CPU + +# device = device_cls( +# communicator=communicator, +# message_filename=util.get_message_filename( +# job, f'{mode}_mc_{device_name}.log' +# ), +# ) + +# globals().get(f'run_{mode}_mc_sim')( +# job, device, complete_filename=f'{mode}_mc_{device_name}_complete' +# ) + +# if communicator.rank == 0: +# print(f'completed 
lj_fluid_{mode}_mc_{device_name}: {job}') + +# mc_sampling_jobs.append(sampling_operation) + + +# for definition in mc_job_definitions: +# add_mc_sampling_job(**definition) + + +# @Project.pre(is_lj_fluid) +# @Project.pre.after(*md_sampling_jobs) +# @Project.pre.after(*mc_sampling_jobs) +# @Project.post.true('lj_fluid_analysis_complete') +# @Project.operation( +# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) +# ) +# def lj_fluid_analyze(job): +# """Analyze the output of all simulation modes.""" +# import math + +# import matplotlib +# import matplotlib.figure +# import matplotlib.style +# import numpy + +# matplotlib.style.use('fivethirtyeight') + +# print('starting lj_fluid_analyze:', job) + +# sim_modes = [ +# 'nvt_langevin_md_cpu', +# 'nvt_mttk_md_cpu', +# 'nvt_bussi_md_cpu', +# 'npt_bussi_md_cpu', +# ] + +# if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): +# sim_modes.extend( +# [ +# 'nvt_langevin_md_gpu', +# 'nvt_mttk_md_gpu', +# 'nvt_bussi_md_gpu', +# 'npt_bussi_md_gpu', +# ] +# ) + +# if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): +# sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + +# util._sort_sim_modes(sim_modes) + +# timesteps = {} +# energies = {} +# pressures = {} +# densities = {} +# linear_momentum = {} + +# for sim_mode in sim_modes: +# log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + +# timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + +# if 'md' in sim_mode: +# energies[sim_mode] = log_traj[ +# 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' +# ] +# else: +# energies[sim_mode] = ( +# log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] +# * job.cached_statepoint['kT'] +# ) + +# energies[sim_mode] /= job.cached_statepoint['num_particles'] + +# if 'md' in sim_mode: +# pressures[sim_mode] = log_traj[ +# 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' +# ] +# else: +# pressures[sim_mode] = log_traj['hoomd-data/custom/virial_pressure'] + +# densities[sim_mode] = log_traj[ +# 'hoomd-data/custom_actions/ComputeDensity/density' +# ] + +# if 'md' in sim_mode and 'langevin' not in sim_mode: +# momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] +# linear_momentum[sim_mode] = [ +# math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector +# ] +# else: +# linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) + +# # save averages +# for mode in sim_modes: +# job.document[mode] = dict( +# pressure=float(numpy.mean(pressures[mode])), +# potential_energy=float(numpy.mean(energies[mode])), +# density=float(numpy.mean(densities[mode])), +# ) + +# # Plot results +# fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') +# ax = fig.add_subplot(2, 2, 1) +# util.plot_timeseries( +# ax=ax, +# timesteps=timesteps, +# data=densities, +# ylabel=r'$\rho$', +# expected=job.cached_statepoint['density'], +# max_points=500, +# ) +# ax.legend() + +# ax = fig.add_subplot(2, 2, 2) +# util.plot_timeseries( +# ax=ax, +# timesteps=timesteps, +# data=pressures, +# ylabel=r'$P$', +# expected=job.cached_statepoint['pressure'], +# max_points=500, +# ) + +# ax = fig.add_subplot(2, 2, 3) +# util.plot_timeseries( +# ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 +# ) + +# ax = fig.add_subplot(2, 2, 4) +# util.plot_timeseries( +# ax=ax, +# timesteps=timesteps, +# data={ +# mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] +# for mode, lm in linear_momentum.items() +# }, +# ylabel=r'$|\vec{p}| / 
N$',
+# max_points=500,
+# )
+
+# fig.suptitle(
+# f'$kT={job.cached_statepoint["kT"]}$, '
+# f'$\\rho={job.cached_statepoint["density"]}$, '
+# f'$N={job.cached_statepoint["num_particles"]}$, '
+# f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, '
+# f'replicate={job.cached_statepoint["replicate_idx"]}'
+# )
+# fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight')
+
+# job.document['lj_fluid_analysis_complete'] = True
+
+
+# analysis_aggregator = aggregator.groupby(
+# key=['kT', 'density', 'num_particles', 'r_cut'],
+# sort_by='replicate_idx',
+# select=is_lj_fluid,
+# )
+
+
+# @Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_fluid_analysis_complete'))
+# @Project.post(lambda *jobs: util.true_all(*jobs, key='lj_fluid_compare_modes_complete'))
+# @Project.operation(
+# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']),
+# aggregator=analysis_aggregator,
+# )
+# def lj_fluid_compare_modes(*jobs):
+# """Compares the tested simulation modes."""
+# import matplotlib
+# import matplotlib.figure
+# import matplotlib.style
+# import numpy
+
+# matplotlib.style.use('fivethirtyeight')
+
+# print('starting lj_fluid_compare_modes:', jobs[0])
+
+# sim_modes = [
+# 'nvt_langevin_md_cpu',
+# 'nvt_mttk_md_cpu',
+# 'nvt_bussi_md_cpu',
+# 'npt_bussi_md_cpu',
+# ]
+
+# if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')):
+# sim_modes.extend(
+# [
+# 'nvt_langevin_md_gpu',
+# 'nvt_mttk_md_gpu',
+# 'nvt_bussi_md_gpu',
+# 'npt_bussi_md_gpu',
+# ]
+# )
+
+# if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')):
+# sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu'])
+
+# util._sort_sim_modes(sim_modes)
+
+# quantity_names = ['density', 'pressure', 'potential_energy']
+# labels = {
+# 'density': r'$\frac{\rho_\mathrm{sample} - \rho}{\rho} \cdot 1000$',
+# 'pressure': r'$\frac{P_\mathrm{sample} - P}{P} \cdot 1000$',
+# 'potential_energy': r'$\frac{U_\mathrm{sample} - \langle U \rangle}{\langle U \rangle} \cdot 1000$',
+# }
+
+# # grab the common statepoint parameters
+# kT = jobs[0].sp.kT
+# set_density = jobs[0].sp.density
+# set_pressure = jobs[0].sp.pressure
+# num_particles = jobs[0].sp.num_particles
+
+# quantity_reference = dict(
+# density=set_density, pressure=set_pressure, potential_energy=None
+# )
+
+# fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 3), layout='tight')
+# fig.suptitle(
+# f'$kT={kT}$, $\\rho={set_density}$, '
+# f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, '
+# f'$N={num_particles}$'
+# )
+
+# for i, quantity_name in enumerate(quantity_names):
+# ax = fig.add_subplot(3, 1, i + 1)
+
+# # organize data from jobs
+# quantities = {mode: [] for mode in sim_modes}
+# for jb in jobs:
+# for mode in sim_modes:
+# quantities[mode].append(getattr(getattr(jb.doc, mode), quantity_name))
+
+# if quantity_reference[quantity_name] is not None:
+# reference = quantity_reference[quantity_name]
+# else:
+# avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes}
+# reference = numpy.mean([avg_value[mode] for mode in sim_modes])
+
+# avg_quantity, stderr_quantity = util.plot_vs_expected(
+# ax=ax,
+# values=quantities,
+# ylabel=labels[quantity_name],
+# expected=reference,
+# relative_scale=1000,
+# separate_nvt_npt=True,
+# )
+
+# if quantity_name == 'density':
+# if 'npt_mc_cpu' in avg_quantity:
+# print(
+# f'Average npt_mc_cpu density {num_particles}:',
+# avg_quantity['npt_mc_cpu'],
+# '+/-',
+# stderr_quantity['npt_mc_cpu'],
+# )
+# print(
+# f'Average npt_md_cpu density {num_particles}:',
+# avg_quantity['npt_bussi_md_cpu'],
+# 
'+/-',
+# stderr_quantity['npt_bussi_md_cpu'],
+# )
+# if quantity_name == 'pressure':
+# if 'nvt_mc_cpu' in avg_quantity:
+# print(
+# f'Average nvt_mc_cpu pressure {num_particles}:',
+# avg_quantity['nvt_mc_cpu'],
+# '+/-',
+# stderr_quantity['nvt_mc_cpu'],
+# )
+# if 'npt_mc_cpu' in avg_quantity:
+# print(
+# f'Average npt_mc_cpu pressure {num_particles}:',
+# avg_quantity['npt_mc_cpu'],
+# '+/-',
+# stderr_quantity['npt_mc_cpu'],
+# )
+
+# filename = (
+# f'lj_fluid_compare_kT{kT}_density{round(set_density, 2)}_'
+# f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_'
+# f'N{num_particles}.svg'
+# )
+
+# fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight')
+
+# for job in jobs:
+# job.document['lj_fluid_compare_modes_complete'] = True
+
+
+# @Project.pre.after(*md_sampling_jobs)
+# @Project.post(
+# lambda *jobs: util.true_all(*jobs, key='lj_fluid_distribution_analyze_complete')
+# )
+# @Project.operation(
+# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']),
+# aggregator=analysis_aggregator,
+# )
+# def lj_fluid_distribution_analyze(*jobs):
+# """Checks that MD follows the correct KE distribution."""
+# import matplotlib
+# import matplotlib.figure
+# import matplotlib.style
+# import numpy
+# import scipy
+
+# matplotlib.style.use('fivethirtyeight')
+
+# print('starting lj_fluid_distribution_analyze:', jobs[0])
+
+# sim_modes = [
+# 'nvt_langevin_md_cpu',
+# 'nvt_mttk_md_cpu',
+# 'nvt_bussi_md_cpu',
+# 'npt_bussi_md_cpu',
+# ]
+
+# if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')):
+# sim_modes.extend(
+# [
+# 'nvt_langevin_md_gpu',
+# 'nvt_mttk_md_gpu',
+# 'nvt_bussi_md_gpu',
+# 'npt_bussi_md_gpu',
+# ]
+# )
+
+# if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')):
+# sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu'])
+
+# util._sort_sim_modes(sim_modes)
+
+# # grab the common statepoint parameters
+# kT = jobs[0].sp.kT
+# set_density = jobs[0].sp.density
+# num_particles = jobs[0].sp.num_particles
+
+# fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight')
+# fig.suptitle(
+# f'$kT={kT}$, $\\rho={set_density}$, '
+# f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, '
+# f'$N={num_particles}$'
+# )
+
+# ke_means_expected = collections.defaultdict(list)
+# ke_sigmas_expected = collections.defaultdict(list)
+# ke_samples = collections.defaultdict(list)
+# potential_energy_samples = collections.defaultdict(list)
+# density_samples = collections.defaultdict(list)
+# pressure_samples = collections.defaultdict(list)
+
+# for job in jobs:
+# for sim_mode in sim_modes:
+# if sim_mode.startswith('nvt_langevin'):
+# n_dof = num_particles * 3
+# else:
+# n_dof = num_particles * 3 - 3
+
+# print('Reading ' + job.fn(sim_mode + '_quantities.h5'))
+# log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5'))
+
+# if 'md' in sim_mode:
+# ke = log_traj[
+# 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy'
+# ]
+# ke_means_expected[sim_mode].append(numpy.mean(ke) - 1 / 2 * n_dof * kT)
+# ke_sigmas_expected[sim_mode].append(
+# numpy.std(ke) - 1 / math.sqrt(2) * math.sqrt(n_dof) * kT
+# )
+
+# ke_samples[sim_mode].extend(ke)
+# else:
+# ke_samples[sim_mode].extend(
+# [
+# 3
+# / 2
+# * job.cached_statepoint['num_particles']
+# * job.cached_statepoint['kT']
+# ]
+# )
+
+# if 'md' in sim_mode:
+# potential_energy_samples[sim_mode].extend(
+# list(
+# log_traj[
+# 'hoomd-data/md/compute/ThermodynamicQuantities'
+# '/potential_energy'
+# ]
+# )
+# )
+# else:
+# 
potential_energy_samples[sim_mode].extend(
+# list(
+# log_traj['hoomd-data/hpmc/pair/LennardJones/energy']
+# * job.cached_statepoint['kT']
+# )
+# )
+
+# if 'md' in sim_mode:
+# pressure_samples[sim_mode].extend(
+# list(
+# log_traj[
+# 'hoomd-data/md/compute/ThermodynamicQuantities/pressure'
+# ]
+# )
+# )
+# else:
+# pressure_samples[sim_mode].extend(
+# list(log_traj['hoomd-data/custom/virial_pressure'])
+# )
+
+# density_samples[sim_mode].extend(
+# list(log_traj['hoomd-data/custom_actions/ComputeDensity/density'])
+# )
+
+# ax = fig.add_subplot(2, 2, 1)
+# util.plot_vs_expected(ax, ke_means_expected, '$\\langle K \\rangle - 1/2 N_{dof} k T$')
+
+# ax = fig.add_subplot(2, 2, 2)
+# # https://doi.org/10.1371/journal.pone.0202764
+# util.plot_vs_expected(
+# ax, ke_sigmas_expected, r'$\Delta K - 1/\sqrt{2} \sqrt{N_{dof}} k T$'
+# )
+
+# ax = fig.add_subplot(2, 4, 5)
+# rv = scipy.stats.gamma(
+# 3 * job.cached_statepoint['num_particles'] / 2,
+# scale=job.cached_statepoint['kT'],
+# )
+# util.plot_distribution(ax, ke_samples, 'K', expected=rv.pdf)
+# ax.legend(loc='upper right', fontsize='xx-small')
+
+# ax = fig.add_subplot(2, 4, 6)
+# util.plot_distribution(ax, potential_energy_samples, 'U')
+
+# ax = fig.add_subplot(2, 4, 7)
+# util.plot_distribution(
+# ax, density_samples, r'$\rho$', expected=job.cached_statepoint['density']
+# )
+
+# ax = fig.add_subplot(2, 4, 8)
+# util.plot_distribution(
+# ax, pressure_samples, 'P', expected=job.cached_statepoint['pressure']
+# )
+
+# filename = (
+# f'lj_fluid_distribution_analyze_kT{kT}'
+# f'_density{round(set_density, 2)}_'
+# f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_'
+# f'N{num_particles}.svg'
+# )
+# fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight')
+
+# for job in jobs:
+# job.document['lj_fluid_distribution_analyze_complete'] = True
+
+
+# #################################
+# # MD conservation simulations
+# #################################
+
+
+# def run_nve_md_sim(job, device, run_length, complete_filename):
+# """Run the MD simulation in NVE."""
+# import hoomd
+
+# sim_mode = 'nve_md'
+# restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd')
+# is_restarting = job.isfile(restart_filename)
+
+# if is_restarting:
+# initial_state = job.fn(restart_filename)
+# else:
+# initial_state = job.fn('initial_state.gsd')
+
+# nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All())
+
+# sim = make_md_simulation(
+# job, device, initial_state, nve, sim_mode, period_multiplier=200
+# )
+
+# if not is_restarting:
+# sim.state.thermalize_particle_momenta(
+# hoomd.filter.All(), job.cached_statepoint['kT']
+# )
+
+# # Run for a long time to look for energy and momentum drift
+# device.notice('Running...')
+
+# util.run_up_to_walltime(
+# sim=sim,
+# end_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length,
+# steps=500_000,
+# walltime_stop=WALLTIME_STOP_SECONDS,
+# )
+
+# if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length:
+# pathlib.Path(job.fn(complete_filename)).touch()
+# device.notice('Done.')
+# else:
+# device.notice(
+# 'Ending run early due to walltime limits at:'
+# f'{device.communicator.walltime}'
+# )
+
+# hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb')
+
+
+# def is_lj_fluid_nve(job):
+# """Test if a given job should be run for NVE conservation."""
+# return (
+# job.cached_statepoint['subproject'] == 'lj_fluid'
+# and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS
+# )
+
+
+# partition_jobs_cpu_mpi_nve = aggregator.groupsof(
+# 
num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), +# sort_by=sort_key, +# select=is_lj_fluid_nve, +# ) + +# partition_jobs_gpu_nve = aggregator.groupsof( +# num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), +# sort_by=sort_key, +# select=is_lj_fluid_nve, +# ) + +# nve_md_sampling_jobs = [] +# nve_md_job_definitions = [ +# { +# 'device_name': 'cpu', +# 'ranks_per_partition': NUM_CPU_RANKS, +# 'aggregator': partition_jobs_cpu_mpi_nve, +# 'run_length': 10_000_000, +# }, +# ] + +# if CONFIG['enable_gpu']: +# nve_md_job_definitions.extend( +# [ +# { +# 'device_name': 'gpu', +# 'ranks_per_partition': 1, +# 'aggregator': partition_jobs_gpu_nve, +# 'run_length': 100_000_000, +# }, +# ] +# ) + + +# def add_nve_md_job(device_name, ranks_per_partition, aggregator, run_length): +# """Add a MD NVE conservation job to the workflow.""" +# sim_mode = 'nve_md' + +# directives = dict( +# walltime=CONFIG['max_walltime'], +# executable=CONFIG['executable'], +# nranks=util.total_ranks_function(ranks_per_partition), +# ) + +# if device_name == 'gpu': +# directives['ngpu'] = util.total_ranks_function(ranks_per_partition) + +# @Project.pre.after(lj_fluid_create_initial_state) +# @Project.post.isfile(f'{sim_mode}_{device_name}_complete') +# @Project.operation( +# name=f'lj_fluid_{sim_mode}_{device_name}', +# directives=directives, +# aggregator=aggregator, +# ) +# def lj_fluid_nve_md_job(*jobs): +# """Run NVE MD.""" +# import hoomd + +# communicator = hoomd.communicator.Communicator( +# ranks_per_partition=ranks_per_partition +# ) +# job = jobs[communicator.partition] + +# if communicator.rank == 0: +# print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) + +# if device_name == 'gpu': +# device_cls = hoomd.device.GPU +# elif device_name == 'cpu': +# device_cls = hoomd.device.CPU + +# device = device_cls( +# communicator=communicator, +# message_filename=util.get_message_filename( +# job, f'{sim_mode}_{device_name}.log' +# ), +# ) +# run_nve_md_sim( +# job, +# device, +# run_length=run_length, +# complete_filename=f'{sim_mode}_{device_name}_complete', +# ) + +# if communicator.rank == 0: +# print(f'completed lj_fluid_{sim_mode}_{device_name} {job}') + +# nve_md_sampling_jobs.append(lj_fluid_nve_md_job) + + +# for definition in nve_md_job_definitions: +# add_nve_md_job(**definition) + +# nve_analysis_aggregator = aggregator.groupby( +# key=['kT', 'density', 'num_particles', 'r_cut'], +# sort_by='replicate_idx', +# select=is_lj_fluid_nve, +# ) + + +# @Project.pre.after(*nve_md_sampling_jobs) +# @Project.post( +# lambda *jobs: util.true_all(*jobs, key='lj_fluid_conservation_analysis_complete') +# ) +# @Project.operation( +# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), +# aggregator=nve_analysis_aggregator, +# ) +# def lj_fluid_conservation_analyze(*jobs): +# """Analyze the output of NVE simulations and inspect conservation.""" +# import math + +# import matplotlib +# import matplotlib.figure +# import matplotlib.style +# import numpy + +# matplotlib.style.use('fivethirtyeight') + +# print('starting lj_fluid_conservation_analyze:', jobs[0]) + +# sim_modes = ['nve_md_cpu'] +# if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): +# sim_modes.extend(['nve_md_gpu']) + +# timesteps = [] +# energies = [] +# linear_momenta = [] + +# for job in jobs: +# job_timesteps = {} +# job_energies = {} +# job_linear_momentum = {} + +# for sim_mode in sim_modes: +# log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + +# 
job_timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + +# job_energies[sim_mode] = ( +# log_traj[ +# 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' +# ] +# + log_traj[ +# 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' +# ] +# ) +# job_energies[sim_mode] = ( +# job_energies[sim_mode] - job_energies[sim_mode][0] +# ) / job.cached_statepoint['num_particles'] + +# momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] +# job_linear_momentum[sim_mode] = [ +# math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) +# / job.cached_statepoint['num_particles'] +# for v in momentum_vector +# ] + +# timesteps.append(job_timesteps) +# energies.append(job_energies) +# linear_momenta.append(job_linear_momentum) + +# # Plot results +# def plot(*, ax, data, quantity_name, legend=False): +# for i, job in enumerate(jobs): +# for mode in sim_modes: +# ax.plot( +# timesteps[i][mode], +# numpy.asarray(data[i][mode]), +# label=f'{mode}_{job.cached_statepoint["replicate_idx"]}', +# ) +# ax.set_xlabel('time step') +# ax.set_ylabel(quantity_name) + +# if legend: +# ax.legend() + +# fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.68 * 2), layout='tight') +# ax = fig.add_subplot(2, 1, 1) +# plot(ax=ax, data=energies, quantity_name=r'$E / N$', legend=True) + +# ax = fig.add_subplot(2, 1, 2) +# plot(ax=ax, data=linear_momenta, quantity_name=r'$\left| \vec{p} \right| / N$') + +# fig.suptitle( +# 'LJ conservation tests: ' +# f'$kT={job.cached_statepoint["kT"]}$, ' +# f'$\\rho={job.cached_statepoint["density"]}$, ' +# f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' +# f'$N={job.cached_statepoint["num_particles"]}$' +# ) +# filename = ( +# f'lj_fluid_conservation_kT{job.cached_statepoint["kT"]}_' +# f'density{round(job.cached_statepoint["density"], 2)}_' +# f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_' +# f'N{job.cached_statepoint["num_particles"]}.svg' +# ) + +# fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') + +# for job in jobs: +# job.document['lj_fluid_conservation_analysis_complete'] = True diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 4e509688..3c62c36c 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -8,7 +8,7 @@ import config # import hard_disk # import hard_sphere -# import lj_fluid +import lj_fluid # import lj_union # import patchy_particle_pressure # import simple_polygon @@ -18,7 +18,7 @@ all_subprojects = [ # 'alj_2d', - # lj_fluid, + lj_fluid, # 'lj_union', # 'hard_disk', # 'hard_sphere', diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py index 4b857016..038ef137 100644 --- a/hoomd_validation/workflow.py +++ b/hoomd_validation/workflow.py @@ -13,6 +13,7 @@ import argparse from pathlib import Path +import signac import rtoml @@ -54,7 +55,7 @@ class Workflow: @classmethod def add_action(cls, name, action): """Add an action. - + Args: name(str): The action's name. Must be unique. action(Action): The action itself. 
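For reference, `write_workflow` below emits a file shaped roughly like this
sketch (the entrypoint and action name are illustrative; the real file also
carries each action's group and resource tables):

    [workspace]
    path = "workspace"
    value_file = "signac_statepoint.json"

    [default.action]
    command = "python -u project.py action $ACTION_NAME {directories}"

    [[action]]
    name = "lj_fluid.create_initial_state"

row expands `{directories}` to the selected directory ids and, by the
ACTION_* environment convention used elsewhere in this series, supplies the
action name through `$ACTION_NAME`, so each submission ultimately runs
`python -u project.py action <name> <directory ids...>`; `main` then looks
each id up as a signac job and dispatches to the registered callable.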
@@ -83,16 +84,16 @@ def write_workflow(cls, entrypoint, path=None, default=None): """ workflow = {'workspace': {'path': 'workspace', 'value_file': 'signac_statepoint.json'}} - workflow['default'] = {'action': {'command': f'python -u {entrypoint} $ACTION_NAME {{directories}}'}} + workflow['default'] = {'action': {'command': f'python -u {entrypoint} action $ACTION_NAME {{directories}}'}} if default is not None: workflow['default'].update(default) - workflow['actions'] = [] + workflow['action'] = [] for name, action_item in cls._actions.items(): action = {'name': name} action.update(action_item._configuration) - workflow['actions'].append(action) + workflow['action'].append(action) if path is None: path = Path('.') @@ -125,6 +126,10 @@ def main(cls, init = None, init_args = None, **kwargs): for arg in init_args: init_parser.add_argument(arg) + action_parser = command.add_parser('action') + action_parser.add_argument('action') + action_parser.add_argument('directories', nargs='+') + args = parser.parse_args() if args.command == 'init': @@ -132,6 +137,11 @@ def main(cls, init = None, init_args = None, **kwargs): init(args) cls.write_workflow(**kwargs) + elif args.command == 'action': + project = signac.get_project() + jobs = [project.open_job(id=directory) for directory in args.directories] + cls._actions[args.action](*jobs) + else: message = f'Invalid command: {args.command}' raise RuntimeError(message) From 82b8487ac574d83db67c6dff0205d26240432268 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Wed, 31 Jul 2024 16:05:19 -0400 Subject: [PATCH 03/34] Initial MD simulations working. --- hoomd_validation/lj_fluid.py | 503 +++++++++++++++++------------------ hoomd_validation/util.py | 84 ++++-- hoomd_validation/workflow.py | 2 +- 3 files changed, 295 insertions(+), 294 deletions(-) diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 115424b6..51da0a66 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -18,6 +18,7 @@ from flow import aggregator from workflow_class import ValidationWorkflow from workflow import Action +from custom_actions import ComputeDensity # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -77,9 +78,10 @@ def job_statepoints(): _group = {'sort_by': ["/density", "/num_particles"], 'include': [{'condition': ["/subproject", "==", __name__]}]} -_resources_cpu = {'processes': {'per_directory': NUM_CPU_RANKS}} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} _group_cpu = _group | {'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS)} -_resources_gpu = {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} _group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} @@ -150,294 +152,265 @@ def create_initial_state(*jobs): ################################# -# def make_md_simulation( -# job, -# device, -# initial_state, -# method, -# sim_mode, -# extra_loggables=None, -# period_multiplier=1, -# ): -# """Make an MD simulation. +def make_md_simulation( + job, + device, + initial_state, + method, + sim_mode, + extra_loggables=None, + period_multiplier=1, +): + """Make an MD simulation. -# Args: -# job (`signac.job.Job`): Signac job object. - -# device (`hoomd.device.Device`): hoomd device object. 
- -# initial_state (str): Path to the gsd file to be used as an initial state -# for the simulation. + Args: + job (`signac.job.Job`): Signac job object. -# method (`hoomd.md.methods.Method`): hoomd integration method. + device (`hoomd.device.Device`): hoomd device object. -# sim_mode (str): String identifying the simulation mode. - -# extra_loggables (list): List of quantities to add to the gsd logger. - -# ThermodynamicQuantities is added by default, any more quantities should -# be in this list. - -# period_multiplier (int): Factor to multiply the GSD file periods by. -# """ -# import hoomd -# from hoomd import md + initial_state (str): Path to the gsd file to be used as an initial state + for the simulation. -# # pair force -# if extra_loggables is None: -# extra_loggables = [] -# nlist = md.nlist.Cell(buffer=0.4) -# lj = md.pair.LJ( -# default_r_cut=job.cached_statepoint['r_cut'], -# default_r_on=job.cached_statepoint['r_on'], -# nlist=nlist, -# ) -# lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) -# lj.mode = 'xplor' + method (`hoomd.md.methods.Method`): hoomd integration method. -# # integrator -# integrator = md.Integrator(dt=0.001, methods=[method], forces=[lj]) + sim_mode (str): String identifying the simulation mode. -# # compute thermo -# thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + extra_loggables (list): List of quantities to add to the gsd logger. -# # add gsd log quantities -# logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) -# logger.add( -# thermo, -# quantities=[ -# 'pressure', -# 'potential_energy', -# 'kinetic_temperature', -# 'kinetic_energy', -# ], -# ) -# logger.add(integrator, quantities=['linear_momentum']) -# for loggable in extra_loggables: -# logger.add(loggable) - -# # simulation -# sim = util.make_simulation( -# job=job, -# device=device, -# initial_state=initial_state, -# integrator=integrator, -# sim_mode=sim_mode, -# logger=logger, -# table_write_period=WRITE_PERIOD, -# trajectory_write_period=LOG_PERIOD['trajectory'] * period_multiplier, -# log_write_period=LOG_PERIOD['quantities'] * period_multiplier, -# log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, -# ) -# sim.operations.add(thermo) -# for loggable in extra_loggables: -# # call attach explicitly so we can access sim state when computing the -# # loggable quantity -# if hasattr(loggable, 'attach'): -# loggable.attach(sim) - -# return sim + ThermodynamicQuantities is added by default, any more quantities should + be in this list. + period_multiplier (int): Factor to multiply the GSD file periods by. 
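+
+ (The NVE conservation runs pass period_multiplier=200, for example, so
+ their much longer trajectories do not produce oversized output files.)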
+ """ + # pair force + if extra_loggables is None: + extra_loggables = [] + nlist = hoomd.md.nlist.Cell(buffer=0.4) + lj = hoomd.md.pair.LJ( + default_r_cut=job.cached_statepoint['r_cut'], + default_r_on=job.cached_statepoint['r_on'], + nlist=nlist, + ) + lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) + lj.mode = 'xplor' + + # integrator + integrator = hoomd.md.Integrator(dt=0.001, methods=[method], forces=[lj]) + + # compute thermo + thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All()) + + # add gsd log quantities + logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) + logger.add( + thermo, + quantities=[ + 'pressure', + 'potential_energy', + 'kinetic_temperature', + 'kinetic_energy', + ], + ) + logger.add(integrator, quantities=['linear_momentum']) + for loggable in extra_loggables: + logger.add(loggable) + + # simulation + sim = util.make_simulation( + job=job, + device=device, + initial_state=initial_state, + integrator=integrator, + sim_mode=sim_mode, + logger=logger, + table_write_period=WRITE_PERIOD, + trajectory_write_period=LOG_PERIOD['trajectory'] * period_multiplier, + log_write_period=LOG_PERIOD['quantities'] * period_multiplier, + log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, + ) + sim.operations.add(thermo) + for loggable in extra_loggables: + # call attach explicitly so we can access sim state when computing the + # loggable quantity + if hasattr(loggable, 'attach'): + loggable.attach(sim) -# def run_md_sim(job, device, ensemble, thermostat, complete_filename): -# """Run the MD simulation with the given ensemble and thermostat.""" -# import hoomd -# from custom_actions import ComputeDensity -# from hoomd import md - -# initial_state = job.fn('initial_state.gsd') - -# if ensemble == 'nvt': -# if thermostat == 'langevin': -# method = md.methods.Langevin( -# hoomd.filter.All(), kT=job.cached_statepoint['kT'] -# ) -# method.gamma.default = 1.0 -# elif thermostat == 'mttk': -# method = md.methods.ConstantVolume(filter=hoomd.filter.All()) -# method.thermostat = hoomd.md.methods.thermostats.MTTK( -# kT=job.cached_statepoint['kT'], tau=0.25 -# ) -# elif thermostat == 'bussi': -# method = md.methods.ConstantVolume(filter=hoomd.filter.All()) -# method.thermostat = hoomd.md.methods.thermostats.Bussi( -# kT=job.cached_statepoint['kT'] -# ) -# else: -# raise ValueError(f'Unsupported thermostat {thermostat}') -# elif ensemble == 'npt': -# p = job.cached_statepoint['pressure'] -# method = md.methods.ConstantPressure( -# hoomd.filter.All(), S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' -# ) -# if thermostat == 'bussi': -# method.thermostat = hoomd.md.methods.thermostats.Bussi( -# kT=job.cached_statepoint['kT'] -# ) -# else: -# raise ValueError(f'Unsupported thermostat {thermostat}') - -# sim_mode = f'{ensemble}_{thermostat}_md' - -# density_compute = ComputeDensity() -# sim = make_md_simulation( -# job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] -# ) - -# # thermalize momenta -# sim.state.thermalize_particle_momenta( -# hoomd.filter.All(), job.cached_statepoint['kT'] -# ) - -# # thermalize the thermostat (if applicable) -# if ( -# isinstance(method, (md.methods.ConstantPressure, md.methods.ConstantVolume)) -# ) and hasattr(method.thermostat, 'thermalize_dof'): -# sim.run(0) -# method.thermostat.thermalize_dof() - -# # equilibrate -# device.notice('Equilibrating...') -# sim.run(EQUILIBRATE_STEPS) -# device.notice('Done.') - -# # run -# device.notice('Running...') -# sim.run(RUN_STEPS) - -# 
pathlib.Path(job.fn(complete_filename)).touch() -# device.notice('Done.') - + return sim -# md_sampling_jobs = [] -# md_job_definitions = [ -# { -# 'ensemble': 'nvt', -# 'thermostat': 'langevin', -# 'device_name': 'cpu', -# 'ranks_per_partition': NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi, -# }, -# { -# 'ensemble': 'nvt', -# 'thermostat': 'mttk', -# 'device_name': 'cpu', -# 'ranks_per_partition': NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi, -# }, -# { -# 'ensemble': 'nvt', -# 'thermostat': 'bussi', -# 'device_name': 'cpu', -# 'ranks_per_partition': NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi, -# }, -# { -# 'ensemble': 'npt', -# 'thermostat': 'bussi', -# 'device_name': 'cpu', -# 'ranks_per_partition': NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi, -# }, -# ] -# if CONFIG['enable_gpu']: -# md_job_definitions.extend( -# [ -# { -# 'ensemble': 'nvt', -# 'thermostat': 'langevin', -# 'device_name': 'gpu', -# 'ranks_per_partition': 1, -# 'aggregator': partition_jobs_gpu, -# }, -# { -# 'ensemble': 'nvt', -# 'thermostat': 'mttk', -# 'device_name': 'gpu', -# 'ranks_per_partition': 1, -# 'aggregator': partition_jobs_gpu, -# }, -# { -# 'ensemble': 'nvt', -# 'thermostat': 'bussi', -# 'device_name': 'gpu', -# 'ranks_per_partition': 1, -# 'aggregator': partition_jobs_gpu, -# }, -# { -# 'ensemble': 'npt', -# 'thermostat': 'bussi', -# 'device_name': 'gpu', -# 'ranks_per_partition': 1, -# 'aggregator': partition_jobs_gpu, -# }, -# ] -# ) +def run_md_sim(job, device, ensemble, thermostat): + """Run the MD simulation with the given ensemble and thermostat.""" + initial_state = job.fn('initial_state.gsd') + if ensemble == 'nvt': + if thermostat == 'langevin': + method = hoomd.md.methods.Langevin( + hoomd.filter.All(), kT=job.cached_statepoint['kT'] + ) + method.gamma.default = 1.0 + elif thermostat == 'mttk': + method = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) + method.thermostat = hoomd.md.methods.thermostats.MTTK( + kT=job.cached_statepoint['kT'], tau=0.25 + ) + elif thermostat == 'bussi': + method = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) + method.thermostat = hoomd.md.methods.thermostats.Bussi( + kT=job.cached_statepoint['kT'] + ) + else: + raise ValueError(f'Unsupported thermostat {thermostat}') + elif ensemble == 'npt': + p = job.cached_statepoint['pressure'] + method = hoomd.md.methods.ConstantPressure( + hoomd.filter.All(), S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' + ) + if thermostat == 'bussi': + method.thermostat = hoomd.md.methods.thermostats.Bussi( + kT=job.cached_statepoint['kT'] + ) + else: + raise ValueError(f'Unsupported thermostat {thermostat}') -# def add_md_sampling_job( -# ensemble, thermostat, device_name, ranks_per_partition, aggregator -# ): -# """Add a MD sampling job to the workflow.""" -# sim_mode = f'{ensemble}_{thermostat}_md' + sim_mode = f'{ensemble}_{thermostat}_md' -# directives = dict( -# walltime=CONFIG['max_walltime'], -# executable=CONFIG['executable'], -# nranks=util.total_ranks_function(ranks_per_partition), -# ) + if util.is_simulation_complete(job, device, sim_mode): + return -# if device_name == 'gpu': -# directives['ngpu'] = util.total_ranks_function(ranks_per_partition) + density_compute = ComputeDensity() + sim = make_md_simulation( + job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] + ) -# @Project.pre.after(lj_fluid_create_initial_state) -# @Project.post.isfile(f'{sim_mode}_{device_name}_complete') -# @Project.operation( -# 
name=f'lj_fluid_{sim_mode}_{device_name}', -# directives=directives, -# aggregator=aggregator, -# ) -# def md_sampling_operation(*jobs): -# """Perform sampling simulation given the definition.""" -# import hoomd + # thermalize momenta + sim.state.thermalize_particle_momenta( + hoomd.filter.All(), job.cached_statepoint['kT'] + ) -# communicator = hoomd.communicator.Communicator( -# ranks_per_partition=ranks_per_partition -# ) -# job = jobs[communicator.partition] + # thermalize the thermostat (if applicable) + if ( + isinstance(method, (hoomd.md.methods.ConstantPressure, hoomd.md.methods.ConstantVolume)) + ) and hasattr(method.thermostat, 'thermalize_dof'): + sim.run(0) + method.thermostat.thermalize_dof() -# if communicator.rank == 0: -# print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) + # equilibrate + device.notice('Equilibrating...') + sim.run(EQUILIBRATE_STEPS) + device.notice('Done.') -# if device_name == 'gpu': -# device_cls = hoomd.device.GPU -# elif device_name == 'cpu': -# device_cls = hoomd.device.CPU + # run + device.notice('Running...') + sim.run(RUN_STEPS) -# device = device_cls( -# communicator=communicator, -# message_filename=util.get_message_filename( -# job, f'{sim_mode}_{device_name}.log' -# ), -# ) + util.mark_simulation_complete(job, device, sim_mode) -# run_md_sim( -# job, -# device, -# ensemble, -# thermostat, -# complete_filename=f'{sim_mode}_{device_name}_complete', -# ) + device.notice('Done.') -# if communicator.rank == 0: -# print(f'completed lj_fluid_{sim_mode}_{device_name}: {job}') -# md_sampling_jobs.append(md_sampling_operation) +md_sampling_jobs = [] +md_job_definitions = [ + { + 'ensemble': 'nvt', + 'thermostat': 'langevin', + 'device_name': 'cpu', + }, + { + 'ensemble': 'nvt', + 'thermostat': 'mttk', + 'device_name': 'cpu', + }, + { + 'ensemble': 'nvt', + 'thermostat': 'bussi', + 'device_name': 'cpu', + }, + { + 'ensemble': 'npt', + 'thermostat': 'bussi', + 'device_name': 'cpu', + }, +] + +if CONFIG['enable_gpu']: + md_job_definitions.extend( + [ + { + 'ensemble': 'nvt', + 'thermostat': 'langevin', + 'device_name': 'gpu', + }, + { + 'ensemble': 'nvt', + 'thermostat': 'mttk', + 'device_name': 'gpu', + }, + { + 'ensemble': 'nvt', + 'thermostat': 'bussi', + 'device_name': 'gpu', + }, + { + 'ensemble': 'npt', + 'thermostat': 'bussi', + 'device_name': 'gpu', + }, + ] + ) -# for definition in md_job_definitions: -# add_md_sampling_job(**definition) +def add_md_sampling_job( + ensemble, thermostat, device_name +): + """Add a MD sampling job to the workflow.""" + sim_mode = f'{ensemble}_{thermostat}_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' + + def md_sampling_operation(*jobs): + """Perform sampling simulation given the definition.""" + communicator = hoomd.communicator.Communicator( + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) + ) + job = jobs[communicator.partition] + + if communicator.rank == 0: + print(f'starting {action_name}:', job) + + if device_name == 'gpu': + device_cls = hoomd.device.GPU + elif device_name == 'cpu': + device_cls = hoomd.device.CPU + + device = device_cls( + communicator=communicator, + message_filename=util.get_message_filename( + job, f'{sim_mode}_{device_name}.log' + ), + ) + + run_md_sim( + job, + device, + ensemble, + thermostat, + ) + + if communicator.rank == 0: + print(f'completed {action_name}: {job}') + + md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action(action_name, Action(method = md_sampling_operation, + configuration={'products': 
[util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'trajectory', 'h5')], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'] + })) + + +for definition in md_job_definitions: + add_md_sampling_job(**definition) # ################################# # # MC simulations diff --git a/hoomd_validation/util.py b/hoomd_validation/util.py index 14fbfb04..292bd4b2 100644 --- a/hoomd_validation/util.py +++ b/hoomd_validation/util.py @@ -4,27 +4,21 @@ """Helper functions for grabbing data and plotting.""" import os +import numpy +import h5py import signac - - -def true_all(*jobs, key): - """Check that a given key is true in all jobs.""" - return all(job.document.get(key, False) for job in jobs) - - -def total_ranks_function(ranks_per_job): - """Make a function that computes the number of ranks for an aggregate.""" - return lambda *jobs: ranks_per_job * len(jobs) +import hoomd def get_job_filename(sim_mode, device, name, file_type): """Construct a job filename.""" - import hoomd - - suffix = 'cpu' - if isinstance(device, hoomd.device.GPU): - suffix = 'gpu' + if isinstance(device, str): + suffix = device + else: + suffix = 'cpu' + if isinstance(device, hoomd.device.GPU): + suffix = 'gpu' return f'{sim_mode}_{suffix}_{name}.{file_type}' @@ -103,8 +97,6 @@ def make_simulation( trajectory_logger (`hoomd.logging.Logger`): Logger to add to trajectory writer. """ - import hoomd - sim = hoomd.Simulation(device) sim.seed = make_seed(job, sim_mode) sim.create_state_from_gsd(initial_state) @@ -123,7 +115,7 @@ def make_simulation( # write particle trajectory to a gsd file trajectory_writer = hoomd.write.GSD( - filename=job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd')), + filename=job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd.tmp')), trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(trajectory_write_period), @@ -139,7 +131,7 @@ def make_simulation( logger.add(sim, quantities=['timestep']) quantity_writer = hoomd.write.HDF5Log( - filename=job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5')), + filename=job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5.tmp')), trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(log_write_period), @@ -154,6 +146,51 @@ def make_simulation( return sim +def is_simulation_complete( + job, + device, + sim_mode, +): + """Check if a simulation is complete. + + Check if all output files are present. + + Args: + job (`signac.Job`): signac job object. + + device (`hoomd.device.Device`): hoomd device object. + + sim_mode (str): String defining the simulation mode. + """ + gsd_exists = job.isfile(get_job_filename(sim_mode, device, 'trajectory', 'gsd')) + h5_exists = job.isfile(get_job_filename(sim_mode, device, 'quantities', 'h5')) + + return gsd_exists and h5_exists + +def mark_simulation_complete( + job, + device, + sim_mode, +): + """Mark that simulation is complete. + + Moves .tmp files to the final filename. + + Args: + job (`signac.Job`): signac job object. + + device (`hoomd.device.Device`): hoomd device object. + + sim_mode (str): String defining the simulation mode. 
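+
+ Only rank 0 performs the renames so that the MPI ranks do not race on
+ the filesystem.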
+ """ + if device.communicator.rank == 0: + os.rename(job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd.tmp')), + job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd'))) + + os.rename(job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5.tmp')), + job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5'))) + + def make_seed(job, sim_mode=None): """Make a random number seed from a job. @@ -171,8 +208,6 @@ def plot_distribution( ax, data, independent_variable_label, expected=None, bins=100, plot_rotated=False ): """Plot distributions.""" - import numpy - max_density_histogram = 0 sim_modes = data.keys() @@ -241,8 +276,6 @@ def plot_vs_expected( ax, values, ylabel, expected=0, relative_scale=None, separate_nvt_npt=False ): """Plot values vs an expected value.""" - import numpy - sim_modes = values.keys() avg_value = {} @@ -309,8 +342,6 @@ def plot_vs_expected( def plot_timeseries(ax, timesteps, data, ylabel, expected=None, max_points=None): """Plot data as a time series.""" - import numpy - provided_modes = list(data.keys()) for mode in provided_modes: @@ -344,9 +375,6 @@ def _sort_sim_modes(sim_modes): def read_log(filename): """Read a HDF5 log as a dictionary of logged quantities.""" - import h5py - import numpy - with h5py.File(mode='r', name=filename) as f: keys = [] f.visit(lambda name: keys.append(name)) diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py index 038ef137..28883e52 100644 --- a/hoomd_validation/workflow.py +++ b/hoomd_validation/workflow.py @@ -99,7 +99,7 @@ def write_workflow(cls, entrypoint, path=None, default=None): path = Path('.') with open(path / 'workflow.toml', 'w', encoding='utf-8') as workflow_file: - rtoml.dump(workflow, workflow_file) + rtoml.dump(workflow, workflow_file, pretty=True) @classmethod def main(cls, init = None, init_args = None, **kwargs): From 547c6f5c02d9d55b97702fc79659e9645cf2a94a Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Fri, 2 Aug 2024 12:39:17 -0400 Subject: [PATCH 04/34] Working lj_fluid.analyze. --- hoomd_validation/lj_fluid.py | 884 +++++++++++++++++------------------ hoomd_validation/project.py | 7 +- 2 files changed, 441 insertions(+), 450 deletions(-) diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 51da0a66..fa542b39 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -10,6 +10,9 @@ import pathlib import itertools import numpy +import matplotlib +import matplotlib.figure +import matplotlib.style import hoomd @@ -23,8 +26,11 @@ # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. 
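
The evenness requirement below comes from the equilibration phase running as two equal halves (`sim.run(EQUILIBRATE_STEPS // 2)` twice), and the periodic writers only stay aligned when each run segment is a whole number of write periods. A quick self-check of the constraint, with placeholder periods standing in for this module's actual `LOG_PERIOD` values (not shown in this patch):

```python
# Placeholder periods for illustration only; not the module's real LOG_PERIOD.
LOG_PERIOD = {'quantities': 100, 'trajectory': 1000}

for steps in (20_000, 100_000, 500_000):
    assert steps % 2 == 0, 'equilibration runs in two equal halves'
    assert all(steps % p == 0 for p in LOG_PERIOD.values()), (
        'each run segment must be a whole number of log periods'
    )
```
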
RANDOMIZE_STEPS = 20_000 -EQUILIBRATE_STEPS = 100_000 -RUN_STEPS = 500_000 +# TODO: revert +# EQUILIBRATE_STEPS = 100_000 +# RUN_STEPS = 500_000 +EQUILIBRATE_STEPS = 10_000 +RUN_STEPS = 50_000 RESTART_STEPS = RUN_STEPS // 10 TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS @@ -401,7 +407,7 @@ def md_sampling_operation(*jobs): md_sampling_jobs.append(action_name) ValidationWorkflow.add_action(action_name, Action(method = md_sampling_operation, - configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'trajectory', 'h5')], + configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'quantities', 'h5')], 'launchers': ['mpi'], 'group': globals().get(f'_group_{device_name}'), 'resources': globals().get(f'_resources_{device_name}'), @@ -417,501 +423,485 @@ def md_sampling_operation(*jobs): # ################################# -# def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): -# """Make an MC Simulation. +def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): + """Make a MC Simulation. -# Args: -# job (`signac.job.Job`): Signac job object. -# device (`hoomd.device.Device`): Device object. -# initial_state (str): Path to the gsd file to be used as an initial state -# for the simulation. -# sim_mode (str): String defining the simulation mode. -# extra_loggables (list): List of extra loggables to log to gsd files. -# Patch energies are logged by default. -# """ -# import hoomd -# import numpy -# from custom_actions import ComputeDensity -# from hoomd import hpmc - -# if extra_loggables is None: -# extra_loggables = [] - -# # integrator -# mc = hpmc.integrate.Sphere(nselect=1) -# mc.shape['A'] = dict(diameter=0.0) - -# # pair potential -# epsilon = LJ_PARAMS['epsilon'] / job.cached_statepoint['kT'] # noqa F841 -# sigma = LJ_PARAMS['sigma'] -# r_on = job.cached_statepoint['r_on'] -# r_cut = job.cached_statepoint['r_cut'] - -# lennard_jones_mc = hoomd.hpmc.pair.LennardJones() -# lennard_jones_mc.params[('A', 'A')] = dict( -# epsilon=epsilon, sigma=sigma, r_cut=r_cut, r_on=r_on -# ) -# lennard_jones_mc.mode = 'xplor' -# mc.pair_potentials = [lennard_jones_mc] - -# # pair force to compute virial pressure -# nlist = hoomd.md.nlist.Cell(buffer=0.4) -# lj = hoomd.md.pair.LJ( -# default_r_cut=job.cached_statepoint['r_cut'], -# default_r_on=job.cached_statepoint['r_on'], -# nlist=nlist, -# ) -# lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) -# lj.mode = 'xplor' - -# # compute the density -# compute_density = ComputeDensity() - -# logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) -# logger.add(lennard_jones_mc, quantities=['energy']) -# logger.add(mc, quantities=['translate_moves']) -# logger.add(compute_density) -# for loggable in extra_loggables: -# logger.add(loggable) - -# # make simulation -# sim = util.make_simulation( -# job=job, -# device=device, -# initial_state=initial_state, -# integrator=mc, -# sim_mode=sim_mode, -# logger=logger, -# table_write_period=WRITE_PERIOD, -# trajectory_write_period=LOG_PERIOD['trajectory'], -# log_write_period=LOG_PERIOD['quantities'], -# log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, -# ) -# for loggable in extra_loggables: -# # call attach method explicitly so we can access simulation state when -# # computing the loggable quantity -# if hasattr(loggable, 'attach'): -# 
loggable.attach(sim) - -# compute_density.attach(sim) - -# def _compute_virial_pressure(): -# virials = numpy.sum(lj.virials, 0) -# w = 0 -# if virials is not None: -# w = virials[0] + virials[3] + virials[5] -# V = sim.state.box.volume -# return job.cached_statepoint['num_particles'] * job.cached_statepoint[ -# 'kT' -# ] / V + w / (3 * V) - -# logger[('custom', 'virial_pressure')] = (_compute_virial_pressure, 'scalar') - -# # move size tuner -# mstuner = hpmc.tune.MoveSize.scale_solver( -# moves=['d'], -# target=0.2, -# max_translation_move=0.5, -# trigger=hoomd.trigger.And( -# [ -# hoomd.trigger.Periodic(100), -# hoomd.trigger.Before(RANDOMIZE_STEPS | EQUILIBRATE_STEPS // 2), -# ] -# ), -# ) -# sim.operations.add(mstuner) -# sim.operations.computes.append(lj) - -# return sim - - -# def run_nvt_mc_sim(job, device, complete_filename): -# """Run MC sim in NVT.""" -# import hoomd - -# # simulation -# sim_mode = 'nvt_mc' -# restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') -# if job.isfile(restart_filename): -# initial_state = job.fn(restart_filename) -# restart = True -# else: -# initial_state = job.fn('initial_state.gsd') -# restart = False - -# sim = make_mc_simulation(job, device, initial_state, sim_mode) - -# if not restart: -# # equilibrate -# device.notice('Equilibrating...') -# sim.run(EQUILIBRATE_STEPS // 2) -# sim.run(EQUILIBRATE_STEPS // 2) -# device.notice('Done.') - -# # Print acceptance ratio as measured during the 2nd half of the -# # equilibration. -# translate_moves = sim.operations.integrator.translate_moves -# translate_acceptance = translate_moves[0] / sum(translate_moves) -# device.notice(f'Translate move acceptance: {translate_acceptance}') -# device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') - -# # save move size to a file -# if device.communicator.rank == 0: -# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') -# with open(job.fn(name), 'w') as f: -# json.dump(dict(d_A=sim.operations.integrator.d['A']), f) -# else: -# device.notice('Restarting...') -# # read move size from the file -# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') -# with open(job.fn(name)) as f: -# data = json.load(f) - -# sim.operations.integrator.d['A'] = data['d_A'] -# device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') - -# # run -# device.notice('Running...') -# util.run_up_to_walltime( -# sim=sim, -# end_step=TOTAL_STEPS, -# steps=RESTART_STEPS, -# walltime_stop=WALLTIME_STOP_SECONDS, -# ) - -# hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') - -# if sim.timestep == TOTAL_STEPS: -# pathlib.Path(job.fn(complete_filename)).touch() -# device.notice('Done.') -# else: -# device.notice( -# 'Ending run early due to walltime limits at:' -# f'{device.communicator.walltime}' -# ) + Args: + job (`signac.job.Job`): Signac job object. + device (`hoomd.device.Device`): Device object. + initial_state (str): Path to the gsd file to be used as an initial state + for the simulation. + sim_mode (str): String defining the simulation mode. + extra_loggables (list): List of extra loggables to log to gsd files. + Patch energies are logged by default. 
+ """ + if extra_loggables is None: + extra_loggables = [] + # integrator + mc = hoomd.hpmc.integrate.Sphere(nselect=1) + mc.shape['A'] = dict(diameter=0.0) + + # pair potential + epsilon = LJ_PARAMS['epsilon'] / job.cached_statepoint['kT'] # noqa F841 + sigma = LJ_PARAMS['sigma'] + r_on = job.cached_statepoint['r_on'] + r_cut = job.cached_statepoint['r_cut'] + + lennard_jones_mc = hoomd.hpmc.pair.LennardJones() + lennard_jones_mc.params[('A', 'A')] = dict( + epsilon=epsilon, sigma=sigma, r_cut=r_cut, r_on=r_on + ) + lennard_jones_mc.mode = 'xplor' + mc.pair_potentials = [lennard_jones_mc] -# def run_npt_mc_sim(job, device, complete_filename): -# """Run MC sim in NPT.""" -# import hoomd -# from hoomd import hpmc + # pair force to compute virial pressure + nlist = hoomd.md.nlist.Cell(buffer=0.4) + lj = hoomd.md.pair.LJ( + default_r_cut=job.cached_statepoint['r_cut'], + default_r_on=job.cached_statepoint['r_on'], + nlist=nlist, + ) + lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) + lj.mode = 'xplor' -# # device -# sim_mode = 'npt_mc' -# restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') -# if job.isfile(restart_filename): -# initial_state = job.fn(restart_filename) -# restart = True -# else: -# initial_state = job.fn('initial_state.gsd') -# restart = False + # compute the density + compute_density = ComputeDensity() -# # box updates -# boxmc = hpmc.update.BoxMC( -# betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], -# trigger=hoomd.trigger.Periodic(1), -# ) -# boxmc.volume = dict(weight=1.0, mode='ln', delta=0.01) + logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) + logger.add(lennard_jones_mc, quantities=['energy']) + logger.add(mc, quantities=['translate_moves']) + logger.add(compute_density) + for loggable in extra_loggables: + logger.add(loggable) -# # simulation -# sim = make_mc_simulation( -# job, device, initial_state, sim_mode, extra_loggables=[boxmc] -# ) + # make simulation + sim = util.make_simulation( + job=job, + device=device, + initial_state=initial_state, + integrator=mc, + sim_mode=sim_mode, + logger=logger, + table_write_period=WRITE_PERIOD, + trajectory_write_period=LOG_PERIOD['trajectory'], + log_write_period=LOG_PERIOD['quantities'], + log_start_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS, + ) + for loggable in extra_loggables: + # call attach method explicitly so we can access simulation state when + # computing the loggable quantity + if hasattr(loggable, 'attach'): + loggable.attach(sim) -# sim.operations.add(boxmc) + compute_density.attach(sim) + + def _compute_virial_pressure(): + virials = numpy.sum(lj.virials, 0) + w = 0 + if virials is not None: + w = virials[0] + virials[3] + virials[5] + V = sim.state.box.volume + return job.cached_statepoint['num_particles'] * job.cached_statepoint[ + 'kT' + ] / V + w / (3 * V) + + logger[('custom', 'virial_pressure')] = (_compute_virial_pressure, 'scalar') + + # move size tuner + mstuner = hoomd.hpmc.tune.MoveSize.scale_solver( + moves=['d'], + target=0.2, + max_translation_move=0.5, + trigger=hoomd.trigger.And( + [ + hoomd.trigger.Periodic(100), + hoomd.trigger.Before(RANDOMIZE_STEPS | EQUILIBRATE_STEPS // 2), + ] + ), + ) + sim.operations.add(mstuner) + sim.operations.computes.append(lj) -# boxmc_tuner = hpmc.tune.BoxMCMoveSize.scale_solver( -# trigger=hoomd.trigger.And( -# [ -# hoomd.trigger.Periodic(400), -# hoomd.trigger.Before(RANDOMIZE_STEPS + EQUILIBRATE_STEPS // 2), -# ] -# ), -# boxmc=boxmc, -# moves=['volume'], -# 
target=0.5, -# ) -# sim.operations.add(boxmc_tuner) + return sim -# if not restart: -# # equilibrate -# device.notice('Equilibrating...') -# sim.run(EQUILIBRATE_STEPS // 2) -# sim.run(EQUILIBRATE_STEPS // 2) -# device.notice('Done.') -# # Print acceptance ratio as measured during the 2nd half of the -# # equilibration. -# translate_moves = sim.operations.integrator.translate_moves -# translate_acceptance = translate_moves[0] / sum(translate_moves) -# device.notice(f'Translate move acceptance: {translate_acceptance}') -# device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') - -# volume_moves = boxmc.volume_moves -# volume_acceptance = volume_moves[0] / sum(volume_moves) -# device.notice(f'Volume move acceptance: {volume_acceptance}') -# device.notice(f'Volume move size: {boxmc.volume["delta"]}') - -# # save move sizes to a file -# if device.communicator.rank == 0: -# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') -# with open(job.fn(name), 'w') as f: -# json.dump( -# dict( -# d_A=sim.operations.integrator.d['A'], -# volume_delta=boxmc.volume['delta'], -# ), -# f, -# ) -# else: -# device.notice('Restarting...') -# # read move size from the file -# name = util.get_job_filename(sim_mode, device, 'move_size', 'json') -# with open(job.fn(name)) as f: -# data = json.load(f) - -# sim.operations.integrator.d['A'] = data['d_A'] -# device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') -# boxmc.volume = dict(weight=1.0, mode='ln', delta=data['volume_delta']) -# device.notice(f'Restored volume move size: {boxmc.volume["delta"]}') - -# # run -# device.notice('Running...') -# util.run_up_to_walltime( -# sim=sim, -# end_step=TOTAL_STEPS, -# steps=RESTART_STEPS, -# walltime_stop=WALLTIME_STOP_SECONDS, -# ) +def run_nvt_mc_sim(job, device): + """Run MC sim in NVT.""" + # simulation + sim_mode = 'nvt_mc' -# hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') + if util.is_simulation_complete(job, device, sim_mode): + return -# if sim.timestep == TOTAL_STEPS: -# pathlib.Path(job.fn(complete_filename)).touch() -# device.notice('Done.') -# else: -# device.notice( -# 'Ending run early due to walltime limits at:' -# f'{device.communicator.walltime}' -# ) + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') + if job.isfile(restart_filename): + initial_state = job.fn(restart_filename) + restart = True + else: + initial_state = job.fn('initial_state.gsd') + restart = False + + sim = make_mc_simulation(job, device, initial_state, sim_mode) + + if not restart: + # equilibrate + device.notice('Equilibrating...') + sim.run(EQUILIBRATE_STEPS // 2) + sim.run(EQUILIBRATE_STEPS // 2) + device.notice('Done.') + + # Print acceptance ratio as measured during the 2nd half of the + # equilibration. 
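
For the acceptance bookkeeping just below: HPMC's `translate_moves` is a running `(accepted, rejected)` counter pair, so the printed ratio is accepted over total. A standalone illustration of the arithmetic, with invented counts:

```python
# Invented counts; the action reads these from
# sim.operations.integrator.translate_moves after the second half of
# equilibration, where the move size tuner targets 0.2 acceptance.
accepted, rejected = 152_340, 612_660
acceptance = accepted / (accepted + rejected)
print(f'Translate move acceptance: {acceptance:.3f}')  # -> 0.199
```
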
+ translate_moves = sim.operations.integrator.translate_moves + translate_acceptance = translate_moves[0] / sum(translate_moves) + device.notice(f'Translate move acceptance: {translate_acceptance}') + device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') + + # save move size to a file + if device.communicator.rank == 0: + name = util.get_job_filename(sim_mode, device, 'move_size', 'json') + with open(job.fn(name), 'w') as f: + json.dump(dict(d_A=sim.operations.integrator.d['A']), f) + else: + device.notice('Restarting...') + # read move size from the file + name = util.get_job_filename(sim_mode, device, 'move_size', 'json') + with open(job.fn(name)) as f: + data = json.load(f) + + sim.operations.integrator.d['A'] = data['d_A'] + device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') + # run + device.notice('Running...') + util.run_up_to_walltime( + sim=sim, + end_step=TOTAL_STEPS, + steps=RESTART_STEPS, + walltime_stop=WALLTIME_STOP_SECONDS, + ) -# mc_sampling_jobs = [] -# mc_job_definitions = [ -# { -# 'mode': 'nvt', -# 'device_name': 'cpu', -# 'ranks_per_partition': NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi, -# }, -# { -# 'mode': 'npt', -# 'device_name': 'cpu', -# 'ranks_per_partition': NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi, -# }, -# ] + hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') + if sim.timestep == TOTAL_STEPS: + util.mark_simulation_complete(job, device, sim_mode) + device.notice('Done.') + else: + device.notice( + 'Ending run early due to walltime limits at:' + f'{device.communicator.walltime}' + ) -# def add_mc_sampling_job(mode, device_name, ranks_per_partition, aggregator): -# """Add a MC sampling job to the workflow.""" -# directives = dict( -# walltime=CONFIG['max_walltime'], -# executable=CONFIG['executable'], -# nranks=util.total_ranks_function(ranks_per_partition), -# ) -# if device_name == 'gpu': -# directives['ngpu'] = util.total_ranks_function(ranks_per_partition) +def run_npt_mc_sim(job, device): + """Run MC sim in NPT.""" + sim_mode = 'npt_mc' -# @Project.pre.after(lj_fluid_create_initial_state) -# @Project.post.isfile(f'{mode}_mc_{device_name}_complete') -# @Project.operation( -# name=f'lj_fluid_{mode}_mc_{device_name}', -# directives=directives, -# aggregator=aggregator, -# ) -# def sampling_operation(*jobs): -# """Perform sampling simulation given the definition.""" -# import hoomd + if util.is_simulation_complete(job, device, sim_mode): + return -# communicator = hoomd.communicator.Communicator( -# ranks_per_partition=ranks_per_partition -# ) -# job = jobs[communicator.partition] + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') + if job.isfile(restart_filename): + initial_state = job.fn(restart_filename) + restart = True + else: + initial_state = job.fn('initial_state.gsd') + restart = False + + # box updates + boxmc = hoomd.hpmc.update.BoxMC( + betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], + trigger=hoomd.trigger.Periodic(1), + ) + boxmc.volume = dict(weight=1.0, mode='ln', delta=0.01) -# if communicator.rank == 0: -# print(f'starting lj_fluid_{mode}_mc_{device_name}:', job) + # simulation + sim = make_mc_simulation( + job, device, initial_state, sim_mode, extra_loggables=[boxmc] + ) -# if device_name == 'gpu': -# device_cls = hoomd.device.GPU -# elif device_name == 'cpu': -# device_cls = hoomd.device.CPU + sim.operations.add(boxmc) -# device = device_cls( -# communicator=communicator, -# 
message_filename=util.get_message_filename( -# job, f'{mode}_mc_{device_name}.log' -# ), -# ) + boxmc_tuner = hoomd.hpmc.tune.BoxMCMoveSize.scale_solver( + trigger=hoomd.trigger.And( + [ + hoomd.trigger.Periodic(400), + hoomd.trigger.Before(RANDOMIZE_STEPS + EQUILIBRATE_STEPS // 2), + ] + ), + boxmc=boxmc, + moves=['volume'], + target=0.5, + ) + sim.operations.add(boxmc_tuner) + + if not restart: + # equilibrate + device.notice('Equilibrating...') + sim.run(EQUILIBRATE_STEPS // 2) + sim.run(EQUILIBRATE_STEPS // 2) + device.notice('Done.') + + # Print acceptance ratio as measured during the 2nd half of the + # equilibration. + translate_moves = sim.operations.integrator.translate_moves + translate_acceptance = translate_moves[0] / sum(translate_moves) + device.notice(f'Translate move acceptance: {translate_acceptance}') + device.notice(f'Trial move size: {sim.operations.integrator.d["A"]}') + + volume_moves = boxmc.volume_moves + volume_acceptance = volume_moves[0] / sum(volume_moves) + device.notice(f'Volume move acceptance: {volume_acceptance}') + device.notice(f'Volume move size: {boxmc.volume["delta"]}') + + # save move sizes to a file + if device.communicator.rank == 0: + name = util.get_job_filename(sim_mode, device, 'move_size', 'json') + with open(job.fn(name), 'w') as f: + json.dump( + dict( + d_A=sim.operations.integrator.d['A'], + volume_delta=boxmc.volume['delta'], + ), + f, + ) + else: + device.notice('Restarting...') + # read move size from the file + name = util.get_job_filename(sim_mode, device, 'move_size', 'json') + with open(job.fn(name)) as f: + data = json.load(f) + + sim.operations.integrator.d['A'] = data['d_A'] + device.notice(f'Restored trial move size: {sim.operations.integrator.d["A"]}') + boxmc.volume = dict(weight=1.0, mode='ln', delta=data['volume_delta']) + device.notice(f'Restored volume move size: {boxmc.volume["delta"]}') -# globals().get(f'run_{mode}_mc_sim')( -# job, device, complete_filename=f'{mode}_mc_{device_name}_complete' -# ) + # run + device.notice('Running...') + util.run_up_to_walltime( + sim=sim, + end_step=TOTAL_STEPS, + steps=RESTART_STEPS, + walltime_stop=WALLTIME_STOP_SECONDS, + ) -# if communicator.rank == 0: -# print(f'completed lj_fluid_{mode}_mc_{device_name}: {job}') + hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') -# mc_sampling_jobs.append(sampling_operation) + if sim.timestep == TOTAL_STEPS: + util.mark_simulation_complete(job, device, sim_mode) + device.notice('Done.') + else: + device.notice( + 'Ending run early due to walltime limits at:' + f'{device.communicator.walltime}' + ) -# for definition in mc_job_definitions: -# add_mc_sampling_job(**definition) +mc_sampling_jobs = [] +mc_job_definitions = [ + { + 'mode': 'nvt', + 'device_name': 'cpu', + }, + { + 'mode': 'npt', + 'device_name': 'cpu', + }, +] -# @Project.pre(is_lj_fluid) -# @Project.pre.after(*md_sampling_jobs) -# @Project.pre.after(*mc_sampling_jobs) -# @Project.post.true('lj_fluid_analysis_complete') -# @Project.operation( -# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -# ) -# def lj_fluid_analyze(job): -# """Analyze the output of all simulation modes.""" -# import math +def add_mc_sampling_job(mode, device_name): + """Add a MC sampling job to the workflow.""" + action_name = f'{__name__}.{mode}_mc_{device_name}' -# import matplotlib -# import matplotlib.figure -# import matplotlib.style -# import numpy + def sampling_operation(*jobs): + """Perform sampling simulation given the definition.""" 
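
The communicator setup that follows is the same partitioning idiom the MD actions use: row exports `ACTION_PROCESSES_PER_DIRECTORY` for the submitted aggregate, and splitting HOOMD's MPI communicator by that value gives each signac directory its own set of ranks. A minimal sketch of the mapping, not the patch's exact code (it defaults to one rank per directory so it also runs serially):

```python
import os

import hoomd

# row exports ACTION_PROCESSES_PER_DIRECTORY in MPI submissions.
ranks_per_directory = int(os.environ.get('ACTION_PROCESSES_PER_DIRECTORY', '1'))

communicator = hoomd.communicator.Communicator(
    ranks_per_partition=ranks_per_directory
)
# With 8 total ranks and 2 per directory there are 4 partitions; partition k
# runs the k-th job of the aggregate: job = jobs[communicator.partition].
print(f'rank {communicator.rank} in partition {communicator.partition}')
```
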
+ communicator = hoomd.communicator.Communicator( + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) + ) + job = jobs[communicator.partition] -# matplotlib.style.use('fivethirtyeight') + if communicator.rank == 0: + print(f'starting {action_name}:', job) -# print('starting lj_fluid_analyze:', job) + if device_name == 'gpu': + device_cls = hoomd.device.GPU + elif device_name == 'cpu': + device_cls = hoomd.device.CPU -# sim_modes = [ -# 'nvt_langevin_md_cpu', -# 'nvt_mttk_md_cpu', -# 'nvt_bussi_md_cpu', -# 'npt_bussi_md_cpu', -# ] + device = device_cls( + communicator=communicator, + message_filename=util.get_message_filename( + job, f'{mode}_mc_{device_name}.log' + ), + ) -# if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): -# sim_modes.extend( -# [ -# 'nvt_langevin_md_gpu', -# 'nvt_mttk_md_gpu', -# 'nvt_bussi_md_gpu', -# 'npt_bussi_md_gpu', -# ] -# ) + globals().get(f'run_{mode}_mc_sim')( + job, device + ) -# if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): -# sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + if communicator.rank == 0: + print(f'completed {action_name}: {job}') -# util._sort_sim_modes(sim_modes) + mc_sampling_jobs.append(action_name) -# timesteps = {} -# energies = {} -# pressures = {} -# densities = {} -# linear_momentum = {} + sim_mode = mode + '_mc' + ValidationWorkflow.add_action(action_name, Action(method = sampling_operation, + configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'quantities', 'h5')], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'] + })) -# for sim_mode in sim_modes: -# log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) -# timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] +for definition in mc_job_definitions: + add_mc_sampling_job(**definition) -# if 'md' in sim_mode: -# energies[sim_mode] = log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' -# ] -# else: -# energies[sim_mode] = ( -# log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] -# * job.cached_statepoint['kT'] -# ) -# energies[sim_mode] /= job.cached_statepoint['num_particles'] +def analyze(*jobs): + """Analyze the output of all simulation modes.""" + matplotlib.style.use('fivethirtyeight') -# if 'md' in sim_mode: -# pressures[sim_mode] = log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' -# ] -# else: -# pressures[sim_mode] = log_traj['hoomd-data/custom/virial_pressure'] + for job in jobs: + print('starting lj_fluid_analyze:', job) -# densities[sim_mode] = log_traj[ -# 'hoomd-data/custom_actions/ComputeDensity/density' -# ] + sim_modes = [ + 'nvt_langevin_md_cpu', + 'nvt_mttk_md_cpu', + 'nvt_bussi_md_cpu', + 'npt_bussi_md_cpu', + ] -# if 'md' in sim_mode and 'langevin' not in sim_mode: -# momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] -# linear_momentum[sim_mode] = [ -# math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector -# ] -# else: -# linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) - -# # save averages -# for mode in sim_modes: -# job.document[mode] = dict( -# pressure=float(numpy.mean(pressures[mode])), -# potential_energy=float(numpy.mean(energies[mode])), -# density=float(numpy.mean(densities[mode])), -# ) + if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): + 
sim_modes.extend( + [ + 'nvt_langevin_md_gpu', + 'nvt_mttk_md_gpu', + 'nvt_bussi_md_gpu', + 'npt_bussi_md_gpu', + ] + ) -# # Plot results -# fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') -# ax = fig.add_subplot(2, 2, 1) -# util.plot_timeseries( -# ax=ax, -# timesteps=timesteps, -# data=densities, -# ylabel=r'$\rho$', -# expected=job.cached_statepoint['density'], -# max_points=500, -# ) -# ax.legend() + if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): + sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + + util._sort_sim_modes(sim_modes) + + timesteps = {} + energies = {} + pressures = {} + densities = {} + linear_momentum = {} + + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + + if 'md' in sim_mode: + energies[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' + ] + else: + energies[sim_mode] = ( + log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] + * job.cached_statepoint['kT'] + ) + + energies[sim_mode] /= job.cached_statepoint['num_particles'] + + if 'md' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' + ] + else: + pressures[sim_mode] = log_traj['hoomd-data/custom/virial_pressure'] + + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' + ] + + if 'md' in sim_mode and 'langevin' not in sim_mode: + momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] + linear_momentum[sim_mode] = [ + math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector + ] + else: + linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) + + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + potential_energy=float(numpy.mean(energies[mode])), + density=float(numpy.mean(densities[mode])), + ) -# ax = fig.add_subplot(2, 2, 2) -# util.plot_timeseries( -# ax=ax, -# timesteps=timesteps, -# data=pressures, -# ylabel=r'$P$', -# expected=job.cached_statepoint['pressure'], -# max_points=500, -# ) + # Plot results + fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') + ax = fig.add_subplot(2, 2, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() + + ax = fig.add_subplot(2, 2, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) -# ax = fig.add_subplot(2, 2, 3) -# util.plot_timeseries( -# ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 -# ) + ax = fig.add_subplot(2, 2, 3) + util.plot_timeseries( + ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 + ) -# ax = fig.add_subplot(2, 2, 4) -# util.plot_timeseries( -# ax=ax, -# timesteps=timesteps, -# data={ -# mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] -# for mode, lm in linear_momentum.items() -# }, -# ylabel=r'$|\vec{p}| / N$', -# max_points=500, -# ) + ax = fig.add_subplot(2, 2, 4) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data={ + mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] + for mode, lm in linear_momentum.items() + }, + ylabel=r'$|\vec{p}| / N$', + max_points=500, + ) -# fig.suptitle( -# f'$kT={job.cached_statepoint["kT"]}$, ' -# 
f'$\\rho={job.cached_statepoint["density"]}$, ' -# f'$N={job.cached_statepoint["num_particles"]}$, ' -# f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' -# f'replicate={job.cached_statepoint["replicate_idx"]}' -# ) -# fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + fig.suptitle( + f'$kT={job.cached_statepoint["kT"]}$, ' + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') -# job.document['lj_fluid_analysis_complete'] = True +ValidationWorkflow.add_action(f'{__name__}.analyze', Action(method = analyze, +configuration = {'products': ['nvt_npt_plots.svg'], +'previous_actions': md_sampling_jobs + mc_sampling_jobs, +'group': _group | {'submit_whole': True}, +'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:01:00'}}})) +# @Project.pre(is_lj_fluid) +# @Project.pre.after(*md_sampling_jobs) +# @Project.pre.after(*mc_sampling_jobs) +# @Project.post.true('lj_fluid_analysis_complete') +# @Project.operation( +# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) +# ) # analysis_aggregator = aggregator.groupby( # key=['kT', 'density', 'num_particles', 'r_cut'], diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 3c62c36c..8b7fa3b3 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -28,9 +28,10 @@ def init(args): """Initialize the workspace.""" - if (config.project_root / 'workspace').exists(): - message = "The project already initialized." - raise RuntimeError(message) + # TODO: uncomment + # if (config.project_root / 'workspace').exists(): + # message = "The project already initialized." + # raise RuntimeError(message) project = signac.init_project(path=config.project_root) From 75200f3e39be189ecd7b7965f6e3c168283ccfaa Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Fri, 2 Aug 2024 13:30:03 -0400 Subject: [PATCH 05/34] lj_fluid.compare_modes working. 
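
This patch moves the analysis steps onto grouped row submissions. As a sketch of what one of the configuration dicts registered below serializes to in `workflow.toml`, assuming a row-style schema with the field names used in this patch (the authoritative layout is whatever `workflow.py` emits):

```python
import rtoml

# Illustrative action entry assembled from the dicts passed to
# ValidationWorkflow.add_action in this patch; abridged, not exhaustive.
action = {
    'name': 'lj_fluid.compare_modes',
    'previous_actions': ['lj_fluid.analyze'],
    'group': {
        'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'],
        'split_by_sort_key': True,
        'submit_whole': True,
    },
    'resources': {'processes': {'per_submission': 1}},
}
print(rtoml.dumps({'action': [action]}, pretty=True))
```
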
--- hoomd_validation/lj_fluid.py | 237 +++++++++++++++++------------------
 1 file changed, 112 insertions(+), 125 deletions(-)

diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py
index fa542b39..99ee274f 100644
--- a/hoomd_validation/lj_fluid.py
+++ b/hoomd_validation/lj_fluid.py
@@ -766,7 +766,7 @@ def analyze(*jobs):
     matplotlib.style.use('fivethirtyeight')
 
     for job in jobs:
-        print('starting lj_fluid_analyze:', job)
+        print(f'starting {__name__}.analyze:', job)
 
         sim_modes = [
             'nvt_langevin_md_cpu',
@@ -892,151 +892,138 @@ def analyze(*jobs):
 ValidationWorkflow.add_action(f'{__name__}.analyze', Action(method = analyze,
 configuration = {'products': ['nvt_npt_plots.svg'],
 'previous_actions': md_sampling_jobs + mc_sampling_jobs,
-'group': _group | {'submit_whole': True},
+'group': _group,
 'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:01:00'}}}))


def compare_modes(*jobs):
    """Compares the tested simulation modes."""
    matplotlib.style.use('fivethirtyeight')

    print(f'starting {__name__}.compare_modes:', jobs[0])

    sim_modes = [
        'nvt_langevin_md_cpu',
        'nvt_mttk_md_cpu',
        'nvt_bussi_md_cpu',
        'npt_bussi_md_cpu',
    ]

    if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')):
        sim_modes.extend(
            [
                'nvt_langevin_md_gpu',
                'nvt_mttk_md_gpu',
                'nvt_bussi_md_gpu',
                'npt_bussi_md_gpu',
            ]
        )

    if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')):
        sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu'])

    util._sort_sim_modes(sim_modes)

    quantity_names = ['density', 'pressure', 'potential_energy']
    labels = {
        'density': r'$\frac{\rho_\mathrm{sample} - \rho}{\rho} \cdot 1000$',
        'pressure': r'$\frac{P_\mathrm{sample} - P}{P} \cdot 1000$',
        'potential_energy': r'$\frac{U_\mathrm{sample} - <U>}{<U>} \cdot 1000$',
    }

    # grab the common statepoint parameters
    kT = jobs[0].sp.kT
    set_density = jobs[0].sp.density
    set_pressure = jobs[0].sp.pressure
    num_particles = jobs[0].sp.num_particles

    quantity_reference = dict(
        density=set_density, 
pressure=set_pressure, potential_energy=None + ) -# quantity_names = ['density', 'pressure', 'potential_energy'] -# labels = { -# 'density': r'$\frac{\rho_\mathrm{sample} - \rho}{\rho} \cdot 1000$', -# 'pressure': r'$\frac{P_\mathrm{sample} - P}{P} \cdot 1000$', -# 'potential_energy': r'$\frac{U_\mathrm{sample} - }{} \cdot 1000$', -# } + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 3), layout='tight') + fig.suptitle( + f'$kT={kT}$, $\\rho={set_density}$, ' + f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, ' + f'$N={num_particles}$' + ) -# # grab the common statepoint parameters -# kT = jobs[0].sp.kT -# set_density = jobs[0].sp.density -# set_pressure = jobs[0].sp.pressure -# num_particles = jobs[0].sp.num_particles + for i, quantity_name in enumerate(quantity_names): + ax = fig.add_subplot(3, 1, i + 1) -# quantity_reference = dict( -# density=set_density, pressure=set_pressure, potential_energy=None -# ) + # organize data from jobs + quantities = {mode: [] for mode in sim_modes} + for jb in jobs: + for mode in sim_modes: + quantities[mode].append(getattr(getattr(jb.doc, mode), quantity_name)) -# fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 3), layout='tight') -# fig.suptitle( -# f'$kT={kT}$, $\\rho={set_density}$, ' -# f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, ' -# f'$N={num_particles}$' -# ) + if quantity_reference[quantity_name] is not None: + reference = quantity_reference[quantity_name] + else: + avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} + reference = numpy.mean([avg_value[mode] for mode in sim_modes]) -# for i, quantity_name in enumerate(quantity_names): -# ax = fig.add_subplot(3, 1, i + 1) + avg_quantity, stderr_quantity = util.plot_vs_expected( + ax=ax, + values=quantities, + ylabel=labels[quantity_name], + expected=reference, + relative_scale=1000, + separate_nvt_npt=True, + ) -# # organize data from jobs -# quantities = {mode: [] for mode in sim_modes} -# for jb in jobs: -# for mode in sim_modes: -# quantities[mode].append(getattr(getattr(jb.doc, mode), quantity_name)) - -# if quantity_reference[quantity_name] is not None: -# reference = quantity_reference[quantity_name] -# else: -# avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} -# reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - -# avg_quantity, stderr_quantity = util.plot_vs_expected( -# ax=ax, -# values=quantities, -# ylabel=labels[quantity_name], -# expected=reference, -# relative_scale=1000, -# separate_nvt_npt=True, -# ) + if quantity_name == 'density': + if 'npt_mc_cpu' in avg_quantity: + print( + f'Average npt_mc_cpu density {num_particles}:', + avg_quantity['npt_mc_cpu'], + '+/-', + stderr_quantity['npt_mc_cpu'], + ) + print( + f'Average npt_md_cpu density {num_particles}:', + avg_quantity['npt_bussi_md_cpu'], + '+/-', + stderr_quantity['npt_bussi_md_cpu'], + ) + if quantity_name == 'pressure': + if 'nvt_mc_cpu' in avg_quantity: + print( + f'Average nvt_mc_cpu pressure {num_particles}:', + avg_quantity['nvt_mc_cpu'], + '+/-', + stderr_quantity['nvt_mc_cpu'], + ) + if 'npt_mc_cpu' in avg_quantity: + print( + f'Average npt_mc_cpu pressure {num_particles}:', + avg_quantity['npt_mc_cpu'], + '+/-', + stderr_quantity['npt_mc_cpu'], + ) -# if quantity_name == 'density': -# if 'npt_mc_cpu' in avg_quantity: -# print( -# f'Average npt_mc_cpu density {num_particles}:', -# avg_quantity['npt_mc_cpu'], -# '+/-', -# stderr_quantity['npt_mc_cpu'], -# ) -# print( -# f'Average npt_md_cpu density {num_particles}:', -# 
avg_quantity['npt_bussi_md_cpu'],
                    '+/-',
                    stderr_quantity['npt_bussi_md_cpu'],
                )
        if quantity_name == 'pressure':
            if 'nvt_mc_cpu' in avg_quantity:
                print(
                    f'Average nvt_mc_cpu pressure {num_particles}:',
                    avg_quantity['nvt_mc_cpu'],
                    '+/-',
                    stderr_quantity['nvt_mc_cpu'],
                )
            if 'npt_mc_cpu' in avg_quantity:
                print(
                    f'Average npt_mc_cpu pressure {num_particles}:',
                    avg_quantity['npt_mc_cpu'],
                    '+/-',
                    stderr_quantity['npt_mc_cpu'],
                )

    filename = (
        f'lj_fluid_compare_kT{kT}_density{round(set_density, 2)}_'
        f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_'
        f'N{num_particles}.svg'
    )

    fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight')


ValidationWorkflow.add_action(f'{__name__}.compare_modes', Action(method = compare_modes,
configuration = {
'previous_actions': [f'{__name__}.analyze'],
'group': _group | {'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], 'split_by_sort_key': True, 'submit_whole': True},
'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}}))


# @Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_fluid_analysis_complete'))
# @Project.post(lambda *jobs: util.true_all(*jobs, key='lj_fluid_compare_modes_complete'))
# @Project.operation(
#     directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']),
#     aggregator=analysis_aggregator,
# )


# @Project.pre.after(*md_sampling_jobs)

From 5a93e457ffdd61fab08a412149594554de843e5c Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Fri, 2 Aug 2024 13:44:06 -0400
Subject: [PATCH 06/34] lj_fluid.distribution_analyze working.
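
The distribution checks in this patch rest on a standard canonical-ensemble result: kinetic energy is Gamma distributed, K ~ Gamma(N_dof / 2, scale=kT), giving mean N_dof kT / 2 and standard deviation sqrt(N_dof / 2) kT, which are the two expressions `distribution_analyze` subtracts from the measured samples (see the DOI cited in the code). A quick numerical cross-check, with illustrative kT and particle count:

```python
import math

import scipy.stats

kT = 1.5
n_dof = 3 * 4096 - 3  # linear momentum removed, as in the non-Langevin modes

rv = scipy.stats.gamma(n_dof / 2, scale=kT)
assert math.isclose(rv.mean(), n_dof * kT / 2)
assert math.isclose(rv.std(), math.sqrt(n_dof / 2) * kT)  # = kT sqrt(N_dof) / sqrt(2)
```
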
--- hoomd_validation/lj_fluid.py | 285 ++++++++++++++++------------------- 1 file changed, 133 insertions(+), 152 deletions(-) diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 99ee274f..34fa1a7b 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -13,6 +13,7 @@ import matplotlib import matplotlib.figure import matplotlib.style +import scipy import hoomd @@ -89,7 +90,7 @@ def job_statepoints(): _group_cpu = _group | {'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS)} _resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} _group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} - +_group_compare = _group | {'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], 'split_by_sort_key': True, 'submit_whole': True} def create_initial_state(*jobs): """Create initial system configuration.""" @@ -1014,184 +1015,164 @@ def compare_modes(*jobs): ValidationWorkflow.add_action(f'{__name__}.compare_modes', Action(method = compare_modes, configuration = { 'previous_actions': [f'{__name__}.analyze'], -'group': _group | {'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], 'split_by_sort_key': True, 'submit_whole': True}, +'group': _group_compare, 'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}})) -# @Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_fluid_analysis_complete')) -# @Project.post(lambda *jobs: util.true_all(*jobs, key='lj_fluid_compare_modes_complete')) -# @Project.operation( -# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), -# aggregator=analysis_aggregator, -# ) +def distribution_analyze(*jobs): + """Checks that MD follows the correct KE distribution.""" + matplotlib.style.use('fivethirtyeight') + print('starting lj_fluid_distribution_analyze:', jobs[0]) -# @Project.pre.after(*md_sampling_jobs) -# @Project.post( -# lambda *jobs: util.true_all(*jobs, key='lj_fluid_distribution_analyze_complete') -# ) -# @Project.operation( -# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), -# aggregator=analysis_aggregator, -# ) -# def lj_fluid_distribution_analyze(*jobs): -# """Checks that MD follows the correct KE distribution.""" -# import matplotlib -# import matplotlib.figure -# import matplotlib.style -# import numpy -# import scipy - -# matplotlib.style.use('fivethirtyeight') + sim_modes = [ + 'nvt_langevin_md_cpu', + 'nvt_mttk_md_cpu', + 'nvt_bussi_md_cpu', + 'npt_bussi_md_cpu', + ] -# print('starting lj_fluid_distribution_analyze:', jobs[0]) - -# sim_modes = [ -# 'nvt_langevin_md_cpu', -# 'nvt_mttk_md_cpu', -# 'nvt_bussi_md_cpu', -# 'npt_bussi_md_cpu', -# ] - -# if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')): -# sim_modes.extend( -# [ -# 'nvt_langevin_md_gpu', -# 'nvt_mttk_md_gpu', -# 'nvt_bussi_md_gpu', -# 'npt_bussi_md_gpu', -# ] -# ) + if os.path.exists(jobs[0].fn('nvt_langevin_md_gpu_quantities.h5')): + sim_modes.extend( + [ + 'nvt_langevin_md_gpu', + 'nvt_mttk_md_gpu', + 'nvt_bussi_md_gpu', + 'npt_bussi_md_gpu', + ] + ) -# if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')): -# sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + if os.path.exists(jobs[0].fn('nvt_mc_cpu_quantities.h5')): + sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) -# util._sort_sim_modes(sim_modes) + util._sort_sim_modes(sim_modes) -# # grab the common statepoint parameters -# kT = jobs[0].sp.kT -# set_density = 
jobs[0].sp.density -# num_particles = jobs[0].sp.num_particles + # grab the common statepoint parameters + kT = jobs[0].sp.kT + set_density = jobs[0].sp.density + num_particles = jobs[0].sp.num_particles -# fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') -# fig.suptitle( -# f'$kT={kT}$, $\\rho={set_density}$, ' -# f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, ' -# f'$N={num_particles}$' -# ) + fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') + fig.suptitle( + f'$kT={kT}$, $\\rho={set_density}$, ' + f'$r_\\mathrm{{cut}}={jobs[0].statepoint.r_cut}$, ' + f'$N={num_particles}$' + ) -# ke_means_expected = collections.defaultdict(list) -# ke_sigmas_expected = collections.defaultdict(list) -# ke_samples = collections.defaultdict(list) -# potential_energy_samples = collections.defaultdict(list) -# density_samples = collections.defaultdict(list) -# pressure_samples = collections.defaultdict(list) + ke_means_expected = collections.defaultdict(list) + ke_sigmas_expected = collections.defaultdict(list) + ke_samples = collections.defaultdict(list) + potential_energy_samples = collections.defaultdict(list) + density_samples = collections.defaultdict(list) + pressure_samples = collections.defaultdict(list) -# for job in jobs: -# for sim_mode in sim_modes: -# if sim_mode.startswith('nvt_langevin'): -# n_dof = num_particles * 3 -# else: -# n_dof = num_particles * 3 - 3 + for job in jobs: + for sim_mode in sim_modes: + if sim_mode.startswith('nvt_langevin'): + n_dof = num_particles * 3 + else: + n_dof = num_particles * 3 - 3 -# print('Reading' + job.fn(sim_mode + '_quantities.h5')) -# log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) -# if 'md' in sim_mode: -# ke = log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' -# ] -# ke_means_expected[sim_mode].append(numpy.mean(ke) - 1 / 2 * n_dof * kT) -# ke_sigmas_expected[sim_mode].append( -# numpy.std(ke) - 1 / math.sqrt(2) * math.sqrt(n_dof) * kT -# ) + if 'md' in sim_mode: + ke = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' + ] + ke_means_expected[sim_mode].append(numpy.mean(ke) - 1 / 2 * n_dof * kT) + ke_sigmas_expected[sim_mode].append( + numpy.std(ke) - 1 / math.sqrt(2) * math.sqrt(n_dof) * kT + ) -# ke_samples[sim_mode].extend(ke) -# else: -# ke_samples[sim_mode].extend( -# [ -# 3 -# / 2 -# * job.cached_statepoint['num_particles'] -# * job.cached_statepoint['kT'] -# ] -# ) + ke_samples[sim_mode].extend(ke) + else: + ke_samples[sim_mode].extend( + [ + 3 + / 2 + * job.cached_statepoint['num_particles'] + * job.cached_statepoint['kT'] + ] + ) -# if 'md' in sim_mode: -# potential_energy_samples[sim_mode].extend( -# list( -# log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities' -# '/potential_energy' -# ] -# ) -# ) -# else: -# potential_energy_samples[sim_mode].extend( -# list( -# log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] -# * job.cached_statepoint['kT'] -# ) -# ) + if 'md' in sim_mode: + potential_energy_samples[sim_mode].extend( + list( + log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities' + '/potential_energy' + ] + ) + ) + else: + potential_energy_samples[sim_mode].extend( + list( + log_traj['hoomd-data/hpmc/pair/LennardJones/energy'] + * job.cached_statepoint['kT'] + ) + ) -# if 'md' in sim_mode: -# pressure_samples[sim_mode].extend( -# list( -# log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' -# ] -# ) -# ) -# else: -# 
pressure_samples[sim_mode].extend(
                    list(log_traj['hoomd-data/custom/virial_pressure'])
                )

            density_samples[sim_mode].extend(
                list(log_traj['hoomd-data/custom_actions/ComputeDensity/density'])
            )

    ax = fig.add_subplot(2, 2, 1)
    util.plot_vs_expected(ax, ke_means_expected, '$<KE> - 1/2 N_{dof} k T$')

    ax = fig.add_subplot(2, 2, 2)
    # https://doi.org/10.1371/journal.pone.0202764
    util.plot_vs_expected(
        ax, ke_sigmas_expected, r'$\Delta K - 1/\sqrt{2} \sqrt{N_{dof}} k T$'
    )

    ax = fig.add_subplot(2, 4, 5)
    rv = scipy.stats.gamma(
        3 * job.cached_statepoint['num_particles'] / 2,
        scale=job.cached_statepoint['kT'],
    )
    util.plot_distribution(ax, ke_samples, 'K', expected=rv.pdf)
    ax.legend(loc='upper right', fontsize='xx-small')

    ax = fig.add_subplot(2, 4, 6)
    util.plot_distribution(ax, potential_energy_samples, 'U')

    ax = fig.add_subplot(2, 4, 7)
    util.plot_distribution(
        ax, density_samples, r'$\rho$', expected=job.cached_statepoint['density']
    )

    ax = fig.add_subplot(2, 4, 8)
    util.plot_distribution(
        ax, pressure_samples, 'P', expected=job.cached_statepoint['pressure']
    )

    filename = (
        f'lj_fluid_distribution_analyze_kT{kT}'
        f'_density{round(set_density, 2)}_'
        f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_'
        f'N{num_particles}.svg'
    )
    fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight')

ValidationWorkflow.add_action(f'{__name__}.distribution_analyze', Action(method = distribution_analyze,
configuration = {
'previous_actions': [f'{__name__}.analyze'],
'group': _group_compare,
'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}}))

# #################################
# # MD conservation simulations

From d87d1672eb706cf3a6da41e236ae057421d1a6a0 Mon Sep 17 00:00:00 2001
From: "Joshua A. 
Anderson" Date: Fri, 2 Aug 2024 14:20:06 -0400 Subject: [PATCH 07/34] lj_fluid.nve is working. --- hoomd_validation/lj_fluid.py | 481 ++++++++++++++++------------------- 1 file changed, 219 insertions(+), 262 deletions(-) diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 34fa1a7b..c69fd8a4 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -27,11 +27,8 @@ # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. RANDOMIZE_STEPS = 20_000 -# TODO: revert -# EQUILIBRATE_STEPS = 100_000 -# RUN_STEPS = 500_000 -EQUILIBRATE_STEPS = 10_000 -RUN_STEPS = 50_000 +EQUILIBRATE_STEPS = 100_000 +RUN_STEPS = 500_000 RESTART_STEPS = RUN_STEPS // 10 TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS @@ -92,6 +89,10 @@ def job_statepoints(): _group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} _group_compare = _group | {'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], 'split_by_sort_key': True, 'submit_whole': True} +_include_nve = {'include': [{'all': [["/subproject", "==", __name__], ["/replicate_idx", "<", NUM_NVE_RUNS]]}]} +_group_nve_cpu = _group_cpu | _include_nve +_group_nve_gpu = _group_gpu | _include_nve + def create_initial_state(*jobs): """Create initial system configuration.""" communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) @@ -1179,260 +1180,216 @@ def distribution_analyze(*jobs): # ################################# -# def run_nve_md_sim(job, device, run_length, complete_filename): -# """Run the MD simulation in NVE.""" -# import hoomd - -# sim_mode = 'nve_md' -# restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') -# is_restarting = job.isfile(restart_filename) - -# if is_restarting: -# initial_state = job.fn(restart_filename) -# else: -# initial_state = job.fn('initial_state.gsd') - -# nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) - -# sim = make_md_simulation( -# job, device, initial_state, nve, sim_mode, period_multiplier=200 -# ) - -# if not is_restarting: -# sim.state.thermalize_particle_momenta( -# hoomd.filter.All(), job.cached_statepoint['kT'] -# ) - -# # Run for a long time to look for energy and momentum drift -# device.notice('Running...') - -# util.run_up_to_walltime( -# sim=sim, -# end_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length, -# steps=500_000, -# walltime_stop=WALLTIME_STOP_SECONDS, -# ) - -# if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length: -# pathlib.Path(job.fn(complete_filename)).touch() -# device.notice('Done.') -# else: -# device.notice( -# 'Ending run early due to walltime limits at:' -# f'{device.communicator.walltime}' -# ) - -# hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') - - -# def is_lj_fluid_nve(job): -# """Test if a given job should be run for NVE conservation.""" -# return ( -# job.cached_statepoint['subproject'] == 'lj_fluid' -# and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS -# ) - - -# partition_jobs_cpu_mpi_nve = aggregator.groupsof( -# num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), -# sort_by=sort_key, -# select=is_lj_fluid_nve, -# ) - -# partition_jobs_gpu_nve = aggregator.groupsof( -# num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), -# sort_by=sort_key, -# select=is_lj_fluid_nve, -# ) - -# nve_md_sampling_jobs = [] -# nve_md_job_definitions = [ -# { -# 'device_name': 'cpu', -# 'ranks_per_partition': 
NUM_CPU_RANKS, -# 'aggregator': partition_jobs_cpu_mpi_nve, -# 'run_length': 10_000_000, -# }, -# ] - -# if CONFIG['enable_gpu']: -# nve_md_job_definitions.extend( -# [ -# { -# 'device_name': 'gpu', -# 'ranks_per_partition': 1, -# 'aggregator': partition_jobs_gpu_nve, -# 'run_length': 100_000_000, -# }, -# ] -# ) - - -# def add_nve_md_job(device_name, ranks_per_partition, aggregator, run_length): -# """Add a MD NVE conservation job to the workflow.""" -# sim_mode = 'nve_md' - -# directives = dict( -# walltime=CONFIG['max_walltime'], -# executable=CONFIG['executable'], -# nranks=util.total_ranks_function(ranks_per_partition), -# ) - -# if device_name == 'gpu': -# directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - -# @Project.pre.after(lj_fluid_create_initial_state) -# @Project.post.isfile(f'{sim_mode}_{device_name}_complete') -# @Project.operation( -# name=f'lj_fluid_{sim_mode}_{device_name}', -# directives=directives, -# aggregator=aggregator, -# ) -# def lj_fluid_nve_md_job(*jobs): -# """Run NVE MD.""" -# import hoomd - -# communicator = hoomd.communicator.Communicator( -# ranks_per_partition=ranks_per_partition -# ) -# job = jobs[communicator.partition] - -# if communicator.rank == 0: -# print(f'starting lj_fluid_{sim_mode}_{device_name}:', job) - -# if device_name == 'gpu': -# device_cls = hoomd.device.GPU -# elif device_name == 'cpu': -# device_cls = hoomd.device.CPU - -# device = device_cls( -# communicator=communicator, -# message_filename=util.get_message_filename( -# job, f'{sim_mode}_{device_name}.log' -# ), -# ) -# run_nve_md_sim( -# job, -# device, -# run_length=run_length, -# complete_filename=f'{sim_mode}_{device_name}_complete', -# ) - -# if communicator.rank == 0: -# print(f'completed lj_fluid_{sim_mode}_{device_name} {job}') - -# nve_md_sampling_jobs.append(lj_fluid_nve_md_job) - - -# for definition in nve_md_job_definitions: -# add_nve_md_job(**definition) - -# nve_analysis_aggregator = aggregator.groupby( -# key=['kT', 'density', 'num_particles', 'r_cut'], -# sort_by='replicate_idx', -# select=is_lj_fluid_nve, -# ) - - -# @Project.pre.after(*nve_md_sampling_jobs) -# @Project.post( -# lambda *jobs: util.true_all(*jobs, key='lj_fluid_conservation_analysis_complete') -# ) -# @Project.operation( -# directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), -# aggregator=nve_analysis_aggregator, -# ) -# def lj_fluid_conservation_analyze(*jobs): -# """Analyze the output of NVE simulations and inspect conservation.""" -# import math - -# import matplotlib -# import matplotlib.figure -# import matplotlib.style -# import numpy - -# matplotlib.style.use('fivethirtyeight') - -# print('starting lj_fluid_conservation_analyze:', jobs[0]) - -# sim_modes = ['nve_md_cpu'] -# if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): -# sim_modes.extend(['nve_md_gpu']) - -# timesteps = [] -# energies = [] -# linear_momenta = [] - -# for job in jobs: -# job_timesteps = {} -# job_energies = {} -# job_linear_momentum = {} - -# for sim_mode in sim_modes: -# log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - -# job_timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - -# job_energies[sim_mode] = ( -# log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' -# ] -# + log_traj[ -# 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' -# ] -# ) -# job_energies[sim_mode] = ( -# job_energies[sim_mode] - job_energies[sim_mode][0] -# ) / job.cached_statepoint['num_particles'] - -# momentum_vector = 
log_traj['hoomd-data/md/Integrator/linear_momentum'] -# job_linear_momentum[sim_mode] = [ -# math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) -# / job.cached_statepoint['num_particles'] -# for v in momentum_vector -# ] - -# timesteps.append(job_timesteps) -# energies.append(job_energies) -# linear_momenta.append(job_linear_momentum) - -# # Plot results -# def plot(*, ax, data, quantity_name, legend=False): -# for i, job in enumerate(jobs): -# for mode in sim_modes: -# ax.plot( -# timesteps[i][mode], -# numpy.asarray(data[i][mode]), -# label=f'{mode}_{job.cached_statepoint["replicate_idx"]}', -# ) -# ax.set_xlabel('time step') -# ax.set_ylabel(quantity_name) - -# if legend: -# ax.legend() - -# fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.68 * 2), layout='tight') -# ax = fig.add_subplot(2, 1, 1) -# plot(ax=ax, data=energies, quantity_name=r'$E / N$', legend=True) - -# ax = fig.add_subplot(2, 1, 2) -# plot(ax=ax, data=linear_momenta, quantity_name=r'$\left| \vec{p} \right| / N$') - -# fig.suptitle( -# 'LJ conservation tests: ' -# f'$kT={job.cached_statepoint["kT"]}$, ' -# f'$\\rho={job.cached_statepoint["density"]}$, ' -# f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' -# f'$N={job.cached_statepoint["num_particles"]}$' -# ) -# filename = ( -# f'lj_fluid_conservation_kT{job.cached_statepoint["kT"]}_' -# f'density{round(job.cached_statepoint["density"], 2)}_' -# f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_' -# f'N{job.cached_statepoint["num_particles"]}.svg' -# ) - -# fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - -# for job in jobs: -# job.document['lj_fluid_conservation_analysis_complete'] = True +def run_nve_md_sim(job, device,run_length): + """Run the MD simulation in NVE.""" + sim_mode = 'nve_md' + if util.is_simulation_complete(job, device, sim_mode): + return + + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') + is_restarting = job.isfile(restart_filename) + + if is_restarting: + initial_state = job.fn(restart_filename) + else: + initial_state = job.fn('initial_state.gsd') + + nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) + + sim = make_md_simulation( + job, device, initial_state, nve, sim_mode, period_multiplier=200 + ) + + if not is_restarting: + sim.state.thermalize_particle_momenta( + hoomd.filter.All(), job.cached_statepoint['kT'] + ) + + # Run for a long time to look for energy and momentum drift + device.notice('Running...') + + util.run_up_to_walltime( + sim=sim, + end_step=RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length, + steps=500_000, + walltime_stop=WALLTIME_STOP_SECONDS, + ) + + if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length: + util.mark_simulation_complete(job, device, sim_mode) + device.notice('Done.') + else: + device.notice( + 'Ending run early due to walltime limits at:' + f'{device.communicator.walltime}' + ) + + hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') + + +def is_lj_fluid_nve(job): + """Test if a given job should be run for NVE conservation.""" + return ( + job.cached_statepoint['subproject'] == 'lj_fluid' + and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS + ) + + +nve_md_sampling_jobs = [] +nve_md_job_definitions = [ + { + 'device_name': 'cpu', + 'run_length': 10_000_000, + }, +] + +if CONFIG['enable_gpu']: + nve_md_job_definitions.extend( + [ + { + 'device_name': 'gpu', + 'run_length': 100_000_000, + }, + ] + ) + + +def add_nve_md_job(device_name, run_length): + """Add a MD NVE conservation job to the 
workflow.""" + sim_mode = 'nve_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' + + def nve_action(*jobs): + """Run NVE MD.""" + communicator = hoomd.communicator.Communicator( + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) + ) + job = jobs[communicator.partition] + + if communicator.rank == 0: + print(f'starting {action_name}:', job) + + if device_name == 'gpu': + device_cls = hoomd.device.GPU + elif device_name == 'cpu': + device_cls = hoomd.device.CPU + + device = device_cls( + communicator=communicator, + message_filename=util.get_message_filename( + job, f'{sim_mode}_{device_name}.log' + ), + ) + run_nve_md_sim( + job, + device, + run_length=run_length, + ) + + if communicator.rank == 0: + print(f'completed {action_name} {job}') + + nve_md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action(action_name, Action(method = nve_action, + configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'quantities', 'h5')], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_nve_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'] + })) + + +for definition in nve_md_job_definitions: + add_nve_md_job(**definition) + +def conservation_analyze(*jobs): + """Analyze the output of NVE simulations and inspect conservation.""" + matplotlib.style.use('fivethirtyeight') + + print(f'starting {__name__}.conservation_analyze:', jobs[0]) + + sim_modes = ['nve_md_cpu'] + if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): + sim_modes.extend(['nve_md_gpu']) + + timesteps = [] + energies = [] + linear_momenta = [] + + for job in jobs: + job_timesteps = {} + job_energies = {} + job_linear_momentum = {} + + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + + job_timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + + job_energies[sim_mode] = ( + log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' + ] + + log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' + ] + ) + job_energies[sim_mode] = ( + job_energies[sim_mode] - job_energies[sim_mode][0] + ) / job.cached_statepoint['num_particles'] + + momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] + job_linear_momentum[sim_mode] = [ + math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) + / job.cached_statepoint['num_particles'] + for v in momentum_vector + ] + + timesteps.append(job_timesteps) + energies.append(job_energies) + linear_momenta.append(job_linear_momentum) + + # Plot results + def plot(*, ax, data, quantity_name, legend=False): + for i, job in enumerate(jobs): + for mode in sim_modes: + ax.plot( + timesteps[i][mode], + numpy.asarray(data[i][mode]), + label=f'{mode}_{job.cached_statepoint["replicate_idx"]}', + ) + ax.set_xlabel('time step') + ax.set_ylabel(quantity_name) + + if legend: + ax.legend() + + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.68 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + plot(ax=ax, data=energies, quantity_name=r'$E / N$', legend=True) + + ax = fig.add_subplot(2, 1, 2) + plot(ax=ax, data=linear_momenta, quantity_name=r'$\left| \vec{p} \right| / N$') + + fig.suptitle( + 'LJ conservation tests: ' + f'$kT={job.cached_statepoint["kT"]}$, ' + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$r_\\mathrm{{cut}}={job.cached_statepoint["r_cut"]}$, ' + 
f'$N={job.cached_statepoint["num_particles"]}$' + ) + filename = ( + f'lj_fluid_conservation_kT{job.cached_statepoint["kT"]}_' + f'density{round(job.cached_statepoint["density"], 2)}_' + f'r_cut{round(jobs[0].statepoint.r_cut, 2)}_' + f'N{job.cached_statepoint["num_particles"]}.svg' + ) + + fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') + +ValidationWorkflow.add_action(f'{__name__}.conservation_analyze', Action(method = conservation_analyze, +configuration = { +'previous_actions': nve_md_sampling_jobs, +'group': _group_compare | _include_nve, +'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}})) From fb6e87b2cbb8c373296403da40bad357a425acc7 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Fri, 2 Aug 2024 14:21:57 -0400 Subject: [PATCH 08/34] Run pre-commit. --- hoomd_validation/init.py | 5 +- hoomd_validation/lj_fluid.py | 236 +++++++++++++++++++++++++---------- hoomd_validation/project.py | 12 +- hoomd_validation/util.py | 19 +-- hoomd_validation/workflow.py | 21 ++-- 5 files changed, 206 insertions(+), 87 deletions(-) diff --git a/hoomd_validation/init.py b/hoomd_validation/init.py index f8d19346..460770f0 100644 --- a/hoomd_validation/init.py +++ b/hoomd_validation/init.py @@ -6,16 +6,19 @@ Also, write the row `workflow.toml`. """ -import action + # import subprojects # import alj_2d import config + # import hard_disk # import hard_sphere import lj_fluid + # import lj_union # import patchy_particle_pressure import signac + # import simple_polygon subprojects = [ diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index c69fd8a4..7efaf39a 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -4,25 +4,22 @@ """Lennard Jones phase behavior validation test.""" import collections +import itertools import json import math import os -import pathlib -import itertools -import numpy + +import hoomd import matplotlib import matplotlib.figure import matplotlib.style +import numpy import scipy - -import hoomd - import util from config import CONFIG -from flow import aggregator -from workflow_class import ValidationWorkflow -from workflow import Action from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -37,7 +34,9 @@ LJ_PARAMS = {'epsilon': 1.0, 'sigma': 1.0} NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = (int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10) * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 # Limit the number of long NVE runs to reduce the number of CPU hours needed. 
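# ACTION_WALLTIME_IN_MINUTES is assumed to be exported by row when it launches
# an action; the expression above then leaves a ten-minute margin for the
# simulations to write restart files before the scheduler ends the job. The
# fallback of 10 only matters when an action is run by hand outside of a row
# submission.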
NUM_NVE_RUNS = 2 @@ -81,18 +80,39 @@ def job_statepoints(): ) -_group = {'sort_by': ["/density", "/num_particles"], 'include': [{'condition': ["/subproject", "==", __name__]}]} +_group = { + 'sort_by': ['/density', '/num_particles'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} _resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} _resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} -_group_cpu = _group | {'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS)} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} _resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} _group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} -_group_compare = _group | {'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], 'split_by_sort_key': True, 'submit_whole': True} - -_include_nve = {'include': [{'all': [["/subproject", "==", __name__], ["/replicate_idx", "<", NUM_NVE_RUNS]]}]} +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles', '/r_cut'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +_include_nve = { + 'include': [ + { + 'all': [ + ['/subproject', '==', __name__], + ['/replicate_idx', '<', NUM_NVE_RUNS], + ] + } + ] +} _group_nve_cpu = _group_cpu | _include_nve _group_nve_gpu = _group_gpu | _include_nve + def create_initial_state(*jobs): """Create initial system configuration.""" communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) @@ -152,8 +172,20 @@ def create_initial_state(*jobs): if communicator.rank == 0: print(f'completed {__name__}.create_initial_state: {job}') -ValidationWorkflow.add_action(f'{__name__}.create_initial_state', Action(method = create_initial_state, -configuration = {'products': ['initial_state.gsd'], 'launchers': ['mpi'], 'group': _group_cpu, 'resources': _resources_cpu | {'walltime': {'per_submission': CONFIG['short_walltime']}}})) + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) ################################# # MD ensemble simulations @@ -297,7 +329,9 @@ def run_md_sim(job, device, ensemble, thermostat): # thermalize the thermostat (if applicable) if ( - isinstance(method, (hoomd.md.methods.ConstantPressure, hoomd.md.methods.ConstantVolume)) + isinstance( + method, (hoomd.md.methods.ConstantPressure, hoomd.md.methods.ConstantVolume) + ) ) and hasattr(method.thermostat, 'thermalize_dof'): sim.run(0) method.thermostat.thermalize_dof() @@ -367,9 +401,7 @@ def run_md_sim(job, device, ensemble, thermostat): ) -def add_md_sampling_job( - ensemble, thermostat, device_name -): +def add_md_sampling_job(ensemble, thermostat, device_name): """Add a MD sampling job to the workflow.""" sim_mode = f'{ensemble}_{thermostat}_md' action_name = f'{__name__}.{sim_mode}_{device_name}' @@ -407,14 +439,23 @@ def md_sampling_operation(*jobs): print(f'completed {action_name}: {job}') md_sampling_jobs.append(action_name) - - ValidationWorkflow.add_action(action_name, Action(method = md_sampling_operation, - configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, 
device_name, 'quantities', 'h5')], - 'launchers': ['mpi'], - 'group': globals().get(f'_group_{device_name}'), - 'resources': globals().get(f'_resources_{device_name}'), - 'previous_actions': [f'{__name__}.create_initial_state'] - })) + + ValidationWorkflow.add_action( + action_name, + Action( + method=md_sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in md_job_definitions: @@ -740,9 +781,7 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_mc_sim')( - job, device - ) + globals().get(f'run_{mode}_mc_sim')(job, device) if communicator.rank == 0: print(f'completed {action_name}: {job}') @@ -750,13 +789,22 @@ def sampling_operation(*jobs): mc_sampling_jobs.append(action_name) sim_mode = mode + '_mc' - ValidationWorkflow.add_action(action_name, Action(method = sampling_operation, - configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'quantities', 'h5')], - 'launchers': ['mpi'], - 'group': globals().get(f'_group_{device_name}'), - 'resources': globals().get(f'_resources_{device_name}'), - 'previous_actions': [f'{__name__}.create_initial_state'] - })) + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in mc_job_definitions: @@ -829,7 +877,8 @@ def analyze(*jobs): if 'md' in sim_mode and 'langevin' not in sim_mode: momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] linear_momentum[sim_mode] = [ - math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector + math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) + for v in momentum_vector ] else: linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) @@ -891,11 +940,22 @@ def analyze(*jobs): ) fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') -ValidationWorkflow.add_action(f'{__name__}.analyze', Action(method = analyze, -configuration = {'products': ['nvt_npt_plots.svg'], -'previous_actions': md_sampling_jobs + mc_sampling_jobs, -'group': _group, -'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:01:00'}}})) + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': md_sampling_jobs + mc_sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, + ), +) def compare_modes(*jobs): @@ -1013,11 +1073,20 @@ def compare_modes(*jobs): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') -ValidationWorkflow.add_action(f'{__name__}.compare_modes', Action(method = compare_modes, -configuration = { -'previous_actions': [f'{__name__}.analyze'], -'group': _group_compare, 
-'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}})) +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) def distribution_analyze(*jobs): @@ -1169,18 +1238,27 @@ def distribution_analyze(*jobs): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') -ValidationWorkflow.add_action(f'{__name__}.distribution_analyze', Action(method = distribution_analyze, -configuration = { -'previous_actions': [f'{__name__}.analyze'], -'group': _group_compare, -'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}})) +ValidationWorkflow.add_action( + f'{__name__}.distribution_analyze', + Action( + method=distribution_analyze, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) # ################################# # # MD conservation simulations # ################################# -def run_nve_md_sim(job, device,run_length): +def run_nve_md_sim(job, device, run_length): """Run the MD simulation in NVE.""" sim_mode = 'nve_md' if util.is_simulation_complete(job, device, sim_mode): @@ -1258,7 +1336,7 @@ def add_nve_md_job(device_name, run_length): """Add a MD NVE conservation job to the workflow.""" sim_mode = 'nve_md' action_name = f'{__name__}.{sim_mode}_{device_name}' - + def nve_action(*jobs): """Run NVE MD.""" communicator = hoomd.communicator.Communicator( @@ -1291,18 +1369,28 @@ def nve_action(*jobs): nve_md_sampling_jobs.append(action_name) - ValidationWorkflow.add_action(action_name, Action(method = nve_action, - configuration={'products': [util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), util.get_job_filename(sim_mode, device_name, 'quantities', 'h5')], - 'launchers': ['mpi'], - 'group': globals().get(f'_group_nve_{device_name}'), - 'resources': globals().get(f'_resources_{device_name}'), - 'previous_actions': [f'{__name__}.create_initial_state'] - })) + ValidationWorkflow.add_action( + action_name, + Action( + method=nve_action, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_nve_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in nve_md_job_definitions: add_nve_md_job(**definition) + def conservation_analyze(*jobs): """Analyze the output of NVE simulations and inspect conservation.""" matplotlib.style.use('fivethirtyeight') @@ -1388,8 +1476,18 @@ def plot(*, ax, data, quantity_name, legend=False): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') -ValidationWorkflow.add_action(f'{__name__}.conservation_analyze', Action(method = conservation_analyze, -configuration = { -'previous_actions': nve_md_sampling_jobs, -'group': _group_compare | _include_nve, -'resources': {'processes': {'per_submission': 1}, 'walltime': {'per_directory': '00:02:00'}}})) + +ValidationWorkflow.add_action( + f'{__name__}.conservation_analyze', + Action( + 
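        # `previous_actions` chains this analysis behind every action name
        # collected in nve_md_sampling_jobs (CPU, plus GPU when enabled), and
        # `_group_compare | _include_nve` swaps in the NVE-only include so
        # only directories that actually ran NVE are grouped for comparison.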
method=conservation_analyze, + configuration={ + 'previous_actions': nve_md_sampling_jobs, + 'group': _group_compare | _include_nve, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 8b7fa3b3..f05089a8 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -6,16 +6,17 @@ # Define subproject flow operations # import alj_2d import config + # import hard_disk # import hard_sphere import lj_fluid +import signac + # import lj_union # import patchy_particle_pressure # import simple_polygon from workflow_class import ValidationWorkflow -import signac - all_subprojects = [ # 'alj_2d', lj_fluid, @@ -26,6 +27,7 @@ # 'patchy_particle_pressure', ] + def init(args): """Initialize the workspace.""" # TODO: uncomment @@ -46,4 +48,8 @@ def init(args): if __name__ == '__main__': - ValidationWorkflow.main(entrypoint = config.project_root / 'hoomd_validation' / 'project.py', init=init, path=config.project_root) + ValidationWorkflow.main( + entrypoint=config.project_root / 'hoomd_validation' / 'project.py', + init=init, + path=config.project_root, + ) diff --git a/hoomd_validation/util.py b/hoomd_validation/util.py index 292bd4b2..5c83ab89 100644 --- a/hoomd_validation/util.py +++ b/hoomd_validation/util.py @@ -4,11 +4,11 @@ """Helper functions for grabbing data and plotting.""" import os -import numpy -import h5py -import signac +import h5py import hoomd +import numpy +import signac def get_job_filename(sim_mode, device, name, file_type): @@ -167,6 +167,7 @@ def is_simulation_complete( return gsd_exists and h5_exists + def mark_simulation_complete( job, device, @@ -184,11 +185,15 @@ def mark_simulation_complete( sim_mode (str): String defining the simulation mode. """ if device.communicator.rank == 0: - os.rename(job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd.tmp')), - job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd'))) + os.rename( + job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd.tmp')), + job.fn(get_job_filename(sim_mode, device, 'trajectory', 'gsd')), + ) - os.rename(job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5.tmp')), - job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5'))) + os.rename( + job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5.tmp')), + job.fn(get_job_filename(sim_mode, device, 'quantities', 'h5')), + ) def make_seed(job, sim_mode=None): diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py index 28883e52..a3f0c99e 100644 --- a/hoomd_validation/workflow.py +++ b/hoomd_validation/workflow.py @@ -13,9 +13,9 @@ import argparse from pathlib import Path -import signac import rtoml +import signac class Action: @@ -47,6 +47,7 @@ def __call__(self, *jobs): """Call the `method` given on construction.""" self._method(*jobs) + class Workflow: """Represent a single workflow.""" @@ -82,9 +83,15 @@ def write_workflow(cls, entrypoint, path=None, default=None): path(Path): Path to write ``workflow.toml``. default(dict): The ``[default]`` mapping. 
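        A minimal sketch of the preamble this writes, assuming the default
        workspace layout and no ``default`` overrides:

            [workspace]
            path = "workspace"
            value_file = "signac_statepoint.json"

            [default.action]
            command = "python -u <entrypoint> action $ACTION_NAME {directories}"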
""" - workflow = {'workspace': {'path': 'workspace', 'value_file': 'signac_statepoint.json'}} + workflow = { + 'workspace': {'path': 'workspace', 'value_file': 'signac_statepoint.json'} + } - workflow['default'] = {'action': {'command': f'python -u {entrypoint} action $ACTION_NAME {{directories}}'}} + workflow['default'] = { + 'action': { + 'command': f'python -u {entrypoint} action $ACTION_NAME {{directories}}' + } + } if default is not None: workflow['default'].update(default) @@ -102,7 +109,7 @@ def write_workflow(cls, entrypoint, path=None, default=None): rtoml.dump(workflow, workflow_file, pretty=True) @classmethod - def main(cls, init = None, init_args = None, **kwargs): + def main(cls, init=None, init_args=None, **kwargs): """Implement the main entrypoint for ``project.py``. Valid commands are: @@ -120,7 +127,7 @@ def main(cls, init = None, init_args = None, **kwargs): **kwargs: Fowarded to `make_workflow`. """ parser = argparse.ArgumentParser() - command = parser.add_subparsers(dest="command", required=True) + command = parser.add_subparsers(dest='command', required=True) init_parser = command.add_parser('init') if init_args is not None: for arg in init_args: @@ -139,9 +146,9 @@ def main(cls, init = None, init_args = None, **kwargs): cls.write_workflow(**kwargs) elif args.command == 'action': project = signac.get_project() - jobs = [project.open_job(id=directory) for directory in args.directories] + jobs = [project.open_job(id=directory) for directory in args.directories] cls._actions[args.action](*jobs) - + else: message = f'Invalid command: {args.command}' raise RuntimeError(message) From 59c47bcedb473869a91a0359a59e4d02e8c02a33 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 12 Aug 2024 15:20:32 -0400 Subject: [PATCH 09/34] Convert lj_union. --- hoomd_validation/lj_fluid.py | 8 - hoomd_validation/lj_union.py | 735 +++++++++++++++++------------------ hoomd_validation/project.py | 4 +- 3 files changed, 355 insertions(+), 392 deletions(-) diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 7efaf39a..a9e091f3 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -1305,14 +1305,6 @@ def run_nve_md_sim(job, device, run_length): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') -def is_lj_fluid_nve(job): - """Test if a given job should be run for NVE conservation.""" - return ( - job.cached_statepoint['subproject'] == 'lj_fluid' - and job.cached_statepoint['replicate_idx'] < NUM_NVE_RUNS - ) - - nve_md_sampling_jobs = [] nve_md_job_definitions = [ { diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index a9878f91..cbabbb11 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -4,15 +4,22 @@ """Lennard Jones phase behavior validation test (union particles).""" import collections +import itertools import json import math import os -import pathlib +import hoomd +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy +import scipy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. 
@@ -37,7 +44,9 @@ (0.5, 0.5, 0.5), ] -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 # Limit the number of long NVE runs to reduce the number of CPU hours needed. NUM_NVE_RUNS = 2 @@ -62,47 +71,51 @@ def job_statepoints(): ) -def is_lj_union(job): - """Test if a given job is part of the lj_union subproject.""" - return job.cached_statepoint['subproject'] == 'lj_union' - - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_lj_union, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_lj_union, -) +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +_include_nve = { + 'include': [ + { + 'all': [ + ['/subproject', '==', __name__], + ['/replicate_idx', '<', NUM_NVE_RUNS], + ] + } + ] +} +_group_nve_cpu = _group_cpu | _include_nve +_group_nve_gpu = _group_gpu | _include_nve -@Project.post.isfile('lj_union_initial_state_md.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=CONFIG['short_walltime'], - ), - aggregator=partition_jobs_cpu_mpi, -) -def lj_union_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - min_spacing = math.sqrt(3) + 1 communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd') and job.isfile('initial_state_md.gsd'): + return + if communicator.rank == 0: - print('starting lj_union_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) sp = job.sp device = hoomd.device.CPU( @@ -148,7 +161,7 @@ def lj_union_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('lj_union_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) # Create rigid bodies for MD @@ -163,12 +176,26 @@ def lj_union_create_initial_state(*jobs): rigid.create_bodies(sim.state) hoomd.write.GSD.write( - state=sim.state, filename=job.fn('lj_union_initial_state_md.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state_md.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed lj_union_create_initial_state: {job}') - + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd', 'initial_state_md.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | 
{'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) ################################# # MD ensemble simulations @@ -205,15 +232,12 @@ def make_md_simulation( period_multiplier (int): Factor to multiply the GSD file periods by. """ - import hoomd - from hoomd import md - if extra_loggables is None: extra_loggables = [] # pair force - nlist = md.nlist.Cell(buffer=0.4, exclusions=('body',)) - lj = md.pair.LJ( + nlist = hoomd.md.nlist.Cell(buffer=0.4, exclusions=('body',)) + lj = hoomd.md.pair.LJ( default_r_cut=LJ_PARAMS['r_cut'], default_r_on=LJ_PARAMS['r_on'], nlist=nlist ) lj.params[('A', 'A')] = dict(sigma=LJ_PARAMS['sigma'], epsilon=LJ_PARAMS['epsilon']) @@ -227,7 +251,7 @@ def make_md_simulation( lj.mode = 'xplor' # integrator - integrator = md.Integrator( + integrator = hoomd.md.Integrator( dt=0.0005, methods=[method], forces=[lj], integrate_rotational_dof=True ) @@ -241,7 +265,7 @@ def make_md_simulation( integrator.rigid = rigid # compute thermo - thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All()) # add gsd log quantities logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) @@ -283,29 +307,25 @@ def make_md_simulation( return sim -def run_md_sim(job, device, ensemble, thermostat, complete_filename): +def run_md_sim(job, device, ensemble, thermostat): """Run the MD simulation with the given ensemble and thermostat.""" - import hoomd - from custom_actions import ComputeDensity - from hoomd import md - - initial_state = job.fn('lj_union_initial_state_md.gsd') + initial_state = job.fn('initial_state_md.gsd') integrate_filter = hoomd.filter.Rigid(flags=('center',)) if ensemble == 'nvt': if thermostat == 'langevin': - method = md.methods.Langevin( + method = hoomd.md.methods.Langevin( filter=integrate_filter, kT=job.cached_statepoint['kT'] ) method.gamma.default = 1.0 elif thermostat == 'mttk': - method = md.methods.ConstantVolume(filter=integrate_filter) + method = hoomd.md.methods.ConstantVolume(filter=integrate_filter) method.thermostat = hoomd.md.methods.thermostats.MTTK( kT=job.cached_statepoint['kT'], tau=0.25 ) elif thermostat == 'bussi': - method = md.methods.ConstantVolume(filter=integrate_filter) + method = hoomd.md.methods.ConstantVolume(filter=integrate_filter) method.thermostat = hoomd.md.methods.thermostats.Bussi( kT=job.cached_statepoint['kT'] ) @@ -313,7 +333,7 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): raise ValueError(f'Unsupported thermostat {thermostat}') elif ensemble == 'npt': p = job.cached_statepoint['pressure'] - method = md.methods.ConstantPressure( + method = hoomd.md.methods.ConstantPressure( integrate_filter, S=[p, p, p, 0, 0, 0], tauS=3, couple='xyz' ) if thermostat == 'bussi': @@ -325,6 +345,9 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): sim_mode = f'{ensemble}_{thermostat}_md' + if util.is_simulation_complete(job, device, sim_mode): + return + density_compute = ComputeDensity(job.cached_statepoint['num_particles']) sim = make_md_simulation( job, device, initial_state, method, sim_mode, extra_loggables=[density_compute] @@ -337,7 +360,9 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): # thermalize the thermostat (if applicable) if ( - isinstance(method, (md.methods.ConstantPressure, md.methods.ConstantVolume)) + isinstance( + method, (hoomd.md.methods.ConstantPressure, hoomd.md.methods.ConstantVolume) + ) ) and hasattr(method.thermostat, 'thermalize_dof'): 
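        # Running zero steps first attaches the integrator and thermostat to
        # the simulation state; HOOMD only permits thermalize_dof() once the
        # thermostat is attached. The isinstance/hasattr guard skips Langevin,
        # which carries no separate thermostat object.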
sim.run(0) method.thermostat.thermalize_dof() @@ -352,7 +377,7 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) md_sampling_jobs = [] @@ -361,29 +386,21 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): 'ensemble': 'nvt', 'thermostat': 'langevin', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'nvt', 'thermostat': 'mttk', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'nvt', 'thermostat': 'bussi', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'ensemble': 'npt', 'thermostat': 'bussi', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, ] @@ -394,67 +411,40 @@ def run_md_sim(job, device, ensemble, thermostat, complete_filename): 'ensemble': 'nvt', 'thermostat': 'langevin', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'nvt', 'thermostat': 'mttk', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'nvt', 'thermostat': 'bussi', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, { 'ensemble': 'npt', 'thermostat': 'bussi', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, ] ) -def add_md_sampling_job( - ensemble, thermostat, device_name, ranks_per_partition, aggregator -): +def add_md_sampling_job(ensemble, thermostat, device_name): """Add a MD sampling job to the workflow.""" sim_mode = f'{ensemble}_{thermostat}_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_union_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_union_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def md_sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_union_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -473,13 +463,29 @@ def md_sampling_operation(*jobs): device, ensemble, thermostat, - complete_filename=f'{sim_mode}_{device_name}_complete', ) if communicator.rank == 0: - print(f'completed lj_union_{sim_mode}_{device_name}: {job}') - - md_sampling_jobs.append(md_sampling_operation) + print(f'completed {action_name}: {job}') + + md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=md_sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': 
['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in md_job_definitions: @@ -502,20 +508,16 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non extra_loggables (list): List of extra loggables to log to gsd files. Patch energies are logged by default. """ - import hoomd - from custom_actions import ComputeDensity - from hoomd import hpmc - if extra_loggables is None: extra_loggables = [] # integrator - mc = hpmc.integrate.Sphere(nselect=1) + mc = hoomd.hpmc.integrate.Sphere(nselect=1) mc.shape['A'] = dict(diameter=0.0) mc.shape['R'] = dict(diameter=0.0, orientable=True) # pair potential - lennard_jones = hpmc.pair.LennardJones() + lennard_jones = hoomd.hpmc.pair.LennardJones() lennard_jones.params[('A', 'A')] = dict( epsilon=LJ_PARAMS['epsilon'] / job.cached_statepoint['kT'], sigma=LJ_PARAMS['sigma'], @@ -527,7 +529,7 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non lennard_jones.mode = 'xplor' - lj_union = hpmc.pair.Union(constituent_potential=lennard_jones) + lj_union = hoomd.hpmc.pair.Union(constituent_potential=lennard_jones) lj_union.body['A'] = dict(positions=[], types=[]) lj_union.body['R'] = dict(positions=CUBE_VERTS, types=['A'] * len(CUBE_VERTS)) @@ -565,7 +567,7 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non compute_density.attach(sim) # move size tuner - mstuner = hpmc.tune.MoveSize.scale_solver( + mstuner = hoomd.hpmc.tune.MoveSize.scale_solver( moves=['a', 'd'], types=['R'], target=0.2, @@ -585,17 +587,19 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non def run_nvt_mc_sim(job, device, complete_filename): """Run MC sim in NVT.""" - import hoomd - # simulation sim_mode = 'nvt_mc' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('lj_union_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode) @@ -656,7 +660,7 @@ def run_nvt_mc_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -667,21 +671,21 @@ def run_nvt_mc_sim(job, device, complete_filename): def run_npt_mc_sim(job, device, complete_filename): """Run MC sim in NPT.""" - import hoomd - from hoomd import hpmc - sim_mode = 'npt_mc' + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('lj_union_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates - boxmc = hpmc.update.BoxMC( + boxmc = hoomd.hpmc.update.BoxMC( betaP=job.cached_statepoint['pressure'] / job.cached_statepoint['kT'], trigger=hoomd.trigger.Periodic(1), ) @@ -694,7 +698,7 @@ def run_npt_mc_sim(job, device, complete_filename): sim.operations.add(boxmc) - boxmc_tuner = 
hpmc.tune.BoxMCMoveSize.scale_solver( + boxmc_tuner = hoomd.hpmc.tune.BoxMCMoveSize.scale_solver( trigger=hoomd.trigger.And( [ hoomd.trigger.Periodic(400), @@ -771,7 +775,7 @@ def run_npt_mc_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -785,47 +789,27 @@ def run_npt_mc_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, }, ] -def add_mc_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_mc_sampling_job(mode, device_name): """Add a MC sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) + action_name = f'{__name__}.{mode}_mc_{device_name}' - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_union_create_initial_state) - @Project.post.isfile(f'{mode}_mc_{device_name}_complete') - @Project.operation( - name=f'lj_union_{mode}_mc_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_union_{mode}_mc_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -846,178 +830,183 @@ def sampling_operation(*jobs): ) if communicator.rank == 0: - print(f'completed lj_union_{mode}_mc_{device_name} {job}') - - mc_sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + mc_sampling_jobs.append(action_name) + + sim_mode = mode + '_mc' + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in mc_job_definitions: add_mc_sampling_job(**definition) -@Project.pre(is_lj_union) -@Project.pre.after(*md_sampling_jobs) -@Project.pre.after(*mc_sampling_jobs) -@Project.post.true('lj_union_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def lj_union_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_union_analyze:', job) - - sim_modes = [ - 'nvt_langevin_md_cpu', - 'nvt_mttk_md_cpu', - 'nvt_bussi_md_cpu', - 'npt_bussi_md_cpu', - ] + for job in jobs: + 
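        # Row hands the action every matched directory at once, so the body
        # that flow invoked once per job now runs in an explicit loop.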
print(f'starting {__name__}.analyze:', job) - if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): - sim_modes.extend( - [ - 'nvt_langevin_md_gpu', - 'nvt_mttk_md_gpu', - 'nvt_bussi_md_gpu', - 'npt_bussi_md_gpu', - ] - ) + sim_modes = [ + 'nvt_langevin_md_cpu', + 'nvt_mttk_md_cpu', + 'nvt_bussi_md_cpu', + 'npt_bussi_md_cpu', + ] - if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): - sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) + if os.path.exists(job.fn('nvt_langevin_md_gpu_quantities.h5')): + sim_modes.extend( + [ + 'nvt_langevin_md_gpu', + 'nvt_mttk_md_gpu', + 'nvt_bussi_md_gpu', + 'npt_bussi_md_gpu', + ] + ) - util._sort_sim_modes(sim_modes) + if os.path.exists(job.fn('nvt_mc_cpu_quantities.h5')): + sim_modes.extend(['nvt_mc_cpu', 'npt_mc_cpu']) - timesteps = {} - energies = {} - pressures = {} - densities = {} - linear_momentum = {} + util._sort_sim_modes(sim_modes) - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + timesteps = {} + energies = {} + pressures = {} + densities = {} + linear_momentum = {} - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - if 'md' in sim_mode: - energies[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' - ] - else: - energies[sim_mode] = ( - log_traj['hoomd-data/hpmc/pair/Union/energy'] - * job.cached_statepoint['kT'] - ) + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - energies[sim_mode] /= job.cached_statepoint['num_particles'] + if 'md' in sim_mode: + energies[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/potential_energy' + ] + else: + energies[sim_mode] = ( + log_traj['hoomd-data/hpmc/pair/Union/energy'] + * job.cached_statepoint['kT'] + ) - if 'md' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' - ] - else: - pressures[sim_mode] = numpy.full(len(energies[sim_mode]), numpy.nan) + energies[sim_mode] /= job.cached_statepoint['num_particles'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + if 'md' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/md/compute/ThermodynamicQuantities/pressure' + ] + else: + pressures[sim_mode] = numpy.full(len(energies[sim_mode]), numpy.nan) - if 'md' in sim_mode and 'langevin' not in sim_mode: - momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] - linear_momentum[sim_mode] = [ - math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in momentum_vector + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) - - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - potential_energy=float(numpy.mean(energies[mode])), - density=float(numpy.mean(densities[mode])), - ) - fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') - ax = fig.add_subplot(2, 2, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 2, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - - ax = fig.add_subplot(2, 2, 3) - 
util.plot_timeseries( - ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 - ) + if 'md' in sim_mode and 'langevin' not in sim_mode: + momentum_vector = log_traj['hoomd-data/md/Integrator/linear_momentum'] + linear_momentum[sim_mode] = [ + math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) + for v in momentum_vector + ] + else: + linear_momentum[sim_mode] = numpy.zeros(len(energies[sim_mode])) + + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + potential_energy=float(numpy.mean(energies[mode])), + density=float(numpy.mean(densities[mode])), + ) - ax = fig.add_subplot(2, 2, 4) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data={ - mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] - for mode, lm in linear_momentum.items() - }, - ylabel=r'$|\vec{p}| / N$', - max_points=500, - ) + fig = matplotlib.figure.Figure(figsize=(20, 20 / 3.24 * 2), layout='tight') + ax = fig.add_subplot(2, 2, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() - fig.suptitle( - f'$kT={job.cached_statepoint["kT"]}$, ' - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + ax = fig.add_subplot(2, 2, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) - job.document['lj_union_analysis_complete'] = True + ax = fig.add_subplot(2, 2, 3) + util.plot_timeseries( + ax=ax, timesteps=timesteps, data=energies, ylabel='$U / N$', max_points=500 + ) + ax = fig.add_subplot(2, 2, 4) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data={ + mode: numpy.asarray(lm) / job.cached_statepoint['num_particles'] + for mode, lm in linear_momentum.items() + }, + ylabel=r'$|\vec{p}| / N$', + max_points=500, + ) -analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles'], sort_by='replicate_idx', select=is_lj_union + fig.suptitle( + f'$kT={job.cached_statepoint["kT"]}$, ' + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': md_sampling_jobs + mc_sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, + ), ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='lj_union_analysis_complete')) -@Project.post(lambda *jobs: util.true_all(*jobs, key='lj_union_compare_modes_complete')) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def lj_union_compare_modes(*jobs): +def compare_modes(*jobs): """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting lj_union_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [ 
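        # Mode names encode the ensemble, thermostat (MD only), engine, and
        # device; they match the *_quantities.h5 prefixes and the job-document
        # keys written by analyze.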
'nvt_langevin_md_cpu', @@ -1104,29 +1093,28 @@ def lj_union_compare_modes(*jobs): filename = f'lj_union_compare_kT{kT}_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_union_compare_modes_complete'] = True - -@Project.pre.after(*md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_union_distribution_analyze_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), ) -def lj_union_distribution_analyze(*jobs): - """Checks that MD follows the correct KE distribution.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - import scipy + +def distribution_analyze(*jobs): + """Checks that MD follows the correct KE distribution.""" matplotlib.style.use('fivethirtyeight') - print('starting lj_union_distribution_analyze:', jobs[0]) + print(f'starting {__name__}.distribution_analyze:', jobs[0]) sim_modes = [ 'nvt_langevin_md_cpu', @@ -1183,7 +1171,6 @@ def lj_union_distribution_analyze(*jobs): n_rotate_dof = num_particles * 3 - print('Reading' + job.fn(sim_mode + '_quantities.h5')) log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) if 'md' in sim_mode: @@ -1312,8 +1299,21 @@ def lj_union_distribution_analyze(*jobs): ) fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_union_distribution_analyze_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.distribution_analyze', + Action( + method=distribution_analyze, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) ################################# @@ -1323,16 +1323,18 @@ def lj_union_distribution_analyze(*jobs): def run_nve_md_sim(job, device, run_length, complete_filename): """Run the MD simulation in NVE.""" - import hoomd - sim_mode = 'nve_md' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') is_restarting = job.isfile(restart_filename) if is_restarting: initial_state = job.fn(restart_filename) else: - initial_state = job.fn('lj_union_initial_state_md.gsd') + initial_state = job.fn('initial_state_md.gsd') nve = hoomd.md.methods.ConstantVolume(hoomd.filter.Rigid(flags=('center',))) @@ -1356,7 +1358,7 @@ def run_nve_md_sim(job, device, run_length, complete_filename): ) if sim.timestep == RANDOMIZE_STEPS + EQUILIBRATE_STEPS + run_length: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -1367,32 +1369,10 @@ def run_nve_md_sim(job, device, run_length, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') -def is_lj_union_nve(job): - """Test if a given job should be run for NVE conservation.""" - return ( - job.cached_statepoint['subproject'] == 'lj_union' - and job.cached_statepoint['replicate_idx'] < 
NUM_NVE_RUNS - ) - - -partition_jobs_cpu_mpi_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_lj_union_nve, -) - -partition_jobs_gpu_nve = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_lj_union_nve, -) - nve_md_sampling_jobs = [] nve_md_job_definitions = [ { 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_nve, 'run_length': 10_000_000, }, ] @@ -1402,45 +1382,26 @@ def is_lj_union_nve(job): [ { 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu_nve, 'run_length': 100_000_000, }, ] ) -def add_nve_md_job(device_name, ranks_per_partition, aggregator, run_length): +def add_nve_md_job(device_name, run_length): """Add a MD NVE conservation job to the workflow.""" sim_mode = 'nve_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(lj_union_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'lj_union_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def lj_union_nve_md_job(*jobs): + def nve_action(*jobs): """Run NVE MD.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting lj_union_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -1461,40 +1422,37 @@ def lj_union_nve_md_job(*jobs): ) if communicator.rank == 0: - print(f'completed lj_union_{sim_mode}_{device_name} {job}') - - nve_md_sampling_jobs.append(lj_union_nve_md_job) + print(f'completed {action_name} {job}') + + nve_md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=nve_action, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_nve_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in nve_md_job_definitions: add_nve_md_job(**definition) -nve_analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles'], - sort_by='replicate_idx', - select=is_lj_union_nve, -) - -@Project.pre.after(*nve_md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='lj_union_conservation_analysis_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=nve_analysis_aggregator, -) -def lj_union_conservation_analyze(*jobs): +def conservation_analyze(*jobs): """Analyze the output of NVE simulations and inspect conservation.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - matplotlib.style.use('fivethirtyeight') - print('starting lj_union_conservation_analyze:', 
jobs[0]) + print(f'starting {__name__}.conservation_analyze:', jobs[0]) sim_modes = ['nve_md_cpu'] if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): @@ -1573,5 +1531,18 @@ def plot(*, ax, data, quantity_name, legend=False): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['lj_union_conservation_analysis_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.conservation_analyze', + Action( + method=conservation_analyze, + configuration={ + 'previous_actions': nve_md_sampling_jobs, + 'group': _group_compare | _include_nve, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index f05089a8..c0d5d0f1 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -10,9 +10,9 @@ # import hard_disk # import hard_sphere import lj_fluid +import lj_union import signac -# import lj_union # import patchy_particle_pressure # import simple_polygon from workflow_class import ValidationWorkflow @@ -20,7 +20,7 @@ all_subprojects = [ # 'alj_2d', lj_fluid, - # 'lj_union', + lj_union, # 'hard_disk', # 'hard_sphere', # 'simple_polygon', From 08902612de0f4137a9435c2e36c9dc19afa51430 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 12 Aug 2024 15:25:29 -0400 Subject: [PATCH 10/34] Remove unused init.py --- hoomd_validation/init.py | 40 ---------------------------------------- 1 file changed, 40 deletions(-) delete mode 100644 hoomd_validation/init.py diff --git a/hoomd_validation/init.py b/hoomd_validation/init.py deleted file mode 100644 index 460770f0..00000000 --- a/hoomd_validation/init.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) 2022-2024 The Regents of the University of Michigan. -# Part of HOOMD-blue, released under the BSD 3-Clause License. - -"""Populate the signac project with jobs and job document parameters. - -Also, write the row `workflow.toml`. -""" - - -# import subprojects -# import alj_2d -import config - -# import hard_disk -# import hard_sphere -import lj_fluid - -# import lj_union -# import patchy_particle_pressure -import signac - -# import simple_polygon - -subprojects = [ - # alj_2d, - lj_fluid, - # lj_union, - # hard_disk, - # hard_sphere, - # simple_polygon, - # patchy_particle_pressure, -] - -project = signac.init_project(path=config.project_root) - -# initialize jobs for validation test projects -for subproject in subprojects: - # add all the jobs to the project - for job_sp in subproject.job_statepoints(): - job = project.open_job(job_sp).init() From bb97ab7c56b4df55c81b5491470a640b9e4f2c4f Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 19 Aug 2024 10:38:25 -0400 Subject: [PATCH 11/34] Port alj_2d to row. 
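Replace flow's @Project.operation decorators, aggregators, and directives
with row actions registered through ValidationWorkflow.add_action, following
the pattern already applied to lj_fluid and lj_union. A 'group' dict takes
over job selection and batching from aggregator.groupsof, a 'resources' dict
takes over the walltime/nranks/ngpu directives, and MPI actions read their
partition size from the ACTION_PROCESSES_PER_DIRECTORY environment variable.

The registration pattern, as a rough sketch (the Action signature follows
workflow.py in this repository; the action name and configuration values
below are illustrative, not the real alj_2d ones):

    def example_action(*jobs):
        """Row passes every signac job in the submitted group."""
        for job in jobs:
            print('processing', job)

    ValidationWorkflow.add_action(
        f'{__name__}.example_action',
        Action(
            method=example_action,
            configuration={
                'products': ['output.gsd'],
                'launchers': ['mpi'],
                'group': _group_cpu,
                'resources': _resources_cpu,
                'previous_actions': [f'{__name__}.create_initial_state'],
            },
        ),
    )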
--- hoomd_validation/alj_2d.py | 209 ++++++++++++++++++----------------- hoomd_validation/lj_fluid.py | 2 +- hoomd_validation/lj_union.py | 2 +- hoomd_validation/project.py | 4 +- 4 files changed, 110 insertions(+), 107 deletions(-) diff --git a/hoomd_validation/alj_2d.py b/hoomd_validation/alj_2d.py index 93097cd8..7bcaf515 100644 --- a/hoomd_validation/alj_2d.py +++ b/hoomd_validation/alj_2d.py @@ -3,13 +3,19 @@ """ALJ 2D energy conservation validation test.""" +import itertools +import math import os -import pathlib +import hoomd +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -36,7 +42,9 @@ NUM_REPLICATES = min(4, CONFIG['replicates']) NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -57,45 +65,37 @@ def job_statepoints(): ) -def is_alj_2d(job): - """Test if a given job is part of the alj_2d subproject.""" - return job.cached_statepoint['subproject'] == 'alj_2d' - - -partition_jobs_cpu = aggregator.groupsof( - num=min(NUM_REPLICATES, CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_alj_2d, -) +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} -partition_jobs_gpu = aggregator.groupsof( - num=min(NUM_REPLICATES, CONFIG['max_gpus_submission']), - sort_by='density', - select=is_alj_2d, -) +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} -@Project.post.isfile('alj_2d_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=CONFIG['short_walltime'], - ), - aggregator=partition_jobs_cpu, -) -def alj_2d_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting alj2_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) init_diameter = CIRCUMCIRCLE_RADIUS * 2 * 1.15 @@ -143,11 +143,26 @@ def alj_2d_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('alj_2d_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed alj_2d_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: 
{job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_md_simulation( @@ -172,9 +187,6 @@ def make_md_simulation( period_multiplier (int): Factor to multiply the GSD file periods by. """ - import hoomd - from hoomd import md - incircle_d = INCIRCLE_RADIUS * 2 circumcircle_d = CIRCUMCIRCLE_RADIUS * 2 r_cut = max( @@ -182,8 +194,8 @@ def make_md_simulation( ) # pair force - nlist = md.nlist.Cell(buffer=0.4) - alj = md.pair.aniso.ALJ(default_r_cut=r_cut, nlist=nlist) + nlist = hoomd.md.nlist.Cell(buffer=0.4) + alj = hoomd.md.pair.aniso.ALJ(default_r_cut=r_cut, nlist=nlist) alj.shape['A'] = {'vertices': PARTICLE_VERTICES, 'faces': [], 'rounding_radii': 0} alj.params[('A', 'A')] = { 'epsilon': ALJ_PARAMS['epsilon'], @@ -193,12 +205,12 @@ def make_md_simulation( } # integrator - integrator = md.Integrator( + integrator = hoomd.md.Integrator( dt=0.0001, methods=[method], forces=[alj], integrate_rotational_dof=True ) # compute thermo - thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) + thermo = hoomd.md.compute.ThermodynamicQuantities(hoomd.filter.All()) # add gsd log quantities logger = hoomd.logging.Logger(categories=['scalar', 'sequence']) @@ -238,16 +250,18 @@ def make_md_simulation( return sim -def run_nve_md_sim(job, device, complete_filename): +def run_nve_md_sim(job, device): """Run the MD simulation in NVE.""" - import hoomd - sim_mode = 'nve_md' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) else: - initial_state = job.fn('alj_2d_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') nve = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) @@ -268,7 +282,7 @@ def run_nve_md_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -281,8 +295,6 @@ def run_nve_md_sim(job, device, complete_filename): nve_md_job_definitions = [ { 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu, }, ] @@ -291,44 +303,25 @@ def run_nve_md_sim(job, device, complete_filename): [ { 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, }, ] ) -def add_nve_md_job(device_name, ranks_per_partition, aggregator): +def add_nve_md_job(device_name): """Add a MD NVE conservation job to the workflow.""" sim_mode = 'nve_md' + action_name = f'{__name__}.{sim_mode}_{device_name}' - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = util.total_ranks_function(ranks_per_partition) - - @Project.pre.after(alj_2d_create_initial_state) - @Project.post.isfile(f'{sim_mode}_{device_name}_complete') - @Project.operation( - name=f'alj_2d_{sim_mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) - def alj_2d_nve_md_job(*jobs): + def nve_action(*jobs): """Run NVE MD.""" 
- import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting alj_2d_{sim_mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -341,43 +334,40 @@ def alj_2d_nve_md_job(*jobs): job, f'{sim_mode}_{device_name}.log' ), ) - run_nve_md_sim( - job, device, complete_filename=f'{sim_mode}_{device_name}_complete' - ) + run_nve_md_sim(job, device) if communicator.rank == 0: - print(f'completed alj_2d_{sim_mode}_{device_name}: {job}') - - nve_md_sampling_jobs.append(alj_2d_nve_md_job) + print(f'completed {action_name}: {job}') + + nve_md_sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=nve_action, + configuration={ + 'products': [ + util.get_job_filename(sim_mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(sim_mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': globals().get(f'_group_{device_name}'), + 'resources': globals().get(f'_resources_{device_name}'), + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in nve_md_job_definitions: add_nve_md_job(**definition) -analysis_aggregator = aggregator.groupby( - key=['kT', 'density', 'num_particles'], sort_by='replicate_idx', select=is_alj_2d -) - -@Project.pre.after(*nve_md_sampling_jobs) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='alj_2d_conservation_analysis_complete') -) -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']), - aggregator=analysis_aggregator, -) -def alj_2d_conservation_analyze(*jobs): +def conservation_analyze(*jobs): """Analyze the output of NVE simulations and inspect conservation.""" - import math - - import matplotlib - import matplotlib.figure - import matplotlib.style - matplotlib.style.use('fivethirtyeight') - print('starting alj_2d_conservation_analyze:', jobs[0]) + print(f'starting {__name__}.conservation_analyze:', jobs[0]) sim_modes = ['nve_md_cpu'] if os.path.exists(jobs[0].fn('nve_md_gpu_quantities.h5')): @@ -456,5 +446,18 @@ def plot(*, ax, data, quantity_name, legend=False): fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['alj_2d_conservation_analysis_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.conservation_analyze', + Action( + method=conservation_analyze, + configuration={ + 'previous_actions': nve_md_sampling_jobs, + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index a9e091f3..be48e206 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -1357,7 +1357,7 @@ def nve_action(*jobs): ) if communicator.rank == 0: - print(f'completed {action_name} {job}') + print(f'completed {action_name}: {job}') nve_md_sampling_jobs.append(action_name) diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index cbabbb11..46f9622b 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -1422,7 +1422,7 @@ def nve_action(*jobs): ) if communicator.rank == 0: - print(f'completed {action_name} {job}') + print(f'completed {action_name}: {job}') 
nve_md_sampling_jobs.append(action_name) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index c0d5d0f1..31ab30f5 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -4,7 +4,7 @@ """Project workflow entry point.""" # Define subproject flow operations -# import alj_2d +import alj_2d import config # import hard_disk @@ -18,7 +18,7 @@ from workflow_class import ValidationWorkflow all_subprojects = [ - # 'alj_2d', + alj_2d, lj_fluid, lj_union, # 'hard_disk', From ecf4496002e66215ab6adb499581cfbaa4ddba60 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 19 Aug 2024 12:21:40 -0400 Subject: [PATCH 12/34] Port hard_disk to row. --- hoomd_validation/hard_disk.py | 279 ++++++++++++++++++---------------- hoomd_validation/project.py | 4 +- 2 files changed, 152 insertions(+), 131 deletions(-) diff --git a/hoomd_validation/hard_disk.py b/hoomd_validation/hard_disk.py index 47e977e5..508b3d49 100644 --- a/hoomd_validation/hard_disk.py +++ b/hoomd_validation/hard_disk.py @@ -3,14 +3,20 @@ """Hard disk equation of state validation test.""" +import itertools import json import os -import pathlib +import hoomd +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -24,7 +30,9 @@ LOG_PERIOD = {'trajectory': 50_000, 'quantities': 100} NUM_CPU_RANKS = min(64, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -47,51 +55,46 @@ def job_statepoints(): ) -def is_hard_disk(job): - """Test if a given job is part of the hard_disk subproject.""" - return job.cached_statepoint['subproject'] == 'hard_disk' - - -partition_jobs_cpu_serial = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission']), - sort_by='density', - select=is_hard_disk, -) - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_hard_disk, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_hard_disk, -) - - -@Project.post.isfile('hard_disk_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi, -) -def hard_disk_create_initial_state(*jobs): +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_serial = _resources | {'processes': {'per_directory': 1}} +_group_serial = _group | { + 'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission']) +} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | 
{'maximum_size': CONFIG['max_gpus_submission']}
+_group_compare = _group | {
+    'sort_by': ['/density', '/num_particles'],
+    'split_by_sort_key': True,
+    'submit_whole': True,
+}
+
+
+def create_initial_state(*jobs):
     """Create initial system configuration."""
-    import itertools
-
-    import hoomd
-    import numpy
-
     communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS)
     job = jobs[communicator.partition]
 
+    if job.isfile('initial_state.gsd'):
+        return
+
     if communicator.rank == 0:
-        print('starting hard_disk_create_initial_state:', job)
+        print(f'starting {__name__}.create_initial_state:', job)
 
     num_particles = job.cached_statepoint['num_particles']
     density = job.cached_statepoint['density']
@@ -136,11 +139,26 @@ def hard_disk_create_initial_state(*jobs):
     device.notice('Done.')
 
     hoomd.write.GSD.write(
-        state=sim.state, filename=job.fn('hard_disk_initial_state.gsd'), mode='wb'
+        state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb'
     )
 
     if communicator.rank == 0:
-        print(f'completed hard_disk_create_initial_state: {job}')
+        print(f'completed {__name__}.create_initial_state: {job}')
+
+
+ValidationWorkflow.add_action(
+    f'{__name__}.create_initial_state',
+    Action(
+        method=create_initial_state,
+        configuration={
+            'products': ['initial_state.gsd'],
+            'launchers': ['mpi'],
+            'group': _group_cpu,
+            'resources': _resources_cpu
+            | {'walltime': {'per_submission': CONFIG['short_walltime']}},
+        },
+    ),
+)
 
 
 def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None):
@@ -160,9 +178,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non
         files. Each tuple is a pair of the instance and the loggable quantity name. 
""" - import hoomd - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -221,17 +236,19 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - import hoomd - sim_mode = 'nvt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('hard_disk_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) @@ -278,7 +295,7 @@ def run_nvt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -287,18 +304,20 @@ def run_nvt_sim(job, device, complete_filename): ) -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - # device sim_mode = 'npt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('hard_disk_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates @@ -387,7 +406,7 @@ def run_npt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -398,16 +417,17 @@ def run_npt_sim(job, device, complete_filename): def run_nec_sim(job, device, complete_filename): """Run MC sim in NVT with NEC.""" - import hoomd - from custom_actions import ComputeDensity - sim_mode = 'nec' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('hard_disk_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False mc = hoomd.hpmc.nec.integrate.Sphere( @@ -518,7 +538,7 @@ def run_nec_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -532,20 +552,20 @@ def run_nec_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'nec', 'device_name': 'cpu', - 'ranks_per_partition': 1, - 'aggregator': 
partition_jobs_cpu_serial, + 'resources': _resources_serial, + 'group': _group_serial, }, ] @@ -555,42 +575,26 @@ def run_nec_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, + 'resources': _resources_gpu, + 'group': _group_gpu, }, ] ) -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = directives['nranks'] + action_name = f'{__name__}.{mode}_{device_name}' - @Project.pre.after(hard_disk_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'hard_disk_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting hard_disk_{mode}_{device_name}:', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -604,33 +608,37 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: - print(f'completed hard_disk_{mode}_{device_name}: {job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_hard_disk) -@Project.pre.after(*sampling_jobs) -@Project.post.true('hard_disk_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def hard_disk_analyze(job): +def analyze(job): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') print('starting hard_disk_analyze:', job) @@ -703,26 +711,26 @@ def hard_disk_analyze(job): ) fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') - job.document['hard_disk_analysis_complete'] = True - -@Project.pre(lambda *jobs: util.true_all(*jobs, key='hard_disk_analysis_complete')) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='hard_disk_compare_modes_complete') -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=['density', 'num_particles'], sort_by='replicate_idx', select=is_hard_disk +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], 
+ 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def hard_disk_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy + +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') print('starting hard_disk_compare_modes:', jobs[0]) @@ -770,7 +778,7 @@ def hard_disk_compare_modes(*jobs): avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - avg_quantity, stderr_quantity = util.plot_vs_expected( + util.plot_vs_expected( ax=ax, values=quantities, ylabel=labels[quantity_name], @@ -782,5 +790,18 @@ def hard_disk_compare_modes(*jobs): filename = f'hard_disk_compare_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['hard_disk_compare_modes_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 31ab30f5..656d8e3e 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -6,8 +6,8 @@ # Define subproject flow operations import alj_2d import config +import hard_disk -# import hard_disk # import hard_sphere import lj_fluid import lj_union @@ -21,7 +21,7 @@ alj_2d, lj_fluid, lj_union, - # 'hard_disk', + hard_disk, # 'hard_sphere', # 'simple_polygon', # 'patchy_particle_pressure', From d582897c980bb1e07dfd3bb4717297e13686f19f Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 19 Aug 2024 13:57:49 -0400 Subject: [PATCH 13/34] Fix hard_disk syntax errors. 
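The previous commit registered analyze with row but kept flow's single-job
signature. Row invokes an action once with every matching directory in the
submitted group, so analyze now accepts *jobs and loops over them;
run_nec_sim also drops the complete_filename parameter left over from flow.
Schematically (a minimal hypothetical example, not the full action bodies):

    # flow-style operation: called once per job
    def analyze(job):
        ...

    # row-style action: called once with all jobs in the group
    def analyze(*jobs):
        for job in jobs:
            ...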
--- hoomd_validation/hard_disk.py | 119 +++++++++++++++++----------------- 1 file changed, 60 insertions(+), 59 deletions(-) diff --git a/hoomd_validation/hard_disk.py b/hoomd_validation/hard_disk.py index 508b3d49..0b658d08 100644 --- a/hoomd_validation/hard_disk.py +++ b/hoomd_validation/hard_disk.py @@ -415,7 +415,7 @@ def run_npt_sim(job, device): ) -def run_nec_sim(job, device, complete_filename): +def run_nec_sim(job, device): """Run MC sim in NVT with NEC.""" sim_mode = 'nec' @@ -637,79 +637,80 @@ def sampling_operation(*jobs): add_sampling_job(**definition) -def analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting hard_disk_analyze:', job) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - sim_modes = [ - 'nvt_cpu', - 'nec_cpu', - 'npt_cpu', - ] + sim_modes = [ + 'nvt_cpu', + 'nec_cpu', + 'npt_cpu', + ] - if os.path.exists(job.fn('nvt_gpu_quantities.h5')): - sim_modes.extend(['nvt_gpu']) + if os.path.exists(job.fn('nvt_gpu_quantities.h5')): + sim_modes.extend(['nvt_gpu']) - util._sort_sim_modes(sim_modes) + util._sort_sim_modes(sim_modes) - timesteps = {} - pressures = {} - densities = {} + timesteps = {} + pressures = {} + densities = {} - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - if 'nec' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + if 'nec' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + ] + else: + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] + + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), + # Plot results + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, ) + ax.legend() - # Plot results - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 1, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) + ax = fig.add_subplot(2, 1, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) - fig.suptitle( - 
       f'$\\rho={job.cached_statepoint["density"]}$, '
-        f'$N={job.cached_statepoint["num_particles"]}$, '
-        f'replicate={job.cached_statepoint["replicate_idx"]}'
-    )
-    fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight')
+        fig.suptitle(
+            f'$\\rho={job.cached_statepoint["density"]}$, '
+            f'$N={job.cached_statepoint["num_particles"]}$, '
+            f'replicate={job.cached_statepoint["replicate_idx"]}'
+        )
+        fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight')
 
 
 ValidationWorkflow.add_action(
@@ -733,7 +734,7 @@ def compare_modes(*jobs):
     """Compares the tested simulation modes."""
     matplotlib.style.use('fivethirtyeight')
 
-    print('starting hard_disk_compare_modes:', jobs[0])
+    print(f'starting {__name__}.compare_modes:', jobs[0])
 
     sim_modes = [
         'nvt_cpu',

From 78625042b531b64b22e2e9c781947b616972cc53 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Mon, 19 Aug 2024 15:04:27 -0400
Subject: [PATCH 14/34] Convert hard_sphere to row.

---
 hoomd_validation/hard_sphere.py | 393 +++++++++++++++++---------------
 hoomd_validation/project.py     |   4 +-
 2 files changed, 212 insertions(+), 185 deletions(-)

diff --git a/hoomd_validation/hard_sphere.py b/hoomd_validation/hard_sphere.py
index 4428641f..c4050236 100644
--- a/hoomd_validation/hard_sphere.py
+++ b/hoomd_validation/hard_sphere.py
@@ -8,8 +8,19 @@
 import util
 from config import CONFIG
-from flow import aggregator
-from project_class import Project
+import itertools
+
+import hoomd
+import numpy
+from workflow import Action
+from workflow_class import ValidationWorkflow
+from custom_actions import ComputeDensity
+import matplotlib
+import matplotlib.figure
+import matplotlib.style
+
+
 # Run parameters shared between simulations.
 # Step counts must be even and a multiple of the log quantity period. 
@@ -42,51 +53,46 @@ def job_statepoints(): ) -def is_hard_sphere(job): - """Test if a given job is part of the hard_sphere subproject.""" - return job.cached_statepoint['subproject'] == 'hard_sphere' - - -partition_jobs_cpu_serial = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission']), - sort_by='density', - select=is_hard_sphere, -) - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_hard_sphere, -) - -partition_jobs_gpu = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_gpus_submission']), - sort_by='density', - select=is_hard_sphere, -) - - -@Project.post.isfile('hard_sphere_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi, -) -def hard_sphere_create_initial_state(*jobs): +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_serial = _resources | {'processes': {'per_directory': 1}} +_group_serial = _group | { + 'maximum_size': min(CONFIG['replicates'], CONFIG['max_cores_submission']) +} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} +_group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} +_group_compare = _group | { + 'sort_by': ['/kT', '/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +_group_compare = _group | { + 'sort_by': ['/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + + +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting hard_sphere_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -131,12 +137,26 @@ def hard_sphere_create_initial_state(*jobs): device.notice('Done.') hoomd.write.GSD.write( - state=sim.state, filename=job.fn('hard_sphere_initial_state.gsd'), mode='wb' + state=sim.state, filename=job.fn('initial_state.gsd'), mode='wb' ) if communicator.rank == 0: - print(f'completed hard_sphere_create_initial_state: {job}') - + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): """Make a hard sphere MC Simulation. @@ -155,9 +175,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. 
Each tuple is a pair of the instance and the loggable quantity name. """ - import hoomd - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -216,11 +233,14 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - initial_state = job.fn('hard_sphere_initial_state.gsd') sim_mode = 'nvt' + if util.is_simulation_complete(job, device, sim_mode): + return + + initial_state = job.fn('initial_state.gsd') sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) # equilibrate @@ -241,17 +261,18 @@ def run_nvt_sim(job, device, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - # device - initial_state = job.fn('hard_sphere_initial_state.gsd') sim_mode = 'npt' + if util.is_simulation_complete(job, device, sim_mode): + return + + initial_state = job.fn('initial_state.gsd') # box updates boxmc = hoomd.hpmc.update.BoxMC( betaP=job.cached_statepoint['pressure'], trigger=hoomd.trigger.Periodic(1) @@ -301,17 +322,18 @@ def run_npt_sim(job, device, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) -def run_nec_sim(job, device, complete_filename): +def run_nec_sim(job, device): """Run MC sim in NVT with NEC.""" - import hoomd - from custom_actions import ComputeDensity - - initial_state = job.fn('hard_sphere_initial_state.gsd') sim_mode = 'nec' + if util.is_simulation_complete(job, device, sim_mode): + return + + initial_state = job.fn('initial_state.gsd') + mc = hoomd.hpmc.nec.integrate.Sphere( default_d=0.05, update_fraction=0.01, nselect=1 ) @@ -389,7 +411,7 @@ def run_nec_sim(job, device, complete_filename): sim.run(RUN_STEPS) device.notice('Done.') - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) sampling_jobs = [] @@ -397,65 +419,50 @@ def run_nec_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'nec', 'device_name': 'cpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_cpu_serial, + 'resources': _resources_serial, + 'group': _group_serial, }, ] + if CONFIG['enable_gpu']: job_definitions.extend( [ { 'mode': 'nvt', 'device_name': 'gpu', - 'ranks_per_partition': 1, - 'aggregator': partition_jobs_gpu, + 'resources': _resources_gpu, + 'group': _group_gpu, }, ] ) -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, group, resources): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - if device_name == 'gpu': - directives['ngpu'] = directives['nranks'] + action_name = f'{__name__}.{mode}_{device_name}' - 
@Project.pre.after(hard_sphere_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'hard_sphere_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if communicator.rank == 0: - print(f'starting hard_sphere_{mode}_{device_name}', job) + print(f'starting {action_name}:', job) if device_name == 'gpu': device_cls = hoomd.device.GPU @@ -470,125 +477,132 @@ def sampling_operation(*jobs): ) globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' + job, device ) if communicator.rank == 0: - print(f'completed hard_sphere_{mode}_{device_name}: {job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_hard_sphere) -@Project.pre.after(*sampling_jobs) -@Project.post.true('hard_sphere_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def hard_sphere_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting hard_sphere_analyze:', job) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - sim_modes = [ - 'nvt_cpu', - 'nec_cpu', - 'npt_cpu', - ] + sim_modes = [ + 'nvt_cpu', + 'nec_cpu', + 'npt_cpu', + ] - if os.path.exists(job.fn('nvt_gpu_quantities.h5')): - sim_modes.extend(['nvt_gpu']) + if os.path.exists(job.fn('nvt_gpu_quantities.h5')): + sim_modes.extend(['nvt_gpu']) - util._sort_sim_modes(sim_modes) + util._sort_sim_modes(sim_modes) + + timesteps = {} + pressures = {} + densities = {} - timesteps = {} - pressures = {} - densities = {} + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + if 'nec' in sim_mode: + pressures[sim_mode] = log_traj[ + 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + ] + else: + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - if 'nec' in sim_mode: - pressures[sim_mode] = log_traj[ - 'hoomd-data/hpmc/nec/integrate/Sphere/virial_pressure' + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' ] - else: - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + # save averages + for mode in sim_modes: + 
job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, ) + ax.legend() - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 1, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') - - job.document['hard_sphere_analysis_complete'] = True - + ax = fig.add_subplot(2, 1, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='hard_sphere_analysis_complete')) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='hard_sphere_compare_modes_complete') -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=['density', 'num_particles'], sort_by='replicate_idx', select=is_hard_sphere + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def hard_sphere_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy + +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting hard_sphere_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [ 'nvt_cpu', @@ -633,7 +647,7 @@ def hard_sphere_compare_modes(*jobs): avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - avg_quantity, stderr_quantity = util.plot_vs_expected( + util.plot_vs_expected( ax=ax, values=quantities, ylabel=labels[quantity_name], @@ -645,5 +659,18 @@ def hard_sphere_compare_modes(*jobs): filename = f'hard_sphere_compare_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['hard_sphere_compare_modes_complete'] = True + 
+ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 656d8e3e..73f75403 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -8,7 +8,7 @@ import config import hard_disk -# import hard_sphere +import hard_sphere import lj_fluid import lj_union import signac @@ -22,7 +22,7 @@ lj_fluid, lj_union, hard_disk, - # 'hard_sphere', + hard_sphere, # 'simple_polygon', # 'patchy_particle_pressure', ] From db7872215d944e19764680c91ff1e2adb8844d34 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 19 Aug 2024 15:05:44 -0400 Subject: [PATCH 15/34] Remove complete_filename from lj_union --- hoomd_validation/lj_union.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index 46f9622b..d876d3e2 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -585,7 +585,7 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_mc_sim(job, device, complete_filename): +def run_nvt_mc_sim(job, device): """Run MC sim in NVT.""" # simulation @@ -669,7 +669,7 @@ def run_nvt_mc_sim(job, device, complete_filename): ) -def run_npt_mc_sim(job, device, complete_filename): +def run_npt_mc_sim(job, device): """Run MC sim in NPT.""" sim_mode = 'npt_mc' @@ -826,7 +826,7 @@ def sampling_operation(*jobs): ) globals().get(f'run_{mode}_mc_sim')( - job, device, complete_filename=f'{mode}_mc_{device_name}_complete' + job, device ) if communicator.rank == 0: @@ -1321,7 +1321,7 @@ def distribution_analyze(*jobs): ################################# -def run_nve_md_sim(job, device, run_length, complete_filename): +def run_nve_md_sim(job, device, run_length): """Run the MD simulation in NVE.""" sim_mode = 'nve_md' @@ -1417,8 +1417,7 @@ def nve_action(*jobs): run_nve_md_sim( job, device, - run_length=run_length, - complete_filename=f'{sim_mode}_{device_name}_complete', + run_length=run_length ) if communicator.rank == 0: From e3ca6df5b688f855f197a2b8ec68390c37444a76 Mon Sep 17 00:00:00 2001 From: "Joshua A. 
Anderson" Date: Mon, 19 Aug 2024 19:57:13 -0400 Subject: [PATCH 16/34] Port simple_polygon --- hoomd_validation/project.py | 4 +- hoomd_validation/simple_polygon.py | 348 +++++++++++++++-------------- 2 files changed, 186 insertions(+), 166 deletions(-) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 73f75403..d5e3c49b 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -14,7 +14,7 @@ import signac # import patchy_particle_pressure -# import simple_polygon +import simple_polygon from workflow_class import ValidationWorkflow all_subprojects = [ @@ -23,7 +23,7 @@ lj_union, hard_disk, hard_sphere, - # 'simple_polygon', + simple_polygon, # 'patchy_particle_pressure', ] diff --git a/hoomd_validation/simple_polygon.py b/hoomd_validation/simple_polygon.py index 3ee45e46..80a43ab4 100644 --- a/hoomd_validation/simple_polygon.py +++ b/hoomd_validation/simple_polygon.py @@ -9,14 +9,25 @@ import util from config import CONFIG -from flow import aggregator -from project_class import Project +import itertools + +import hoomd +import numpy +from workflow import Action +from workflow_class import ValidationWorkflow +import hoomd +import numpy +from custom_actions import ComputeDensity +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. RANDOMIZE_STEPS = 20_000 EQUILIBRATE_STEPS = 100_000 -RUN_STEPS = 500_000 +RUN_STEPS = 100_000 RESTART_STEPS = RUN_STEPS // 10 TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS SHAPE_VERTICES = [ @@ -34,7 +45,9 @@ LOG_PERIOD = {'trajectory': 50_000, 'quantities': 100} NUM_CPU_RANKS = min(8, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -58,45 +71,34 @@ def job_statepoints(): ) -def is_simple_polygon(job): - """Test if a given job is part of the simple_polygon subproject.""" - return job.cached_statepoint['subproject'] == 'simple_polygon' - -partition_jobs_cpu_serial = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission']), - sort_by='density', - select=is_simple_polygon, -) - -partition_jobs_cpu_mpi = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_simple_polygon, -) - - -@Project.post.isfile('simple_polygon_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi, -) -def simple_polygon_create_initial_state(*jobs): +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS + ) +} +_group_compare = _group | { + 'sort_by': ['/density', '/num_particles'], + 'split_by_sort_key': True, + 'submit_whole': True, +} + +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = 
jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting simple_polygon_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -145,13 +147,28 @@ def simple_polygon_create_initial_state(*jobs): hoomd.write.GSD.write( state=sim.state, - filename=job.fn('simple_polygon_initial_state.gsd'), + filename=job.fn('initial_state.gsd'), mode='wb', logger=trajectory_logger, ) if communicator.rank == 0: - print(f'completed simple_polygon_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): @@ -171,10 +188,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. Each tuple is a pair of the instance and the loggable quantity name. """ - import hoomd - import numpy - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -238,17 +251,19 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" - import hoomd - sim_mode = 'nvt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('simple_polygon_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) @@ -309,7 +324,7 @@ def run_nvt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -318,18 +333,19 @@ def run_nvt_sim(job, device, complete_filename): ) -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" - import hoomd - - # device sim_mode = 'npt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('simple_polygon_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates @@ -427,7 +443,7 @@ def run_npt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice( @@ -441,39 +457,26 @@ def run_npt_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': 
NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi, + 'resources': _resources_cpu, + 'group': _group_cpu, }, ] -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - @Project.pre.after(simple_polygon_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'simple_polygon_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) + action_name = f'{__name__}.{mode}_{device_name}' + def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] @@ -488,120 +491,124 @@ def sampling_operation(*jobs): ) globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' + job, device ) if communicator.rank == 0: - print(f'completed simple_polygon_{mode}_{device_name} {job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_simple_polygon) -@Project.pre.after(*sampling_jobs) -@Project.post.true('simple_polygon_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def simple_polygon_analyze(job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy - matplotlib.style.use('fivethirtyeight') - print('starting simple_polygon_analyze:', job) - - sim_modes = [] - for _ensemble in ['nvt', 'npt']: - if job.isfile(f'{_ensemble}_cpu_quantities.h5'): - sim_modes.append(f'{_ensemble}_cpu') - - util._sort_sim_modes(sim_modes) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - timesteps = {} - pressures = {} - densities = {} + sim_modes = [] + for _ensemble in ['nvt', 'npt']: + if job.isfile(f'{_ensemble}_cpu_quantities.h5'): + sim_modes.append(f'{_ensemble}_cpu') - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + util._sort_sim_modes(sim_modes) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + timesteps = {} + pressures = {} + densities = {} - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + timesteps[sim_mode] = 
log_traj['hoomd-data/Simulation/timestep'] - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), - ) + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - # Plot results - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 1, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax = fig.add_subplot(2, 1, 2) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' + ] - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - job.document['simple_polygon_analysis_complete'] = True + # Plot results + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 1, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() + ax = fig.add_subplot(2, 1, 2) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) -@Project.pre(lambda *jobs: util.true_all(*jobs, key='simple_polygon_analysis_complete')) -@Project.post( - lambda *jobs: util.true_all(*jobs, key='simple_polygon_compare_modes_complete') -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=['density', 'num_particles'], - sort_by='replicate_idx', - select=is_simple_polygon, + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight') + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def simple_polygon_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting simple_polygon_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [] for _ensemble in ['nvt', 'npt']: @@ -642,7 +649,7 @@ def simple_polygon_compare_modes(*jobs): avg_value = {mode: numpy.mean(quantities[mode]) for mode in sim_modes} reference = numpy.mean([avg_value[mode] for mode in sim_modes]) - avg_quantity, stderr_quantity = util.plot_vs_expected( + util.plot_vs_expected( ax=ax, values=quantities, 
ylabel=labels[quantity_name], @@ -654,5 +661,18 @@ def simple_polygon_compare_modes(*jobs): filename = f'simple_polygon_compare_density{round(set_density, 2)}.svg' fig.savefig(os.path.join(jobs[0]._project.path, filename), bbox_inches='tight') - for job in jobs: - job.document['simple_polygon_compare_modes_complete'] = True + +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) From b4d3ed07f4bd2fb834134a323d77f93714a93e64 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Mon, 19 Aug 2024 19:57:51 -0400 Subject: [PATCH 17/34] Implement patchy_particle_pressure. --- hoomd_validation/hard_sphere.py | 29 +- hoomd_validation/lj_union.py | 10 +- hoomd_validation/patchy_particle_pressure.py | 411 ++++++++++--------- hoomd_validation/project.py | 6 +- hoomd_validation/simple_polygon.py | 28 +- 5 files changed, 230 insertions(+), 254 deletions(-) diff --git a/hoomd_validation/hard_sphere.py b/hoomd_validation/hard_sphere.py index c4050236..b7030cf2 100644 --- a/hoomd_validation/hard_sphere.py +++ b/hoomd_validation/hard_sphere.py @@ -3,24 +3,19 @@ """Hard sphere equation of state validation test.""" -import os -import pathlib - -import util -from config import CONFIG import itertools +import os import hoomd -import numpy -from workflow import Action -from workflow_class import ValidationWorkflow -from custom_actions import ComputeDensity import matplotlib import matplotlib.figure import matplotlib.style import numpy - - +import util +from config import CONFIG +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -70,19 +65,12 @@ def job_statepoints(): } _resources_gpu = _resources | {'processes': {'per_directory': 1}, 'gpus_per_process': 1} _group_gpu = _group | {'maximum_size': CONFIG['max_gpus_submission']} -_group_compare = _group | { - 'sort_by': ['/kT', '/density', '/num_particles'], - 'split_by_sort_key': True, - 'submit_whole': True, -} - _group_compare = _group | { 'sort_by': ['/density', '/num_particles'], 'split_by_sort_key': True, 'submit_whole': True, } - def create_initial_state(*jobs): """Create initial system configuration.""" communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) @@ -158,6 +146,7 @@ def create_initial_state(*jobs): ), ) + def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): """Make a hard sphere MC Simulation. 
@@ -476,9 +465,7 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: print(f'completed {action_name}: {job}') diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index d876d3e2..a60fbad6 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -825,9 +825,7 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_mc_sim')( - job, device - ) + globals().get(f'run_{mode}_mc_sim')(job, device) if communicator.rank == 0: print(f'completed {action_name}: {job}') @@ -1414,11 +1412,7 @@ def nve_action(*jobs): job, f'{sim_mode}_{device_name}.log' ), ) - run_nve_md_sim( - job, - device, - run_length=run_length - ) + run_nve_md_sim(job, device, run_length=run_length) if communicator.rank == 0: print(f'completed {action_name}: {job}') diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py index 07c65eb5..b452796c 100644 --- a/hoomd_validation/patchy_particle_pressure.py +++ b/hoomd_validation/patchy_particle_pressure.py @@ -7,16 +7,29 @@ import os import pathlib +import hoomd +import itertools + +import numpy import util from config import CONFIG -from flow import aggregator -from project_class import Project +from workflow import Action +from workflow_class import ValidationWorkflow +from custom_actions import ComputeDensity +import matplotlib +import matplotlib.figure +import matplotlib.style +import numpy + # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. RANDOMIZE_STEPS = 20_000 -EQUILIBRATE_STEPS = 200_000 -RUN_STEPS = 500_000 +EQUILIBRATE_STEPS = 50_000 +RUN_STEPS = 100_00 +# TODO: restore +# EQUILIBRATE_STEPS = 200_000 +# RUN_STEPS = 500_000 RESTART_STEPS = RUN_STEPS // 10 TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS @@ -24,7 +37,9 @@ LOG_PERIOD = {'trajectory': 50_000, 'quantities': 500} NUM_CPU_RANKS = min(16, CONFIG['max_cores_sim']) -WALLTIME_STOP_SECONDS = CONFIG['max_walltime'] * 3600 - 10 * 60 +WALLTIME_STOP_SECONDS = ( + int(os.environ.get('ACTION_WALLTIME_IN_MINUTES', 10)) - 10 +) * 60 def job_statepoints(): @@ -60,31 +75,24 @@ def job_statepoints(): ) -def is_patchy_particle_pressure(job): - """Test if a job is part of the patchy_particle_pressure subproject.""" - return job.cached_statepoint['subproject'] == 'patchy_particle_pressure' - - -def is_patchy_particle_pressure_positive_pressure(job): - """Test if a job is part of the patchy_particle_pressure subproject.""" - return ( - job.cached_statepoint['subproject'] == 'patchy_particle_pressure' - and job.cached_statepoint['pressure'] > 0 +_group = { + 'sort_by': ['/density'], + 'include': [{'condition': ['/subproject', '==', __name__]}], +} +_resources = {'walltime': {'per_submission': CONFIG['max_walltime']}} +_resources_cpu = _resources | {'processes': {'per_directory': NUM_CPU_RANKS}} +_group_cpu = _group | { + 'maximum_size': min( + CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS ) +} +_group_cpu_postive_pressure = _group_cpu | {'include': [{'all': [['/subproject', '==', __name__], ['/pressure', '>', 0]]}]} - -partition_jobs_cpu_mpi_nvt = aggregator.groupsof( - num=min(CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_patchy_particle_pressure, -) - -partition_jobs_cpu_mpi_npt = aggregator.groupsof( - num=min(CONFIG['replicates'], 
CONFIG['max_cores_submission'] // NUM_CPU_RANKS), - sort_by='density', - select=is_patchy_particle_pressure_positive_pressure, -) - +_group_compare = _group | { + 'sort_by': ['/pressure', '/density', '/temperature', '/chi', '/num_particles', '/long_range_interaction_scale_factor'], + 'split_by_sort_key': True, + 'submit_whole': True, +} def make_potential( delta_rad, sq_well_lambda, sigma, kT, long_range_interaction_scale_factor @@ -108,8 +116,6 @@ def make_potential( The terminology (e.g., `ehat`) comes from the "Modelling Patchy Particles" HOOMD-blue tutorial. """ - import hoomd - r = [ (sigma + sq_well_lambda * sigma) / 2.0, sq_well_lambda * sigma, @@ -126,27 +132,16 @@ def make_potential( return angular_step -@Project.post.isfile('patchy_particle_pressure_initial_state.gsd') -@Project.operation( - directives=dict( - executable=CONFIG['executable'], - nranks=util.total_ranks_function(NUM_CPU_RANKS), - walltime=1, - ), - aggregator=partition_jobs_cpu_mpi_nvt, -) -def patchy_particle_pressure_create_initial_state(*jobs): +def create_initial_state(*jobs): """Create initial system configuration.""" - import itertools - - import hoomd - import numpy - communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) job = jobs[communicator.partition] + if job.isfile('initial_state.gsd'): + return + if communicator.rank == 0: - print('starting patchy_particle_pressure_create_initial_state:', job) + print(f'starting {__name__}.create_initial_state:', job) num_particles = job.cached_statepoint['num_particles'] density = job.cached_statepoint['density'] @@ -212,13 +207,28 @@ def patchy_particle_pressure_create_initial_state(*jobs): hoomd.write.GSD.write( state=sim.state, - filename=job.fn('patchy_particle_pressure_initial_state.gsd'), + filename=job.fn('initial_state.gsd'), mode='wb', logger=trajectory_logger, ) if communicator.rank == 0: - print(f'completed patchy_particle_pressure_create_initial_state: {job}') + print(f'completed {__name__}.create_initial_state: {job}') + + +ValidationWorkflow.add_action( + f'{__name__}.create_initial_state', + Action( + method=create_initial_state, + configuration={ + 'products': ['initial_state.gsd'], + 'launchers': ['mpi'], + 'group': _group_cpu, + 'resources': _resources_cpu + | {'walltime': {'per_submission': CONFIG['short_walltime']}}, + }, + ), +) def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=None): @@ -238,10 +248,6 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non files. Each tuple is a pair of the instance and the loggable quantity name. 
""" - import hoomd - import numpy - from custom_actions import ComputeDensity - if extra_loggables is None: extra_loggables = [] @@ -324,15 +330,17 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non def run_nvt_sim(job, device, complete_filename): """Run MC sim in NVT.""" - import hoomd - sim_mode = 'nvt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('patchy_particle_pressure_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False sim = make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=[]) @@ -393,7 +401,7 @@ def run_nvt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice(f'Ending {job} run early due to walltime limits.') @@ -401,16 +409,18 @@ def run_nvt_sim(job, device, complete_filename): def run_npt_sim(job, device, complete_filename): """Run MC sim in NPT.""" - import hoomd - # device sim_mode = 'npt' + + if util.is_simulation_complete(job, device, sim_mode): + return + restart_filename = util.get_job_filename(sim_mode, device, 'restart', 'gsd') if job.isfile(restart_filename): initial_state = job.fn(restart_filename) restart = True else: - initial_state = job.fn('patchy_particle_pressure_initial_state.gsd') + initial_state = job.fn('initial_state.gsd') restart = False # box updates @@ -508,7 +518,7 @@ def run_npt_sim(job, device, complete_filename): hoomd.write.GSD.write(state=sim.state, filename=job.fn(restart_filename), mode='wb') if sim.timestep == TOTAL_STEPS: - pathlib.Path(job.fn(complete_filename)).touch() + util.mark_simulation_complete(job, device, sim_mode) device.notice('Done.') else: device.notice(f'Ending {job} run early due to walltime limits.') @@ -519,44 +529,31 @@ def run_npt_sim(job, device, complete_filename): { 'mode': 'nvt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_nvt, + 'resources': _resources_cpu, + 'group': _group_cpu, }, { 'mode': 'npt', 'device_name': 'cpu', - 'ranks_per_partition': NUM_CPU_RANKS, - 'aggregator': partition_jobs_cpu_mpi_npt, + 'resources': _resources_cpu, + 'group': _group_cpu_postive_pressure, }, ] -def add_sampling_job(mode, device_name, ranks_per_partition, aggregator): +def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" - directives = dict( - walltime=CONFIG['max_walltime'], - executable=CONFIG['executable'], - nranks=util.total_ranks_function(ranks_per_partition), - ) - - @Project.pre.after(patchy_particle_pressure_create_initial_state) - @Project.post.isfile(f'{mode}_{device_name}_complete') - @Project.operation( - name=f'patchy_particle_pressure_{mode}_{device_name}', - directives=directives, - aggregator=aggregator, - ) + action_name = f'{__name__}.{mode}_{device_name}' + def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" - import hoomd - communicator = hoomd.communicator.Communicator( - ranks_per_partition=ranks_per_partition + ranks_per_partition=int(os.environ['ACTION_PROCESSES_PER_DIRECTORY']) ) job = jobs[communicator.partition] if 
communicator.rank == 0: - print(f'starting patchy_particle_pressure_{mode}_{device_name}:', job) + print(f'starting {action_name}:', job) device = hoomd.device.CPU( communicator=communicator, @@ -566,154 +563,148 @@ def sampling_operation(*jobs): ) globals().get(f'run_{mode}_sim')( - job, device, complete_filename=f'{mode}_{device_name}_complete' + job, device ) if communicator.rank == 0: - print(f'completed patchy_particle_pressure_{mode}_{device_name} ' f'{job}') - - sampling_jobs.append(sampling_operation) + print(f'completed {action_name}: {job}') + + sampling_jobs.append(action_name) + + ValidationWorkflow.add_action( + action_name, + Action( + method=sampling_operation, + configuration={ + 'products': [ + util.get_job_filename(mode, device_name, 'trajectory', 'gsd'), + util.get_job_filename(mode, device_name, 'quantities', 'h5'), + ], + 'launchers': ['mpi'], + 'group': group, + 'resources': resources, + 'previous_actions': [f'{__name__}.create_initial_state'], + }, + ), + ) for definition in job_definitions: add_sampling_job(**definition) -@Project.pre(is_patchy_particle_pressure) -@Project.pre.after(*sampling_jobs) -@Project.post.true('patchy_particle_pressure_analysis_complete') -@Project.operation( - directives=dict(walltime=CONFIG['short_walltime'], executable=CONFIG['executable']) -) -def patchy_particle_pressure_analyze(job): - """Analyze the output of all simulation modes.""" - import matplotlib - import matplotlib.figure - import matplotlib.style - import numpy +def analyze(*job): + """Analyze the output of all simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting patchy_particle_pressure_analyze:', job) - - sim_modes = [] - for _ensemble in ['nvt', 'npt']: - if job.isfile(f'{_ensemble}_cpu_quantities.h5'): - sim_modes.append(f'{_ensemble}_cpu') - - util._sort_sim_modes(sim_modes) + for job in jobs: + print(f'starting {__name__}.analyze:', job) - timesteps = {} - pressures = {} - densities = {} + sim_modes = [] + for _ensemble in ['nvt', 'npt']: + if job.isfile(f'{_ensemble}_cpu_quantities.h5'): + sim_modes.append(f'{_ensemble}_cpu') - for sim_mode in sim_modes: - log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) + util._sort_sim_modes(sim_modes) - timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] + timesteps = {} + pressures = {} + densities = {} - pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] + for sim_mode in sim_modes: + log_traj = util.read_log(job.fn(sim_mode + '_quantities.h5')) - densities[sim_mode] = log_traj[ - 'hoomd-data/custom_actions/ComputeDensity/density' - ] + timesteps[sim_mode] = log_traj['hoomd-data/Simulation/timestep'] - # save averages - for mode in sim_modes: - job.document[mode] = dict( - pressure=float(numpy.mean(pressures[mode])), - density=float(numpy.mean(densities[mode])), - ) + pressures[sim_mode] = log_traj['hoomd-data/hpmc/compute/SDF/betaP'] - # Plot results - fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') - ax = fig.add_subplot(2, 2, 1) - util.plot_timeseries( - ax=ax, - timesteps=timesteps, - data=densities, - ylabel=r'$\rho$', - expected=job.cached_statepoint['density'], - max_points=500, - ) - ax.legend() - - ax_distribution = fig.add_subplot(2, 2, 2, sharey=ax) - util.plot_distribution( - ax_distribution, - {k: v for k, v in densities.items() if not k.startswith('nvt')}, - r'', - expected=job.cached_statepoint['density'], - bins=50, - plot_rotated=True, - ) - - ax = fig.add_subplot(2, 2, 3) - util.plot_timeseries( - ax=ax, - 
timesteps=timesteps, - data=pressures, - ylabel=r'$\beta P$', - expected=job.cached_statepoint['pressure'], - max_points=500, - ) - ax_distribution = fig.add_subplot(2, 2, 4, sharey=ax) - util.plot_distribution( - ax_distribution, - pressures, - r'', - expected=job.cached_statepoint['pressure'], - bins=50, - plot_rotated=True, - ) + densities[sim_mode] = log_traj[ + 'hoomd-data/custom_actions/ComputeDensity/density' + ] - fig.suptitle( - f'$\\rho={job.cached_statepoint["density"]}$, ' - f'$N={job.cached_statepoint["num_particles"]}$, ' - f'T={job.cached_statepoint["temperature"]}, ' - f'$\\chi={job.cached_statepoint["chi"]}$, ' - f'replicate={job.cached_statepoint["replicate_idx"]}, ' - '$\\varepsilon_{\\mathrm{rep}}/\\varepsilon_{\\mathrm{att}}$' - f'$={job.cached_statepoint["long_range_interaction_scale_factor"]}$' - ) - fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight', transparent=False) + # save averages + for mode in sim_modes: + job.document[mode] = dict( + pressure=float(numpy.mean(pressures[mode])), + density=float(numpy.mean(densities[mode])), + ) - job.document['patchy_particle_pressure_analysis_complete'] = True + # Plot results + fig = matplotlib.figure.Figure(figsize=(10, 10 / 1.618 * 2), layout='tight') + ax = fig.add_subplot(2, 2, 1) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=densities, + ylabel=r'$\rho$', + expected=job.cached_statepoint['density'], + max_points=500, + ) + ax.legend() + + ax_distribution = fig.add_subplot(2, 2, 2, sharey=ax) + util.plot_distribution( + ax_distribution, + {k: v for k, v in densities.items() if not k.startswith('nvt')}, + r'', + expected=job.cached_statepoint['density'], + bins=50, + plot_rotated=True, + ) + ax = fig.add_subplot(2, 2, 3) + util.plot_timeseries( + ax=ax, + timesteps=timesteps, + data=pressures, + ylabel=r'$\beta P$', + expected=job.cached_statepoint['pressure'], + max_points=500, + ) + ax_distribution = fig.add_subplot(2, 2, 4, sharey=ax) + util.plot_distribution( + ax_distribution, + pressures, + r'', + expected=job.cached_statepoint['pressure'], + bins=50, + plot_rotated=True, + ) -@Project.pre( - lambda *jobs: util.true_all(*jobs, key='patchy_particle_pressure_analysis_complete') -) -@Project.post( - lambda *jobs: util.true_all( - *jobs, key='patchy_particle_pressure_compare_modes_complete' - ) -) -@Project.operation( - directives=dict(executable=CONFIG['executable']), - aggregator=aggregator.groupby( - key=[ - 'pressure', - 'density', - 'temperature', - 'chi', - 'num_particles', - 'long_range_interaction_scale_factor', - ], - sort_by='replicate_idx', - select=is_patchy_particle_pressure, + fig.suptitle( + f'$\\rho={job.cached_statepoint["density"]}$, ' + f'$N={job.cached_statepoint["num_particles"]}$, ' + f'T={job.cached_statepoint["temperature"]}, ' + f'$\\chi={job.cached_statepoint["chi"]}$, ' + f'replicate={job.cached_statepoint["replicate_idx"]}, ' + '$\\varepsilon_{\\mathrm{rep}}/\\varepsilon_{\\mathrm{att}}$' + f'$={job.cached_statepoint["long_range_interaction_scale_factor"]}$' + ) + fig.savefig(job.fn('nvt_npt_plots.svg'), bbox_inches='tight', transparent=False) + + +ValidationWorkflow.add_action( + f'{__name__}.analyze', + Action( + method=analyze, + configuration={ + 'products': ['nvt_npt_plots.svg'], + 'previous_actions': sampling_jobs, + 'group': _group, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:01:00'}, + }, + }, ), ) -def patchy_particle_pressure_compare_modes(*jobs): - """Compares the tested simulation modes.""" - import 
matplotlib - import matplotlib.figure - import matplotlib.style - import numpy +def compare_modes(*jobs): + """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') - print('starting patchy_particle_pressure_compare_modes:', jobs[0]) + print(f'starting {__name__}.compare_modes:', jobs[0]) sim_modes = [] for _ensemble in ['nvt', 'npt']: @@ -811,5 +802,17 @@ def patchy_particle_pressure_compare_modes(*jobs): transparent=False, ) - for job in jobs: - job.document['patchy_particle_pressure_compare_modes_complete'] = True +ValidationWorkflow.add_action( + f'{__name__}.compare_modes', + Action( + method=compare_modes, + configuration={ + 'previous_actions': [f'{__name__}.analyze'], + 'group': _group_compare, + 'resources': { + 'processes': {'per_submission': 1}, + 'walltime': {'per_directory': '00:02:00'}, + }, + }, + ), +) diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index d5e3c49b..89c8499b 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py @@ -7,13 +7,11 @@ import alj_2d import config import hard_disk - import hard_sphere import lj_fluid import lj_union import signac - -# import patchy_particle_pressure +import patchy_particle_pressure import simple_polygon from workflow_class import ValidationWorkflow @@ -24,7 +22,7 @@ hard_disk, hard_sphere, simple_polygon, - # 'patchy_particle_pressure', + patchy_particle_pressure, ] diff --git a/hoomd_validation/simple_polygon.py b/hoomd_validation/simple_polygon.py index 80a43ab4..0b33470d 100644 --- a/hoomd_validation/simple_polygon.py +++ b/hoomd_validation/simple_polygon.py @@ -3,31 +3,26 @@ """Simple polygon equation of state validation test.""" +import itertools import json import os -import pathlib - -import util -from config import CONFIG -import itertools import hoomd -import numpy -from workflow import Action -from workflow_class import ValidationWorkflow -import hoomd -import numpy -from custom_actions import ComputeDensity import matplotlib import matplotlib.figure import matplotlib.style import numpy +import util +from config import CONFIG +from custom_actions import ComputeDensity +from workflow import Action +from workflow_class import ValidationWorkflow # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. 
RANDOMIZE_STEPS = 20_000 EQUILIBRATE_STEPS = 100_000 -RUN_STEPS = 100_000 +RUN_STEPS = 500_000 RESTART_STEPS = RUN_STEPS // 10 TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS SHAPE_VERTICES = [ @@ -71,7 +66,6 @@ def job_statepoints(): ) - _group = { 'sort_by': ['/density'], 'include': [{'condition': ['/subproject', '==', __name__]}], @@ -89,6 +83,7 @@ def job_statepoints(): 'submit_whole': True, } + def create_initial_state(*jobs): """Create initial system configuration.""" communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) @@ -472,7 +467,7 @@ def run_npt_sim(job, device): def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" action_name = f'{__name__}.{mode}_{device_name}' - + def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" communicator = hoomd.communicator.Communicator( @@ -490,9 +485,7 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: print(f'completed {action_name}: {job}') @@ -604,6 +597,7 @@ def analyze(*jobs): ), ) + def compare_modes(*jobs): """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') From a6d3e99a77011f70a6a8a616cbc63802d62cfdd0 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Tue, 20 Aug 2024 09:39:37 -0400 Subject: [PATCH 18/34] Fix errors in patchy particle pressure. --- hoomd_validation/patchy_particle_pressure.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py index b452796c..57d5468f 100644 --- a/hoomd_validation/patchy_particle_pressure.py +++ b/hoomd_validation/patchy_particle_pressure.py @@ -328,7 +328,7 @@ def make_mc_simulation(job, device, initial_state, sim_mode, extra_loggables=Non return sim -def run_nvt_sim(job, device, complete_filename): +def run_nvt_sim(job, device): """Run MC sim in NVT.""" sim_mode = 'nvt' @@ -407,7 +407,7 @@ def run_nvt_sim(job, device, complete_filename): device.notice(f'Ending {job} run early due to walltime limits.') -def run_npt_sim(job, device, complete_filename): +def run_npt_sim(job, device): """Run MC sim in NPT.""" # device sim_mode = 'npt' @@ -594,7 +594,7 @@ def sampling_operation(*jobs): -def analyze(*job): +def analyze(*jobs): """Analyze the output of all simulation modes.""" matplotlib.style.use('fivethirtyeight') From e6818e6bc80068079fa46598855a6718dcc9de1b Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Tue, 20 Aug 2024 09:40:12 -0400 Subject: [PATCH 19/34] Run pre-commit. 
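
Formatting applied with (assuming the repository's standard pre-commit setup):

    pre-commit run --all-files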
--- hoomd_validation/hard_sphere.py | 1 + hoomd_validation/patchy_particle_pressure.py | 37 +++++++++++--------- hoomd_validation/project.py | 2 +- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/hoomd_validation/hard_sphere.py b/hoomd_validation/hard_sphere.py index b7030cf2..22cde5b7 100644 --- a/hoomd_validation/hard_sphere.py +++ b/hoomd_validation/hard_sphere.py @@ -71,6 +71,7 @@ def job_statepoints(): 'submit_whole': True, } + def create_initial_state(*jobs): """Create initial system configuration.""" communicator = hoomd.communicator.Communicator(ranks_per_partition=NUM_CPU_RANKS) diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py index 57d5468f..8c2a72dd 100644 --- a/hoomd_validation/patchy_particle_pressure.py +++ b/hoomd_validation/patchy_particle_pressure.py @@ -3,24 +3,20 @@ """Test for consistency between NVT and NPT simulations of patchy particles.""" +import itertools import json import os -import pathlib import hoomd -import itertools - +import matplotlib +import matplotlib.figure +import matplotlib.style import numpy import util from config import CONFIG +from custom_actions import ComputeDensity from workflow import Action from workflow_class import ValidationWorkflow -from custom_actions import ComputeDensity -import matplotlib -import matplotlib.figure -import matplotlib.style -import numpy - # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. @@ -86,14 +82,24 @@ def job_statepoints(): CONFIG['replicates'], CONFIG['max_cores_submission'] // NUM_CPU_RANKS ) } -_group_cpu_postive_pressure = _group_cpu | {'include': [{'all': [['/subproject', '==', __name__], ['/pressure', '>', 0]]}]} +_group_cpu_postive_pressure = _group_cpu | { + 'include': [{'all': [['/subproject', '==', __name__], ['/pressure', '>', 0]]}] +} _group_compare = _group | { - 'sort_by': ['/pressure', '/density', '/temperature', '/chi', '/num_particles', '/long_range_interaction_scale_factor'], + 'sort_by': [ + '/pressure', + '/density', + '/temperature', + '/chi', + '/num_particles', + '/long_range_interaction_scale_factor', + ], 'split_by_sort_key': True, 'submit_whole': True, } + def make_potential( delta_rad, sq_well_lambda, sigma, kT, long_range_interaction_scale_factor ): @@ -544,7 +550,7 @@ def run_npt_sim(job, device): def add_sampling_job(mode, device_name, resources, group): """Add a sampling job to the workflow.""" action_name = f'{__name__}.{mode}_{device_name}' - + def sampling_operation(*jobs): """Perform sampling simulation given the definition.""" communicator = hoomd.communicator.Communicator( @@ -562,9 +568,7 @@ def sampling_operation(*jobs): ), ) - globals().get(f'run_{mode}_sim')( - job, device - ) + globals().get(f'run_{mode}_sim')(job, device) if communicator.rank == 0: print(f'completed {action_name}: {job}') @@ -593,7 +597,6 @@ def sampling_operation(*jobs): add_sampling_job(**definition) - def analyze(*jobs): """Analyze the output of all simulation modes.""" matplotlib.style.use('fivethirtyeight') @@ -700,6 +703,7 @@ def analyze(*jobs): ), ) + def compare_modes(*jobs): """Compares the tested simulation modes.""" matplotlib.style.use('fivethirtyeight') @@ -802,6 +806,7 @@ def compare_modes(*jobs): transparent=False, ) + ValidationWorkflow.add_action( f'{__name__}.compare_modes', Action( diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py index 89c8499b..c4c49baf 100644 --- a/hoomd_validation/project.py +++ b/hoomd_validation/project.py 
@@ -10,8 +10,8 @@ import hard_sphere import lj_fluid import lj_union -import signac import patchy_particle_pressure +import signac import simple_polygon from workflow_class import ValidationWorkflow From c7b0411c58957c9905732eb1432a1623946068cc Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Tue, 20 Aug 2024 09:52:54 -0400 Subject: [PATCH 20/34] Restore run length. --- hoomd_validation/patchy_particle_pressure.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py index 8c2a72dd..1bf58a9d 100644 --- a/hoomd_validation/patchy_particle_pressure.py +++ b/hoomd_validation/patchy_particle_pressure.py @@ -21,11 +21,8 @@ # Run parameters shared between simulations. # Step counts must be even and a multiple of the log quantity period. RANDOMIZE_STEPS = 20_000 -EQUILIBRATE_STEPS = 50_000 -RUN_STEPS = 100_00 -# TODO: restore -# EQUILIBRATE_STEPS = 200_000 -# RUN_STEPS = 500_000 +EQUILIBRATE_STEPS = 200_000 +RUN_STEPS = 500_000 RESTART_STEPS = RUN_STEPS // 10 TOTAL_STEPS = RANDOMIZE_STEPS + EQUILIBRATE_STEPS + RUN_STEPS From f24180672e9d536d7cda30d958d768e6ec01c2a1 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Tue, 20 Aug 2024 09:55:06 -0400 Subject: [PATCH 21/34] Use group-wide stale configuration. --- .github/workflows/stale.yaml | 11 +++++++++++ .github/workflows/stale.yml | 36 ------------------------------------ 2 files changed, 11 insertions(+), 36 deletions(-) create mode 100644 .github/workflows/stale.yaml delete mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file mode 100644 index 00000000..696e5440 --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,11 @@ +name: Close stale issues and PRs + +on: + schedule: + - cron: '0 19 * * *' + + workflow_dispatch: + +jobs: + stale: + uses: glotzerlab/workflows/.github/workflows/stale.yaml@ea2e25d07af862a1c696a932c2bd6b242d142049 # 0.2.0 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index a356f559..00000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Close stale issues and PRs - -on: - schedule: - - cron: '0 19 * * *' - - # Trigger on request. - workflow_dispatch: - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9.0.0 - with: - days-before-close: 10 - stale-issue-label: stale - stale-pr-label: stale - exempt-issue-labels: essential - exempt-pr-labels: essential - - days-before-issue-stale: 170 - stale-issue-message: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. - close-issue-message: > - This issue has been automatically closed because it has not had - recent activity. - - days-before-pr-stale: 20 - stale-pr-message: > - This pull request has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. - close-pr-message: > - This pull request has been automatically closed because it has not had - recent activity. From 3baec62c3048b31e57dff77af06cb23fee99a31f Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Tue, 20 Aug 2024 10:43:01 -0400 Subject: [PATCH 22/34] Test with row. 
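
A minimal local reproduction of what the new CI job runs (sketch; both
commands are taken verbatim from the workflow steps below, and `row` is
assumed to be on PATH):

    python3 hoomd_validation/project.py init
    row show status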
--- .github/workflows/CI.yml | 41 ------------------------------------- .github/workflows/test.yaml | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 41 deletions(-) delete mode 100644 .github/workflows/CI.yml create mode 100644 .github/workflows/test.yaml diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml deleted file mode 100644 index 25b4fa37..00000000 --- a/.github/workflows/CI.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: CI - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -on: - # Trigger on pull requests. - pull_request: - - # Trigger on pushes to the mainline branches. This prevents building commits twice when the pull - # request source branch is in the same repository. - push: - branches: - - "main" - - # Trigger on request. - workflow_dispatch: - -jobs: - flow-status: - name: flow-status - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.7 - - name: Set up Python - uses: actions/setup-python@v5.1.0 - with: - python-version: '3.11' - - uses: actions/cache@v4.0.2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('.github/workflows/requirements-test.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - - name: Install signac - run: python3 -m pip install -r .github/workflows/requirements-test.txt - - name: Initialize workspace - run: python3 hoomd_validation/init.py - - name: Check flow status - run: python3 hoomd_validation/project.py status diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 00000000..a49f327e --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,36 @@ +name: test + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + + push: + branches: + - "main" + + workflow_dispatch: + +jobs: + status: + name: Initialize and show status + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: Set up Python + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + with: + python-version: "3.12" + - name: Set up Python environment + uses: glotzerlab/workflows/setup-uv@ea2e25d07af862a1c696a932c2bd6b242d142049 # 0.2.0 + with: + lockfile: ".github/workflows/requirements-test.txt" + - name: Initialize workspace + run: python3 hoomd_validation/project.py init + - name: Set up row + uses: glotzerlab/workflows/setup-row@eb2fa7acfe52cc995a323bce2e26c763a9dee632 # 0.3.0 + - name: Show project status + run: row show status From eb135123363797bb20f152827e9bdca6835e9c6d Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Tue, 20 Aug 2024 11:12:26 -0400 Subject: [PATCH 23/34] Make hoomd import optional. For use in CI which tests project init, but doesn't execute any simulations. 
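
Each module guards the import with the same pattern (sketch, matching the
hunks below; printing the warning is the only fallback needed for `init`):

    try:
        import hoomd
    except ModuleNotFoundError as e:
        print(f"Warning: {e}")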
--- hoomd_validation/alj_2d.py | 6 +++++- hoomd_validation/custom_actions.py | 6 ++++-- hoomd_validation/hard_disk.py | 6 +++++- hoomd_validation/lj_fluid.py | 6 +++++- hoomd_validation/lj_union.py | 6 +++++- hoomd_validation/util.py | 5 ++++- 6 files changed, 28 insertions(+), 7 deletions(-) diff --git a/hoomd_validation/alj_2d.py b/hoomd_validation/alj_2d.py index 7bcaf515..3fa6f097 100644 --- a/hoomd_validation/alj_2d.py +++ b/hoomd_validation/alj_2d.py @@ -7,7 +7,11 @@ import math import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/custom_actions.py b/hoomd_validation/custom_actions.py index 7d5d982e..03387bd6 100644 --- a/hoomd_validation/custom_actions.py +++ b/hoomd_validation/custom_actions.py @@ -3,8 +3,10 @@ """This file contains all custom actions needed for this project.""" -import hoomd - +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") class ComputeDensity(hoomd.custom.Action): """Compute the density of particles in the system. diff --git a/hoomd_validation/hard_disk.py b/hoomd_validation/hard_disk.py index 0b658d08..478fb117 100644 --- a/hoomd_validation/hard_disk.py +++ b/hoomd_validation/hard_disk.py @@ -7,7 +7,11 @@ import json import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index be48e206..73363b8f 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -9,7 +9,11 @@ import math import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index a60fbad6..79760b43 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -9,7 +9,11 @@ import math import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/util.py b/hoomd_validation/util.py index 5c83ab89..a1c2609c 100644 --- a/hoomd_validation/util.py +++ b/hoomd_validation/util.py @@ -6,10 +6,13 @@ import os import h5py -import hoomd import numpy import signac +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") def get_job_filename(sim_mode, device, name, file_type): """Construct a job filename.""" From 9e9331dc0ea6c460b02bca546f3bba8207a24187 Mon Sep 17 00:00:00 2001 From: "Joshua A. Anderson" Date: Wed, 21 Aug 2024 08:29:45 -0400 Subject: [PATCH 24/34] Install packages required for CI checks. 
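
The pinned requirements-test.txt is regenerated from the new
requirements-test.in with the command recorded in its autogenerated header
(run from .github/workflows/; writing the output back to the lockfile with
`-o` is an assumption):

    uv pip compile requirements-test.in -o requirements-test.txt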
--- .github/workflows/requirements-test.in | 6 +++ .github/workflows/requirements-test.txt | 49 ++++++++++++++++++-- .github/workflows/test.yaml | 2 +- hoomd_validation/alj_2d.py | 2 +- hoomd_validation/custom_actions.py | 43 +++++++++-------- hoomd_validation/hard_disk.py | 2 +- hoomd_validation/hard_sphere.py | 6 ++- hoomd_validation/lj_fluid.py | 2 +- hoomd_validation/lj_union.py | 2 +- hoomd_validation/patchy_particle_pressure.py | 6 ++- hoomd_validation/simple_polygon.py | 6 ++- hoomd_validation/util.py | 3 +- requirements.txt | 8 ---- 13 files changed, 96 insertions(+), 41 deletions(-) create mode 100644 .github/workflows/requirements-test.in delete mode 100644 requirements.txt diff --git a/.github/workflows/requirements-test.in b/.github/workflows/requirements-test.in new file mode 100644 index 00000000..98eb30c7 --- /dev/null +++ b/.github/workflows/requirements-test.in @@ -0,0 +1,6 @@ +h5py +matplotlib +numpy +rtoml +scipy +signac diff --git a/.github/workflows/requirements-test.txt b/.github/workflows/requirements-test.txt index bc6f9784..389526ee 100644 --- a/.github/workflows/requirements-test.txt +++ b/.github/workflows/requirements-test.txt @@ -1,6 +1,45 @@ -h5py==3.10.0 -gsd==3.2.1 -numpy==1.26.4 -PyYAML==6.0.1 +# This file was autogenerated by uv via the following command: +# uv pip compile requirements-test.in +contourpy==1.2.1 + # via matplotlib +cycler==0.12.1 + # via matplotlib +filelock==3.15.4 + # via signac +fonttools==4.53.1 + # via matplotlib +h5py==3.11.0 + # via -r requirements-test.in +kiwisolver==1.4.5 + # via matplotlib +matplotlib==3.9.2 + # via -r requirements-test.in +numpy==2.1.0 + # via + # -r requirements-test.in + # contourpy + # h5py + # matplotlib + # scipy +packaging==24.1 + # via + # matplotlib + # signac +pillow==10.4.0 + # via matplotlib +pyparsing==3.1.2 + # via matplotlib +python-dateutil==2.9.0.post0 + # via matplotlib +rtoml==0.11.0 + # via -r requirements-test.in +scipy==1.14.1 + # via -r requirements-test.in signac==2.2.0 -signac-flow==0.28.0 + # via -r requirements-test.in +six==1.16.0 + # via python-dateutil +synced-collections==1.0.0 + # via signac +tqdm==4.66.5 + # via signac diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a49f327e..cc69fbdc 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,4 +1,4 @@ -name: test +name: Unit test concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/hoomd_validation/alj_2d.py b/hoomd_validation/alj_2d.py index 3fa6f097..50d9073c 100644 --- a/hoomd_validation/alj_2d.py +++ b/hoomd_validation/alj_2d.py @@ -10,7 +10,7 @@ try: import hoomd except ModuleNotFoundError as e: - print(f"Warning: {e}") + print(f'Warning: {e}') import matplotlib import matplotlib.figure diff --git a/hoomd_validation/custom_actions.py b/hoomd_validation/custom_actions.py index 03387bd6..b4f1dca9 100644 --- a/hoomd_validation/custom_actions.py +++ b/hoomd_validation/custom_actions.py @@ -5,30 +5,35 @@ try: import hoomd -except ModuleNotFoundError as e: - print(f"Warning: {e}") -class ComputeDensity(hoomd.custom.Action): - """Compute the density of particles in the system. + class ComputeDensity(hoomd.custom.Action): + """Compute the density of particles in the system. + + The density computed is a number density. - The density computed is a number density. + Args: + N: When not None, Use N instead of the number of particles when + computing the density. + """ - Args: - N: When not None, Use N instead of the number of particles when - computing the density. 
- """ + def __init__(self, N=None): + self.N = N - def __init__(self, N=None): - self.N = N + @hoomd.logging.log + def density(self): + """float: The density of the system.""" + if self.N is None: + return self._state.N_particles / self._state.box.volume - @hoomd.logging.log - def density(self): - """float: The density of the system.""" - if self.N is None: - return self._state.N_particles / self._state.box.volume + return self.N / self._state.box.volume - return self.N / self._state.box.volume + def act(self, timestep): + """Dummy act method.""" + pass +except ModuleNotFoundError as e: + print(f'Warning: {e}') - def act(self, timestep): - """Dummy act method.""" + # This workaround is to allow `python project.py init` to succeed in CI checks + # without requiring a working HOOMD installation. + class ComputeDensity: pass diff --git a/hoomd_validation/hard_disk.py b/hoomd_validation/hard_disk.py index 478fb117..3fcaa8fe 100644 --- a/hoomd_validation/hard_disk.py +++ b/hoomd_validation/hard_disk.py @@ -10,7 +10,7 @@ try: import hoomd except ModuleNotFoundError as e: - print(f"Warning: {e}") + print(f'Warning: {e}') import matplotlib import matplotlib.figure diff --git a/hoomd_validation/hard_sphere.py b/hoomd_validation/hard_sphere.py index 22cde5b7..4d418928 100644 --- a/hoomd_validation/hard_sphere.py +++ b/hoomd_validation/hard_sphere.py @@ -6,7 +6,11 @@ import itertools import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f'Warning: {e}') + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/lj_fluid.py b/hoomd_validation/lj_fluid.py index 73363b8f..933cc372 100644 --- a/hoomd_validation/lj_fluid.py +++ b/hoomd_validation/lj_fluid.py @@ -12,7 +12,7 @@ try: import hoomd except ModuleNotFoundError as e: - print(f"Warning: {e}") + print(f'Warning: {e}') import matplotlib import matplotlib.figure diff --git a/hoomd_validation/lj_union.py b/hoomd_validation/lj_union.py index 79760b43..921eacc7 100644 --- a/hoomd_validation/lj_union.py +++ b/hoomd_validation/lj_union.py @@ -12,7 +12,7 @@ try: import hoomd except ModuleNotFoundError as e: - print(f"Warning: {e}") + print(f'Warning: {e}') import matplotlib import matplotlib.figure diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py index 1bf58a9d..d76c5e39 100644 --- a/hoomd_validation/patchy_particle_pressure.py +++ b/hoomd_validation/patchy_particle_pressure.py @@ -7,7 +7,11 @@ import json import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/simple_polygon.py b/hoomd_validation/simple_polygon.py index 0b33470d..6e60e9f4 100644 --- a/hoomd_validation/simple_polygon.py +++ b/hoomd_validation/simple_polygon.py @@ -7,7 +7,11 @@ import json import os -import hoomd +try: + import hoomd +except ModuleNotFoundError as e: + print(f"Warning: {e}") + import matplotlib import matplotlib.figure import matplotlib.style diff --git a/hoomd_validation/util.py b/hoomd_validation/util.py index a1c2609c..cfa5349b 100644 --- a/hoomd_validation/util.py +++ b/hoomd_validation/util.py @@ -12,7 +12,8 @@ try: import hoomd except ModuleNotFoundError as e: - print(f"Warning: {e}") + print(f'Warning: {e}') + def get_job_filename(sim_mode, device, name, file_type): """Construct a job filename.""" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 94b5f2d3..00000000 
--- a/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-gsd
-matplotlib
-numpy
-rtoml
-scipy
-signac >= 2.2.0
-signac-flow >= 0.25.1
-signac-dashboard

From 3d3694734690cbd4f3143f0e5035596aee2051f6 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 21 Aug 2024 08:40:59 -0400
Subject: [PATCH 25/34] Check the generated workflow.toml in CI.

---
 .github/workflows/test.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index cc69fbdc..5415334c 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -30,6 +30,8 @@ jobs:
           lockfile: ".github/workflows/requirements-test.txt"
       - name: Initialize workspace
         run: python3 hoomd_validation/project.py init
+      - name: Show workflow
+        run: cat --number workflow.toml
       - name: Set up row
         uses: glotzerlab/workflows/setup-row@eb2fa7acfe52cc995a323bce2e26c763a9dee632 # 0.3.0
       - name: Show project status
         run: row show status

From 5862f4694e90c4b05aa78c1b75e0d10244848c02 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 21 Aug 2024 08:49:46 -0400
Subject: [PATCH 26/34] Fix default walltime

---
 hoomd_validation/config-sample.toml | 2 +-
 hoomd_validation/config_parser.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/hoomd_validation/config-sample.toml b/hoomd_validation/config-sample.toml
index 28f8e791..0b39036f 100644
--- a/hoomd_validation/config-sample.toml
+++ b/hoomd_validation/config-sample.toml
@@ -11,7 +11,7 @@
 # max_gpus_submission = 1
 
 ## Maximum wall time (in hours) that a submitted cluster job is allowed to run.
-# max_walltime = "24:00:00"
+# max_walltime = "1 day, 00:00:00"
 
 ## Wall time (in hours) To use for short, non-restartable jobs
 # short_walltime = "02:00:00"
diff --git a/hoomd_validation/config_parser.py b/hoomd_validation/config_parser.py
index b1e80009..2c0bb218 100644
--- a/hoomd_validation/config_parser.py
+++ b/hoomd_validation/config_parser.py
@@ -31,7 +31,7 @@ def __init__(self, config_file_path=DEFAULT_CONFIG_PATH):
         self['max_cores_sim'] = int(config.get('max_cores_sim', 16))
         self['max_cores_submission'] = int(config.get('max_cores_submission', 16))
         self['max_gpus_submission'] = int(config.get('max_gpus_submission', 1))
-        self['max_walltime'] = str(config.get('max_walltime', '24:00:00'))
+        self['max_walltime'] = str(config.get('max_walltime', '1 day, 00:00:00'))
         self['short_walltime'] = str(config.get('short_walltime', '02:00:00'))
         self['replicates'] = int(config.get('replicates', 32))
         self['enable_gpu'] = bool(config.get('enable_gpu', True))

From 2ea7adb5664f1cdcddcb8c567e752fa263a6b13d Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 21 Aug 2024 12:08:15 -0400
Subject: [PATCH 27/34] Test with row 0.3.0.

---
 .github/workflows/test.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 5415334c..48163df0 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -33,6 +33,6 @@ jobs:
       - name: Show workflow
         run: cat --number workflow.toml
       - name: Set up row
-        uses: glotzerlab/workflows/setup-row@eb2fa7acfe52cc995a323bce2e26c763a9dee632 # 0.3.0
+        uses: glotzerlab/workflows/setup-row@20092ebdae3ec74a21b71959ad980e7ab7fd1f8d # 0.3.0
       - name: Show project status
         run: row show status

From 569ba95864e3df38c7d9160b51c801c22439a674 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 21 Aug 2024 12:40:02 -0400
Subject: [PATCH 28/34] Update documentation.
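
The documented quick start is now (commands as written in the updated README;
`row submit` is repeated once per workflow stage):

    python3 hoomd_validation/project.py init
    row submit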
--- README.md | 119 ++++++++++++++---------------------- documentation/delta.md | 31 ++-------- documentation/frontier.md | 36 ++--------- documentation/greatlakes.md | 30 ++------- documentation/summit.md | 35 ----------- 5 files changed, 59 insertions(+), 192 deletions(-) delete mode 100644 documentation/summit.md diff --git a/README.md b/README.md index bb5adaaa..2420499e 100644 --- a/README.md +++ b/README.md @@ -1,91 +1,66 @@ # HOOMD-blue Validation -This repository contains longer running validation tests for HOOMD-blue. The -validation test workflows in this repository are organized into signac projects. - -## Requirements - -* gsd >= 2.8.0 -* numpy -* PyYAML -* signac >= 2.2.0 -* signac-flow >= 0.25.1 -* signac-dashboard [optional] -* Simulation workflow steps require either the [glotzerlab-software container] - or the following software: - * HOOMD-blue >=3.0 *(with MPI support enabled, GPU and LLVM support are optional)*, -* Analysis workflow steps require either the [glotzerlab-software container] or - the following software: - * matplotlib - * numpy - * scipy -* Workstation or HPC system with at least 16 CPU cores and 1 GPU supported by - HOOMD-blue. +This repository contains validation tests for HOOMD-blue. The workflows are organized in +a [signac] workspace and use [row]. ## Preparation Clone this repository: +```bash +git clone https://github.com/glotzerlab/hoomd-validation.git +``` +Then change to the repository's directory: ```bash -$ git clone https://github.com/glotzerlab/hoomd-validation.git -$ cd hoomd-validation +cd hoomd-validation ``` ## Configuration -Install the prerequisites into a Python environment of your choice. To use the -[glotzerlab-software container], copy `hoomd_validation/config-sample.yaml` to -`hoomd_validation/config.yaml`, uncomment the executable mapping, and set -`singularity_container` to your container image's path. +1. Install the requirements (see below) into a Python environment of your choice. +2. Copy `hoomd_validation/config-sample.toml` to `hoomd_validation/config.toml` + and set the parameters as desired. Each option is documented by a comment in the + sample configuration file. +3. Initialize the signac project directories and create `workflow.toml`. + ```bash + python3 hoomd_validation/project.py init + ``` +4. Configure [row] as necessary for your workstation or HPC resources. + > Note: `project.py init` will overwrite `workflow.toml`. -`hoomd_validation/config.yaml` also controls a number of job submission -parameters. See the commented options in `hoomd_validation/config-sample.yaml` -for a list and their default values. +[row]: https://row.readthedocs.io -## Usage +## Execute tests -1. Initialize the signac project directories, populate them with jobs and job -documents: - ```bash - python3 hoomd_validation/init.py - ``` -2. Run and analyze all validation tests: - * On a workstation (this takes a long time to complete): - ``` - $ python hoomd_validation/project.py run - ``` - * On a cluster: - 1. Populate the flow script template or your shell environment appropriately. - ``` - $ flow template create - $ vim templates/script.sh # make changes to e.g. load modules - ``` - 2. Create the simulation initial states: - ``` - $ python hoomd_validation/project.py submit -o '.*create_initial_state' - ``` - *(wait for all jobs to complete)* - 3. 
-     3. Run the simulations (adjust partition names according to your cluster)
-        ```
-        $ python3 hoomd_validation/project.py submit -o '.*_cpu' --partition standard
-        $ python3 hoomd_validation/project.py submit -o '.*_gpu' --partition gpu
-        ```
-        *(wait for all jobs to complete)*
-     4. Run the analysis (assuming you have the analysis workflow prerequisites in your Python environment):
-        ```
-        $ python hoomd_validation/project.py run
-        ```
-        *(alternately, submit the analysis in stages until no jobs remain eligible)*
-3. Inspect the plots produced in:
-   * `workspace/*.svg`
-
-## Dashboard
-
-Run the provided [signac-dashboard] application to explore the results in a web browser:
+Run
+```bash
+row submit
+```
+
+to submit the first stage of the workflow. Wait for all the jobs to complete, then run
+`row submit` again to start the second stage. Most subprojects in the validation
+workflow have 4 stages ending with `compare_modes`.
+
+> Note: You can execute a single subproject with `row submit --action 'subproject_name.*'`.
+
+After you execute `compare_modes`, inspect the `.svg` files saved in the repository root.
+You can also run the provided [signac-dashboard] application to explore the results in a
+web browser:

 ```bash
-$ python3 dashboard.py run
+python3 dashboard.py run
 ```

-[glotzerlab-software container]: https://glotzerlab-software.readthedocs.io/
-[signac-dashboard]: https://docs.signac.io/projects/dashboard/
+[signac]: https://signac.readthedocs.io
+[signac-dashboard]: https://signac-dashboard.readthedocs.io
+
+## Requirements
+
+* h5py
+* hoomd >= 4.6.0
+* matplotlib
+* numpy
+* rtoml
+* scipy
+* signac >= 2.2.0
+* signac-dashboard [optional]
diff --git a/documentation/delta.md b/documentation/delta.md
index 32131abf..94ab6b0e 100644
--- a/documentation/delta.md
+++ b/documentation/delta.md
@@ -3,31 +3,8 @@
 # Recommended configuration

 ```
-max_cores_sim: 64
-max_cores_submission: 512
-max_gpus_submission: 8
-max_walltime: 48
-```
-
-# Compiling HOOMD from source
-
-* When building with `ENABLE_LLVM=on`, build separate CPU and GPU builds in:
-  * `/scratch/bbgw/${USER}/build/hoomd-cpu`
-  * and `/scratch/bbgw/${USER}/build/hoomd-gpu`.
-* To link to `libcuda.so`, compile `hoomd-gpu` in an interactive job:
-  `srun --account=bbgw-delta-gpu --partition=gpuA40x4 --nodes=1 --tasks=1 --tasks-per-node=1 --cpus-per-task=16 --mem=48g --gpus=1 --pty zsh`
-
-* Submitting jobs
-
-Unset your accounts in `signac.rc` and use environment variables to choose the account and
-hoomd build at submission time:
-
-CPU:
-```
-SBATCH_ACCOUNT=bbgw-delta-cpu PYTHONPATH=/scratch/bbgw/${USER}/build/hoomd-cpu SBATCH_EXPORT=PYTHONPATH python hoomd_validation/project.py submit -o '.*_cpu'
-```
-
-GPU:
-```
-SBATCH_ACCOUNT=bbgw-delta-gpu PYTHONPATH=/scratch/bbgw/${USER}/build/hoomd-gpu SBATCH_EXPORT=PYTHONPATH python hoomd_validation/project.py submit -o '.*_gpu' --partition gpuA100x4
+max_cores_sim = 64
+max_cores_submission = 512
+max_gpus_submission = 8
+max_walltime = "2 days, 00:00:00"
 ```

diff --git a/documentation/frontier.md b/documentation/frontier.md
index c7a51992..7acb7b78 100644
--- a/documentation/frontier.md
+++ b/documentation/frontier.md
@@ -3,36 +3,8 @@
 # Recommended configuration

 ```
-max_cores_sim: 56
-max_cores_submission: 7168
-max_gpus_submission: 256
-max_walltime: 2
-enable_llvm: false
-enable_gpu: true
-```
-
-## Recommended template
-
-```
-{% extends "frontier.sh" %}
-
-{% block header %}
-    {{- super () -}}
-#SBATCH -C nvme
-{% endblock header %}
-{% block custom_content %}
-
-echo "Loading software environment."
-
-export GLOTZERLAB_SOFTWARE_ROOT=/mnt/bb/${USER}/software
-time srun --ntasks-per-node 1 mkdir ${GLOTZERLAB_SOFTWARE_ROOT}
-time srun --ntasks-per-node 1 tar --directory ${GLOTZERLAB_SOFTWARE_ROOT} -xpf ${MEMBERWORK}/mat110/software.tar
-source ${GLOTZERLAB_SOFTWARE_ROOT}/variables.sh
-
-{% endblock custom_content %}
-{% block body %}
-    {{- super () -}}
-
-echo "Completed job in $SECONDS seconds"
-{% endblock body %}
+max_cores_sim = 56
+max_cores_submission = 7168
+max_gpus_submission = 256
+max_walltime = "02:00:00"
 ```

diff --git a/documentation/greatlakes.md b/documentation/greatlakes.md
index 699a0c03..702b5fc0 100644
--- a/documentation/greatlakes.md
+++ b/documentation/greatlakes.md
@@ -3,30 +3,8 @@
 # Recommended configuration

 ```
-max_cores_sim: 32
-max_cores_submission: 32
-max_gpus_submission: 1
-max_walltime: 96
-```
-
-# Compiling HOOMD from source
-
-* When building with `ENABLE_LLVM=on`, built separate CPU and GPU builds in:
-  * `${HOME}/build/hoomd-cpu`
-  * and `${HOME}/build/hoomd-gpu`.
-* To link to `libcuda.so`, compile `hoomd-gpu` in an interactive job:
-  `srun -Asglotzer --gres=gpu:1 --nodes=1 --ntasks-per-node=1 --cpus-per-task=8 --partition gpu -t 8:00:00 --mem=64G --pty /bin/zsh`
-
-* Submitting jobs
-
-Set environment variables to choose the hoomd build and memory requirement at submission time:
-
-CPU:
-```
-SBATCH_MEM_PER_CPU="4g" PYTHONPATH=${HOME}/build/hoomd-cpu SBATCH_EXPORT=PYTHONPATH python3 hoomd_validation/project.py submit -o '.*_cpu'
-```
-
-GPU:
-```
-SBATCH_MEM_PER_CPU="64g" PYTHONPATH=${HOME}/build/hoomd-gpu SBATCH_EXPORT=PYTHONPATH python hoomd_validation/project.py submit -o '.*_gpu' --partition gpu
+max_cores_sim = 32
+max_cores_submission = 32
+max_gpus_submission = 1
+max_walltime = "4 days, 00:00:00"
 ```

diff --git a/documentation/summit.md b/documentation/summit.md
deleted file mode 100644
index ed307a45..00000000
--- a/documentation/summit.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Tips for running on OLCF Summit
-
-# Recommended configuration
-
-```
-max_cores_sim: 42
-max_cores_submission: 1344
-max_gpus_submission: 192
-max_walltime: 2
-```
-
-## Recommended template
-
-* Write stdout/stderr to files.
-* Unload `darshan-runtime` to prevent jobs from hanging on exit.
-
-```
-{% extends "summit.sh" %}
-
-{% block header %}
-    {{- super () -}}
-#BSUB -o hoomd-validation.%J.out
-#BSUB -e hoomd-validation.%J.out
-
-{% endblock header %}
-{% block custom_content %}
-echo "Loading modules."
-source /ccs/proj/mat110/glotzerlab-software/joaander-test/environment.sh
-module unload darshan-runtime
-set -x
-{% endblock custom_content %}
-{% block body %}
-    {{- super () -}}
-{% endblock body %}
-```

From aaafbf669d224bb9951b3c19354ee24bf4a669c5 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 21 Aug 2024 12:41:01 -0400
Subject: [PATCH 29/34] pre-commit.

---
 README.md                                    | 4 ++--
 hoomd_validation/custom_actions.py           | 2 ++
 hoomd_validation/patchy_particle_pressure.py | 2 +-
 hoomd_validation/simple_polygon.py           | 2 +-
 4 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 2420499e..b40b65ee 100644
--- a/README.md
+++ b/README.md
@@ -19,14 +19,14 @@
 1. Install the requirements (see below) into a Python environment of your choice.
 2. Copy `hoomd_validation/config-sample.toml` to `hoomd_validation/config.toml`
-   and set the parameters as desired. Each option is documented by a comment in the 
+   and set the parameters as desired. Each option is documented by a comment in the
    sample configuration file.
 3. Initialize the signac project directories and create `workflow.toml`.
    ```bash
    python3 hoomd_validation/project.py init
    ```
 4. Configure [row] as necessary for your workstation or HPC resources.
-   > Note: `project.py init` will overwrite `workflow.toml`. 
+   > Note: `project.py init` will overwrite `workflow.toml`.

 [row]: https://row.readthedocs.io

diff --git a/hoomd_validation/custom_actions.py b/hoomd_validation/custom_actions.py
index b4f1dca9..37f38f3d 100644
--- a/hoomd_validation/custom_actions.py
+++ b/hoomd_validation/custom_actions.py
@@ -36,4 +36,6 @@ def act(self, timestep):
 # This workaround is to allow `python project.py init` to succeed in CI checks
 # without requiring a working HOOMD installation.
 class ComputeDensity:
+    """Placeholder class."""
+
     pass

diff --git a/hoomd_validation/patchy_particle_pressure.py b/hoomd_validation/patchy_particle_pressure.py
index d76c5e39..0ff12e4e 100644
--- a/hoomd_validation/patchy_particle_pressure.py
+++ b/hoomd_validation/patchy_particle_pressure.py
@@ -10,7 +10,7 @@
 try:
     import hoomd
 except ModuleNotFoundError as e:
-    print(f"Warning: {e}")
+    print(f'Warning: {e}')

 import matplotlib
 import matplotlib.figure

diff --git a/hoomd_validation/simple_polygon.py b/hoomd_validation/simple_polygon.py
index 6e60e9f4..8b715859 100644
--- a/hoomd_validation/simple_polygon.py
+++ b/hoomd_validation/simple_polygon.py
@@ -10,7 +10,7 @@
 try:
     import hoomd
 except ModuleNotFoundError as e:
-    print(f"Warning: {e}")
+    print(f'Warning: {e}')

 import matplotlib
 import matplotlib.figure

From 4e2b16b70db416707c5a1cadf5bc0af5786cb681 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Wed, 21 Aug 2024 12:54:55 -0400
Subject: [PATCH 30/34] Use latest glotzerlab/workflows.

---
 .github/workflows/test.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 48163df0..71727c7e 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -25,7 +25,7 @@ jobs:
         with:
           python-version: "3.12"
       - name: Set up Python environment
-        uses: glotzerlab/workflows/setup-uv@ea2e25d07af862a1c696a932c2bd6b242d142049 # 0.2.0
+        uses: glotzerlab/workflows/setup-uv@ae7e7c6931098a313ef8069ef04b88a55c3a40f6 # 0.3.0
         with:
           lockfile: ".github/workflows/requirements-test.txt"
       - name: Initialize workspace
         run: python3 hoomd_validation/project.py init
       - name: Show workflow
         run: cat --number workflow.toml
       - name: Set up row
-        uses: glotzerlab/workflows/setup-row@20092ebdae3ec74a21b71959ad980e7ab7fd1f8d # 0.3.0
+        uses: glotzerlab/workflows/setup-row@ae7e7c6931098a313ef8069ef04b88a55c3a40f6 # 0.3.0
       - name: Show project status
         run: row show status

From 26bbb2ef77d92338fcfef343a8f193ac5ac834ba Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Thu, 22 Aug 2024 09:37:41 -0400
Subject: [PATCH 31/34] Add account to init.

---
 hoomd_validation/project.py  |  3 ---
 hoomd_validation/workflow.py | 18 ++++++++++++++++--
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/hoomd_validation/project.py b/hoomd_validation/project.py
index c4c49baf..80d32c58 100644
--- a/hoomd_validation/project.py
+++ b/hoomd_validation/project.py
@@ -35,9 +35,6 @@ def init(args):

     project = signac.init_project(path=config.project_root)

-    # TODO: Add command line arguments to limit which projects are initialized.
-    # Will need to selectively remove actions from the other projects from the workflow.
-
     # initialize jobs for validation test projects
     for subproject in all_subprojects:
         # add all the jobs to the project

diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py
index a3f0c99e..236a1b17 100644
--- a/hoomd_validation/workflow.py
+++ b/hoomd_validation/workflow.py
@@ -13,11 +13,19 @@
 """

 import argparse
 from pathlib import Path
+import subprocess

 import rtoml
 import signac


+def _get_cluster_name():
+    """Get the current cluster name."""
+    result = subprocess.run(
+        ['row', 'show', 'cluster', '--name'], capture_output=True, check=True, text=True
+    )
+    return result.stdout.strip()
+
 class Action:
     """Represent a row action.
@@ -68,7 +76,7 @@ def add_action(cls, name, action):
         cls._actions[name] = action

     @classmethod
-    def write_workflow(cls, entrypoint, path=None, default=None):
+    def write_workflow(cls, entrypoint, path=None, default=None, account=None):
         """Write the file ``workflow.toml``.

         ``workflow.toml`` will include the signac workspace definition, the given
@@ -82,6 +90,7 @@ def write_workflow(cls, entrypoint, path=None, default=None):
             entrypoint(str): Name of the python file that calls the `main` entrypoint.
             path(Path): Path to write ``workflow.toml``.
             default(dict): The ``[default]`` mapping.
+            account(str): Name of the cluster account to use.
         """
         workflow = {
             'workspace': {'path': 'workspace', 'value_file': 'signac_statepoint.json'}
@@ -92,6 +101,9 @@ def write_workflow(cls, entrypoint, path=None, default=None):
                 'command': f'python -u {entrypoint} action $ACTION_NAME {{directories}}'
             }
         }
+        if account is not None:
+            print(account)
+            workflow['default']['action'].update({'submit_options': {_get_cluster_name(): {'account': account}}})

         if default is not None:
             workflow['default'].update(default)
@@ -129,6 +141,7 @@ def main(cls, init=None, init_args=None, **kwargs):
         parser = argparse.ArgumentParser()
         command = parser.add_subparsers(dest='command', required=True)
         init_parser = command.add_parser('init')
+        init_parser.add_argument('--account')
         if init_args is not None:
             for arg in init_args:
                 init_parser.add_argument(arg)
@@ -138,12 +151,13 @@ def main(cls, init=None, init_args=None, **kwargs):
         action_parser.add_argument('directories', nargs='+')

         args = parser.parse_args()
+        print(args)

         if args.command == 'init':
             if init is not None:
                 init(args)

-            cls.write_workflow(**kwargs)
+            cls.write_workflow(account=args.account, **kwargs)
         elif args.command == 'action':
             project = signac.get_project()
             jobs = [project.open_job(id=directory) for directory in args.directories]

From 2e6b304a24d63ecbeaa78e846e2d0ef890ca00e3 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Thu, 22 Aug 2024 10:02:03 -0400
Subject: [PATCH 32/34] pre-commit

---
 hoomd_validation/workflow.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py
index 236a1b17..f321bb8e 100644
--- a/hoomd_validation/workflow.py
+++ b/hoomd_validation/workflow.py
@@ -12,8 +12,8 @@
 """

 import argparse
-from pathlib import Path
 import subprocess
+from pathlib import Path

 import rtoml
 import signac
@@ -26,6 +26,7 @@ def _get_cluster_name():
     )
     return result.stdout.strip()

+
 class Action:
     """Represent a row action.

@@ -103,7 +104,9 @@ def write_workflow(cls, entrypoint, path=None, default=None, account=None):
         }
         if account is not None:
             print(account)
-            workflow['default']['action'].update({'submit_options': {_get_cluster_name(): {'account': account}}})
+            workflow['default']['action'].update(
+                {'submit_options': {_get_cluster_name(): {'account': account}}}
+            )

         if default is not None:
             workflow['default'].update(default)

From 630c80454ee6a7ae4e715738ba39a52856287539 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Mon, 26 Aug 2024 08:19:56 -0500
Subject: [PATCH 33/34] Fix misc issues.
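
Query the cluster name with `row show cluster --short` instead of the
incorrect `--name` flag, and drop the leftover debug `print(account)` and
`print(args)` statements.

For reference, a sketch (with assumed values, not part of the patch) of the
submit options that `write_workflow()` derives from the corrected helper and
the `--account` option:

```python
# Sketch only: write_workflow() merges this mapping into
# workflow['default']['action'] when --account is given.
import rtoml

cluster_name = 'greatlakes'  # hypothetical output of `row show cluster --short`
account = 'my-account'       # hypothetical value passed to `project.py init --account`

print(rtoml.dumps({'submit_options': {cluster_name: {'account': account}}}))
```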
---
 hoomd_validation/workflow.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py
index f321bb8e..ece97db6 100644
--- a/hoomd_validation/workflow.py
+++ b/hoomd_validation/workflow.py
@@ -22,7 +22,7 @@
 def _get_cluster_name():
     """Get the current cluster name."""
     result = subprocess.run(
-        ['row', 'show', 'cluster', '--name'], capture_output=True, check=True, text=True
+        ['row', 'show', 'cluster', '--short'], capture_output=True, check=True, text=True
     )
     return result.stdout.strip()
@@ -103,7 +103,6 @@ def write_workflow(cls, entrypoint, path=None, default=None, account=None):
             }
         }
         if account is not None:
-            print(account)
             workflow['default']['action'].update(
                 {'submit_options': {_get_cluster_name(): {'account': account}}}
             )
@@ -154,7 +153,6 @@ def main(cls, init=None, init_args=None, **kwargs):
         action_parser.add_argument('directories', nargs='+')

         args = parser.parse_args()
-        print(args)

         if args.command == 'init':
             if init is not None:

From 6f746631bcbe4b5edc3a3ef6be5766cc08dee524 Mon Sep 17 00:00:00 2001
From: "Joshua A. Anderson"
Date: Mon, 26 Aug 2024 08:21:58 -0500
Subject: [PATCH 34/34] pre-commit

---
 hoomd_validation/workflow.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/hoomd_validation/workflow.py b/hoomd_validation/workflow.py
index ece97db6..83605d8d 100644
--- a/hoomd_validation/workflow.py
+++ b/hoomd_validation/workflow.py
@@ -22,7 +22,10 @@
 def _get_cluster_name():
     """Get the current cluster name."""
     result = subprocess.run(
-        ['row', 'show', 'cluster', '--short'], capture_output=True, check=True, text=True
+        ['row', 'show', 'cluster', '--short'],
+        capture_output=True,
+        check=True,
+        text=True,
     )
     return result.stdout.strip()
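
Appendix (a sketch, not part of any patch above): the dispatch path that this
series implements. The final call into the registered callable is inferred
from `add_action()`; it is not quoted from `workflow.py`.

```python
# Sketch only: how `python -u project.py action $ACTION_NAME {directories}`
# (the command written into workflow.toml) is handled by the `action` branch
# of ValidationWorkflow.main(). The dispatch call on the last line is an
# inference, not code copied from workflow.py.
import signac


def dispatch(action_name, directories, actions):
    """Open the signac jobs named on the command line and run the action."""
    project = signac.get_project()
    jobs = [project.open_job(id=directory) for directory in directories]
    # `actions` stands in for the registry that ValidationWorkflow.add_action
    # fills (cls._actions[name] = action).
    actions[action_name](*jobs)
```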