Revision 3657e4066889b03de1ae808db02c049a13ccca7d authored by Alan V Di Vittorio on 08 December 2023, 23:14:38 UTC, committed by Alan V Di Vittorio on 08 December 2023, 23:14:38 UTC
The model successfully runs to completion for ZLND with functional CO2
downscaling. The linearly downscaled CO2 data have been verified
within the GCAM repo code, but not yet in the Fortran ehc code
(giac repo). Convergence downscaling has not been verified. Restarts
work properly. The gcam namelist has been updated to add several
options for convergence downscaling, and the name of the gcam CO2
passing namelist item has been corrected. A shell script has been
added to gcam/tools that generates the new input base CO2 gridded
files. The Fortran floating point exception checks are still off
because the model still fails due to untrapped GCAM NaNs when they
are enabled (which usually is only when DEBUG=TRUE). This allows
DEBUG to be set to TRUE and the model to run. The boost library
configuration for ehc/gcam now points to the files in the gcam repo
for portability. The xerces library configuration has been removed
as it is no longer needed by GCAM. The three repo branches (e3sm,
giac, gcam) at this commit work together (at least for ZLND), and
these branches are set in the .gitmodules files.
1 parent 2cf7280
config_machines.xml
<?xml version="1.0"?>

<config_machines version="2.0">

  <!--

   ===============================================================
   COMPILER and COMPILERS
   ===============================================================
   If a machine supports multiple compilers - then
    - the settings for COMPILERS should reflect the supported compilers
      as a comma separated string
    - the setting for COMPILER should be the default compiler
      (which is one of the values in COMPILERS)
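
   For example (an illustrative sketch of the two settings described above,
   not copied from any machine below), a machine supporting gnu and intel
   with gnu as the default would use:

     <COMPILERS>gnu,intel</COMPILERS>
     <COMPILER>gnu</COMPILER>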

   ===============================================================
   MPILIB and MPILIBS
   ===============================================================
   If a machine supports only one MPILIB - then
   the settings for MPILIB and MPILIBS should be blank ("")
   If a machine supports multiple mpi libraries (e.g. mpich and openmpi)
    - the settings for MPILIBS should reflect the supported mpi libraries
      as a comma separated string
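
   For example, a machine that supports both openmpi and mpich (as the "mac"
   entry below does) lists:

     <MPILIBS>openmpi,mpich</MPILIBS>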

   The default settings for COMPILERS and MPILIBS are blank (in config_machines.xml)

   Normally variable substitutions are not made until the case scripts are run; however, variables
   of the form $ENV{VARIABLE_NAME} are substituted in create_newcase from the environment
   variable of the same name if it exists.
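
   For example, several entries below use this to locate per-user scratch
   space at case-creation time:

     <CIME_OUTPUT_ROOT>$ENV{SCRATCH}/e3sm_scratch/cori-haswell</CIME_OUTPUT_ROOT>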

   ===============================================================
   PROJECT_REQUIRED
   ===============================================================
   A machine may need the PROJECT xml variable to be defined either because it is
   used in some paths, or because it is used to give an account number in the job
   submission script. If either of these is the case, then PROJECT_REQUIRED
   should be set to TRUE for the given machine.
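
   For example, the perlmutter entry below requires a project and supplies a
   default one:

     <PROJECT>e3sm_g</PROJECT>
     <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>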


   walltimes:
   Denotes the walltimes that can be used for a particular machine.
   walltime: as before, if default="true" is defined, this walltime will be used
   by default.
   Alternatively, ccsm_estcost must be used to choose the queue based on the estimated cost of the run.
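
   An illustrative sketch only (none of the machines in this file define
   walltimes here; the element names below are assumed from the description
   above):

     <walltimes>
       <walltime default="true">01:00:00</walltime>
     </walltimes>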

   mpirun: the mpirun command that will be used to actually launch the model.
   The attributes used to choose the mpirun command are:

   mpilib: can be 'default', the name of an mpi library, or a compiler name, so one can choose the mpirun
           command based on the mpi library in use.

     the 'executable' tag must have arguments required for the chosen mpirun, as well as the executable name.

   unit_testing: can be 'true' or 'false'.
     This allows using a different mpirun command to launch unit tests
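
   For example, a trimmed-down srun launcher in the style of the entries
   below (long-form srun options are omitted so this comment stays
   well-formed XML):

     <mpirun mpilib="default">
       <executable>srun</executable>
       <arguments>
         <arg name="num_tasks">-n {{ total_tasks }} -N {{ num_nodes }}</arg>
       </arguments>
     </mpirun>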

  -->



  <machine MACH="miller">
    <DESC>ORNL AF cluster: 800 nodes, each with two 64-core AMD EPYC sockets</DESC>
    <OS>CNL</OS>
    <COMPILERS>gnu,cray,intel</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <PROJECT>nwp501</PROJECT>
    <SAVE_TIMING_DIR>/lustre/storm/nwp501/proj-shared/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>e3sm,nwp501</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/lustre/storm/nwp501/proj-shared/e3sm/e3sm_scratch/</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lustre/storm/nwp501/proj-shared/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/storm/nwp501/proj-shared/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lustre/storm/nwp501/proj-shared/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lustre/storm/nwp501/proj-shared/e3sm/tools/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>miller_slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>128</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>128</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks">-n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit </arg>
        <arg name="thread_count">-c $SHELL{echo 128/ {{ tasks_per_node }} |bc}</arg>
        <arg name="binding"> $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/cray/pe/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/cray/pe/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/cray/pe/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/cray/pe/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/cray/pe/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/cray/pe/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="unload">craype</command>
        <command name="unload">cray-mpich</command>
        <command name="unload">cray-parallel-netcdf</command>
        <command name="unload">cray-hdf5-parallel</command>
        <command name="unload">cray-hdf5</command>
        <command name="unload">cray-netcdf</command>
        <command name="unload">cray-netcdf-hdf5parallel</command>
        <command name="load">craype/2.7.12</command>
      </modules>
      <modules compiler="intel">
        <command name="rm">PrgEnv-gnu</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="load">PrgEnv-intel/8.1.0</command>
        <command name="swap">intel/19.1.0.166</command>
      </modules>
      <modules compiler="gnu">
        <command name="unload">PrgEnv-cray</command>
        <command name="load">PrgEnv-gnu/8.1.0</command>
        <command name="swap">gcc/9.3.0</command>
      </modules>
      <modules compiler="cray">
        <command name="unload">PrgEnv-intel</command>
        <command name="unload">PrgEnv-gnu</command>
        <command name="load">gcc/9.3.0</command>
        <command name="load">PrgEnv-cray/8.1.0</command>
        <command name="rm">darshan</command>
      </modules>
      <modules compiler="!intel">
        <command name="swap">cray-libsci/21.06.1.1</command>
      </modules>
      <modules>
        <command name="load">cray-mpich/8.1.11</command>
        <command name="load">cray-hdf5-parallel/1.12.0.7</command>
        <command name="load">cray-netcdf-hdf5parallel/4.7.4.7</command>
        <command name="load">cray-parallel-netcdf/1.12.1.7</command>
      </modules>
    </module_system>

    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="PERL5LIB">/usr/lib/perl5/5.26.1</env>
      <env name="NETCDF_C_PATH">opt/cray/pe/netcdf-hdf5parallel/4.7.4.4/gnu/9.1/</env>
      <env name="NETCDF_FORTRAN_PATH">opt/cray/pe/netcdf-hdf5parallel/4.7.4.4/gnu/9.1/</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel" MAX_TASKS_PER_NODE="!128">
      <env name="KMP_AFFINITY">granularity=core,balanced</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel" MAX_TASKS_PER_NODE="128">
      <env name="KMP_AFFINITY">granularity=thread,balanced</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="gnu">
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>


  <machine MACH="perlmutter">
    <DESC>Perlmutter at NERSC. Phase 1 only: each GPU node has a single 64-core AMD EPYC 7713 (Milan) and 4 NVIDIA A100s.</DESC>
    <NODENAME_REGEX>$ENV{NERSC_HOST}:perlmutter</NODENAME_REGEX>
    <OS>Linux</OS>
    <COMPILERS>gnu,gnugpu,nvidia,nvidiagpu</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <PROJECT>e3sm_g</PROJECT>
    <SAVE_TIMING_DIR>/global/cfs/cdirs/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>e3sm,m3411,m3412</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>$ENV{PSCRATCH}/e3sm_scratch/perlmutter</CIME_OUTPUT_ROOT>
    <CIME_HTML_ROOT>/global/cfs/cdirs/e3sm/www/$ENV{USER}</CIME_HTML_ROOT>
    <CIME_URL_ROOT>http://portal.nersc.gov/project/e3sm/$ENV{USER}</CIME_URL_ROOT>
    <DIN_LOC_ROOT>/global/cfs/cdirs/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/cfs/cdirs/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/global/cfs/cdirs/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/global/cfs/cdirs/e3sm/tools/cprnc.cori/cprnc</CCSM_CPRNC>
    <GMAKE_J>10</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>nersc_slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>256</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="gnugpu">128</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="nvidiagpu">128</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="gnugpu">4</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="nvidiagpu">4</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="label"> --label</arg>
	<arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }}</arg>
	<arg name="thread_count">-c $SHELL{echo 128/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}</arg>
	<arg name="binding"> $SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
	<arg name="placement"> -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}</arg>
    </arguments>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="perl">/usr/share/lmod/8.3.1/init/perl</init_path>
      <init_path lang="python">/usr/share/lmod/8.3.1/init/python</init_path>
      <init_path lang="sh">/usr/share/lmod/8.3.1/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/8.3.1/init/csh</init_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>

      <modules>
	<command name="unload">cray-hdf5-parallel</command>
	<command name="unload">cray-netcdf-hdf5parallel</command>
	<command name="unload">cray-parallel-netcdf</command>
	<command name="unload">PrgEnv-gnu</command>
	<command name="unload">PrgEnv-nvidia</command>
	<command name="unload">cudatoolkit</command>
	<command name="unload">craype-accel-nvidia80</command>
	<command name="unload">craype-accel-host</command>
	<command name="unload">perftools-base</command>
	<command name="unload">perftools</command>
	<command name="unload">darshan</command>
      </modules>

      <modules compiler="gnu.*">
	<command name="load">PrgEnv-gnu/8.2.0</command>
	<command name="load">gcc/10.3.0</command>
      </modules>

      <modules compiler="nvidia.*">
	<command name="load">PrgEnv-nvidia</command>
	<command name="load">nvidia/21.9</command>
      </modules>

      <modules compiler="gnugpu">
	<command name="load">cudatoolkit</command>
	<command name="load">craype-accel-nvidia80</command>
      </modules>

      <modules compiler="nvidiagpu">
	<command name="load">cudatoolkit</command>
	<command name="load">craype-accel-nvidia80</command>
      </modules>

      <modules compiler="gnu">
	<command name="load">craype-accel-host</command>
      </modules>

      <modules compiler="nvidia">
	<command name="load">craype-accel-host</command>
      </modules>

      <modules>
	<command name="load">cray-libsci</command>
	<command name="load">craype</command>
	<command name="load">cray-mpich/8.1.13</command>
	<command name="load">cray-hdf5-parallel/1.12.1.1</command>
	<command name="load">cray-netcdf-hdf5parallel/4.8.1.1</command>
	<command name="load">cray-parallel-netcdf/1.12.2.1</command>
	<command name="load">cmake/3.22.0</command>
      </modules>
    </module_system>

    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>

    <environment_variables>
      <env name="MPICH_ENV_DISPLAY">1</env>
      <env name="MPICH_VERSION_DISPLAY">1</env>
      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
      <env name="HDF5_USE_FILE_LOCKING">FALSE</env>
      <env name="PERL5LIB">/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch</env>
    </environment_variables>
    <environment_variables compiler="gnugpu">
      <env name="MPICH_GPU_SUPPORT_ENABLED">1</env>
    </environment_variables>
    <environment_variables compiler="nvidiagpu">
      <env name="MPICH_GPU_SUPPORT_ENABLED">1</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
  </machine>

  <machine MACH="spock">
    <DESC>Spock. NCCS moderate-security system with hardware and software similar to the upcoming Frontier system at ORNL.</DESC>
    <NODENAME_REGEX>.*spock.*</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>gnu,cray</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <PROJECT>cli133</PROJECT>
    <CIME_OUTPUT_ROOT>/gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch/spock</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/gpfs/alpine/cli115/world-shared/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/gpfs/alpine/cli115/world-shared/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <CCSM_CPRNC>/gpfs/alpine/cli133/world-shared/grnydawn/e3sm/tools/cprnc_spock/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <NTEST_PARALLEL_JOBS>1</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>

    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="label"> --label</arg>
        <arg name="num_tasks"> -l -K -n {{ total_tasks }} -N {{ num_nodes }} </arg>
        <arg name="binding">--cpu_bind=cores</arg>
        <arg name="thread_count">-c $ENV{OMP_NUM_THREADS}</arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>

    <module_system type="module" allow_error="true">
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>
      <init_path lang="perl">/usr/share/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/usr/share/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>

      <modules>
        <command name="purge"/>
        <command name="load">DefApps</command>
        <command name="load">cray-python/3.8.5.1</command>
        <command name="load">subversion/1.14.0</command>
        <command name="load">git/2.31.1</command>
        <command name="load">cmake/3.20.2</command>
        <command name="load">zlib/1.2.11</command>
        <command name="load">cray-libsci/21.06.1.1</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">PrgEnv-gnu/8.0.0</command>
        <command name="load">cray-mpich/8.1.7</command>
        <command name="load">cray-hdf5-parallel/1.12.0.6</command>
        <command name="load">cray-netcdf-hdf5parallel/4.7.4.6</command>
        <command name="load">cray-parallel-netcdf/1.12.1.5</command>
      </modules>
      <modules compiler="cray">
        <command name="load">PrgEnv-cray/8.0.0</command>
        <command name="load">cray-mpich/8.1.7</command>
        <command name="load">cray-hdf5-parallel/1.12.0.6</command>
        <command name="load">cray-netcdf-hdf5parallel/4.7.4.6</command>
        <command name="load">cray-parallel-netcdf/1.12.1.5</command>
      </modules>

    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>
      <env name="NETCDF_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="crusher">
    <DESC>Crusher. NCCS moderate-security system with hardware and software similar to the upcoming Frontier system at ORNL. 192 AMD EPYC 7A53 64C nodes, 128 hwthreads, 512GB DDR4, 4 MI250X GPUs</DESC>
    <NODENAME_REGEX>.*crusher.*</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>gnu,crayclang,amdclang</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <PROJECT>cli133</PROJECT>
    <CIME_OUTPUT_ROOT>/gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch/crusher</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/gpfs/alpine/cli115/world-shared/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/gpfs/alpine/cli115/world-shared/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <CCSM_CPRNC>/gpfs/alpine/cli133/world-shared/e3sm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <NTEST_PARALLEL_JOBS>1</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>

    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -l -K -n {{ total_tasks }} -N {{ num_nodes }} </arg>
        <arg name="binding">--cpu_bind=cores</arg>
        <arg name="thread_count">-c $ENV{OMP_NUM_THREADS}</arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>

    <module_system type="module" allow_error="true">
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>
      <init_path lang="perl">/usr/share/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/usr/share/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>

      <modules compiler="amdclang">
        <command name="reset"></command>
        <command name="switch">PrgEnv-cray PrgEnv-amd/8.2.0</command>
      </modules>


      <modules compiler="crayclang">
        <command name="reset"></command>
        <command name="switch">PrgEnv-cray PrgEnv-cray/8.2.0</command>
        <command name="switch">cce cce/13.0.1</command>
      </modules>

      <modules compiler="gnu">
        <command name="reset"></command>
        <command name="switch">PrgEnv-cray PrgEnv-gnu/8.2.0</command>
      </modules>
      <modules>
        <command name="load">cray-mpich/8.1.12</command>
        <command name="load">cray-python/3.9.4.2</command>
        <command name="load">subversion/1.14.0</command>
        <command name="load">git/2.31.1</command>
        <command name="load">cmake/3.21.3</command>
        <command name="load">zlib/1.2.11</command>
        <command name="load">cray-libsci/21.08.1.2</command>
        <command name="load">cray-hdf5-parallel/1.12.0.7</command>
        <command name="load">cray-netcdf-hdf5parallel/4.7.4.7</command>
        <command name="load">cray-parallel-netcdf/1.12.1.7</command>
      </modules>

    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>
      <env name="NETCDF_PATH">$ENV{NETCDF_DIR}</env>
      <env name="PNETCDF_PATH">$ENV{PNETCDF_DIR}</env>
    </environment_variables>

    <environment_variables compiler="amdclang">
      <env name="LD_LIBRARY_PATH">$ENV{CRAY_LIBSCI_DIR}/amd/4.0/x86_64/lib:$ENV{LD_LIBRARY_PATH}</env>
    </environment_variables>

    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="cori-haswell">
    <DESC>Cori. XC40 Cray system at NERSC. Haswell partition. OS is CNL, 32 pes/node, batch system is SLURM</DESC>
    <NODENAME_REGEX>cori-knl-is-default</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <PROJECT>e3sm</PROJECT>
    <SAVE_TIMING_DIR>/global/cfs/cdirs/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>e3sm,m3411,m3412</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}/e3sm_scratch/cori-haswell</CIME_OUTPUT_ROOT>
    <CIME_HTML_ROOT>/global/cfs/cdirs/e3sm/www/$ENV{USER}</CIME_HTML_ROOT>
    <CIME_URL_ROOT>http://portal.nersc.gov/project/e3sm/$ENV{USER}</CIME_URL_ROOT>
    <DIN_LOC_ROOT>/global/cfs/cdirs/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/cfs/cdirs/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/global/cfs/cdirs/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/global/cfs/cdirs/e3sm/tools/cprnc.cori/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>nersc_slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="label"> --label</arg>
        <arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }}</arg>
        <arg name="thread_count">-c $SHELL{echo 64/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}</arg>
        <arg name="binding"> $SHELL{if [ 32 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
        <arg name="placement"> -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}</arg>
    </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl</init_path>
      <init_path lang="python">/opt/modules/default/init/python</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>

      <modules>
        <command name="rm">PrgEnv-intel</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="rm">PrgEnv-gnu</command>
        <command name="rm">intel</command>
        <command name="rm">cce</command>
        <command name="rm">gcc</command>
        <command name="rm">cray-parallel-netcdf</command>
        <command name="rm">cray-hdf5-parallel</command>
        <command name="rm">pmi</command>
        <command name="rm">cray-libsci</command>
        <command name="rm">cray-mpich2</command>
        <command name="rm">cray-mpich</command>
        <command name="rm">cray-netcdf</command>
        <command name="rm">cray-hdf5</command>
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="rm">craype-sandybridge</command>
        <command name="rm">craype-ivybridge</command>
        <command name="rm">craype</command>
        <command name="rm">papi</command>
        <command name="rm">cmake</command>
        <command name="rm">cray-petsc</command>
        <command name="rm">esmf</command>
        <command name="rm">zlib</command>
        <command name="rm">craype-hugepages2M</command>
        <command name="rm">darshan</command>
        
        <!-- first load basic defaults, then remove/swap/load as necessary -->
        <command name="load">craype</command>
        <command name="load">PrgEnv-intel</command>
        <command name="load">cray-mpich</command>
        <command name="rm">craype-mic-knl</command>
        <command name="load">craype-haswell</command>
      </modules>

      <modules mpilib="mpt">
        <command name="swap">cray-mpich cray-mpich/7.7.10</command>
      </modules>

      <modules compiler="intel">
        <command name="load">PrgEnv-intel/6.0.10</command>
        <command name="rm">intel</command>
        <command name="load">intel/19.0.3.199</command>
      </modules>

      <modules compiler="gnu">
        <command name="swap">PrgEnv-intel PrgEnv-gnu/6.0.10</command>
        <command name="rm">gcc</command>
        <command name="load">gcc/10.3.0</command>
        <command name="rm">cray-libsci</command>
        <command name="load">cray-libsci/20.09.1</command>
      </modules>

      <modules>
        <command name="swap">craype craype/2.6.2</command>
        <command name="rm">pmi</command>
        <command name="load">pmi/5.0.14</command>
        <command name="rm">craype-mic-knl</command>
        <command name="load">craype-haswell</command>
      </modules>

      <modules mpilib="mpi-serial">
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="rm">cray-hdf5-parallel</command>
        <command name="rm">cray-parallel-netcdf</command>
        <command name="load">cray-netcdf/4.6.3.2</command>
        <command name="load">cray-hdf5/1.10.5.2</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="load">cray-netcdf-hdf5parallel/4.6.3.2</command>
        <command name="load">cray-hdf5-parallel/1.10.5.2</command>
        <command name="load">cray-parallel-netcdf/1.11.1.1</command>
      </modules>

      <modules>
        <command name="rm">cmake</command>
        <command name="load">cmake</command>
        <command name="load">perl5-extras</command>
      </modules>
    </module_system>

    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>

      <env name="MPICH_ENV_DISPLAY">1</env>
      <env name="MPICH_VERSION_DISPLAY">1</env>
      <!--env name="MPICH_CPUMASK_DISPLAY">1</env-->

      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
      <env name="HDF5_USE_FILE_LOCKING">FALSE</env>
      <env name="CRAYPE_LINK_TYPE">static</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="FORT_BUFFERED">yes</env>
    </environment_variables>
  </machine>

  <!-- KNL nodes of Cori -->
  <machine MACH="cori-knl">
    <DESC>Cori. XC40 Cray system at NERSC. KNL partition. OS is CNL, 68 pes/node (for now only use 64), batch system is SLURM</DESC>
    <NODENAME_REGEX>cori</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>mpt,impi</MPILIBS>
    <PROJECT>e3sm</PROJECT>
    <SAVE_TIMING_DIR>/global/cfs/cdirs/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>e3sm,m3411,m3412</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}/e3sm_scratch/cori-knl</CIME_OUTPUT_ROOT>
    <CIME_HTML_ROOT>/global/cfs/cdirs/e3sm/www/$ENV{USER}</CIME_HTML_ROOT>
    <CIME_URL_ROOT>http://portal.nersc.gov/project/e3sm/$ENV{USER}</CIME_URL_ROOT>
    <DIN_LOC_ROOT>/global/cfs/cdirs/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/cfs/cdirs/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/global/cfs/cdirs/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/global/cfs/cdirs/e3sm/tools/cprnc.cori/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>nersc_slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>128</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="label"> --label</arg>
        <arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }}</arg>
        <arg name="thread_count">-c $SHELL{mpn=`./xmlquery --value MAX_MPITASKS_PER_NODE`; if [ 68 -ge $mpn ]; then c0=`expr 272 / $mpn`; c1=`expr $c0 / 4`; cflag=`expr $c1 \* 4`; echo $cflag|bc ; else echo 272/$mpn|bc;fi;} </arg>
        <arg name="binding"> $SHELL{if [ 68 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
        <arg name="placement"> -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}</arg>
    </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl</init_path>
      <init_path lang="python">/opt/modules/default/init/python</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="rm">craype</command>
        <command name="rm">craype-mic-knl</command>
        <command name="rm">craype-haswell</command>
        <command name="rm">PrgEnv-intel</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="rm">PrgEnv-gnu</command>
        <command name="rm">intel</command>
        <command name="rm">cce</command>
        <command name="rm">gcc</command>
        <command name="rm">cray-parallel-netcdf</command>
        <command name="rm">cray-hdf5-parallel</command>
        <command name="rm">pmi</command>
        <command name="rm">cray-mpich2</command>
        <command name="rm">cray-mpich</command>
        <command name="rm">cray-netcdf</command>
        <command name="rm">cray-hdf5</command>
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="rm">cray-libsci</command>
        <command name="rm">papi</command>
        <command name="rm">cmake</command>
        <command name="rm">cray-petsc</command>
        <command name="rm">esmf</command>
        <command name="rm">zlib</command>
        <command name="rm">craype-hugepages2M</command>
        <command name="rm">darshan</command>
        
        <!-- first load basic defaults, then remove/swap/load as necessary -->
        <command name="load">craype</command>
        <command name="load">PrgEnv-intel</command>
        <command name="load">cray-mpich</command>
        <command name="rm">craype-haswell</command>
        <command name="load">craype-mic-knl</command>
      </modules>

      <modules mpilib="mpt">
        <command name="swap">cray-mpich cray-mpich/7.7.10</command>
      </modules>

      <modules mpilib="impi">
        <command name="swap">cray-mpich impi/2020.up4</command>
      </modules>

      <modules compiler="intel">
        <command name="load">PrgEnv-intel/6.0.10</command>
        <command name="rm">intel</command>
        <command name="load">intel/19.0.3.199</command>
      </modules>

      <modules compiler="gnu">
        <command name="swap">PrgEnv-intel PrgEnv-gnu/6.0.10</command>
        <command name="rm">gcc</command>
        <command name="load">gcc/10.3.0</command>
        <command name="rm">cray-libsci</command>
        <command name="load">cray-libsci/20.09.1</command>
      </modules>

      <modules>
        <command name="swap">craype craype/2.6.2</command>
        <command name="rm">pmi</command>
        <command name="load">pmi/5.0.14</command>
        <command name="rm">craype-haswell</command>
        <command name="load">craype-mic-knl</command>
      </modules>

      <modules mpilib="mpi-serial">
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="rm">cray-hdf5-parallel</command>
        <command name="rm">cray-parallel-netcdf</command>
        <command name="load">cray-netcdf/4.6.3.2</command>
        <command name="load">cray-hdf5/1.10.5.2</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="load">cray-netcdf-hdf5parallel/4.6.3.2</command>
        <command name="load">cray-hdf5-parallel/1.10.5.2</command>
        <command name="load">cray-parallel-netcdf/1.11.1.1</command>
      </modules>

      <modules>
        <command name="rm">cmake</command>
        <command name="load">cmake</command>
        <command name="load">perl5-extras</command>
      </modules>

      <!--command name="list">&gt;&amp; ml.txt</command-->

    </module_system>

    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>
      <env name="MPICH_ENV_DISPLAY">1</env>
      <env name="MPICH_VERSION_DISPLAY">1</env>
      <!--env name="MPICH_CPUMASK_DISPLAY">1</env-->

      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
      <env name="HDF5_USE_FILE_LOCKING">FALSE</env>
      <env name="CRAYPE_LINK_TYPE">static</env>
    </environment_variables>

    <environment_variables mpilib="mpt">
      <env name="MPICH_GNI_DYNAMIC_CONN">disabled</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="MPICH_MEMORY_REPORT">1</env>
    </environment_variables>
  </machine>

  <!-- Skylake nodes of Stampede2 at TACC -->
  <machine MACH="stampede2">
    <DESC>Stampede2. Intel Skylake nodes at TACC. 48 cores per node, batch system is SLURM</DESC>
    <NODENAME_REGEX>.*stampede2.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>impi</MPILIBS>
    <SAVE_TIMING_DIR>$ENV{SCRATCH}</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>acme</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}/acme_scratch/stampede2</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{SCRATCH}/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{SCRATCH}/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{SCRATCH}/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$ENV{SCRATCH}/tools/cprnc.cori/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>96</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>ibrun</executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/apps/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/opt/apps/lmod/lmod/init/python</init_path>
      <init_path lang="sh">/opt/apps/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/opt/apps/lmod/lmod/init/csh</init_path>
      <cmd_path lang="perl">/opt/apps/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/opt/apps/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module -q</cmd_path>
      <cmd_path lang="csh">module -q</cmd_path>

      <modules>
        <command name="purge"/>
      </modules>

      <modules compiler="intel">
        <command name="load">intel/18.0.0</command>
      </modules>

      <modules compiler="gnu">
        <command name="load">gcc/6.3.0</command>
      </modules>

      <modules mpilib="impi">
        <command name="load">impi/18.0.0</command>
      </modules>

      <modules mpilib="mpi-serial">
        <command name="load">hdf5/1.8.16</command>
        <command name="load">netcdf/4.3.3.1</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">phdf5/1.8.16</command>
        <command name="load">parallel-netcdf/4.3.3.1</command>
        <command name="load">pnetcdf/1.8.1</command>
      </modules>
      <modules>
        <command name="load">git</command>
        <command name="load">cmake</command>
        <command name="load">autotools</command>
        <command name="load">xalt</command>
        <!--command name="load">TACC</command-->
        <!--command name="load">python/2.7.13</command-->
      </modules>

    </module_system>

    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>
      <env name="MPICH_ENV_DISPLAY">1</env>
      <env name="MPICH_VERSION_DISPLAY">1</env>

      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
      <env name="I_MPI_PIN">1</env>
      <env name="MY_MPIRUN_OPTIONS">-l</env>
    </environment_variables>
  </machine>

  <machine MACH="mac">
    <DESC>Mac OS/X workstation or laptop</DESC>
    <NODENAME_REGEX/>
    <OS>Darwin</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi,mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/projects/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/projects/acme/cesm-inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/projects/acme/ptclm-data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/projects/acme/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/projects/acme/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CCSMROOT/tools/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jnjohnson at lbl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>4</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>2</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments/>
    </mpirun>
    <module_system type="none"/>
    <RUNDIR>$ENV{HOME}/projects/acme/scratch/$CASE/run</RUNDIR>
    <EXEROOT>$ENV{HOME}/projects/acme/scratch/$CASE/bld</EXEROOT>
    <!-- cmake -DCMAKE_Fortran_COMPILER=/opt/local/bin/mpif90-mpich-gcc48 -DHDF5_DIR=/opt/local -DNetcdf_INCLUDE_DIR=/opt/local/include .. -->
    <!--    <GMAKE>make</GMAKE> <- this doesn't actually work! -->
  </machine>

  <machine MACH="linux-generic">
    <DESC>Linux workstation or laptop</DESC>
    <NODENAME_REGEX>none</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi,mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/projects/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/projects/acme/cesm-inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/projects/acme/ptclm-data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/projects/acme/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/projects/acme/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CCSMROOT/tools/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jayesh at mcs dot anl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>4</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>2</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
    <RUNDIR>$ENV{HOME}/projects/acme/scratch/$CASE/run</RUNDIR>
    <EXEROOT>$ENV{HOME}/projects/acme/scratch/$CASE/bld</EXEROOT>
    <!-- cmake -DCMAKE_Fortran_COMPILER=/opt/local/bin/mpif90-mpich-gcc48 -DHDF5_DIR=/opt/local -DNetcdf_INCLUDE_DIR=/opt/local/include .. -->
    <!--    <GMAKE>make</GMAKE> <- this doesn't actually work! -->
  </machine>

  <machine MACH="singularity">
    <DESC>Singularity container</DESC>
    <NODENAME_REGEX>singularity</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/projects/e3sm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/projects/e3sm/cesm-inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/projects/e3sm/ptclm-data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/projects/e3sm/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/projects/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CCSMROOT/tools/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>4</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>lukasz at uchicago dot edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -launcher fork -hosts localhost -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
    <RUNDIR>$ENV{HOME}/projects/e3sm/scratch/$CASE/run</RUNDIR>
    <EXEROOT>$ENV{HOME}/projects/e3sm/scratch/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="E3SM_SRCROOT">$SRCROOT</env>
    </environment_variables>
    <environment_variables mpilib="mpi-serial">
      <env name="NETCDF_PATH">/usr/local/packages/netcdf-serial</env>
      <env name="PATH">/usr/local/packages/cmake/bin:/usr/local/packages/hdf5-serial/bin:/usr/local/packages/netcdf-serial/bin:$ENV{PATH}</env>
      <env name="LD_LIBRARY_PATH">/usr/local/packages/szip/lib:/usr/local/packages/hdf5-serial/lib:/usr/local/packages/netcdf-serial/lib</env>
    </environment_variables>
    <environment_variables mpilib="!mpi-serial">
      <env name="NETCDF_PATH">/usr/local/packages/netcdf-parallel</env>
      <env name="PNETCDF_PATH">/usr/local/packages/pnetcdf</env>
      <env name="HDF5_PATH">/usr/local/packages/hdf5-parallel</env>
      <env name="PATH">/usr/local/packages/cmake/bin:/usr/local/packages/mpich/bin:/usr/local/packages/hdf5-parallel/bin:/usr/local/packages/netcdf-parallel/bin:/usr/local/packages/pnetcdf/bin:$ENV{PATH}</env>
      <env name="LD_LIBRARY_PATH">/usr/local/packages/mpich/lib:/usr/local/packages/szip/lib:/usr/local/packages/hdf5-parallel/lib:/usr/local/packages/netcdf-parallel/lib:/usr/local/packages/pnetcdf/lib</env>
    </environment_variables>
  </machine>

  <machine MACH="melvin">
    <DESC>Linux workstation for Jenkins testing</DESC>
    <NODENAME_REGEX>(melvin|watson|s999964|climate|penn|sems)</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>proxy.sandia.gov:80</PROXY>
    <COMPILERS>gnu,intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/sems-data-store/ACME/timings</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/sems-data-store/ACME/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/sems-data-store/ACME/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/sems-data-store/ACME/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/sems-data-store/ACME/cprnc/build.new/cprnc</CCSM_CPRNC>
    <GMAKE_J>32</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread:overload-allowed</arg>
      </arguments>
    </mpirun>
    <module_system type="module" allow_error="false">
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">sems-env</command>
        <command name="load">acme-env</command>
        <command name="load">sems-git</command>
        <command name="load">acme-binutils</command>
        <command name="load">sems-python/3.5.2</command>
        <command name="load">sems-cmake/3.12.2</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">sems-gcc/7.3.0</command>
      </modules>
      <modules compiler="intel">
        <command name="load">sems-intel/16.0.3</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">sems-netcdf/4.4.1/exo</command>
        <command name="load">acme-pfunit/3.2.8/base</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">acme-openmpi/2.1.5</command>
        <command name="load">acme-netcdf/4.7.4/acme</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <!--    <GMAKE>make</GMAKE> <- this doesn't actually work! -->
    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="mappy">
    <DESC>Huge Linux workstation for Sandia climate scientists</DESC>
    <NODENAME_REGEX>mappy</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>proxy.sandia.gov:80</PROXY>
    <COMPILERS>gnu,intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/sems-data-store/ACME/mappy/timings</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/sems-data-store/ACME/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/sems-data-store/ACME/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/sems-data-store/ACME/baselines/mappy/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/sems-data-store/ACME/mappy/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>64</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread:overload-allowed</arg>
      </arguments>
    </mpirun>
    <module_system type="module" allow_error="false">
      <init_path lang="python">/projects/sems/install/rhel7-x86_64/sems/v2/lmod/lmod/8.3/gcc/10.1.0/zbzzu7k/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="perl">/projects/sems/install/rhel7-x86_64/sems/v2/lmod/lmod/8.3/gcc/10.1.0/zbzzu7k/lmod/lmod/init/perl</init_path>
      <init_path lang="sh">/projects/sems/install/rhel7-x86_64/sems/v2/lmod/lmod/8.3/gcc/10.1.0/zbzzu7k/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/projects/sems/install/rhel7-x86_64/sems/v2/lmod/lmod/8.3/gcc/10.1.0/zbzzu7k/lmod/lmod/init/csh</init_path>
      <cmd_path lang="python">/projects/sems/install/rhel7-x86_64/sems/v2/lmod/lmod/8.3/gcc/10.1.0/zbzzu7k/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">/projects/sems/install/rhel7-x86_64/sems/v2/lmod/lmod/8.3/gcc/10.1.0/zbzzu7k/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">sems-archive-env</command>
        <command name="load">acme-env</command>
        <command name="load">sems-archive-git</command>
        <command name="load">sems-archive-cmake/3.19.1</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">acme-gcc/8.1.0</command>
      </modules>
      <modules compiler="intel">
        <command name="load">sems-archive-intel/19.0.5</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">acme-netcdf/4.4.1/exo_acme</command>
        <command name="load">acme-pfunit/3.2.8/base</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">acme-openmpi/2.1.5</command>
        <command name="load">acme-netcdf/4.7.4/acme</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>0</MAX_GB_OLD_TEST_DATA>
    <!--    <GMAKE>make</GMAKE> <- this doesn't actually work! -->
    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="snl-white">
    <DESC>IBM Power 8 Testbed machine</DESC>
    <NODENAME_REGEX>white</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/projects/e3sm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/projects/e3sm/cesm-inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/projects/e3sm/ptclm-data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/projects/e3sm/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/projects/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CCSMROOT/tools/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE_J>32</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY>mdeakin at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>4</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>1</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments/>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <modules>
        <command name="load">devpack/20181011/openmpi/2.1.2/gcc/7.2.0/cuda/9.2.88</command>
      </modules>
    </module_system>
    <RUNDIR>$ENV{HOME}/projects/e3sm/scratch/$CASE/run</RUNDIR>
    <EXEROOT>$ENV{HOME}/projects/e3sm/scratch/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="NETCDF_C_PATH">$ENV{NETCDF_ROOT}</env>
      <env name="NETCDF_FORTRAN_PATH">/ascldap/users/jgfouca/packages/netcdf-fortran-4.4.4-white</env>
      <env name="E3SM_SRCROOT">$SRCROOT</env>
    </environment_variables>
  </machine>

  <machine MACH="snl-blake">
    <DESC>Skylake Testbed machine</DESC>
    <NODENAME_REGEX>blake</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel18</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/projects/e3sm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/projects/e3sm/cesm-inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/projects/e3sm/ptclm-data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/projects/e3sm/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/projects/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CCSMROOT/tools/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE_J>48</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>mdeakin at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments/>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="python">module</cmd_path>
      <modules>
        <command name="load">zlib/1.2.11</command>
        <command name="load">intel/compilers/18.1.163</command>
        <command name="load">openmpi/2.1.2/intel/18.1.163</command>
        <command name="load">hdf5/1.10.1/openmpi/2.1.2/intel/18.1.163</command>
        <command name="load">netcdf-exo/4.4.1.1/openmpi/2.1.2/intel/18.1.163</command>
      </modules>
    </module_system>
    <RUNDIR>$ENV{HOME}/projects/e3sm/scratch/$CASE/run</RUNDIR>
    <EXEROOT>$ENV{HOME}/projects/e3sm/scratch/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="NETCDF_C_PATH">$ENV{NETCDF_ROOT}</env>
      <env name="NETCDF_FORTRAN_PATH">$ENV{NETCDFF_ROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="anlworkstation">
    <DESC>Linux workstation for ANL</DESC>
    <NODENAME_REGEX>compute.*mcs.anl.gov</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich,openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/home/climate1/acme/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/home/climate1/acme/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/home/climate1/acme/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/home/climate1/acme/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>32</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -l -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
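    <!-- Note on the launcher flags above: with mpich, hydra's -l option labels each line of
         model output with its MPI rank; the openmpi variant passes only the task count. -->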
    <module_system type="soft">
      <init_path lang="csh">/software/common/adm/packages/softenv-1.6.2/etc/softenv-load.csh</init_path>
      <init_path lang="sh">/software/common/adm/packages/softenv-1.6.2/etc/softenv-load.sh</init_path>
      <cmd_path lang="csh">source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.csh ; soft</cmd_path>
      <cmd_path lang="sh">source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.sh ; soft</cmd_path>
      <modules compiler="gnu">
        <command name="add">+gcc-8.2.0</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables mpilib="mpi-serial">
      <env name="PATH">/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}</env>
      <!-- We currently don't have a soft env for serial hdf5 and szip built with gcc 8.2.0 -->
      <env name="LD_LIBRARY_PATH">/soft/apps/packages/climate/hdf5/1.8.16-serial/gcc-8.2.0/lib:/soft/apps/packages/climate/szip/2.1/gcc-8.2.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <!-- We currently don't have a soft env for netcdf serial built with gcc 8.2.0 -->
      <env name="NETCDF_PATH">/soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-serial/gcc-8.2.0</env>
    </environment_variables>
    <environment_variables mpilib="mpich">
      <!-- We currently don't have a soft env for parallel hdf5 and szip built with gcc 8.2.0 -->
      <env name="LD_LIBRARY_PATH">/soft/apps/packages/climate/hdf5/1.8.16-parallel/mpich-3.3.2/gcc-8.2.0/lib:/soft/apps/packages/climate/szip/2.1/gcc-8.2.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <!-- We currently don't have a soft env for mpich 3.3.2 built with gcc 8.2.0 -->
      <env name="PATH">/soft/apps/packages/climate/mpich/3.3.2/gcc-8.2.0/bin:/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}</env>
      <!-- We currently don't have a soft env for parallel hdf5 built with mpich 3.3.2 and gcc 8.2.0 -->
      <env name="HDF5_PATH">/soft/apps/packages/climate/hdf5/1.8.16-parallel/mpich-3.3.2/gcc-8.2.0</env>
      <!-- We currently don't have a soft env for netcdf parallel built with mpich 3.3.2 and gcc 8.2.0 -->
      <env name="NETCDF_PATH">/soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-parallel/mpich-3.3.2/gcc-8.2.0</env>
      <!-- We currently don't have a soft env for pnetcdf built with mpich 3.3.2 and gcc 8.2.0 -->
      <env name="PNETCDF_PATH">/soft/apps/packages/climate/pnetcdf/1.12.0/mpich-3.3.2/gcc-8.2.0</env>
    </environment_variables>
    <environment_variables mpilib="openmpi">
      <!-- We currently don't have a soft env for openmpi 2.1.5, zlib, szip, hdf5, NetCDF and PnetCDF libraries -->
      <env name="PATH">/soft/apps/packages/climate/openmpi/2.1.5/gcc-8.2.0/bin:/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}</env>
      <env name="ZLIB_PATH">/soft/apps/packages/climate/zlib/1.2.11/gcc-8.2.0-static</env>
      <env name="SZIP_PATH">/soft/apps/packages/climate/szip/2.1/gcc-8.2.0-static</env>
      <env name="HDF5_PATH">/soft/apps/packages/climate/hdf5/1.8.12-parallel/openmpi-2.1.5/gcc-8.2.0-static</env>
      <env name="NETCDF_PATH">/soft/apps/packages/climate/netcdf/4.7.4c-4.3.1cxx-4.4.4f-parallel/openmpi-2.1.5/gcc-8.2.0-static-hdf5-1.8.12-pnetcdf-1.12.0</env>
      <env name="PNETCDF_PATH">/soft/apps/packages/climate/pnetcdf/1.12.0/openmpi-2.1.5/gcc-8.2.0</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables>
      <env name="PERL5LIB">/soft/apps/packages/climate/perl5/lib/perl5</env>
    </environment_variables>
  </machine>

  <machine MACH="anlgce-ub18">
    <DESC>ANL CELS General Computing Environment (Linux) workstation (Ubuntu 18.04)</DESC>
    <NODENAME_REGEX>compute-386-01|compute-386-02</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich,openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/scratch/$ENV{USER}/e3sm/timings</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/scratch/$ENV{USER}/e3sm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/nfs/gce/projects/climate/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$DIN_LOC_ROOT/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/nfs/gce/projects/climate/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/nfs/gce/projects/climate/e3sm/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jayesh at mcs dot anl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -l -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> --oversubscribe -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
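    <!-- The openmpi "oversubscribe" option above allows launching more ranks than the
         detected slot count, which keeps small test cases usable on a shared workstation. -->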
    <module_system type="module">
      <init_path lang="python">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.3.0/lmod-7.7.29-zg24dcc/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="perl">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.3.0/lmod-7.7.29-zg24dcc/lmod/lmod/init/perl</init_path>
      <init_path lang="bash">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.3.0/lmod-7.7.29-zg24dcc/lmod/lmod/init/bash</init_path>
      <init_path lang="sh">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.3.0/lmod-7.7.29-zg24dcc/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.3.0/lmod-7.7.29-zg24dcc/lmod/lmod/init/csh</init_path>
      <cmd_path lang="python">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.3.0/lmod-7.7.29-zg24dcc/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">module</cmd_path>
      <cmd_path lang="bash">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">autoconf/2.69-tz6eue5</command>
        <command name="load">automake/1.16.3-fm5m6qc</command>
        <command name="load">libtool/2.4.6-jdxbjft</command>
        <command name="load">m4/1.4.19-wq3bm42</command>
        <command name="load">cmake/3.20.5-yjp2hz6</command>
        <command name="load">gcc/11.1.0-5ikoznk</command>
        <command name="load">zlib/1.2.11-smoyzzo</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables mpilib="mpi-serial">
      <!-- We currently don't have modules for serial NetCDF -->
      <env name="NETCDF_PATH">/nfs/gce/projects/climate/software/netcdf/4.8.0c-4.3.1cxx-4.5.3f-serial/gcc-11.1.0</env>
    </environment_variables>
    <environment_variables mpilib="mpich">
      <!-- We currently don't have modules for HDF5, NetCDF & PnetCDF -->
      <env name="LD_LIBRARY_PATH">/nfs/gce/projects/climate/software/mpich/3.4.2/gcc-11.1.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <env name="PATH">/nfs/gce/projects/climate/software/mpich/3.4.2/gcc-11.1.0/bin:$ENV{PATH}</env>
      <env name="ZLIB_PATH">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.5.0/zlib-1.2.11-smoyzzo</env>
      <env name="HDF5_PATH">/nfs/gce/projects/climate/software/hdf5/1.12.1/mpich-3.4.2/gcc-11.1.0</env>
      <env name="NETCDF_PATH">/nfs/gce/projects/climate/software/netcdf/4.8.0c-4.3.1cxx-4.5.3f-parallel/mpich-3.4.2/gcc-11.1.0</env>
      <env name="PNETCDF_PATH">/nfs/gce/projects/climate/software/pnetcdf/1.12.2/mpich-3.4.2/gcc-11.1.0</env>
    </environment_variables>
    <environment_variables mpilib="openmpi">
      <!-- We currently don't have modules for HDF5, NetCDF & PnetCDF -->
      <env name="LD_LIBRARY_PATH">/nfs/gce/projects/climate/software/openmpi/4.1.3/gcc-11.1.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <env name="PATH">/nfs/gce/projects/climate/software/openmpi/4.1.3/gcc-11.1.0/bin:$ENV{PATH}</env>
      <env name="ZLIB_PATH">/nfs/gce/software/spack/opt/spack/linux-ubuntu18.04-x86_64/gcc-7.5.0/zlib-1.2.11-smoyzzo</env>
      <env name="HDF5_PATH">/nfs/gce/projects/climate/software/hdf5/1.12.1/openmpi-4.1.3/gcc-11.1.0</env>
      <env name="NETCDF_PATH">/nfs/gce/projects/climate/software/netcdf/4.8.0c-4.3.1cxx-4.5.3f-parallel/openmpi-4.1.3/gcc-11.1.0</env>
      <env name="PNETCDF_PATH">/nfs/gce/projects/climate/software/pnetcdf/1.12.2/openmpi-4.1.3/gcc-11.1.0</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables>
      <env name="PERL5LIB">/nfs/gce/projects/climate/software/perl5/lib/perl5</env>
    </environment_variables>
  </machine>

  <machine MACH="anlgce">
    <DESC>ANL CELS General Computing Environment (Linux) workstation</DESC>
    <NODENAME_REGEX>compute-(240|386)-[0-9][0-9]</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich,openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/scratch/$ENV{USER}/e3sm/timings</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/scratch/$ENV{USER}/e3sm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/nfs/gce/projects/climate/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$DIN_LOC_ROOT/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/nfs/gce/projects/climate/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/nfs/gce/projects/climate/e3sm/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jayesh at mcs dot anl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -l -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> --oversubscribe -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-6fjdtku/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="perl">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-6fjdtku/lmod/lmod/init/perl</init_path>
      <init_path lang="bash">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-6fjdtku/lmod/lmod/init/bash</init_path>
      <init_path lang="sh">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-6fjdtku/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-6fjdtku/lmod/lmod/init/csh</init_path>
      <cmd_path lang="python">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-6fjdtku/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">module</cmd_path>
      <cmd_path lang="bash">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">autoconf/2.69-bmnwajj</command>
        <command name="load">automake/1.16.3-r7w24o4</command>
        <command name="load">libtool/2.4.6-uh3mpsu</command>
        <command name="load">m4/1.4.19-7fztfyz</command>
        <command name="load">cmake/3.20.5-zyz2eld</command>
        <command name="load">gcc/11.1.0-qsjmpcg</command>
        <command name="load">zlib/1.2.11-p7dmb5p</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables mpilib="mpi-serial">
      <!-- We currently don't have modules for serial NetCDF -->
      <env name="NETCDF_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/netcdf/4.8.0c-4.3.1cxx-4.5.3f-serial/gcc-11.1.0</env>
    </environment_variables>
    <environment_variables mpilib="mpich">
      <!-- We currently don't have modules for HDF5, NetCDF & PnetCDF -->
      <env name="LD_LIBRARY_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/mpich/4.0/gcc-11.1.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <env name="PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/mpich/4.0/gcc-11.1.0/bin:$ENV{PATH}</env>
      <env name="ZLIB_PATH">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/zlib-1.2.11-p7dmb5p</env>
      <env name="HDF5_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/hdf5/1.12.1/mpich-4.0/gcc-11.1.0</env>
      <env name="NETCDF_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/netcdf/4.8.0c-4.3.1cxx-4.5.3f-parallel/mpich-4.0/gcc-11.1.0</env>
      <env name="PNETCDF_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/pnetcdf/1.12.2/mpich-4.0/gcc-11.1.0</env>
    </environment_variables>
    <environment_variables mpilib="openmpi">
      <!-- We currently don't have modules for HDF5, NetCDF & PnetCDF -->
      <env name="LD_LIBRARY_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/openmpi/4.1.3/gcc-11.1.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <env name="PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/openmpi/4.1.3/gcc-11.1.0/bin:$ENV{PATH}</env>
      <env name="ZLIB_PATH">/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/zlib-1.2.11-p7dmb5p</env>
      <env name="HDF5_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/hdf5/1.12.1/openmpi-4.1.3/gcc-11.1.0</env>
      <env name="NETCDF_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/netcdf/4.8.0c-4.3.1cxx-4.5.3f-parallel/openmpi-4.1.3/gcc-11.1.0</env>
      <env name="PNETCDF_PATH">/nfs/gce/projects/climate/software/linux-ubuntu20.04-x86_64/pnetcdf/1.12.2/openmpi-4.1.3/gcc-11.1.0</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables>
      <env name="PERL5LIB">/nfs/gce/projects/climate/software/perl5/lib/perl5</env>
    </environment_variables>
  </machine>

  <machine MACH="sandiatoss3">
    <DESC>SNL TOSS3 cluster (skybridge/chama)</DESC>
    <NODENAME_REGEX>(skybridge|chama)</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>proxy.sandia.gov:80</PROXY>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <PROJECT>fy210162</PROJECT>
    <SAVE_TIMING_DIR>/projects/ccsm/timings</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/gpfs/$USER/acme_scratch/sandiatoss3</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/projects/ccsm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/projects/ccsm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/projects/ccsm/ccsm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/projects/ccsm/cprnc/build.toss3/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec</executable>
      <arguments>
        <arg name="num_tasks"> --n {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core</arg>
      </arguments>
    </mpirun>
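    <!-- The mapping above uses openmpi syntax: ppr (processes per resource) places
         tasks_per_numa ranks on each socket, reserves OMP_NUM_THREADS processing elements
         per rank, and binds each rank to cores. -->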
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/lmod/lmod/init/python.py</init_path>
      <init_path lang="perl">/usr/share/lmod/lmod/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">sems-archive-env</command>
        <command name="load">acme-env</command>
        <command name="load">sems-archive-git</command>
        <command name="load">sems-archive-cmake/3.19.1</command>
        <command name="load">gnu/6.3.1</command>
        <command name="load">sems-archive-intel/17.0.0</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">sems-archive-openmpi/1.10.5</command>
        <command name="load">acme-netcdf/4.7.4/acme</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">sems-archive-netcdf/4.4.1/exo</command>
      </modules>
    </module_system>
    <RUNDIR>/nscratch/$USER/acme_scratch/sandiatoss3/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>

    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="NETCDF_INCLUDES">$ENV{SEMS_NETCDF_ROOT}/include</env>
      <env name="NETCDF_LIBS">$ENV{SEMS_NETCDF_ROOT}/lib</env>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables mpilib="!mpi-serial">
      <env name="PNETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="ghost">
    <DESC>SNL cluster</DESC>
    <NODENAME_REGEX>ghost-login</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>proxy.sandia.gov:80</PROXY>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <PROJECT>fy210162</PROJECT>

    <CIME_OUTPUT_ROOT>/gscratch/$USER/acme_scratch/ghost</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/projects/ccsm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/projects/ccsm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/projects/ccsm/ccsm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/projects/ccsm/cprnc/build.toss3/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec</executable>
      <arguments>
        <arg name="num_tasks"> --n {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/lmod/lmod/init/python.py</init_path>
      <init_path lang="perl">/usr/share/lmod/lmod/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">sems-env</command>
        <command name="load">sems-git</command>
        <command name="load">sems-python/3.5.2</command>
        <command name="load">sems-cmake</command>
        <command name="load">gnu/4.9.2</command>
        <command name="load">sems-intel/16.0.2</command>
        <command name="load">mkl/16.0</command>
        <command name="load">sems-netcdf/4.4.1/exo_parallel</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">sems-openmpi/1.10.5</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="NETCDF_INCLUDES">$ENV{SEMS_NETCDF_ROOT}/include</env>
      <env name="NETCDF_LIBS">$ENV{SEMS_NETCDF_ROOT}/lib</env>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables mpilib="!mpi-serial">
      <env name="PNETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="anvil">
    <DESC>ANL/LCRC Linux Cluster</DESC>
    <NODENAME_REGEX>b.*.lcrc.anl.gov</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>impi,openmpi,mvapich</MPILIBS>
    <PROJECT>condo</PROJECT>
    <SAVE_TIMING_DIR>/lcrc/group/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/lcrc/group/e3sm/$USER/scratch/anvil</CIME_OUTPUT_ROOT>
    <CIME_HTML_ROOT>/lcrc/group/e3sm/public_html/$ENV{USER}</CIME_HTML_ROOT>
    <CIME_URL_ROOT>https://web.lcrc.anl.gov/public/e3sm/$ENV{USER}</CIME_URL_ROOT>
    <DIN_LOC_ROOT>/lcrc/group/e3sm/data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lcrc/group/e3sm/data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lcrc/group/e3sm/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lcrc/group/e3sm/baselines/anvil/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lcrc/group/e3sm/soft/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit </arg>
        <arg name="binding">--cpu_bind=cores</arg>
        <arg name="thread_count">-c $ENV{OMP_NUM_THREADS}</arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh;export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core</init_path>
      <init_path lang="csh">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh;setenv MODULEPATH $MODULEPATH\:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core</init_path>
      <init_path lang="python">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="python">export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core;/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.20.3-vedypwm</command>
      </modules>
      <modules compiler="intel">
        <command name="load">gcc/7.4.0</command>
        <command name="load">intel/20.0.4-lednsve</command>
        <command name="load">intel-mkl/2020.4.304-voqlapk</command>
      </modules>
      <modules compiler="intel" mpilib="mvapich">
        <command name="load">mvapich2/2.3.6-verbs-x4iz7lq</command>
        <command name="load">netcdf-c/4.4.1-gei7x7w</command>
        <command name="load">netcdf-cxx/4.2-db2f5or</command>
        <command name="load">netcdf-fortran/4.4.4-b4ldb3a</command>
        <command name="load">parallel-netcdf/1.11.0-kj4jsvt</command>
      </modules>
      <modules compiler="intel" mpilib="impi">
        <command name="load">intel-mpi/2019.9.304-i42whlw</command>
        <command name="load">netcdf-c/4.4.1-blyisdg</command>
        <command name="load">netcdf-cxx/4.2-gkqc6fq</command>
        <command name="load">netcdf-fortran/4.4.4-eanrh5t</command>
        <command name="load">parallel-netcdf/1.11.0-y3nmmej</command>
      </modules>
      <modules compiler="intel" mpilib="openmpi">
        <command name="load">openmpi/4.1.1-v3b3npd</command>
        <command name="load">netcdf-c/4.4.1-smyuxme</command>
        <command name="load">netcdf-cxx/4.2-kfb2aag</command>
        <command name="load">netcdf-fortran/4.4.4-mablvyc</command>
        <command name="load">parallel-netcdf/1.11.0-x4n5s7k</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc/8.2.0-xhxgy33</command>
        <command name="load">intel-mkl/2020.4.304-d6zw4xa</command>
      </modules>
      <modules compiler="gnu" mpilib="mvapich">
        <command name="load">netcdf/4.4.1-ve2zfkw</command>
        <command name="load">netcdf-cxx/4.2-2rkopdl</command>
        <command name="load">netcdf-fortran/4.4.4-thtylny</command>
        <command name="load">mvapich2/2.2-verbs-ppznoge</command>
        <command name="load">parallel-netcdf/1.11.0-c22b2bn</command>
      </modules>
      <modules compiler="gnu" mpilib="impi">
        <command name="load">intel-mpi/2019.9.304-rxpzd6p</command>
        <command name="load">netcdf-c/4.4.1-fysjgfx</command>
        <command name="load">netcdf-cxx/4.2-oaiw2v6</command>
        <command name="load">netcdf-fortran/4.4.4-kxgkaop</command>
        <command name="load">parallel-netcdf/1.11.0-fce7akl</command>
      </modules>
      <modules compiler="gnu" mpilib="openmpi">
        <command name="load">openmpi/4.1.1-x5n4m36</command>
        <command name="load">netcdf-c/4.4.1-mtfptpl</command>
        <command name="load">netcdf-cxx/4.2-osp27dq</command>
        <command name="load">netcdf-fortran/4.4.4-5yd6dos</command>
        <command name="load">parallel-netcdf/1.11.0-a7ohxsg</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
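    <!-- The $SHELL expressions below locate the NetCDF and PnetCDF installs by walking two
         directories up from the nc-config, nf-config and pnetcdf_version binaries that the
         loaded modules place on PATH. -->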
    <environment_variables>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
      <env name="PATH">/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}</env>
    </environment_variables>
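    <!-- MVAPICH2 settings below: MV2_ENABLE_AFFINITY=0 is understood to disable the
         library's own CPU binding so that srun controls placement, MV2_SHOW_CPU_BINDING=1
         reports the resulting binding, MV2_HOMOGENEOUS_CLUSTER=1 treats the allocation as
         homogeneous, and the DEBUG variants add backtraces and environment reporting. -->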
    <environment_variables mpilib="mvapich">
      <env name="MV2_ENABLE_AFFINITY">0</env>
      <env name="MV2_SHOW_CPU_BINDING">1</env>
      <env name="MV2_HOMOGENEOUS_CLUSTER">1</env>
    </environment_variables>
    <environment_variables mpilib="mvapich" DEBUG="TRUE">
      <env name="MV2_DEBUG_SHOW_BACKTRACE">1</env>
      <env name="MV2_SHOW_ENV_INFO">2</env>
    </environment_variables>
    <environment_variables mpilib="impi" DEBUG="TRUE">
      <env name="I_MPI_DEBUG">10</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel">
      <env name="KMP_AFFINITY">granularity=core,balanced</env>
      <env name="KMP_HOT_TEAMS_MODE">1</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="gnu">
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>

  <machine MACH="chrysalis">
    <DESC>ANL/LCRC Linux cluster: 512 dual-socket AMD EPYC 7532 nodes, 64 cores per node</DESC>
    <NODENAME_REGEX>chr.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>openmpi,impi</MPILIBS>
    <PROJECT>e3sm</PROJECT>
    <SAVE_TIMING_DIR>/lcrc/group/e3sm/PERF_Chrysalis</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/lcrc/group/e3sm/$USER/scratch/chrys</CIME_OUTPUT_ROOT>
    <CIME_HTML_ROOT>/lcrc/group/e3sm/public_html/$ENV{USER}</CIME_HTML_ROOT>
    <CIME_URL_ROOT>https://web.lcrc.anl.gov/public/e3sm/$ENV{USER}</CIME_URL_ROOT>
    <DIN_LOC_ROOT>/lcrc/group/e3sm/data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lcrc/group/e3sm/data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lcrc/group/e3sm/$USER/scratch/chrys/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lcrc/group/e3sm/baselines/chrys/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lcrc/group/e3sm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>128</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
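    <!-- Sketch of what the srun arguments below compute, given 128 hardware threads per
         node: the binding argument selects cpu_bind=cores when MAX_MPITASKS_PER_NODE is 64
         or fewer (one rank per physical core) and cpu_bind=threads otherwise, while
         thread_count reserves 128/tasks_per_node logical CPUs for each rank. -->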
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks">--mpi=pmi2 -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit </arg>
        <arg name="binding"> $SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
        <arg name="thread_count">-c $SHELL{echo 128/ {{ tasks_per_node }} |bc}</arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/csh</init_path>
      <init_path lang="python">/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="python">/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">subversion/1.14.0-e4smcy3</command>
        <command name="load">perl/5.32.0-bsnc6lt</command>
        <command name="load">cmake/3.19.1-yisciec</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/20.0.4-kodw73g</command>
        <command name="load">intel-mkl/2020.4.304-g2qaxzf</command>
      </modules>
      <modules compiler="intel" mpilib="openmpi">
        <command name="load">openmpi/4.1.3-pin4k7o</command>
        <command name="load">hdf5/1.10.7-eewgp6v</command>
        <command name="load">netcdf-c/4.4.1-ihoo4zq</command>
        <command name="load">netcdf-cxx/4.2-soitsxm</command>
        <command name="load">netcdf-fortran/4.4.4-tplolxh</command>
        <command name="load">parallel-netcdf/1.11.0-gvcfihh</command>
      </modules>
      <modules compiler="intel" mpilib="impi">
        <command name="load">intel-mpi/2019.9.304-tkzvizk</command>
        <command name="load">hdf5/1.8.16-se4xyo7</command>
        <command name="load">netcdf-c/4.4.1-qvxyzq2</command>
        <command name="load">netcdf-cxx/4.2-binixgj</command>
        <command name="load">netcdf-fortran/4.4.4-rdxohvp</command>
        <command name="load">parallel-netcdf/1.11.0-b74wv4m</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc/9.2.0-ugetvbp</command>
        <command name="load">intel-mkl/2020.4.304-n3b5fye</command>
      </modules>
      <modules compiler="gnu" mpilib="openmpi">
        <command name="load">openmpi/4.1.3-sxfyy4k</command>
        <command name="load">hdf5/1.10.7-j3zxncu</command>
        <command name="load">netcdf-c/4.4.1-7ohuiwq</command>
        <command name="load">netcdf-cxx/4.2-tkg465k</command>
        <command name="load">netcdf-fortran/4.4.4-k2zu3y5</command>
        <command name="load">parallel-netcdf/1.11.0-mirrcz7</command>
      </modules>
      <modules compiler="gnu" mpilib="impi">
        <command name="load">intel-mpi/2019.9.304-jdih7h5</command>
        <command name="load">hdf5/1.8.16-dtbpce3</command>
        <command name="load">netcdf-c/4.4.1-zcoa44z</command>
        <command name="load">netcdf-cxx/4.2-ayxg4c7</command>
        <command name="load">netcdf-fortran/4.4.4-2lfr2lr</command>
        <command name="load">parallel-netcdf/1.11.0-ifdodru</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.05</TEST_TPUT_TOLERANCE>
    <TEST_MEMLEAK_TOLERANCE>0.05</TEST_MEMLEAK_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="PERL5LIB">/lcrc/group/e3sm/soft/perl/chrys/lib/perl5</env>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel" MAX_TASKS_PER_NODE="!128">
      <env name="KMP_AFFINITY">granularity=core,balanced</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel" MAX_TASKS_PER_NODE="128">
      <env name="KMP_AFFINITY">granularity=thread,balanced</env>
    </environment_variables>
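    <!-- The MAX_TASKS_PER_NODE attribute acts as a selector: at 128 tasks per node (both
         hardware threads in use) KMP affinity granularity drops to the thread level,
         otherwise it stays at the core level. -->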
    <environment_variables SMP_PRESENT="TRUE" compiler="gnu">
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>

  <machine MACH="blues">
    <DESC>ANL/LCRC Linux Cluster</DESC>
    <OS>LINUX</OS>
    <COMPILERS>pgigpu</COMPILERS>
    <MPILIBS>mvapich</MPILIBS>
    <PROJECT>e3sm</PROJECT>
    <SAVE_TIMING_DIR>/lcrc/group/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/lcrc/group/e3sm/$USER/scratch/blues</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lcrc/group/e3sm/data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lcrc/group/e3sm/data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lcrc/group/e3sm/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lcrc/group/e3sm/baselines/blues/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lcrc/group/e3sm/soft/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit </arg>
        <arg name="binding">--cpu_bind=cores</arg>
        <arg name="thread_count">-c $ENV{OMP_NUM_THREADS}</arg>
        <arg name="placement">-m plane=$SHELL{echo 16/$OMP_NUM_THREADS|bc} </arg>
      </arguments>
    </mpirun>
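    <!-- The placement argument above computes plane=16/OMP_NUM_THREADS, i.e. the number of
         ranks that fit on a 16-core node when each rank is given OMP_NUM_THREADS cores. -->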
    <module_system type="module">
      <init_path lang="sh">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh;export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core</init_path>
      <init_path lang="csh">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh;setenv MODULEPATH $MODULEPATH\:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core\:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core</init_path>
      <init_path lang="python">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="python">export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core;/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.20.3-vedypwm</command>
      </modules>
      <modules compiler="pgigpu">
        <command name="load">nvhpc/20.9-5brtudu</command>
        <command name="load">cuda/11.1.0-6dvax5z</command>
        <command name="load">netcdf-c/4.7.4-ltqliri</command>
        <command name="load">netcdf-cxx/4.2-kf5ox4e</command>
        <command name="load">netcdf-fortran/4.5.3-6mgyroo</command>
        <command name="load">mvapich2/2.3.4-blues-5fwicb5</command>
        <command name="load">parallel-netcdf/1.12.1-nyuvwhn</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
      <env name="PATH">/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}</env>
    </environment_variables>
    <environment_variables mpilib="mvapich">
      <env name="MV2_ENABLE_AFFINITY">0</env>
      <env name="MV2_SHOW_CPU_BINDING">1</env>
    </environment_variables>
    <environment_variables mpilib="mvapich" DEBUG="TRUE">
      <env name="MV2_DEBUG_SHOW_BACKTRACE">1</env>
      <env name="MV2_SHOW_ENV_INFO">2</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>

  <machine MACH="swing">
    <DESC>ANL/LCRC Linux Cluster: 6x 128c EPYC nodes with 8x A100 GPUs</DESC>
    <NODENAME_REGEX>gpulogin.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>pgigpu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <PROJECT>e3sm</PROJECT>
    <SAVE_TIMING_DIR>/lcrc/group/e3sm</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/lcrc/group/e3sm/$USER/scratch/swing</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lcrc/group/e3sm/data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lcrc/group/e3sm/data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lcrc/group/e3sm/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lcrc/group/e3sm/baselines/swing/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lcrc/group/e3sm/soft/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_gpu</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>128</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="pgigpu">16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>128</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="pgigpu">16</MAX_MPITASKS_PER_NODE>
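    <!-- The compiler="pgigpu" overrides above cap GPU builds at 16 tasks per node,
         presumably to keep only a couple of ranks per A100 instead of one per CPU core. -->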
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -l -n {{ total_tasks }} -N {{ num_nodes }} -K </arg>
        <arg name="binding">$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}</arg>
        <arg name="thread_count">-c $SHELL{echo 256/ {{ tasks_per_node }} |bc}</arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/csh</init_path>
      <init_path lang="python">/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="python">/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.21.1-e5i6eks</command>
      </modules>
      <modules compiler="pgigpu">
        <command name="load">nvhpc/20.9-37zsymt</command>
        <command name="load">cuda/11.1.1-nkh7mm7</command>
        <command name="load">openmpi/4.1.1-r6ebr2e</command>
        <command name="load">netcdf-c/4.7.4-zppo53l</command>
        <command name="load">netcdf-cxx/4.2-wjm7fye</command>
        <command name="load">netcdf-fortran/4.5.3-srsajjs</command>
        <command name="load">parallel-netcdf/1.12.1-75szceu</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
      <env name="PATH">/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>

  <machine MACH="bebop">
    <DESC>ANL/LCRC Cray CS400 cluster: 352 KNL nodes (Xeon Phi 7230, 64C/1.3GHz) + 672 Broadwell nodes (Xeon E5-2695v4, 36C/2.1GHz), Intel Omni-Path network, Slurm batch system, Lmod module environment.</DESC>
    <NODENAME_REGEX>beboplogin.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>impi,mvapich</MPILIBS>
    <PROJECT>e3sm</PROJECT>
    <CIME_OUTPUT_ROOT>/lcrc/group/e3sm/$USER/scratch/bebop</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lcrc/group/e3sm/data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lcrc/group/e3sm/data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lcrc/group/e3sm/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lcrc/group/e3sm/baselines/bebop/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lcrc/group/e3sm/soft/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="impi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -l -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mvapich">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit </arg>
        <arg name="binding">--cpu_bind=cores</arg>
        <arg name="thread_count">-c $ENV{OMP_NUM_THREADS}</arg>
        <arg name="placement">-m plane=$SHELL{echo 36/$OMP_NUM_THREADS|bc}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh</init_path>
      <init_path lang="python">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="python">/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.20.3-vedypwm</command>
        <command name="load">anaconda3/5.2.0</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/18.0.4-443hhug</command>
        <command name="load">intel-mkl/2018.4.274-jwaeshj</command>
        <command name="load">hdf5/1.10.5-3mk3uik</command>
        <command name="load">netcdf/4.7.0-krelxcz</command>
        <command name="load">netcdf-fortran/4.4.5-74lj75q</command>
      </modules>
      <modules compiler="intel" mpilib="impi">
        <command name="load">intel-mpi/2018.4.274-4hmwfl6</command>
        <command name="load">parallel-netcdf/1.11.0-acswzws</command>
      </modules>
      <modules compiler="intel" mpilib="mvapich">
        <command name="load">mvapich2/2.3.1-verbs-omjz3ck</command>
        <command name="load">parallel-netcdf/1.11.2-7fy6qz3</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc/8.2.0-g7hppkz</command>
        <command name="load">intel-mkl/2018.4.274-2amycpi</command>
        <command name="load">hdf5/1.8.16-mz7lmxh</command>
        <command name="load">netcdf/4.4.1-xkjcghm</command>
        <command name="load">netcdf-fortran/4.4.4-mpstomu</command>
      </modules>
      <modules compiler="gnu" mpilib="impi">
        <command name="load">intel-mpi/2018.4.274-ozfo327</command>
        <command name="load">parallel-netcdf/1.11.0-filvnis</command>
      </modules>
      <modules compiler="gnu" mpilib="mvapich">
        <command name="load">mvapich2/2.3-bebop-3xi4hiu</command>
        <command name="load">parallel-netcdf/1.11.2-hfn33fd</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PATH">/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}</env>
    </environment_variables>
    <environment_variables mpilib="!mpi-serial">
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
    <environment_variables mpilib="impi">
      <env name="I_MPI_FABRICS">shm:tmi</env>
    </environment_variables>
  </machine>

  <machine MACH="syrah">
    <DESC>LLNL Linux Cluster, Intel compiler, 16 pes/node, batch system is Slurm</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/p/lscratchh/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/usr/gdata/climdat/ccsm3data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/usr/gdata/climdat/ccsm3data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/p/lscratchh/$CCSMUSER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/p/lscratchh/$CCSMUSER/ccsm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/p/lscratchd/ma21/ccsm3data/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>lc_slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>donahue5 -at- llnl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="default">
      <executable>srun</executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="perl">/usr/share/lmod/lmod/init/perl</init_path>
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <modules compiler="intel">
        <command name="load">python</command>
        <command name="load">git</command>
        <command name="load">intel/19.0.4</command>
        <command name="load">mvapich2/2.3</command>
        <command name="load">cmake/3.14.5</command>
        <command name="load">netcdf-fortran/4.4.4</command>
        <command name="load">pnetcdf/1.9.0</command>
      </modules>
    </module_system>
    <RUNDIR>/p/lscratchh/$CCSMUSER/ACME/$CASE/run</RUNDIR>
    <EXEROOT>/p/lscratchh/$CCSMUSER/$CASE/bld</EXEROOT>
    <environment_variables compiler="intel">
      <env name="NETCDFROOT">/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/</env>
      <env name="NETCDF_PATH">/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/</env>
    </environment_variables>
    <environment_variables compiler="intel" mpilib="!mpi-serial">
      <env name="PNETCDFROOT">/usr/tce/packages/pnetcdf/pnetcdf-1.9.0-intel-18.0.1-mvapich2-2.2/</env>
    </environment_variables>
  </machine>

  <machine MACH="quartz">
    <DESC>LLNL Linux Cluster, Intel compiler, 36 pes/node, batch system is Slurm</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/p/lscratchh/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/usr/gdata/climdat/ccsm3data/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/usr/gdata/climdat/ccsm3data/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/p/lscratchh/$CCSMUSER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/p/lscratchh/$CCSMUSER/ccsm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/p/lscratchd/ma21/ccsm3data/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>lc_slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>donahue5 -at- llnl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="default">
      <executable>srun</executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="perl">/usr/share/lmod/lmod/init/perl</init_path>
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <modules compiler="intel">
        <command name="load">python</command>
        <command name="load">git</command>
        <command name="load">intel/19.0.4</command>
        <command name="load">mvapich2/2.3</command>
        <command name="load">cmake/3.14.5</command>
        <command name="load">netcdf-fortran/4.4.4</command>
        <command name="load">pnetcdf/1.9.0</command>
      </modules>
    </module_system>
    <RUNDIR>/p/lscratchh/$CCSMUSER/ACME/$CASE/run</RUNDIR>
    <EXEROOT>/p/lscratchh/$CCSMUSER/$CASE/bld</EXEROOT>
    <environment_variables compiler="intel">
      <env name="NETCDFROOT">/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/</env>
      <env name="NETCDF_PATH">/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/</env>
    </environment_variables>
    <environment_variables compiler="intel" mpilib="!mpi-serial">
      <env name="PNETCDFROOT">/usr/tce/packages/pnetcdf/pnetcdf-1.9.0-intel-18.0.1-mvapich2-2.2/</env>
    </environment_variables>
  </machine>

  <machine MACH="theta">
    <DESC>ALCF Cray XC40 (KNL), OS is CNL, 64 pes/node, batch system is Cobalt</DESC>
    <NODENAME_REGEX>theta.*</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu,cray</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <SAVE_TIMING_DIR>/projects/$PROJECT</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>ClimateEnergy_4</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/projects/$PROJECT/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/projects/ccsm/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/projects/ccsm/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/projects/$PROJECT/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/projects/ccsm/e3sm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>16</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>cobalt_theta</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>128</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
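    <!-- Sketch of what the aprun arguments below compute: tasks_per_node passes the smaller
         of MAX_MPITASKS_PER_NODE and TOTAL_TASKS to -N, while hyperthreading sets the depth
         (-d) to MAX_TASKS_PER_NODE/MAX_MPITASKS_PER_NODE and the hardware threads per core
         (-j) to 1 unless MAX_TASKS_PER_NODE exceeds 64, in which case MAX_TASKS_PER_NODE/64
         is used. -->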
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
        <arg name="num_tasks">-n {{ total_tasks }}</arg>
        <arg name="tasks_per_node">-N $SHELL{if [ `./xmlquery --value MAX_MPITASKS_PER_NODE` -gt `./xmlquery --value TOTAL_TASKS` ];then echo `./xmlquery --value TOTAL_TASKS`;else echo `./xmlquery --value MAX_MPITASKS_PER_NODE`;fi;}</arg>
        <arg name="hyperthreading">--cc depth -d $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} -j $SHELL{if [ 64 -ge `./xmlquery --value MAX_TASKS_PER_NODE` ];then echo 1;else echo `./xmlquery --value MAX_TASKS_PER_NODE`/64|bc;fi;}</arg>
        <arg name="env_vars">$ENV{SMP_VARS} $ENV{labeling}</arg>
      </arguments>
    </mpirun>
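    <!-- Worked example for the aprun arguments above (derived from the xmlquery arithmetic,
         shown here only for clarity): the tasks_per_node value is the smaller of
         MAX_MPITASKS_PER_NODE and TOTAL_TASKS; the depth (d) value is
         MAX_TASKS_PER_NODE / MAX_MPITASKS_PER_NODE; the j value sets hardware threads per core.
         With the defaults above (128 and 64), a fully packed node gets d=2 and j=2,
         i.e. two hardware threads per MPI rank. -->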
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="rm">cray-mpich</command>
        <command name="rm">cray-parallel-netcdf</command>
        <command name="rm">cray-hdf5-parallel</command>
        <command name="rm">cray-hdf5</command>
        <command name="rm">cray-netcdf</command>
        <command name="rm">cray-netcdf-hdf5parallel</command>
        <command name="load">craype/2.6.5</command>
        <command name="load">cmake/3.18.0</command>
      </modules>
      <modules compiler="intel">
        <command name="rm">PrgEnv-gnu</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="load">PrgEnv-intel/6.0.7</command>
        <command name="swap">intel/19.1.0.166</command>
      </modules>
      <modules compiler="gnu">
        <command name="rm">PrgEnv-intel</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="load">PrgEnv-gnu/6.0.7</command>
        <command name="swap">gcc/9.3.0</command>
      </modules>
      <modules compiler="cray">
        <command name="rm">PrgEnv-intel</command>
        <command name="rm">PrgEnv-gnu</command>
        <command name="load">gcc/9.3.0</command>
        <command name="load">PrgEnv-cray/6.0.9</command>
        <command name="swap">cce/10.0.3</command>
        <command name="rm">darshan</command>
      </modules>
      <modules compiler="!intel">
        <command name="swap">cray-libsci/20.09.1</command>
      </modules>
      <modules>
        <command name="load">cray-mpich/7.7.14</command>
        <command name="load">cray-hdf5-parallel/1.10.6.1</command>
        <command name="load">cray-netcdf-hdf5parallel/4.7.3.3</command>
        <command name="load">cray-parallel-netcdf/1.12.0.1</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="PATH">/projects/ccsm/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}</env>
      <env name="MPAS_TOOL_DIR">/projects/ccsm/e3sm/tools/mpas</env>
      <env name="HDF5_DISABLE_VERSION_CHECK">1</env>
      <env name="labeling">-e PMI_LABEL_ERROUT=1</env>
      <env name="SMP_VARS"> </env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel">
      <env name="SMP_VARS">-e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e KMP_AFFINITY=granularity=thread,scatter</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="!intel">
      <env name="SMP_VARS">-e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e OMP_PROC_BIND=spread -e OMP_PLACES=threads</env>
    </environment_variables>
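    <!-- Explanatory note: the env_vars argument of aprun above expands to
         "$ENV{SMP_VARS} $ENV{labeling}". SMP_VARS defaults to a single space and only carries
         the per-compiler OpenMP settings defined here when SMP_PRESENT is TRUE, while labeling
         sets PMI_LABEL_ERROUT=1, which is intended to tag error output from compute nodes by rank. -->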
  </machine>

  <machine MACH="jlse">
    <DESC>ANL experimental/evaluation cluster, batch system is cobalt</DESC>
    <NODENAME_REGEX>jlse.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>oneapi-ifx,oneapi-ifort,gnu</COMPILERS>
    <MPILIBS>mpich,impi,openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/gpfs/jlse-fs0/projects/climate/$USER/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/gpfs/jlse-fs0/projects/climate/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/gpfs/jlse-fs0/projects/climate/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/gpfs/jlse-fs0/projects/climate/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/gpfs/jlse-fs0/projects/climate/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>16</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>cobalt_theta</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>112</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="oneapi-ifx">96</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>112</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="oneapi-ifx">24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-l -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">--tag-output -n {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread</arg>
      </arguments>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <modules>
	<command name="purge"/>
        <command name="use">/soft/restricted/CNDA/modules</command>
      </modules>
      <modules compiler="!gnu">
	<command name="load">oneapi/release/2021.10.30.001</command>
        <command name="load">cmake/3.22.1</command>
      </modules>
      <modules compiler="gnu">
        <command name="unload">cmake</command>
	<command name="load">gcc/8.2.0</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="PATH">/home/azamat/soft/perl/5.32.0/bin:$ENV{PATH}</env>
      <env name="NETCDF_PATH">/home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/oneapi-2020.12.15.004-intel_mpi-2019.4.243</env>
      <env name="PNETCDF_PATH">/home/azamat/soft/pnetcdf/1.12.1/oneapi-2020.12.15.004-intel_mpi-2019.4.243</env>
    </environment_variables>
    <environment_variables mpilib="impi">
      <env name="I_MPI_DEBUG">10</env>
      <env name="I_MPI_PIN_DOMAIN">omp</env>
      <env name="I_MPI_PIN_ORDER">spread</env>
      <env name="I_MPI_PIN_CELL">unit</env>
    </environment_variables>
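    <!-- Descriptive note on the Intel MPI variables above (standard Intel MPI semantics, not
         verified on this system): I_MPI_PIN_DOMAIN=omp sizes each pinning domain to
         OMP_NUM_THREADS logical CPUs, I_MPI_PIN_ORDER=spread spaces the domains across the node,
         I_MPI_PIN_CELL=unit pins at logical CPU granularity, and I_MPI_DEBUG=10 prints the
         resulting pinning map at launch. -->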
    <environment_variables compiler="intel" mpilib="openmpi">
      <env name="OMPI_CC">icc</env>
      <env name="OMPI_CXX">icpc</env>
      <env name="OMPI_FC">ifort</env>
      <env name="PATH">/home/azamat/soft/openmpi/2.1.6/intel19/bin:$ENV{PATH}</env>
      <env name="LD_LIBRARY_PATH">/home/azamat/soft/openmpi/2.1.6/intel19/lib:$ENV{LD_LIBRARY_PATH}</env>
      <env name="NETCDF_PATH">/home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/intel19-openmpi2.1.6</env>
      <env name="PNETCDF_PATH">/home/azamat/soft/pnetcdf/1.12.1/intel19-openmpi2.1.6</env>
    </environment_variables>
    <environment_variables compiler="gnu" mpilib="openmpi">
      <env name="OMPI_CC">gcc</env>
      <env name="OMPI_CXX">g++</env>
      <env name="OMPI_FC">gfortran</env>
      <env name="LD_LIBRARY_PATH">/home/azamat/soft/openmpi/2.1.6/gcc8.2.0/lib:/home/azamat/soft/libs:$ENV{LD_LIBRARY_PATH}</env>
      <env name="PATH">/home/azamat/soft/openmpi/2.1.6/gcc8.2.0/bin:/home/azamat/soft/cmake/3.18.5/bin:$ENV{PATH}</env>
      <env name="CMAKE_ROOT">/home/azamat/soft/cmake/3.18.5</env>
      <env name="ACLOCAL_PATH">/home/azamat/soft/cmake/3.18.5/share/aclocal</env>
      <env name="CMAKE_PREFIX_PATH">/home/azamat/soft/cmake/3.18.5</env>
      <env name="NETCDF_PATH">/home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc8.2.0-openmpi2.1.6</env>
      <env name="PNETCDF_PATH">/home/azamat/soft/pnetcdf/1.12.1/gcc8.2.0-openmpi2.1.6</env>
    </environment_variables>
    <environment_variables compiler="oneapi-ifx">
      <env name="LIBOMPTARGET_DEBUG">0</env><!--default 0, max 5 -->
      <!--env name="OMP_TARGET_OFFLOAD">DISABLED</env--><!--set this for CPU-only runs-->
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="intel">
      <env name="KMP_AFFINITY">verbose,granularity=thread,balanced</env>
      <env name="OMP_STACKSIZE">128M</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="!intel">
      <env name="OMP_PLACES">threads</env>
      <env name="OMP_STACKSIZE">128M</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
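    <!-- An RLIMIT_STACK of -1 is intended to request an unlimited stack (RLIM_INFINITY)
         for the launched processes. -->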
  </machine>

  <machine MACH="sooty">
    <DESC>PNNL cluster, OS is Linux, batch system is SLURM</DESC>
    <NODENAME_REGEX>sooty</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi</COMPILERS>
    <MPILIBS>mvapich2</MPILIBS>
    <CIME_OUTPUT_ROOT>/lustre/$USER/cime_output_root</CIME_OUTPUT_ROOT>
    <!--
    <DIN_LOC_ROOT>/lustre/climate/csmdata/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/climate/csmdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    -->
    <DIN_LOC_ROOT>/pic/projects/sooty2/$ENV{USER}/e3sm_inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/pic/projects/sooty2/$ENV{USER}/e3sm_inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lustre/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lustre/climate/acme_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lustre/climate/acme_baselines/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>balwinder.singh -at- pnnl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>8</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>8</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
        <arg name="mpi">--mpi=none</arg>
        <arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
        <arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/share/apps/modules/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="python">/share/apps/modules/Modules/3.2.10/init/python.py</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <cmd_path lang="perl">/share/apps/modules/Modules/3.2.10/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/share/apps/modules/Modules/3.2.10/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules>
        <command name="load">perl/5.20.0</command>
        <command name="load">cmake/3.17.1</command>
        <command name="load">svn/1.8.13</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/19.0.5</command>
        <command name="load">mkl/2019u5</command>
      </modules>
      <modules compiler="pgi">
        <command name="load">pgi/14.10</command>
      </modules>
      <modules mpilib="mvapich2">
        <command name="load">mvapich2/2.3.1</command>
      </modules>
      <modules>
        <command name="load">netcdf/4.6.3</command>
      </modules>
    </module_system>
    <RUNDIR>/lustre/$USER/csmruns/$CASE/run</RUNDIR>
    <EXEROOT>/lustre/$USER/csmruns/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="MKL_PATH">$ENV{MKLROOT} </env>
      <env name="NETCDF_PATH">$ENV{NETCDF_LIB}/../</env>
      <env name="OMP_STACKSIZE">64M</env>
      <!-- PJR: the next lines put a modern python (from the python/3.7.2 module) first
           on the search paths and append some gcc runtime libraries last -->
      <env name="LD_LIBRARY_PATH">/share/apps/python/3.7.2/lib/:/share/apps/openssl/1.0.2r/lib:$ENV{LD_LIBRARY_PATH}:/share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:</env>
      <env name="PATH">/share/apps/python/3.7.2/bin:$ENV{PATH}</env>
    </environment_variables>
  </machine>

  <machine MACH="cascade">
    <DESC>PNNL Intel KNC cluster, OS is Linux, batch system is SLURM</DESC>
    <NODENAME_REGEX>glogin</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>impi,mvapich2</MPILIBS>
    <CIME_OUTPUT_ROOT>/dtemp/$PROJECT/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/dtemp/st49401/sing201/acme/inputdata/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/dtemp/st49401/sing201/acme/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$CIME_OUTPUT_ROOT/acme/acme_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$CIME_OUTPUT_ROOT/acme/acme_baselines/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>balwinder.singh -at- pnnl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="impi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
        <arg name="mpi">--mpi=none</arg>
        <arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
        <arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/opt/lmod/7.8.4/init/env_modules_python.py</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <cmd_path lang="python">/opt/lmod/7.8.4/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules>
        <command name="load">python/2.7.9</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/ips_18</command>
        <command name="load">mkl/14.0</command>
      </modules>
      <modules mpilib="impi">
        <command name="load">impi/4.1.2.040</command>
      </modules>
      <modules mpilib="mvapich2">
        <command name="load">mvapich2/1.9</command>
      </modules>
      <modules>
        <command name="load">netcdf/4.3.0</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/csmruns/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/csmruns/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="NETCDF_HOME">$ENV{NETCDF_ROOT}</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="MKL_PATH">$ENV{MLIBHOME}</env>
      <env name="COMPILER">intel</env>
    </environment_variables>
  </machine>

  <machine MACH="constance">
    <DESC>PNNL Haswell cluster, OS is Linux, batch system is SLURM</DESC>
    <NODENAME_REGEX>constance</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi,nag</COMPILERS>
    <MPILIBS>mvapich2,openmpi,intelmpi,mvapich</MPILIBS>
    <CIME_OUTPUT_ROOT>/pic/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/pic/projects/climate/csmdata/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/pic/projects/climate/csmdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/pic/scratch/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/pic/projects/climate/acme_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/pic/projects/climate/acme_baselines/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>balwinder.singh -at- pnnl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
        <arg name="mpi">--mpi=none</arg>
        <arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
        <arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mvapich">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
        <arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="intelmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/share/apps/modules/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="python">/share/apps/modules/Modules/3.2.10/init/python.py</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <cmd_path lang="perl">/share/apps/modules/Modules/3.2.10/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/share/apps/modules/Modules/3.2.10/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules>
        <command name="load">perl/5.20.0</command>
        <!--command name="load">cmake/3.3.0</command-->
        <command name="load">cmake/3.17.1</command>
      </modules>
      <modules compiler="intel">
        <command name="load">gcc/8.1.0</command>
        <command name="load">intel/19.0.5</command>
        <command name="load">mkl/2019u5</command>
      </modules>
      <modules compiler="pgi">
        <command name="load">pgi/14.10</command>
      </modules>
      <modules compiler="nag">
        <command name="load">nag/6.0</command>
        <command name="load">mkl/15.0.1</command>
      </modules>
      <modules mpilib="mvapich">
        <command name="load">mvapich2/2.1</command>
      </modules>
      <modules mpilib="mvapich2" compiler="intel">
        <command name="load">mvapich2/2.3.1</command>
      </modules>
      <modules mpilib="mvapich2" compiler="pgi">
        <command name="load">mvapich2/2.1</command>
      </modules>
      <modules mpilib="mvapich2" compiler="nag">
        <command name="load">mvapich2/2.3b</command>
      </modules>
      <modules mpilib="intelmpi">
        <command name="load">intelmpi/5.0.1.035</command>
      </modules>
      <modules mpilib="openmpi">
        <command name="load">openmpi/1.8.3</command>
      </modules>
      <modules compiler="intel">
        <command name="load">netcdf/4.6.3</command>
      </modules>
      <modules compiler="pgi">
        <command name="load">netcdf/4.3.2</command>
      </modules>
      <modules compiler="nag">
        <command name="load">netcdf/4.4.1.1</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables compiler="intel">
      <env name="LD_LIBRARY_PATH">/share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:$ENV{LD_LIBRARY_PATH}</env>
    </environment_variables>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="NETCDF_HOME">$ENV{NETCDF_LIB}/../</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="MKL_PATH">$ENV{MKLROOT}</env>
    </environment_variables>
    <environment_variables compiler="nag">
      <env name="MKL_PATH">$ENV{MKLROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="compy">
    <DESC>PNNL E3SM Intel Xeon Gold 6148 (Skylake) nodes, OS is Linux, batch system is SLURM</DESC>
    <NODENAME_REGEX>compy</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi</COMPILERS>
    <MPILIBS>impi,mvapich2</MPILIBS>
    <SAVE_TIMING_DIR>/compyfs</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/compyfs/$USER/e3sm_scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/compyfs/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/compyfs/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/compyfs/$USER/e3sm_scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/compyfs/e3sm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/compyfs/e3sm_baselines/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>bibi.mathew -at- pnnl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>40</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>40</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
        <arg name="mpi">--mpi=none</arg>
        <arg name="num_tasks">--ntasks={{ total_tasks }} --nodes={{ num_nodes }}</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
        <arg name="cpu_bind">-l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="impi">
      <executable>srun</executable>
      <arguments>
        <arg name="mpi">--mpi=pmi2</arg>
        <arg name="num_tasks">--ntasks={{ total_tasks }} --nodes={{ num_nodes }}</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
        <arg name="cpu_bind">-l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}</arg>
      </arguments>
    </mpirun>
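    <!-- Worked example for the cpu_bind argument in the two srun launchers above: with
         OMP_NUM_THREADS=2 on a 40-core Compy node, srun receives "-c 2 -m plane=20",
         i.e. 20 two-core MPI ranks per node; with OMP_NUM_THREADS=1 it receives
         "-c 1 -m plane=40". -->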
    <module_system type="module">
      <init_path lang="perl">/share/apps/modules/init/perl.pm</init_path>
      <init_path lang="python">/share/apps/modules/init/python.py</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <cmd_path lang="perl"> /share/apps/modules/bin/modulecmd  perl</cmd_path>
      <cmd_path lang="python">/share/apps/modules/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules>
        <command name="load">cmake/3.19.6</command>
      </modules>
      <modules compiler="intel">
        <command name="load">gcc/8.1.0</command>
        <command name="load">intel/19.0.5</command>
      </modules>
      <modules compiler="pgi">
        <command name="load">pgi/19.10</command>
      </modules>
      <modules mpilib="mvapich2">
        <command name="load">mvapich2/2.3.1</command>
      </modules>
      <modules mpilib="impi" compiler="intel">
	<command name="load">intelmpi/2019u4</command>
      </modules>
      <modules mpilib="impi" compiler="pgi">
	<command name="load">intelmpi/2019u3</command>
      </modules>      
      <modules>
        <command name="load">netcdf/4.6.3</command>
        <command name="load">pnetcdf/1.9.0</command>
        <command name="load">mkl/2019u5</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.05</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>0</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="NETCDF_HOME">$ENV{NETCDF_ROOT}/</env>
      <env name="MKL_PATH">$ENV{MKLROOT}</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="LD_LIBRARY_PATH">/share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:$ENV{LD_LIBRARY_PATH}</env>
    </environment_variables>
    <environment_variables mpilib="mvapich2">
      <env name="MV2_ENABLE_AFFINITY">0</env>
      <env name="MV2_SHOW_CPU_BINDING">1</env>
    </environment_variables>
    <environment_variables mpilib="impi">
      <env name="I_MPI_ADJUST_ALLREDUCE">1</env>
    </environment_variables>
    <environment_variables mpilib="impi" DEBUG="TRUE">
      <env name="I_MPI_DEBUG">10</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>

  <machine MACH="oic5">
    <DESC>ORNL XK6, os is Linux, 32 pes/node, batch system is PBS</DESC>
    <NODENAME_REGEX>oic5</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich,openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/home/$USER/models/ACME</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/home/zdr/models/ccsm_inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/home/zdr/models/ccsm_inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/home/$USER/models/ACME/run/archive/$CASE</DOUT_S_ROOT>
    <GMAKE_J>32</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>dmricciuto</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>/projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
        <arg name="machine_file">--hostfile $ENV{PBS_NODEFILE}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable> </executable>
    </mpirun>
    <module_system type="none" />
    <RUNDIR>/home/$USER/models/ACME/run/$CASE/run</RUNDIR>
    <EXEROOT>/home/$USER/models/ACME/run/$CASE/bld</EXEROOT>
  </machine>

  <machine MACH="cades">
    <DESC>OR-CONDO, CADES-CCSI, os is Linux, 16 pes/node, batch system is SLURM</DESC>
    <NODENAME_REGEX>or-condo</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu,intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/lustre/or-hydra/cades-ccsi/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lustre/or-hydra/cades-ccsi/proj-shared/project_acme/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/lustre/or-hydra/cades-ccsi/proj-shared/tools/cprnc.orcondo</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>yinj -at- ornl.gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="openmpi" compiler="gnu">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable> </executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules compiler="gnu">
        <command name="load">PE-gnu</command>
      </modules>
      <modules>
        <command name="load">mkl/2017</command>
        <command name="load">cmake/3.12.0</command>
        <command name="load">python/2.7.12</command>
        <command name="load">nco/4.6.9</command>
        <command name="load">hdf5-parallel/1.8.17</command>
        <command name="load">netcdf-hdf5parallel/4.3.3.1</command>
        <command name="load">pnetcdf/1.9.0</command>
      </modules>
    </module_system>

    <!-- Customize these fields as appropriate for your system (max tasks) and desired
         layout (change '${group}/${USER}' to your preferred location). -->
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <!-- for CLM-PFLOTRAN coupling, PETSC_PATH must be defined specifically for each machine -->
    <environment_variables compiler="gnu" mpilib="openmpi">
      <env name="PETSC_PATH">/software/user_tools/current/cades-ccsi/petsc4pf/openmpi-1.10-gcc-5.3</env>
    </environment_variables>
    <environment_variables>
      <env name="PERL5LIB">/software/user_tools/current/cades-ccsi/perl5/lib/perl5/</env>
    </environment_variables>

  </machine>

  <machine MACH="grizzly">
    <DESC>LANL Linux Cluster, 36 pes/node, batch system slurm</DESC>
    <NODENAME_REGEX>gr-fe.*.lanl.gov</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>openmpi,impi,mvapich</MPILIBS>
    <PROJECT>climateacme</PROJECT>
    <CIME_OUTPUT_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/input_data</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/input_data/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/turquoise/usr/projects/climate/SHARED_CLIMATE/software/wolf/cprnc/v0.40/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>luke.vanroekel @ gmail.com</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="sh">/etc/profile.d/z00_lmod.sh</init_path>
      <init_path lang="csh">/etc/profile.d/z00_lmod.csh</init_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.16.2</command>
      </modules>
      <modules compiler="gnu" mpilib="openmpi">
        <command name="load">gcc/6.4.0</command>
        <command name="load">openmpi/2.1.2</command>
      </modules>
      <modules compiler="gnu" mpilib="mvapich">
        <command name="load">gcc/6.4.0</command>
        <command name="load">mvapich2/2.3</command>
      </modules>
      <modules compiler="intel" mpilib="impi">
        <command name="load">intel/19.0.4</command>
        <command name="load">intel-mpi/2019.4</command>
      </modules>
      <modules compiler="intel" mpilib="mvapich">
        <command name="load">intel/18.0.2</command>
        <command name="load">mvapich2/2.2</command>
      </modules>
      <modules compiler="intel" mpilib="openmpi">
        <command name="load">intel/19.0.4</command>
        <command name="load">openmpi/2.1.2</command>
      </modules>
      <modules>
        <command name="load">friendly-testing</command>
        <command name="load">hdf5-parallel/1.8.16</command>
        <command name="load">pnetcdf/1.11.2</command>
        <command name="load">netcdf-h5parallel/4.7.3</command>
        <command name="load">mkl/2019.0.4</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="PNETCDF_PATH">$ENV{PNETCDF_PATH}</env>
      <env name="NETCDF_PATH">$ENV{NETCDF_PATH}</env>
      <env name="MKLROOT">$ENV{MKLROOT}</env>
      <env name="PNETCDF_HINTS">romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable</env>
    </environment_variables>
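    <!-- Descriptive note: PNETCDF_HINTS above passes ROMIO MPI-IO hints that disable data
         sieving (romio_ds_write, romio_ds_read) and enable collective buffering
         (romio_cb_write, romio_cb_read), a typical tuning for parallel netCDF output on Lustre
         (hint semantics are as documented for ROMIO, not verified here). -->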
  </machine>

  <machine MACH="badger">
    <DESC>LANL Linux Cluster, 36 pes/node, batch system slurm</DESC>
    <NODENAME_REGEX>ba-fe.*.lanl.gov</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>openmpi,impi,mvapich</MPILIBS>
    <PROJECT>climateacme</PROJECT>
    <CIME_OUTPUT_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/input_data</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/input_data/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lustre/scratch4/turquoise/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/turquoise/usr/projects/climate/SHARED_CLIMATE/software/wolf/cprnc/v0.40/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks"> -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="sh">/etc/profile.d/z00_lmod.sh</init_path>
      <init_path lang="csh">/etc/profile.d/z00_lmod.csh</init_path>
      <cmd_path lang="perl">/usr/share/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.16.2</command>
      </modules>
      <modules compiler="gnu" mpilib="openmpi">
        <command name="load">gcc/6.4.0</command>
        <command name="load">openmpi/2.1.2</command>
      </modules>
      <modules compiler="gnu" mpilib="mvapich">
        <command name="load">gcc/6.4.0</command>
        <command name="load">mvapich2/2.3</command>
      </modules>
      <modules compiler="intel" mpilib="impi">
        <command name="load">intel/19.0.4</command>
        <command name="load">intel-mpi/2019.4</command>
      </modules>
      <modules compiler="intel" mpilib="mvapich">
        <command name="load">intel/18.0.2</command>
        <command name="load">mvapich2/2.2</command>
      </modules>
      <modules compiler="intel" mpilib="openmpi">
        <command name="load">intel/19.0.4</command>
        <command name="load">openmpi/2.1.2</command>
      </modules>
      <modules>
        <command name="load">friendly-testing</command>
        <command name="load">hdf5-parallel/1.8.16</command>
        <command name="load">pnetcdf/1.11.2</command>
        <command name="load">netcdf-h5parallel/4.7.3</command>
        <command name="load">mkl/2019.0.4</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="PNETCDF_PATH">$ENV{PNETCDF_PATH}</env>
      <env name="NETCDF_PATH">$ENV{NETCDF_PATH}</env>
      <env name="MKLROOT">$ENV{MKLROOT}</env>
      <env name="PNETCDF_HINTS">romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable</env>
    </environment_variables>
  </machine>

  <machine MACH="mesabi">
    <DESC>Mesabi batch queue</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/home/reichpb/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/home/reichpb/shared/cesm_inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/home/reichpb/shared/cesm_inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>USERDEFINED_optional_run</DOUT_S_ROOT>
    <BASELINE_ROOT>USERDEFINED_optional_run/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>USERDEFINED_optional_test</CCSM_CPRNC>
    <GMAKE_J>2</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>chen1718 at umn dot edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
        <arg name="num_tasks"> -n {{ total_tasks }}</arg>
        <arg name="tasks_per_numa"> -S {{ tasks_per_numa }}</arg>
        <arg name="tasks_per_node"> -N $MAX_MPITASKS_PER_NODE</arg>
        <arg name="thread_count"> -d $ENV{OMP_NUM_THREADS}</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
    <RUNDIR>$CASEROOT/run</RUNDIR>
    <!-- complete path to the run directory -->
    <EXEROOT>$CASEROOT/exedir</EXEROOT>
    <!-- complete path to the build directory -->
  </machine>

  <machine MACH="itasca">
    <DESC>Itasca batch queue</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/home/reichpb/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/home/reichpb/shared/cesm_inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/home/reichpb/shared/cesm_inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>USERDEFINED_optional_run</DOUT_S_ROOT>
    <BASELINE_ROOT>USERDEFINED_optional_run/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>USERDEFINED_optional_test</CCSM_CPRNC>
    <GMAKE_J>2</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>chen1718 at umn dot edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>8</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>8</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
        <arg name="num_tasks"> -n {{ total_tasks }}</arg>
        <arg name="tasks_per_numa"> -S {{ tasks_per_numa }}</arg>
        <arg name="tasks_per_node"> -N $MAX_MPITASKS_PER_NODE</arg>
        <arg name="thread_count"> -d $ENV{OMP_NUM_THREADS}</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
    <RUNDIR>$CASEROOT/run</RUNDIR>
    <!-- complete path to the run directory -->
    <EXEROOT>$CASEROOT/exedir</EXEROOT>
    <!-- complete path to the build directory -->
  </machine>

  <machine MACH="lawrencium-lr3">
    <DESC>Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM</DESC>
    <NODENAME_REGEX>n000*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CHARGE_ACCOUNT>ac_acme</CHARGE_ACCOUNT>
    <CIME_OUTPUT_ROOT>/global/scratch/$ENV{USER}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/global/scratch/$ENV{USER}/cesm_input_datasets/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>rgknox and glemieux at lbl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>8</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>8</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> -npernode $MAX_MPITASKS_PER_NODE</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> -npernode $MAX_MPITASKS_PER_NODE</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="perl">/usr/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/Modules/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="perl">/usr/Modules/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/Modules/bin/modulecmd python</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.15.0</command>
        <command name="load">perl</command>
        <command name="load">xml-libxml/2.0116</command>
        <command name="load">python/3.6</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/2016.4.072</command>
        <command name="load">mkl</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial">
        <command name="load">netcdf/4.4.1.1-intel-s</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial">
        <command name="load">openmpi</command>
        <command name="load">netcdf/4.4.1.1-intel-p</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc/6.3.0</command>
        <command name="load">lapack/3.8.0-gcc</command>
      </modules>
      <modules compiler="gnu" mpilib="mpi-serial">
        <command name="load">netcdf/5.4.1.1-gcc-s</command>
        <command name="unload">openmpi/2.0.2-gcc</command>
      </modules>
      <modules compiler="gnu" mpilib="!mpi-serial">
        <command name="load">openmpi/3.0.1-gcc</command>
        <command name="load">netcdf/4.4.1.1-gcc-p</command>
        <command name="unload">openmpi/2.0.2-gcc</command>
      </modules>

    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
  </machine>

  <machine MACH="lawrencium-lr6">
    <DESC>Lawrencium LR6 cluster at LBL, OS is Linux (intel), batch system is SLURM</DESC>
    <NODENAME_REGEX>n000*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CHARGE_ACCOUNT>ac_acme</CHARGE_ACCOUNT>
    <CIME_OUTPUT_ROOT>/global/scratch/$ENV{USER}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/global/scratch/$ENV{USER}/cesm_input_datasets/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>rgknox and glemieux at lbl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="default">

      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-np {{ total_tasks }}</arg>
      </arguments>
      
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="perl">/usr/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/Modules/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="perl">/usr/Modules/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/Modules/bin/modulecmd python</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake/3.15.0</command>
        <command name="load">perl</command>
        <command name="load">xml-libxml/2.0116</command>
        <command name="load">python/3.6</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/2016.4.072</command>
        <command name="load">mkl</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial">
        <command name="load">netcdf/4.4.1.1-intel-s</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial">
        <command name="load">openmpi</command>
        <command name="load">netcdf/4.4.1.1-intel-p</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc/6.3.0</command>
        <command name="load">lapack/3.8.0-gcc</command>
      </modules>
      <modules compiler="gnu" mpilib="mpi-serial">
        <command name="load">netcdf/5.4.1.1-gcc-s</command>
        <command name="unload">openmpi/2.0.2-gcc</command>
      </modules>
      <modules compiler="gnu" mpilib="!mpi-serial">
        <command name="load">openmpi/3.0.1-gcc</command>
        <command name="load">netcdf/4.4.1.1-gcc-p</command>
        <command name="unload">openmpi/2.0.2-gcc</command>
      </modules>

    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
  </machine>

  <machine MACH="eddi">
    <DESC>small developer workhorse at lbl climate sciences</DESC>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpi-serial</MPILIBS>
    <PROJECT>ngeet</PROJECT>
    <CIME_OUTPUT_ROOT>/raid1/lbleco/e3sm/</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/raid1/lbleco/cesm/cesm_input_datasets/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/raid1/lbleco/cesm/cesm_input_datasets/atm/datm7/</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/raid1/lbleco/acme/cesm_archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/raid1/lbleco/acme/cesm_baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/raid1/lbleco/cesm/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>1</GMAKE_J>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>rgknox at lbl gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>4</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>4</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="mpi-serial">
      <executable/>
    </mpirun>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> -npernode $MAX_MPITASKS_PER_NODE</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
  </machine>

  <machine MACH="summit">
    <DESC>ORNL Summit. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core.</DESC>
    <NODENAME_REGEX>.*summit.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>ibm,ibmgpu,pgi,pgigpu,gnu,gnugpu</COMPILERS>
    <MPILIBS>spectrum-mpi</MPILIBS>
    <PROJECT>cli115</PROJECT>
    <CHARGE_ACCOUNT>cli115</CHARGE_ACCOUNT>
    <SAVE_TIMING_DIR>/gpfs/alpine/proj-shared/cli115</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/gpfs/alpine/cli115/world-shared/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/gpfs/alpine/cli115/world-shared/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/gpfs/alpine/cli115/world-shared/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/gpfs/alpine/cli115/world-shared/e3sm/tools/cprnc.summit/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>84</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="pgigpu">18</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="gnugpu">42</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="ibmgpu">42</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>84</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="pgigpu">18</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="gnugpu">42</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="ibmgpu">42</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="spectrum-mpi">
      <executable>jsrun</executable>
      <arguments>
        <arg name="exit_on_error">-X 1</arg>
        <arg name="num_rs">$SHELL{if [ {{ total_tasks }} -eq 1 ];then echo --nrs 1 --rs_per_host 1;else echo --nrs $NUM_RS --rs_per_host $RS_PER_NODE;fi}</arg>
        <arg name="tasks_per_rs">--tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}</arg>
        <arg name="distribute">-d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}</arg>
        <arg name="cpu_per_rs">--cpu_per_rs $ENV{CPU_PER_RS}</arg>
        <arg name="gpu_per_rs">--gpu_per_rs $ENV{GPU_PER_RS}</arg>
        <arg name="task_bind">--bind packed:smt:$ENV{OMP_NUM_THREADS}</arg>
        <arg name="latency_priority">--latency_priority $ENV{LTC_PRT}</arg>
        <arg name="stdio_mode">--stdio_mode prepended</arg>
        <arg name="thread_vars">$ENV{JSRUN_THREAD_VARS}</arg>
      </arguments>
    </mpirun>
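    <!-- Explanatory note on the jsrun mapping above: tasks_per_rs and the plane distribution
         both evaluate ceil(tasks_per_node / RS_PER_NODE) via the integer idiom (a + b - 1)/b in
         bc. With the CPU-only defaults below (RS_PER_NODE=2), an 84-rank-per-node job therefore
         places 42 tasks in each of the two resource sets on a node. -->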
    <module_system type="module" allow_error="true">
      <init_path lang="sh">/sw/summit/lmod/8.4/init/sh</init_path>
      <init_path lang="csh">/sw/summit/lmod/8.4/init/csh</init_path>
      <init_path lang="python">/sw/summit/lmod/8.4/init/env_modules_python.py</init_path>
      <init_path lang="perl">/sw/summit/lmod/8.4/init/perl</init_path>
      <cmd_path lang="perl">module</cmd_path>
      <cmd_path lang="python">/sw/summit/lmod/8.4/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">DefApps</command>
        <command name="load">python/3.7-anaconda3</command>
        <command name="load">subversion/1.14.0</command>
        <command name="load">git/2.31.1</command>
        <command name="load">cmake/3.20.2</command>
        <command name="load">essl/6.3.0</command>
        <command name="load">netlib-lapack/3.8.0</command>
      </modules>
      <modules compiler="pgi.*">
        <command name="load">nvhpc/21.11</command>
      </modules>
      <modules compiler="pgigpu">
        <command name="load">cuda/10.1.243</command>
      </modules>
      <modules compiler="gnugpu">
        <command name="load">cuda/11.0.3</command>
      </modules>
      <modules compiler="ibm.*">
        <command name="load">xl/16.1.1-10</command>
      </modules>
      <modules compiler="ibmgpu">
        <command name="load">cuda/10.1.243</command>
      </modules>
      <modules compiler="gnu.*">
        <command name="load">gcc/9.1.0</command>
      </modules>
      <modules>
        <command name="load">spectrum-mpi/10.4.0.3-20210112</command>
        <command name="load">hdf5/1.10.7</command>
        <command name="load">netcdf-c/4.8.0</command>
        <command name="load">netcdf-fortran/4.4.5</command>
        <command name="load">parallel-netcdf/1.12.2</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <environment_variables>
      <env name="NETCDF_C_PATH">$ENV{OLCF_NETCDF_C_ROOT}</env>
      <env name="NETCDF_FORTRAN_PATH">$ENV{OLCF_NETCDF_FORTRAN_ROOT}</env>
      <env name="ESSL_PATH">$ENV{OLCF_ESSL_ROOT}</env>
      <env name="HDF5_PATH">$ENV{OLCF_HDF5_ROOT}</env>
      <env name="PNETCDF_PATH">$ENV{OLCF_PARALLEL_NETCDF_ROOT}</env>
      <env name="PGI_ACC_POOL_ALLOC">0</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="FALSE">
      <env name="JSRUN_THREAD_VARS"> </env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="JSRUN_THREAD_VARS">-E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M</env>
    </environment_variables>
    <environment_variables>
      <env name="RS_PER_NODE">2</env>
      <env name="CPU_PER_RS">21</env>
      <env name="GPU_PER_RS">0</env>
      <env name="LTC_PRT">cpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
      <env name="SMT_MODE">$SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}</env>
    </environment_variables>
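    <!-- Worked numbers for the CPU-only defaults above: two resource sets per node with 21
         cores and no GPUs each; NUM_RS evaluates to 2 * ceil(TOTAL_TASKS / TASKS_PER_NODE),
         i.e. two resource sets per allocated node; SMT_MODE evaluates to
         ceil(MAX_TASKS_PER_NODE / 42), so the default of 84 hardware threads per node maps to
         SMT2. -->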
    <environment_variables compiler="ibmgpu">
      <env name="RS_PER_NODE">6</env>
      <env name="CPU_PER_RS">7</env>
      <env name="GPU_PER_RS">1</env>
      <env name="LTC_PRT">gpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
    </environment_variables>
    <environment_variables compiler="pgigpu">
      <env name="RS_PER_NODE">6</env>
      <env name="CPU_PER_RS">3</env>
      <env name="GPU_PER_RS">1</env>
      <env name="LTC_PRT">gpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
    </environment_variables>
    <environment_variables compiler="gnugpu">
      <env name="RS_PER_NODE">6</env>
      <env name="CPU_PER_RS">7</env>
      <env name="GPU_PER_RS">1</env>
      <env name="LTC_PRT">gpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
    </environment_variables>
  </machine>

  <machine MACH="ascent">
    <DESC>ORNL Ascent. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core.</DESC>
    <NODENAME_REGEX>.*ascent.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>ibm,ibmgpu,pgi,pgigpu,gnu,gnugpu</COMPILERS>
    <MPILIBS>spectrum-mpi</MPILIBS>
    <PROJECT>cli115</PROJECT>
    <CHARGE_ACCOUNT>cli115</CHARGE_ACCOUNT>
    <SAVE_TIMING_DIR>/gpfs/wolf/proj-shared/$PROJECT</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>cli115</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/gpfs/wolf/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/gpfs/wolf/cli115/world-shared/e3sm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/gpfs/wolf/cli115/world-shared/e3sm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/gpfs/wolf/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/gpfs/wolf/cli115/world-shared/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/gpfs/wolf/cli115/world-shared/e3sm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>84</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="pgigpu">18</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="gnugpu">42</MAX_TASKS_PER_NODE>
    <MAX_TASKS_PER_NODE compiler="ibmgpu">42</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>84</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="pgigpu">18</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="gnugpu">42</MAX_MPITASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE compiler="ibmgpu">42</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="spectrum-mpi">
      <executable>jsrun</executable>
      <arguments>
        <arg name="exit_on_error">-X 1</arg>
        <arg name="num_rs">$SHELL{if [ {{ total_tasks }} -eq 1 ];then echo --nrs 1 --rs_per_host 1;else echo --nrs $NUM_RS --rs_per_host $RS_PER_NODE;fi}</arg>
        <arg name="tasks_per_rs">--tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}</arg>
        <arg name="distribute">-d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}</arg>
        <arg name="cpu_per_rs">--cpu_per_rs $ENV{CPU_PER_RS}</arg>
        <arg name="gpu_per_rs">--gpu_per_rs $ENV{GPU_PER_RS}</arg>
        <arg name="task_bind">--bind packed:smt:$ENV{OMP_NUM_THREADS}</arg>
        <arg name="latency_priority">--latency_priority $ENV{LTC_PRT}</arg>
        <arg name="stdio_mode">--stdio_mode prepended</arg>
        <arg name="thread_vars">$ENV{JSRUN_THREAD_VARS}</arg>
      </arguments>
    </mpirun>
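    <!-- Sketch of the jsrun geometry assembled above (a reading of the arguments, not additional
         configuration): the resource-set count and shape come from the NUM_RS / RS_PER_NODE /
         CPU_PER_RS / GPU_PER_RS environment variables defined below for each compiler variant;
         tasks_per_rs and the plane distribution are both ceil(tasks_per_node / RS_PER_NODE),
         e.g. 84 tasks per node with 2 resource sets per node gives 42 tasks per resource set;
         "bind packed:smt:N" pins each rank to OMP_NUM_THREADS hardware threads; and a single-task
         job is special-cased to one resource set on one host. -->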
    <module_system type="module" allow_error="true">
      <init_path lang="sh">/sw/ascent/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/sw/ascent/lmod/lmod/init/csh</init_path>
      <init_path lang="python">/sw/ascent/lmod/init/env_modules_python.py</init_path>
      <cmd_path lang="python">/sw/ascent/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">DefApps</command>
        <command name="load">python/3.8-anaconda3</command>
        <command name="load">git/2.31.1</command>
        <command name="load">cmake/3.22.2</command>
        <command name="load">essl/6.3.0</command>
        <command name="load">netlib-lapack/3.9.1</command>
      </modules>
      <modules compiler="pgi.*">
        <command name="load">nvhpc/21.11</command>
      </modules>
      <modules compiler="ibmgpu">
        <command name="load">cuda/10.1.243</command>
      </modules>
      <modules compiler="ibm.*">
        <command name="load">xl/16.1.1-10</command>
      </modules>
      <modules compiler="gnugpu">
        <command name="load">cuda/11.0.3</command>
      </modules>
      <modules compiler="gnu.*">
        <command name="load">gcc/9.1.0</command>
      </modules>
      <modules>
        <command name="load">spectrum-mpi/10.4.0.3-20210112</command>
        <command name="load">hdf5/1.10.7</command>
        <command name="load">netcdf-c/4.8.1</command>
        <command name="load">netcdf-fortran/4.4.5</command>
        <command name="load">parallel-netcdf/1.12.2</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>9000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="PATH">/gpfs/wolf/cli115/world-shared/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}</env>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
      <env name="ESSL_PATH">$ENV{OLCF_ESSL_ROOT}</env>
      <env name="HDF5_PATH">$ENV{OLCF_HDF5_ROOT}</env>
      <env name="PGI_ACC_POOL_ALLOC">0</env>
    </environment_variables>
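    <!-- The NETCDF_C / NETCDF_FORTRAN / PNETCDF paths above are derived at case setup from whatever
         the loaded modules put on PATH: each $SHELL{dirname $(dirname $(which nc-config))} style
         expression strips the tool name and its bin/ directory to recover the library's install
         prefix. -->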
    <environment_variables SMP_PRESENT="FALSE">
      <env name="JSRUN_THREAD_VARS"> </env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="JSRUN_THREAD_VARS">-E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M</env>
    </environment_variables>
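    <!-- JSRUN_THREAD_VARS feeds the thread_vars argument of the jsrun command above: with threading
         enabled it forwards the OpenMP settings to the launched ranks via jsrun's -E option, and
         without threading it is a single space so the argument expands to nothing. -->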
    <environment_variables>
      <env name="RS_PER_NODE">2</env>
      <env name="CPU_PER_RS">21</env>
      <env name="GPU_PER_RS">0</env>
      <env name="LTC_PRT">cpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
      <env name="SMT_MODE">$SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}</env>
    </environment_variables>
    <environment_variables compiler="ibmgpu">
      <env name="RS_PER_NODE">6</env>
      <env name="CPU_PER_RS">7</env>
      <env name="GPU_PER_RS">1</env>
      <env name="LTC_PRT">gpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
    </environment_variables>
    <environment_variables compiler="pgigpu">
      <env name="RS_PER_NODE">6</env>
      <env name="CPU_PER_RS">3</env>
      <env name="GPU_PER_RS">1</env>
      <env name="LTC_PRT">gpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
      <env name="NVCC_WRAPPER_DEFAULT_COMPILER">pgc++</env>
    </environment_variables>
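    <!-- NVCC_WRAPPER_DEFAULT_COMPILER selects the host C++ compiler invoked by Kokkos' nvcc_wrapper,
         here pgc++ for the pgigpu build (the other GPU variants leave it unset). -->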
    <environment_variables compiler="gnugpu">
      <env name="RS_PER_NODE">6</env>
      <env name="CPU_PER_RS">7</env>
      <env name="GPU_PER_RS">1</env>
      <env name="LTC_PRT">gpu-cpu</env>
      <env name="NUM_RS">$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}</env>
    </environment_variables>
  </machine>

  <machine MACH="modex">
      <DESC>Medium-sized Linux cluster at BNL, Torque scheduler.</DESC>
      <OS>LINUX</OS>
      <COMPILERS>gnu</COMPILERS>
      <MPILIBS>openmpi,mpi-serial</MPILIBS>
      <CIME_OUTPUT_ROOT>/data/$ENV{USER}</CIME_OUTPUT_ROOT>
      <DIN_LOC_ROOT>/data/Model_Data/cesm_input_datasets/</DIN_LOC_ROOT>
      <DIN_LOC_ROOT_CLMFORC>/data/Model_Data/cesm_input_datasets/atm/datm7</DIN_LOC_ROOT_CLMFORC>
      <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
      <BASELINE_ROOT>$CIME_OUTPUT_ROOT/cesm_baselines</BASELINE_ROOT>
      <CCSM_CPRNC>/data/software/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
      <GMAKE_J>4</GMAKE_J>
      <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
      <SUPPORTED_BY>sserbin@bnl.gov</SUPPORTED_BY>
      <MAX_TASKS_PER_NODE>12</MAX_TASKS_PER_NODE>
      <MAX_MPITASKS_PER_NODE>12</MAX_MPITASKS_PER_NODE>
      <COSTPES_PER_NODE>12</COSTPES_PER_NODE>
      <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
      <mpirun mpilib="mpi-serial">
          <executable></executable>
      </mpirun>
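      <!-- With mpilib=mpi-serial no launcher is used: the executable element is left empty and the
           run script invokes the model binary directly, with only the default_run_suffix defined at
           the bottom of this file appended. -->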
      <mpirun mpilib="default">
          <executable>mpirun</executable>
          <arguments>
              <arg name="num_tasks">-np {{ total_tasks }}</arg>
              <arg name="tasks_per_node">-npernode $MAX_TASKS_PER_NODE</arg>
          </arguments>
      </mpirun>
      <module_system type="module">
          <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
          <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
          <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
          <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
          <cmd_path lang="sh">module</cmd_path>
          <cmd_path lang="csh">module</cmd_path>
          <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
          <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
          <modules>
              <command name="purge"/>
              <command name="load">perl/5.22.1</command>
              <command name="load">libxml2/2.9.2</command>
              <command name="load">maui/3.3.1</command>
              <command name="load">python/2.7.15</command>
              <command name="load">python/3.6.2</command>
          </modules>
          <modules compiler="gnu">
              <command name="load">gcc/5.4.0</command>
              <command name="load">gfortran/5.4.0</command>
              <command name="load">hdf5/1.8.19fates</command>
              <command name="load">netcdf/4.4.1.1-gnu540-fates</command>
              <command name="load">openmpi/2.1.1-gnu540</command>
          </modules>
          <modules compiler="gnu" mpilib="!mpi-serial">
              <command name="load">openmpi/2.1.1-gnu540</command>
          </modules>
       </module_system>
       <environment_variables>
         <env name="HDF5_HOME">/data/software/hdf5/1.8.19fates</env>
         <env name="NETCDF_PATH">/data/software/netcdf/4.4.1.1-gnu540-fates</env>
       </environment_variables>
  </machine>

  <machine MACH="tulip">
    <DESC>ORNL experimental/evaluation cluster</DESC>
    <NODENAME_REGEX>tulip.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/home/groups/coegroup/e3sm/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/home/groups/coegroup/e3sm/inputdata2</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/home/groups/coegroup/e3sm/inputdata2/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/home/groups/coegroup/e3sm/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/home/groups/coegroup/e3sm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>16</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">--tag-output -n {{ total_tasks }} </arg>
        <arg name="tasks_per_node"> --map-by ppr:1:core:PE=$ENV{OMP_NUM_THREADS} --bind-to core </arg>
      </arguments>
    </mpirun>
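    <!-- Launcher notes (descriptive only): Open MPI's "tag-output" prefixes each output line with
         the emitting rank, and the "map-by ppr:1:core:PE=$OMP_NUM_THREADS" / "bind-to core"
         combination is intended to reserve one core per OpenMP thread for every MPI rank. -->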
    <module_system type="module">
      <init_path lang="python">/cm/local/apps/environment-modules/current/init/python</init_path>
      <init_path lang="sh">/cm/local/apps/environment-modules/current/init/sh</init_path>
      <init_path lang="csh">/cm/local/apps/environment-modules/current/init/csh</init_path>
      <cmd_path lang="python">/cm/local/apps/environment-modules/current/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="rm">gcc</command>
        <command name="rm">cce</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="rm">cray-mvapich2</command>
        <command name="load">cmake/3.17.0</command>
        <command name="use">/home/users/twhite/share/modulefiles</command>
        <command name="load">svn/1.10.6</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc/8.1.0</command>
        <command name="load">blas/gcc/64/3.8.0</command>
        <command name="load">lapack/gcc/64/3.8.0</command>
      </modules>
    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables>
      <env name="PERL5LIB">/home/groups/coegroup/e3sm/soft/perl5/lib/perl5</env>
    </environment_variables>
    <environment_variables compiler="gnu">
      <env name="NETCDF_PATH">/home/groups/coegroup/e3sm/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc/8.2.0</env>
    </environment_variables>
    <environment_variables compiler="gnu" mpilib="openmpi">
      <env name="OMPI_CC">gcc</env>
      <env name="OMPI_CXX">g++</env>
      <env name="OMPI_FC">gfortran</env>
      <env name="PATH">/home/groups/coegroup/e3sm/soft/openmpi/2.1.6/gcc/8.2.0/bin:$ENV{PATH}</env>
      <env name="LD_LIBRARY_PATH">/home/groups/coegroup/e3sm/soft/openmpi/2.1.6/gcc/8.2.0/lib:/home/groups/coegroup/e3sm/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc/8.2.0/lib:$ENV{LD_LIBRARY_PATH}</env>
      <env name="PNETCDF_PATH">/home/groups/coegroup/e3sm/soft/pnetcdf/1.12.1/gcc/8.2.0/openmpi/2.1.6</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="gcp">
    <DESC>Google Cloud cluster with c2-compute-60 nodes</DESC>
    <NODENAME_REGEX>gcp.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/home/$USER/e3sm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/home/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/home/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/home/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/home/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>16</GMAKE_J>
    <TESTS>e3sm_developer</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>e3sm</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>60</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>30</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="openmpi">
      <executable>srun</executable>
      <arguments>
        <arg name="pmi_layer"> --mpi=pmi2</arg>
        <arg name="label"> --label</arg>
        <arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }}</arg>
        <arg name="thread_count">-c $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/ {{ tasks_per_node }} |bc}</arg>
        <arg name="binding"> $SHELL{if [ `./xmlquery --value MAX_TASKS_PER_NODE` -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
        <arg name="placement">-m plane={{ tasks_per_node }}</arg>
      </arguments>
    </mpirun>
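    <!-- How the srun line above is sized (worked from the XML values in this entry): the
         thread_count argument requests MAX_TASKS_PER_NODE / tasks_per_node CPUs per task
         (60 / 30 = 2 when fully packed with MPI ranks), the binding argument picks cpu_bind=cores
         because MAX_TASKS_PER_NODE (60) is not smaller than MAX_MPITASKS_PER_NODE (30), and
         "-m plane=N" places ranks on nodes in blocks of N. -->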

    <module_system type="module">
      <init_path lang="python">/usr/share/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>

      <cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>

      <modules>
        <command name="unload">gcc</command>
        <command name="unload">openmpi</command>
      </modules>

      <modules compiler="gnu">
        <command name="load">gcc/8.5.0-gcc@8.5.0</command>
      </modules>

      <modules mpilib="openmpi">
        <command name="load">openmpi/4.1.1-gcc@8.5.0</command>
      </modules>

      <modules compiler="gnu">
        <command name="load">cmake</command>
        <command name="load">perl</command>
        <command name="load">perl-xml-libxml</command>
        <command name="load">netcdf-c</command>
        <command name="load">netcdf-cxx</command>
        <command name="load">netcdf-fortran</command>
        <command name="load">parallel-netcdf</command>
        <command name="load">hdf5</command>
        <command name="load">netlib-lapack</command>
        <command name="load">openblas</command>
      </modules>

    </module_system>
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <environment_variables compiler="gnu">
      <env name="HDF5_PATH">$SHELL{dirname $(dirname $(which h5diff))}</env>
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf-config))}</env>
      <env name="OPENBLAS_PATH">/apps/spack/opt/spack/linux-centos7-skylake_avx512/gcc-8.5.0/openblas-0.3.17-avqlf5r7hpzvwesfqg25awhqrpypwgfe</env>
      <env name="LAPACK_PATH">/apps/spack/opt/spack/linux-centos7-skylake_avx512/gcc-8.5.0/netlib-lapack-3.9.1-gton74muo2csn5l3bzf636276ig6h3yx</env>
      <env name="HDF5_USE_FILE_LOCKING">FALSE</env>
    </environment_variables>

    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="fugaku">
    <DESC>RIKEN-CCS Fugaku: Fujitsu A64FX 48 cores/node.</DESC>
    <NODENAME_REGEX>fn01sv.*</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu,fj</COMPILERS>
    <MPILIBS>fujitsu</MPILIBS>
    <PROJECT>hp210190</PROJECT>
    <SAVE_TIMING_DIR>/data/hp210190/</SAVE_TIMING_DIR>
    <SAVE_TIMING_DIR_PROJECTS>.*</SAVE_TIMING_DIR_PROJECTS>
    <CIME_OUTPUT_ROOT>/data/hp210190/$USER/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/data/hp210190/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/data/hp210190/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/data/hp210190/$USER/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/data/hp210190/baselines/$COMPILER</BASELINE_ROOT>
    <CCSM_CPRNC>/data/hp210190/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <TESTS>e3sm_integration</TESTS>
    <NTEST_PARALLEL_JOBS>4</NTEST_PARALLEL_JOBS>
    <BATCH_SYSTEM>moab</BATCH_SYSTEM>
    <SUPPORTED_BY>E3SM</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks">-n {{ total_tasks }} -std e3sm.log.$LID </arg>
      </arguments>
    </mpirun>
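    <!-- The extra "-std e3sm.log.$LID" flag appears to be Fujitsu MPI's stdout redirection option,
         sending model output straight into the e3sm log rather than relying on the shell redirect
         in default_run_misc_suffix. -->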
    <module_system type="soft">
      <init_path lang="sh">/vol0003/hp210190/data/soft/spack-v0.17.0/share/spack/setup-env.sh</init_path>
      <init_path lang="csh">/vol0003/hp210190/data/soft/spack-v0.17.0/share/spack/setup-env.csh</init_path>
      <cmd_path lang="csh">spack</cmd_path>
      <cmd_path lang="sh">spack</cmd_path>
      <modules compiler="gnu">
        <command name="unload">--all</command>
        <command name="load">gcc @11.2.0%gcc@8.4.1  arch=linux-rhel8-a64fx</command>
        <command name="load">fujitsu-mpi @head%gcc@11.2.0  arch=linux-rhel8-a64fx</command>
        <command name="find">--loaded;ln -sf /lib64/libhwloc.so.15 /tmp/libhwloc.so.5</command>
      </modules>
      <modules compiler="fj">
        <command name="unload">--all</command>
        <command name="load">netcdf-c       @4.8.1 %fj@4.7.0 arch=linux-rhel8-a64fx</command>
        <command name="load">netcdf-cxx     @4.2   %fj@4.7.0 arch=linux-rhel8-a64fx</command>
        <command name="load">netcdf-fortran @4.5.3 %fj@4.7.0 arch=linux-rhel8-a64fx</command>
        <command name="load">parallel-netcdf@1.12.2%fj@4.7.0 arch=linux-rhel8-a64fx</command>
        <command name="load">netlib-lapack  @3.9.1 %fj@4.7.0 arch=linux-rhel8-a64fx</command>
        <command name="find">--loaded</command>
      </modules>
    </module_system>
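    <!-- This entry drives Spack directly instead of an Lmod/Environment Modules stack: the "soft"
         module_system runs "spack load" / "spack unload" with full specs (package @version %compiler
         arch=linux-rhel8-a64fx), and the gnu block also symlinks libhwloc into /tmp so a binary
         expecting libhwloc.so.5 resolves against the system's libhwloc.so.15 (apparently a runtime
         workaround; /tmp is prepended to LD_LIBRARY_PATH below). -->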
    <RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
    <EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
    <TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
    <MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
    <environment_variables>
      <env name="PERL5LIB">/data/hp210190/soft/perl5/lib/perl5:/home/hp210190/u02380/perl5/lib/perl5</env>
      <env name="OMPI_MCA_plm_ple_numanode_assign_policy">share_band</env>
    </environment_variables>
    <environment_variables compiler="gnu">
      <env name="NETCDF_PATH">/data/hp210190/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc8.3.1</env>
      <env name="LAPACK_PATH">/data/hp210190/soft/spack-v0.16/opt/spack/linux-rhel8-a64fx/gcc-8.3.1/netlib-lapack-3.8.0-jhmofiqoky6ajxmda5caawfhqnrirmm5</env>
      <env name="LD_LIBRARY_PATH">/tmp:/data/hp210190/soft/spack-v0.16/opt/spack/linux-rhel8-a64fx/gcc-8.3.1/netlib-lapack-3.8.0-jhmofiqoky6ajxmda5caawfhqnrirmm5/lib64:$ENV{LD_LIBRARY_PATH}</env>
    </environment_variables>
    <environment_variables compiler="fj">
      <env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
      <env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
      <env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE">
      <env name="OMP_STACKSIZE">128M</env>
    </environment_variables>
    <environment_variables SMP_PRESENT="TRUE" compiler="gnu">
      <env name="OMP_PLACES">cores</env>
    </environment_variables>
  </machine>

  <default_run_suffix>
    <default_run_exe>${EXEROOT}/e3sm.exe </default_run_exe>
    <default_run_misc_suffix> &gt;&gt; e3sm.log.$LID 2&gt;&amp;1 </default_run_misc_suffix>
  </default_run_suffix>
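  <!-- The launch command for every machine above is assembled from the machine's mpirun executable
       and arguments, followed by default_run_exe and then default_run_misc_suffix, which appends
       both stdout and stderr of the model to e3sm.log.$LID in the run directory. -->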

</config_machines>