From fe0f4f659ea8759d7262856834e9410b10df3c25 Mon Sep 17 00:00:00 2001 From: Paul Kent Date: Fri, 21 Feb 2025 09:19:49 -0500 Subject: [PATCH] Update ORNL nightlies --- .../nightly_test_scripts/nightly_ornl.sh | 105 +++------ .../nightly_test_scripts/ornl_setup.sh | 20 +- .../ornl_setup_environments.sh | 215 +++++++++--------- .../nightly_test_scripts/ornl_versions.sh | 25 +- 4 files changed, 154 insertions(+), 211 deletions(-) diff --git a/tests/test_automation/nightly_test_scripts/nightly_ornl.sh b/tests/test_automation/nightly_test_scripts/nightly_ornl.sh index b3449ff121..0c2709e9ed 100755 --- a/tests/test_automation/nightly_test_scripts/nightly_ornl.sh +++ b/tests/test_automation/nightly_test_scripts/nightly_ornl.sh @@ -84,14 +84,14 @@ case "$ourhostname" in ;; nitrogen2 ) if [[ $jobtype == "nightly" ]]; then - buildsys="gccnewnompi gccnewnompi_complex gccnewnompi_debug gccnewnompi_complex_debug gccnewnompi_mixed_debug gccnewnompi_mixed_complex_debug gccnewmpi clangnewmpi \ + buildsys="amdclangnompi_offloadhip_complex gccnewnompi gccnewnompi_aocl gccnewnompi_complex gccnewnompi_debug gccnewnompi_complex_debug gccnewnompi_mixed_debug gccnewnompi_mixed_complex_debug gccnewnompi_aocl_mixed_complex_debug gccnewmpi gccnewmpi_aocl clangnewmpi \ amdclangnompi amdclangnompi_debug \ amdclangnompi_offloadhip amdclangnompi_offloadhip_debug \ - amdclangnompi_offloadhip_complex amdclangnompi_offloadhip_complex_debug \ + amdclangnompi_offloadhip_complex_debug \ amdclangnompi_offloadhip_mixed amdclangnompi_offloadhip_mixed_debug \ amdclangnompi_offloadhip_mixed_complex amdclangnompi_offloadhip_mixed_complex_debug" else - buildsys="gccnewmpi amdclangnompi gccnewnompi clangnewmpi amdclangnompi_offloadhip" + buildsys="gccnewmpi gccnewmpi_aocl amdclangnompi gccnewnompi gccnewnompi_aocl clangnewmpi amdclangnompi_offloadhip" fi export QMC_DATA=/scratch/${USER}/QMC_DATA_FULL # Route to directory containing performance test files export amdgpuarch=`/usr/bin/rocminfo | awk '/gfx/ {print $2; exit 0;}'` @@ -114,15 +114,21 @@ esac case "$jobtype" in weekly ) export PARALLELCFG="-j 48" - export QMC_OPTIONS="-DQMC_PERFORMANCE_NIO_MAX_ATOMS=256;-DQMC_PERFORMANCE_C_MOLECULE_MAX_ATOMS=64;-DQMC_PERFORMANCE_C_GRAPHITE_MAX_ATOMS=64" + export QMC_OPTIONS="-DQMC_PERFORMANCE_NIO_MAX_ATOMS=256;-DQMC_PERFORMANCE_C_GRAPHITE_MAX_ATOMS=64" + if [[ $sys == *"complex"* ]]; then + export QMC_OPTIONS="${QMC_OPTIONS};-DQMC_PERFORMANCE_C_MOLECULE_MAX_ATOMS=64" + fi export LIMITEDTESTS="--exclude-regex long-" export LESSLIMITEDTESTS="" ;; nightly ) export PARALLELCFG="-j 48" - export QMC_OPTIONS="-DQMC_PERFORMANCE_NIO_MAX_ATOMS=16;-DQMC_PERFORMANCE_C_MOLECULE_MAX_ATOMS=12;-DQMC_PERFORMANCE_C_GRAPHITE_MAX_ATOMS=16" -# export LIMITEDTESTS="--exclude-regex 'short-|long-|example'" - export LIMITEDTESTS="--label-regex deterministic" + export QMC_OPTIONS="-DQMC_PERFORMANCE_NIO_MAX_ATOMS=16;-DQMC_PERFORMANCE_C_GRAPHITE_MAX_ATOMS=16" + if [[ $sys == *"complex"* ]]; then + export QMC_OPTIONS="${QMC_OPTIONS};-DQMC_PERFORMANCE_C_MOLECULE_MAX_ATOMS=12" + fi + export LIMITEDTESTS="--label-regex deterministic" +# export LIMITEDTESTS="--exclude-regex 'long-|short-'" export LESSLIMITEDTESTS="--exclude-regex long-" ;; * ) @@ -310,8 +316,9 @@ if [[ $sys == *"nompi"* ]]; then QMCPACK_TEST_SUBMIT_NAME=GCC${compilerversion}-NoMPI CMCFG="-DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -DQMC_MPI=0" else -QMCPACK_TEST_SUBMIT_NAME=GCC${compilerversion} +QMCPACK_TEST_SUBMIT_NAME=GCC${compilerversion}-MPI CMCFG="-DCMAKE_C_COMPILER=mpicc 
-DCMAKE_CXX_COMPILER=mpicxx -DQMC_MPI=1" +export QMC_OPTIONS="${QMC_OPTIONS};-DMPIEXEC_PREFLAGS=--bind-to\;none\;--oversubscribe" export OMPI_CC=gcc export OMPI_CXX=g++ @@ -335,8 +342,9 @@ if [[ $sys == *"clang"* ]]; then QMCPACK_TEST_SUBMIT_NAME=Clang${compilerversion}-NoMPI CMCFG="-DCMAKE_C_COMPILER=$clangname -DCMAKE_CXX_COMPILER=$clangname++ -DQMC_MPI=0" else - QMCPACK_TEST_SUBMIT_NAME=Clang${compilerversion} + QMCPACK_TEST_SUBMIT_NAME=Clang${compilerversion}-MPI CMCFG="-DCMAKE_C_COMPILER=mpicc -DCMAKE_CXX_COMPILER=mpicxx -DQMC_MPI=1" + export QMC_OPTIONS="${QMC_OPTIONS};-DMPIEXEC_PREFLAGS=--bind-to\;none\;--oversubscribe" export OMPI_CC=$clangname export OMPI_CXX=$clangname++ fi @@ -346,15 +354,13 @@ if [[ $sys == *"clang"* ]]; then # Clang OpenMP offload CUDA builds. Setup here due to clang specific arguments if [[ $sys == *"offloadcuda"* ]]; then - QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-Offload-CUDA + QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-NVGPU CMCFG="$CMCFG -DCMAKE_CXX_FLAGS=-Wno-unknown-cuda-version" -# QMC_OPTIONS="${QMC_OPTIONS};-DENABLE_OFFLOAD=ON;-DUSE_OBJECT_TARGET=ON;-DENABLE_CUDA=ON;-DCMAKE_CUDA_ARCHITECTURES=70;-DCMAKE_CUDA_HOST_COMPILER=`which gcc`" - QMC_OPTIONS="${QMC_OPTIONS};-DENABLE_OFFLOAD=ON;-DUSE_OBJECT_TARGET=ON;-DENABLE_CUDA=ON;-DCMAKE_CUDA_ARCHITECTURES=70" + QMC_OPTIONS="${QMC_OPTIONS};-DQMC_GPU_ARCHS=sm_70" fi if [[ $sys == *"offloadhip"* ]]; then - QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-Offload-CUDA2HIP -# QMC_OPTIONS="${QMC_OPTIONS};-DENABLE_OFFLOAD=ON;-DENABLE_CUDA=ON;-DQMC_CUDA2HIP=ON;-DOFFLOAD_TARGET=amdgcn-amd-amdhsa;-DOFFLOAD_ARCH=$amdgpuarch" - QMC_OPTIONS="${QMC_OPTIONS};-DENABLE_OFFLOAD=ON;-DENABLE_CUDA=ON;-DQMC_CUDA2HIP=ON;-DCMAKE_HIP_ARCHITECTURES=$amdgpuarch" + QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-AMDGPU + QMC_OPTIONS="${QMC_OPTIONS};-DQMC_GPU_ARCHS=$amdgpuarch" fi fi @@ -385,8 +391,8 @@ if [[ $sys == *"nompi"* ]]; then QMCPACK_TEST_SUBMIT_NAME=Intel20${compilerversion}-NoMPI CMCFG="-DCMAKE_C_COMPILER=icc -DCMAKE_CXX_COMPILER=icpc -DQMC_MPI=0" else -QMCPACK_TEST_SUBMIT_NAME=Intel20${compilerversion} -CMCFG="-DCMAKE_C_COMPILER=mpiicc -DCMAKE_CXX_COMPILER=mpiicpc -DQMC_MPI=1" +QMCPACK_TEST_SUBMIT_NAME=Intel20${compilerversion}-MPI +CMCFG="-DCMAKE_C_COMPILER=mpiicc -DCMAKE_CXX_COMPILER=mpiicpc -DQMC_MPI=1" # Verify thread binding OK when Intel compiler added back to nightlies fi fi @@ -406,59 +412,17 @@ if [[ $sys == *"nompi"* ]]; then QMCPACK_TEST_SUBMIT_NAME=NVHPC${compilerversion}-NoMPI CMCFG="-DCMAKE_C_COMPILER=nvc -DCMAKE_CXX_COMPILER=nvc++ -DQMC_MPI=0" else -QMCPACK_TEST_SUBMIT_NAME=NVHPC${compilerversion} +QMCPACK_TEST_SUBMIT_NAME=NVHPC${compilerversion}-MPI CMCFG="-DCMAKE_C_COMPILER=mpicc -DCMAKE_CXX_COMPILER=mpicxx -DQMC_MPI=1" +export QMC_OPTIONS="${QMC_OPTIONS};-DMPIEXEC_PREFLAGS=--bind-to\;none\;--oversubscribe" export OMPI_CC=nvc export OMPI_CXX=nvc++ fi fi -# General CUDA setup for offload and legacy cuda builds -# Use system installed CUDA since this will match the driver. May conflict will a different version spack installed cuda -# TODO: Ensure consistent CUDA versions for nvhpc+cuda, spack sourced compilers etc. - -# ASSUME CORRECT CUDA VERSION ALREADY ON PATH , e.g. 
correct CUDA spack module loaded - -#if [[ $sys == *"legacycuda"* ]]; then -# if [ -e /usr/local/cuda/bin/nvcc ]; then -# export CUDAVER=`cat /usr/local/cuda/version.json | python3 -c "import sys, json; print(json.load(sys.stdin)['cuda']['version'])"` -# echo --- Found nvcc in /usr/local/cuda , apparent version $CUDAVER . Adding to PATH -# export PATH=/usr/local/cuda/bin:${PATH} -# export LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} -# else -# echo --- Did not find expected nvcc compiler for CUDA build. Error. -# exit 1 -# fi -#else -# if [[ $sys == *"offloadcuda"* ]]; then -# echo --- FORCING CUDA 11.2 FOR OFFLOAD BUILD TO WORKAROUND https://github.com/llvm/llvm-project/issues/54633 -# if [ -e /usr/local/cuda-11.2/bin/nvcc ]; then -# export CUDAVER=`cat /usr/local/cuda-11.2/version.json | python3 -c "import sys, json; print(json.load(sys.stdin)['cuda']['version'])"` -# echo --- Found nvcc in /usr/local/cuda-11.2 , apparent version $CUDAVER . Adding to PATH -# export PATH=/usr/local/cuda-11.2/bin:${PATH} -# export LD_LIBRARY_PATH=/usr/local/cuda-11.2/lib64:${LD_LIBRARY_PATH} -# else -# echo --- Did not find expected nvcc compiler for CUDA build. Error. -# exit 1 -# fi -# fi -#fi - -# Legacy CUDA builds setup -if [[ $sys == *"legacycuda"* ]]; then -# Specify GPUs for testing. Obtain device IDs via "nvidia-smi -L" -#export CUDA_VISIBLE_DEVICES= -QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-Legacy-CUDA -CMCFG="$CMCFG -DQMC_CUDA=1" -fi - -# Legacy CUDA2HIP builds setup -# TODO: Ensure consistent CUDA versions for nvhpc+cuda, spack sourced compilers etc. -if [[ $sys == *"legacycu2hip"* ]]; then - export ROCM_PATH=/opt/rocm - export PATH=${PATH}:${ROCM_PATH}/bin:${ROCM_PATH}/opencl/bin - QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-Legacy-CUDA2HIP - QMC_OPTIONS="${QMC_OPTIONS};-DQMC_CUDA=ON;-DQMC_CUDA2HIP=ON;-DCMAKE_HIP_ARCHITECTURES=${amdgpuarch}" +if [[ $sys == *"aocl"* ]]; then +QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-AOCL +export QMC_OPTIONS="${QMC_OPTIONS};-DBLA_VENDOR=AOCL" fi # MKL @@ -511,22 +475,13 @@ case "$sys" in THETESTS=${LESSLIMITEDTESTS} ;; esac -#case "$sys" in -# *intel2020*|*gccnew*|*clangnew*|*gcc*legacycuda*|*gcc*cu2hip*|amdclang*) echo "Running full ("less limited") test set for $sys" -# THETESTS=${LESSLIMITEDTESTS} -# ;; -# *) echo "Running limited test set for $sys" -# THETESTS=${LIMITEDTESTS} -# ;; -#esac -#THETESTS=$LIMITEDTESTS # for DEBUG. Remove for production. 
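Note: LIMITEDTESTS and LESSLIMITEDTESTS above are plain ctest filter arguments, selected per jobtype and handed to ctest through THETESTS below. A minimal sketch of what the two filters do when passed to ctest directly (the ctest flags are real; the timeout values are the ones this patch sets):

    # nightly: run only tests carrying the "deterministic" label
    ctest --label-regex deterministic --timeout 900 -VV
    # weekly: run everything except tests whose names match "long-"
    ctest --exclude-regex long- --timeout 10800 -VV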
echo THETESTS: ${THETESTS} if [[ $sys == *"debug"* ]]; then if [[ $jobtype == *"nightly"* ]]; then export TESTCFG="--timeout 3600 -VV" else - export TESTCFG="--timeout 7200 -VV" + export TESTCFG="--timeout 10800 -VV" fi export QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-Debug CMCFG="-DCMAKE_BUILD_TYPE=Debug $CMCFG" @@ -535,7 +490,7 @@ else if [[ $jobtype == *"nightly"* ]]; then export TESTCFG="--timeout 900 -VV" else - export TESTCFG="--timeout 7200 -VV" + export TESTCFG="--timeout 10800 -VV" fi export QMCPACK_TEST_SUBMIT_NAME=${QMCPACK_TEST_SUBMIT_NAME}-Release ctestscriptarg=release diff --git a/tests/test_automation/nightly_test_scripts/ornl_setup.sh b/tests/test_automation/nightly_test_scripts/ornl_setup.sh index 0ef7ab85cb..381253da0a 100755 --- a/tests/test_automation/nightly_test_scripts/ornl_setup.sh +++ b/tests/test_automation/nightly_test_scripts/ornl_setup.sh @@ -182,22 +182,12 @@ cd $HOME/apps/spack # For reproducibility, use a specific version of Spack # Prefer to use tagged releases https://github.com/spack/spack/releases - -git checkout 75b03bc12ffbabdfac0775ead5442c3f102f94c7 -#commit 75b03bc12ffbabdfac0775ead5442c3f102f94c7 (HEAD -> develop, origin/develop, origin/HEAD) -#Author: Adam J. Stewart -#Date: Sun Nov 24 20:55:18 2024 +0100 -# -# glib: add v2.82.2 (#47766) - -#git checkout dfab174f3100840c889e8bb939260b64d93d8dbd -#commit dfab174f3100840c889e8bb939260b64d93d8dbd (HEAD -> develop, origin/develop, origin/HEAD) -#Author: Stephen Nicholas Swatman -#Date: Mon Nov 18 14:04:52 2024 +0100 +git checkout 75c3d0a053c9705e1c1f88a94c47ffd36f4be1dd +#commit 75c3d0a053c9705e1c1f88a94c47ffd36f4be1dd (HEAD -> develop, origin/develop, origin/HEAD) +#Author: Lehman Garrison +#Date: Wed Feb 19 10:14:35 2025 -0500 # -# benchmark: add version 1.9.0 (#47658) -# -# This commit adds Google Benchmark v1.9.0. 
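Note: the git checkout lines in this hunk pin Spack to an exact commit rather than a branch, the reproducibility pattern this script relies on. A minimal sketch of the full sequence (the repository URL is the upstream Spack repo; the hash is the one this patch pins; cd $HOME/apps/spack and git log -1 appear in the script itself):

    git clone https://github.com/spack/spack.git $HOME/apps/spack
    cd $HOME/apps/spack
    git checkout 75c3d0a053c9705e1c1f88a94c47ffd36f4be1dd  # pin an exact commit, not a branch
    git log -1                                             # verify the pinned revision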
+# py-yt: add 4.4.0 and dependencies (#47571) echo --- Git version and last log entry git log -1 diff --git a/tests/test_automation/nightly_test_scripts/ornl_setup_environments.sh b/tests/test_automation/nightly_test_scripts/ornl_setup_environments.sh index bd9f6fff5d..ffb1fc1161 100755 --- a/tests/test_automation/nightly_test_scripts/ornl_setup_environments.sh +++ b/tests/test_automation/nightly_test_scripts/ornl_setup_environments.sh @@ -77,7 +77,8 @@ echo --- Host is $ourhostname theenv=envgccnewmpi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vnew} @@ -92,8 +93,13 @@ spack add openmpi@${ompi_vnew}%gcc@${gcc_vnew} spack add hdf5@${hdf5_vnew}%gcc@${gcc_vnew} +fortran +hl +mpi spack add fftw@${fftw_vnew}%gcc@${gcc_vnew} -mpi #Avoid MPI for simplicity spack add openblas%gcc@${gcc_vnew} threads=openmp -#spack add blis%gcc@${gcc_vnew} threads=openmp -#spack add libflame%gcc@${gcc_vnew} threads=openmp +if [ "$ourplatform" == "AMD" ]; then +spack add amdblis; spack add amdlibflame; #spack add amd-aocl +fi +if [ "$ourplatform" == "Intel" ]; then +spack add intel-oneapi-mkl +fi + spack add py-lxml spack add py-matplotlib @@ -102,30 +108,33 @@ spack add py-mpi4py spack add py-numpy@${numpy_vnew} spack add py-scipy spack add py-h5py ^hdf5@${hdf5_vnew}%gcc@${gcc_vnew} +fortran +hl +mpi -spack add quantum-espresso@7.4 +mpi +qmcpack +spack add quantum-espresso +mpi +qmcpack export CMAKE_BUILD_PARALLEL_LEVEL=8 # For PySCF spack add py-pyscf +spack add dftd4 #spack add rmgdft #Fails to compile with GCC14 due to bug in vendored SCALAPACK #Luxury options for actual science use: spack add py-requests # for pseudo helper spack add py-ase # full Atomic Simulation Environment -#spack add graphviz +libgd # NEXUS requires optional PNG support in dot spack add libffi spack add graphviz +pangocairo # NEXUS requires optional PNG support in dot spack add py-pydot # NEXUS optional -spack add py-spglib # NEXUS optional -spack add py-seekpath # NEXUS optional -spack add py-pycifrw # NEXUS optional + +#spack add py-spglib # NEXUS optional Forces numpy<2 currently + scikit-build issue +#spack add py-seekpath # NEXUS optional +#spack add py-pycifrw # NEXUS optional #NOT IN SPACK spack add py-cif2cell # NEXUS optional install_environment +unset CMAKE_BUILD_PARALLEL_LEVEL spack env deactivate theenv=envgccnewnompi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vnew} @@ -140,8 +149,13 @@ spack add python%gcc@${gcc_vnew} spack add hdf5@${hdf5_vnew}%gcc@${gcc_vnew} +fortran +hl ~mpi spack add fftw@${fftw_vnew}%gcc@${gcc_vnew} -mpi #Avoid MPI for simplicity spack add openblas%gcc@${gcc_vnew} threads=openmp -#spack add blis%gcc@${gcc_vnew} threads=openmp -#spack add libflame%gcc@${gcc_vnew} threads=openmp +if [ "$ourplatform" == "AMD" ]; then +spack add amdblis; spack add amdlibflame; #spack add amd-aocl +fi +if [ "$ourplatform" == "Intel" ]; then +spack add intel-oneapi-mkl +fi + spack add py-lxml spack add py-matplotlib @@ -157,7 +171,8 @@ spack env deactivate theenv=envgccoldnompi echo --- Setting up $theenv `date` spack env create $theenv -spack -e 
$theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vold} @@ -188,7 +203,8 @@ spack env deactivate theenv=envgccoldmpi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vold} @@ -213,7 +229,7 @@ spack add py-mpi4py spack add py-numpy@${numpy_vold} spack add py-scipy spack add py-h5py ^hdf5@${hdf5_vnew}%gcc@${gcc_vold} +fortran +hl +mpi -#spack add quantum-espresso@7.4 +mpi +qmcpack +#spack add quantum-espresso +mpi +qmcpack #export CMAKE_BUILD_PARALLEL_LEVEL=8 # For PySCF #spack add py-pyscf spack add rmgdft @@ -221,22 +237,23 @@ spack add rmgdft #Luxury options for actual science use: spack add py-requests # for pseudo helper spack add py-ase # full Atomic Simulation Environment -#spack add graphviz +libgd # NEXUS requires optional PNG support in dot spack add libffi spack add graphviz +pangocairo # NEXUS requires optional PNG support in dot spack add py-pydot # NEXUS optional -spack add py-spglib # NEXUS optional -spack add py-seekpath # NEXUS optional -spack add py-pycifrw # NEXUS optional +#spack add py-spglib # NEXUS optional +#spack add py-seekpath # NEXUS optional +#spack add py-pycifrw # NEXUS optional #NOT IN SPACK spack add py-cif2cell # NEXUS optional install_environment +unset CMAKE_BUILD_PARALLEL_LEVEL spack env deactivate theenv=envclangnewmpi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vnew} @@ -252,8 +269,12 @@ spack add openmpi@${ompi_vnew}%gcc@${gcc_vnew} spack add hdf5@${hdf5_vnew}%gcc@${gcc_vnew} +fortran +hl +mpi spack add fftw@${fftw_vnew}%gcc@${gcc_vnew} -mpi #Avoid MPI for simplicity spack add openblas%gcc@${gcc_vnew} threads=openmp -#spack add blis%gcc@${gcc_vnew} threads=openmp -#spack add libflame%gcc@${gcc_vnew} threads=openmp +if [ "$ourplatform" == "AMD" ]; then +spack add amdblis; spack add amdlibflame; #spack add amd-aocl +fi +if [ "$ourplatform" == "Intel" ]; then +spack add intel-oneapi-mkl +fi spack add py-lxml spack add py-matplotlib @@ -272,7 +293,8 @@ spack env deactivate theenv=envclangoffloadmpi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vllvmoffload} @@ -308,7 +330,8 @@ spack env deactivate theenv=envclangoffloadnompi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vllvmoffload} @@ -345,7 +368,8 @@ if [ "$ourplatform" == "AMD" ]; then theenv=envamdclangmpi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add 
"concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv #Use older likely offload compatible version of GCC @@ -361,8 +385,7 @@ spack add openmpi@${ompi_vnew}%gcc@${gcc_vllvmoffload} spack add hdf5@${hdf5_vold}%gcc@${gcc_vllvmoffload} +fortran +hl +mpi spack add fftw@${fftw_vold}%gcc@${gcc_vllvmoffload} -mpi #Avoid MPI for simplicity spack add openblas%gcc@${gcc_vllvmoffload} threads=openmp -#spack add blis%gcc@${gcc_vllvmoffload} threads=openmp -#spack add libflame%gcc@${gcc_vllvmoffload} threads=openmp +spack add amdblis; spack add amdlibflame; #spack add amd-aocl spack add py-lxml spack add py-matplotlib @@ -371,31 +394,7 @@ spack add py-mpi4py spack add py-numpy@${numpy_vold} spack add py-scipy spack add py-h5py ^hdf5@${hdf5_vold}%gcc@${gcc_vold} +fortran +hl +mpi -spack add quantum-espresso@7.4 +mpi +qmcpack - -#spack add gcc@${gcc_vnew} -#spack add git -#spack add ninja -#spack add cmake@${cmake_vnew} -#spack add libxml2@${libxml2_v}%gcc@${gcc_vnew} -#spack add boost@${boost_vnew}%gcc@${gcc_vnew} -#spack add util-linux-uuid%gcc@${gcc_vnew} -#spack add python%gcc@${gcc_vnew} -#spack add openmpi@${ompi_vnew}%gcc@${gcc_vnew} -#spack add hdf5@${hdf5_vnew}%gcc@${gcc_vnew} +fortran +hl +mpi -#spack add fftw@${fftw_vnew}%gcc@${gcc_vnew} -mpi #Avoid MPI for simplicity -#spack add openblas%gcc@${gcc_vnew} threads=openmp -##spack add blis%gcc@${gcc_vnew} threads=openmp -##spack add libflame%gcc@${gcc_vnew} threads=openmp -# -#spack add py-lxml -#spack add py-matplotlib -#spack add py-pandas -#spack add py-mpi4py -#spack add py-scipy -#spack add py-h5py ^hdf5@${hdf5_vnew}%gcc@${gcc_vnew} +fortran +hl +mpi -#spack add quantum-espresso@7.4 +mpi +qmcpack - +spack add quantum-espresso +mpi +qmcpack #spack add rmgdft install_environment spack env deactivate @@ -403,7 +402,8 @@ spack env deactivate theenv=envamdclangnompi echo --- Setting up $theenv `date` spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:when_possible" +spack -e $theenv config add "concretizer:unify:true" spack env activate $theenv spack add gcc@${gcc_vllvmoffload} @@ -418,8 +418,7 @@ spack add python%gcc@${gcc_vllvmoffload} spack add hdf5@${hdf5_vold}%gcc@${gcc_vllvmoffload} +fortran +hl ~mpi spack add fftw@${fftw_vold}%gcc@${gcc_vllvmoffload} -mpi #Avoid MPI for simplicity spack add openblas%gcc@${gcc_vllvmoffload} threads=openmp -#spack add blis%gcc@${gcc_vllvmoffload} threads=openmp -#spack add libflame%gcc@${gcc_vllvmoffload} threads=openmp +spack add amdblis; spack add amdlibflame; #spack add amd-aocl spack add py-lxml spack add py-matplotlib @@ -433,59 +432,61 @@ spack env deactivate fi -if [ "$ourplatform" == "Intel" ]; then -theenv=envinteloneapinompi -echo --- Setting up $theenv `date` -spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" -spack env activate $theenv - -spack add gcc@${gcc_vintel} -spack add git -spack add ninja -spack add cmake@${cmake_vnew} -spack add libxml2@${libxml2_v}%gcc@${gcc_vintel} -spack add boost@${boost_vnew}%gcc@${gcc_vintel} -spack add util-linux-uuid%gcc@${gcc_vintel} -spack add python%gcc@${gcc_vintel} -spack add hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi -spack add fftw@${fftw_vnew}%gcc@${gcc_vintel} -mpi #Avoid MPI for simplicity - -spack add py-lxml -spack add py-matplotlib -spack add py-pandas -spack add py-numpy@${numpy_vold} -spack add py-scipy -spack add py-h5py 
^hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi -install_environment -spack env deactivate - -theenv=envinteloneapimpi -echo --- Setting up $theenv `date` -spack env create $theenv -spack -e $theenv config add "concretizer:unify:when_possible" -spack env activate $theenv - -spack add gcc@${gcc_vintel} -spack add git -spack add ninja -spack add cmake@${cmake_vnew} -spack add libxml2@${libxml2_v}%gcc@${gcc_vintel} -spack add boost@${boost_vnew}%gcc@${gcc_vintel} -spack add util-linux-uuid%gcc@${gcc_vintel} -spack add python%gcc@${gcc_vintel} -spack add hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi -spack add fftw@${fftw_vnew}%gcc@${gcc_vintel} -mpi #Avoid MPI for simplicity - -spack add py-lxml -spack add py-matplotlib -spack add py-pandas -spack add py-numpy@${numpy_vold} -spack add py-scipy -spack add py-h5py ^hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi -install_environment -spack env deactivate -fi +#if [ "$ourplatform" == "Intel" ]; then +#theenv=envinteloneapinompi +#echo --- Setting up $theenv `date` +#spack env create $theenv +##spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:true" +#spack env activate $theenv +# +#spack add gcc@${gcc_vintel} +#spack add git +#spack add ninja +#spack add cmake@${cmake_vnew} +#spack add libxml2@${libxml2_v}%gcc@${gcc_vintel} +#spack add boost@${boost_vnew}%gcc@${gcc_vintel} +#spack add util-linux-uuid%gcc@${gcc_vintel} +#spack add python%gcc@${gcc_vintel} +#spack add hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi +#spack add fftw@${fftw_vnew}%gcc@${gcc_vintel} -mpi #Avoid MPI for simplicity +# +#spack add py-lxml +#spack add py-matplotlib +#spack add py-pandas +#spack add py-numpy@${numpy_vold} +#spack add py-scipy +#spack add py-h5py ^hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi +#install_environment +#spack env deactivate +# +#theenv=envinteloneapimpi +#echo --- Setting up $theenv `date` +#spack env create $theenv +##spack -e $theenv config add "concretizer:unify:when_possible" +#spack -e $theenv config add "concretizer:unify:true" +#spack env activate $theenv +# +#spack add gcc@${gcc_vintel} +#spack add git +#spack add ninja +#spack add cmake@${cmake_vnew} +#spack add libxml2@${libxml2_v}%gcc@${gcc_vintel} +#spack add boost@${boost_vnew}%gcc@${gcc_vintel} +#spack add util-linux-uuid%gcc@${gcc_vintel} +#spack add python%gcc@${gcc_vintel} +#spack add hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi +#spack add fftw@${fftw_vnew}%gcc@${gcc_vintel} -mpi #Avoid MPI for simplicity +# +#spack add py-lxml +#spack add py-matplotlib +#spack add py-pandas +#spack add py-numpy@${numpy_vold} +#spack add py-scipy +#spack add py-h5py ^hdf5@${hdf5_vnew}%gcc@${gcc_vintel} +fortran +hl ~mpi +#install_environment +#spack env deactivate +#fi # CAUTION: Removing build deps reveals which spack packages do not have correct runtime deps specified and may result in breakage #echo --- Removing build deps diff --git a/tests/test_automation/nightly_test_scripts/ornl_versions.sh b/tests/test_automation/nightly_test_scripts/ornl_versions.sh index aaa496048c..66c438789c 100755 --- a/tests/test_automation/nightly_test_scripts/ornl_versions.sh +++ b/tests/test_automation/nightly_test_scripts/ornl_versions.sh @@ -7,20 +7,16 @@ gcc_vnew=14.2.0 # Released 2024-08-01 gcc_vold=12.4.0 # Released 2024-06-20 -#gcc_vcuda=11.4.0 # https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#host-compiler-support-policy gcc_vcuda=${gcc_vold} -#gcc_vintel=13.3.0 # Compiler for 
C++ library used by Intel compiler -#gcc_vnvhpc=13.3.0 # Use makelocalrc to configure NVHPC with this compiler -#gcc_vllvmoffload=11.4.0 # Version for LLVM offload builds, should be compatible with CUDA version used gcc_vintel=${gcc_vold} gcc_vnvhpc=${gcc_vold} gcc_vllvmoffload=${gcc_vold} # LLVM # Dates at https://releases.llvm.org/ -llvm_vnew=19.1.4 # Released 2024-11-19 +llvm_vnew=19.1.7 # Released 2025-01-14 llvm_voffload=${llvm_vnew} -cuda_voffload=12.4.0 # CUDA version for offload builds +cuda_voffload=12.8.0 # CUDA version for offload builds # HDF5 # Dates at https://portal.hdfgroup.org/display/support/Downloads @@ -29,16 +25,16 @@ hdf5_vold=${hdf5_vnew} # CMake # Dates at https://cmake.org/files/ -cmake_vnew=3.30.5 +cmake_vnew=3.31.5 cmake_vold=${cmake_vnew} # OpenMPI # Dates at https://www.open-mpi.org/software/ompi/v5.0/ -ompi_vnew=5.0.5 # Released 2024-07-22 +ompi_vnew=5.0.6 # Released 2024-11-15 ompi_vold=${ompi_vold} # Libxml2 -libxml2_v=2.13.4 # Released 2024-10 See https://gitlab.gnome.org/GNOME/libxml2/-/releases +libxml2_v=2.13.5 # Released 2024-11-12 See https://gitlab.gnome.org/GNOME/libxml2/-/releases # FFTW # Dates at http://www.fftw.org/release-notes.html @@ -47,13 +43,14 @@ fftw_vold=${fftw_vnew} # Released 2018-05-28 # BOOST # Dates at https://www.boost.org/users/history/ -boost_vnew=1.86.0 # Released 2024-08-14 +boost_vnew=1.87.0 # Released 2024-12-12 boost_vold=1.79.0 # Released 2022-04-13 # Python # Use a single version to reduce dependencies. Ideally the spack prefered version. -python_version=3.13.0 -#numpy_vnew=2.1.2 -numpy_vnew=1.26.4 -numpy_vold=1.26.4 +python_version=3.13.1 + +numpy_vnew=2.2.2 +numpy_vold=2.2.2 +#numpy_vold=1.26.4
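
Note: the variables in ornl_versions.sh are sourced by the setup scripts and spliced into Spack specs. A minimal sketch of the pattern, using versions pinned by this patch (the environment name "demoenv" is hypothetical; install_environment is the helper used throughout ornl_setup_environments.sh):

    . ./ornl_versions.sh
    spack env create demoenv
    spack -e demoenv config add "concretizer:unify:true"
    spack env activate demoenv
    spack add gcc@${gcc_vnew}       # 14.2.0
    spack add cmake@${cmake_vnew}   # 3.31.5
    spack add boost@${boost_vnew}   # 1.87.0
    install_environment
    spack env deactivate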
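Note on the MPIEXEC_PREFLAGS lines added to nightly_ornl.sh: QMC_OPTIONS holds a semicolon-separated list of extra CMake options, so the backslash-escaped \; in -DMPIEXEC_PREFLAGS=--bind-to\;none\;--oversubscribe keeps those semicolons as CMake list separators inside the PREFLAGS value rather than splitting QMC_OPTIONS itself. A minimal sketch of what this amounts to at configure and test time (the source path is hypothetical; MPIEXEC_PREFLAGS is the standard FindMPI variable and the flags are real Open MPI options):

    # Equivalent cmake command line after the escaped value is expanded:
    cmake -DQMC_MPI=1 "-DMPIEXEC_PREFLAGS=--bind-to;none;--oversubscribe" /path/to/qmcpack
    # FindMPI places PREFLAGS before the test binary, so MPI tests launch roughly as:
    #   mpiexec -np <N> --bind-to none --oversubscribe <test binary> ...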