SLURM_sub.sh
#!/bin/bash
#
# -------------------------------------------------------------------- #
#                                                                      #
#   Bash script to submit a single job on Habanero using SLURM         #
#                                                                      #
#   last modified: 3/17/2019 RM                                        #
#                                                                      #
# -------------------------------------------------------------------- #
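#
# Typical usage from the login node (a sketch; the account, paths, and
# input-file names below are project-specific):
#   sbatch SLURM_sub.sh      # submit this script
#   squeue -u $USER          # monitor the queued/running job
#   scancel <jobid>          # cancel the job if needed
#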
#
# The account name for the job.
#SBATCH --account=sun
#
# The job name
#SBATCH --job-name=polycrystal_3d
#
# The number of CPU cores to use
#SBATCH --cpus-per-task=9
#
# Wall-clock time limit, hours:minutes:seconds
#SBATCH --time=07:00:00
#
# The memory the job will use per CPU core
#SBATCH --mem-per-cpu=2gb
#
# User notification
#SBATCH --mail-type=ALL
#SBATCH --mail-user=rm3681@columbia.edu
#
# Standard output and error log files (%j expands to the job ID)
#SBATCH -o simulation.%j.log
#SBATCH -e simulation.%j.err
#
# Add the user's bin directory (frequently used links) to the PATH
#
PATH=$PATH:~/bin
#
# Load the modules required by the code.
#
source /etc/profile.d/modules.sh
module load intel-parallel-studio/2019
export INTEL=/rigel/opt/parallel_studio_xe_2019/compilers_and_libraries_2019.0.117/linux
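#
# To verify the environment on the compute node, a quick check (sketch):
#   module list              # confirm intel-parallel-studio is loaded
#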
#
# Create the per-job working directory ${WORKDIR}
#
SERVER=$SLURM_SUBMIT_HOST                            # host the job was submitted from
WORKDIR=/rigel/sun/users/${USER}/SLURM_$SLURM_JOB_ID # per-job scratch directory
SCP=/usr/bin/scp                                     # helpers for optional remote transfers
SSH=/usr/bin/ssh
PERMDIR=${SLURM_SUBMIT_DIR}                          # permanent (submit) directory
SERVPERMDIR=${SLURM_SUBMIT_HOST}:${PERMDIR}          # remote form of PERMDIR
mkdir -p ${WORKDIR}
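#
# ${SSH} and ${SCP} are kept for an optional remote stage-out; a minimal
# sketch (not run by default; results.dat is a hypothetical output file):
#   ${SCP} ${WORKDIR}/results.dat ${SERVPERMDIR}/
#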
###############################################################
#                                                             #
#   Transfer files from server to local disk.                 #
#                                                             #
###############################################################
stagein()
{
  cd ${SLURM_SUBMIT_DIR}
  cp -u * ${WORKDIR}   # copy only files newer than those already in WORKDIR
  cd ${WORKDIR}
}
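#
# An rsync-based stage-in is an alternative sketch when re-running jobs
# (archive mode, skipping files that are already up to date):
#   rsync -au ${SLURM_SUBMIT_DIR}/ ${WORKDIR}/
#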
###############################################################
#                                                             #
#   Execute the run. Do not run in the background.            #
#                                                             #
###############################################################
runprogram()
{
  ### Define the number of processors from the SLURM allocation.
  NPROCS=$SLURM_CPUS_PER_TASK
  ### Run the parallel OpenMP executable staged into WORKDIR.
  export OMP_NUM_THREADS=$NPROCS
  ./FFT_finite_3d.exe <JC_test.inp >${SLURM_SUBMIT_DIR}/woutput
}
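#
# Optional OpenMP thread pinning (a sketch; export these inside
# runprogram, before the executable line, to bind threads to cores):
#   export OMP_PROC_BIND=true
#   export OMP_PLACES=cores
#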
###############################################################
#                                                             #
#   Copy necessary files back to permanent directory.         #
#                                                             #
###############################################################
stageout()
{
  cd ${WORKDIR}
  cp -u * ${SLURM_SUBMIT_DIR}   # copy results back only if newer
}
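#
# To copy back only selected results, a sketch (the patterns are
# placeholders for whatever files the executable writes):
#   cp -u *.out *.log ${SLURM_SUBMIT_DIR}
#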
###############################################################
#                                                             #
#   Staging in, running the job, and staging out              #
#   were specified above as functions. Now                    #
#   call these functions to perform the actual                #
#   file transfers and program execution.                     #
#                                                             #
###############################################################
stagein
runprogram
# stageout   # uncomment to copy all results back to the submit directory
###############################################################
#                                                             #
#   The epilogue script automatically deletes the directory   #
#   created on the local disk (including all files contained  #
#   therein).                                                 #
#                                                             #
###############################################################
# cd ${WORKDIR}
# rm *
# cd ${SLURM_SUBMIT_DIR}
exit
# End of script