StarCCM+ sbatch template for Mechthild

These scripts are updated from time to time, so review them once in a while to make sure you are using the current version.

If a job is killed before it can close itself, use the manual clean-up script.

A feature of this job script is that it automatically writes a stop file (ABORT) shortly before the SLURM job ends. This stop file can be picked up by a stopping file criterion in StarCCM+.
For this reason, each simulation file needs to be located in its own directory! Otherwise a single stop file could accidentally close other jobs running in the same directory.
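
A minimal sketch of such a layout (the case directories are only an example): one directory per simulation, each holding its own sim file and its own copy of the job script, with every job submitted from inside its directory. Remember to adjust WORKDIR in each copy accordingly.

  # one simulation per directory, e.g. caseA and caseB in the home directory
  cd /mechthild/home/$USER/caseA && sbatch job-starccm.sh
  cd /mechthild/home/$USER/caseB && sbatch job-starccm.sh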

Windows users: please make sure to convert the script with dos2unix on the Linux machine, and read the article on Linebreaks.
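
For example, on the Linux machine (assuming the script is saved as job-starccm.sh):

  dos2unix job-starccm.sh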

job-starccm.sh
#!/bin/bash
#################### Job Settings #################################################################
# Specific Commands for the work load manager SLURM are lines beginning with "#SBATCH"
#SBATCH -J jobname             # Setting the display name for the submission
#SBATCH -N 10                  # Number of nodes to reserve, -N 2-5  for variable number of requested node count
#SBATCH --ntasks-per-node 16   # typically 16, range: 1..16 (max 16 cores per node)
#SBATCH -t 5-01:00:00          # walltime limit, formats: hh:mm:ss, days-hh, days-hh:mm:ss
#SBATCH -p long                # Desired Partition, alternatively comment this line out and submit the script with 'sbatch -p big jobscript.sh'
#SBATCH --mem 120G             # Memory limit per node (here 120 GB)
#SBATCH --exclusive=user       # Don't share nodes
#SBATCH --signal=B:USR1@120    # Sends a signal 120 seconds before the end of the job to this script
 
#################### Mechthild Specific Setup #####################################################
# Setup OpenMP
if [ -n "$SLURM_CPUS_PER_TASK" ]; then  omp_threads=$SLURM_CPUS_PER_TASK; else   omp_threads=1; fi
export OMP_NUM_THREADS=$omp_threads
# load the modules system
source /etc/profile.d/modules.sh
 
## Make custom modules available, see wiki for example. Modify path to match custom location
module use --append /mechthild/home/<user>/.modulefiles
 
 
#################### Simulation Settings ##########################################################
## Work directory. No "/" at the end.
WORKDIR="/mechthild/home/$USER"
 
## Simulation File (location in work directory)
SIMULATIONFILE="star.sim"
 
## Macro file (location in work directory)
MACROFILE="macro.java"
 
## Personal POD key
PERSONAL_PODKEY=""
 
## Select the desired version by loading the matching module; comment out the versions you do not want.
## StarCCM+ needs to be installed locally, because commercial software is not really wanted on Mechthild.
## See wiki page for custom modulefile
module load apps/starCCM/13.02.013
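## Example: to switch to another installed version (the version below is only a
## placeholder), comment out the line above and load that module instead:
# module load apps/starCCM/<other-version>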
 
## Application. Can be kept constant if modules are used.
APPLICATION="starccm+"
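## Option overview: -batch runs the macro without a GUI, -power and -podkey
## select Power-on-Demand licensing with your personal key, -licpath points to
## the POD license server, -rsh sets the remote shell, -mpi selects the MPI.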
OPTIONS="$WORKDIR/$SIMULATIONFILE -batch $WORKDIR/$MACROFILE -licpath 1999@flex.cd-adapco.com -power -podkey $PERSONAL_PODKEY -collab -time -rsh /usr/bin/ssh -mpi intel"
 
#################### Printing some Debug Information ##############################################
# simplify debugging:
echo "SLURM_JOB_NODELIST=$SLURM_JOB_NODELIST"
echo "SLURM_NNODES=$SLURM_NNODES SLURM_TASKS_PER_NODE=$SLURM_TASKS_PER_NODE"
env | grep -e MPI -e SLURM
echo "host=$(hostname) pwd=$(pwd) ulimit=$(ulimit -v) \$1=$1 \$2=$2"
exec 2>&1 # send errors into stdout stream
 
 
# Loading modules
module load mpi/intel/2018.1
 
# list and echo loaded Modules
echo "Loaded Modules: $LOADEDMODULES"
 
## Change into Work Directory
cd $WORKDIR; echo pwd=$(pwd)
#
echo OMP_NUM_THREADS=$OMP_NUM_THREADS
 
[ "$SLURM_NNODES" ] && [ $SLURM_NNODES -lt 4 ] && srun bash -c "echo task \$SLURM_PROCID of \$SLURM_NPROCS runs on \$SLURMD_NODENAME"
 
 
#################### Signal Trap ##################################################################
## Catches signal from slurm to write an ABORT file in the WORKDIR.
## This ABORT file will satisfy the stop file criterion in StarCCM.
## Change ABORTFILENAME if you changed the stop file Criterion.
ABORTFILENAME="ABORT"
## Location where Starccm is looking for the abort file
ABORTFILELOCATION=$WORKDIR/$ABORTFILENAME
 
# remove old abort file
rm -f $ABORTFILELOCATION
# Signal handler
write_abort_file()
{
        echo "$(date +%Y-%m-%d_%H:%M:%S) The End-of-Job signal has been trapped."
        echo "Writing abort file..."
        touch $ABORTFILELOCATION
}
# Trapping signal handler
echo "Trapping handler for End-of-Job signal"
trap 'write_abort_file' USR1
 
 
#################### Preparing the Simulation #####################################################
## creating machinefile & temp in work directory
MACHINEFILE="machinefile.$SLURM_JOBID.txt"
scontrol show hostnames $SLURM_JOB_NODELIST > $WORKDIR/$MACHINEFILE
 
 
#################### Running the simulation #######################################################
## Let StarCCM+ wait for licenses on startup
export STARWAIT=1
 
## Start time stamp
echo "Start of the simulation: $(date +%Y-%m-%d_%H:%M:%S_%s_%Z)" # date as YYYY-MM-DD_HH:MM:SS_Ww_ZZZ
 
## Command to run application (StarCCM+)
## StarCCM+ is started in the background, because bash executes the USR1 trap
## only while it is waiting, not while a foreground command is running.
$APPLICATION $OPTIONS -np $SLURM_NPROCS -machinefile $WORKDIR/$MACHINEFILE > $WORKDIR/$SIMULATIONFILE.$SLURM_JOBID.output.log 2>&1 &
STARPID=$!
wait $STARPID
## If the trap interrupted the first 'wait', wait again so StarCCM+ can react
## to the ABORT file, save and exit cleanly.
wait $STARPID 2>/dev/null
 
 
## Final time stamp
echo "Simulation finalized at: $(date +%Y-%m-%d_%H:%M:%S_%s_%Z)"

Module file

apps/starCCM/13.02.013
#%Module -*- tcl -*-
##
## StarCCM+ Module file
##
proc ModulesHelp { } {
 
  puts stderr "\tAdds StarCCM+ to your environment variables,"
  puts stderr "TAGS: fluid dynamics"
}
 
module-whatis "STAR-CCM+ 13.02.013-R8"

## update root to match your installation directory
set             root /mechthild/home/<user>/local/StarCCM
prepend-path    PATH            $root/13.02.013-R8/STAR-CCM+13.02.013-R8/star/bin
setenv          CDLMD_LICENSE_FILE $root/FLEXlm/11_14_0_2/bin
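
A quick way to check that the custom module is found (path as in the job script, replace <user> with your user name):

  module use --append /mechthild/home/<user>/.modulefiles
  module avail apps/starCCM
  module show apps/starCCM/13.02.013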