OPENMOLCAS
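Sample SLURM batch script for an OpenMolcas run: 8 MPI ranks on a single node, with the MOLCAS scratch area redirected to a per-job directory. Replace <group> with your project account.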

#!/bin/bash -x

#SBATCH --account=<group>
#SBATCH --partition=std
#SBATCH --job-name=TestMolcas
#SBATCH --output=SLURM-%x.%N.%j.out
#SBATCH --error=SLURM-%x.%N.%j.err
#SBATCH --nodes=1
#SBATCH --ntasks=8
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=1
#SBATCH --time=10:00:00
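## Optional (site-dependent assumption): request memory explicitly so
## that SLURM_MEM_PER_CPU, used below to set MOLCAS_MEM, is defined, e.g.
##SBATCH --mem-per-cpu=2000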

# Log the SLURM environment for debugging
env | grep -i slurm

cd "$SLURM_SUBMIT_DIR"
echo "Submission Directory: $SLURM_SUBMIT_DIR"

# Create a per-job temporary directory ($SCRATCHDIR is assumed to be
# provided by the site environment)
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR"
echo "Working Directory: $WORKDIR"

# Purge modules inherited from the submitting (interactive) environment
module purge

# Load needed modules
module load openmolcas/oneapi-2023/24.02

set -x   # echo commands as they run (redundant with -x in the shebang)

### Definition of variables ###
# Assumes exactly one MOLCAS input file (*.inp) in the submission directory
finp=$(ls *.inp)
JNAME=${finp%.inp}
geom=geom.xyz   # geometry file, assumed to be referenced from the .inp file

JOBINP="$JNAME".inp
JOBOUT="$JNAME".out
Project=$JNAME

export Project
# MOLCAS_MEM is the per-process memory in MB, matching the unit of
# SLURM_MEM_PER_CPU (which SLURM sets only if memory was requested)
if [ -z "$SLURM_CPUS_PER_TASK" ]
then
   export MOLCAS_MEM=$SLURM_MEM_PER_CPU
else
   export MOLCAS_MEM=$(( SLURM_MEM_PER_CPU * SLURM_CPUS_PER_TASK ))
fi
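# A hedged fallback, assuming 2000 MB per process is an acceptable site
# default, for the case where the job made no memory request and
# SLURM_MEM_PER_CPU is therefore unset:
if [ -z "$MOLCAS_MEM" ] || [ "$MOLCAS_MEM" -eq 0 ]; then
   export MOLCAS_MEM=2000
fi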
export HomeDir="$PWD"

## MOLCAS_NPROCS depends on the values of "--nodes" and "--ntasks-per-node"
export MOLCAS_NPROCS=$SLURM_NTASKS
export MOLCAS_WORKDIR=$WORKDIR
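# OpenMolcas places its scratch files under $MOLCAS_WORKDIR/$Project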
export CurrDir=$(pwd)

echo "Started $(date)"

# Record the compact nodelist, then append the expanded per-task hostnames
echo $SLURM_JOB_NODELIST > "$JNAME.nodes"
srun hostname -s | sort -V >> "$JNAME.nodes"

# Run the calculation; stdout and stderr both go to $JOBOUT
pymolcas "$JOBINP" > "$JOBOUT" 2>&1
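# Minimal sketch: record pymolcas' exit status (non-zero on failure)
# so that problems are visible in the SLURM log
rc=$?
echo "pymolcas exit status: $rc"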

echo "Finished $(date)"

# Remove the temporary $WORKDIR
rm -rf "$WORKDIR"
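
Submit from the directory containing the .inp (and geometry) file;
assuming the script is saved as job_molcas.sh (hypothetical name):

    sbatch job_molcas.sh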