ORCA

#!/bin/bash

#SBATCH --account=<project-id>
#SBATCH --partition=std
#SBATCH --job-name=orca610
#SBATCH --output=SLURM-%x.%N.%j.out
#SBATCH --error=SLURM-%x.%N.%j.err
#SBATCH --nodes=1
#SBATCH --ntasks=8
#SBATCH --cpus-per-task=1
#SBATCH --time=2-00:00:00

# Print the SLURM environment variables for reference in the job log
env | grep -i slurm

# Name of the ORCA input file without the .inp extension (test.inp must be in the submission directory)
export INPNAME='test'
export INPUT="$INPNAME.inp"
export OUTPUT="$INPNAME.$SLURM_JOB_ID.out"

cd $SLURM_SUBMIT_DIR

## Creating temporary directory 
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
# WORKDIR=$SLURM_SUBMIT_DIR/job.$SLURM_JOB_ID.$USER
mkdir -p $WORKDIR

# Load the ORCA 6.1.0 environment
module purge
module load orca/6.1.0
ulimit -s unlimited

# ORCA must be called with its full path for parallel (MPI) runs
export ORCA=$(which orca)

echo "FICHIER: $INPUT"
echo "RÉPERTOIRE DE TRAVAIL: $WORKDIR"
echo "DÉBUT: $(date)"

cp $SLURM_SUBMIT_DIR/$INPUT $WORKDIR
cd $WORKDIR

# Extra arguments given in quotes are passed by ORCA to mpirun
$ORCA $INPUT "--bind-to core --verbose" >& $OUTPUT

echo "$ORCA $INPUT --bind-to core --verbose >& $OUTPUT"
echo "FINI: $(date)"

# Create an output directory
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR"

# Copy all files to the output directory (including sub-directories and hidden files)
cp -rf "$WORKDIR"/. "$OUTDIR"

Note

In the ORCA input file, adjust the parameters to the resources requested in the SLURM script and to the characteristics of the eXplor nodes. For example, you can set:

%maxcore 11000, for the 'cne' nodes

Note that %maxcore should be at most 75% of the memory per core of the target node (in MB). This information is available in the Table of Associations.
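
As an illustration only, a minimal ORCA input header consistent with the script above (8 tasks, %maxcore 11000 for a 'cne' node) could look like the following; the method line and the geometry file name are placeholders:

! B3LYP def2-SVP Opt
%maxcore 11000   # memory per core in MB (at most 75% of the node's memory per core)
%pal
  nprocs 8       # should match --ntasks in the SLURM script
end
* xyzfile 0 1 geometry.xyz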