SIESTA
Latest version installed on the cluster: 5.0.0
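The example Slurm batch script below runs SIESTA 5.0.0 in parallel with 16 MPI tasks on a single node. It expects an input file named relax.fdf in the submission directory, stages the files to a scratch working directory, and copies the results back once the run has finished.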
#!/bin/bash -x
#SBATCH --account=<group>
#SBATCH --partition=std
#SBATCH --job-name=TestSiesta
#SBATCH --output=SLURM-%x.%N.%j.out
#SBATCH --error=SLURM-%x.%N.%j.err
#SBATCH --nodes=1
#SBATCH --ntasks=16
#SBATCH --ntasks-per-node=16
#SBATCH --cpus-per-task=1
#SBATCH --time=0-10:00:00
# Display SLURM environment variables
env | grep -i slurm
# Change to the submission directory
cd "$SLURM_SUBMIT_DIR" || exit
echo "Submission Directory: $SLURM_SUBMIT_DIR"
# Create a temporary working directory
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR"
echo "Working Directory: $WORKDIR"
# Purge modules loaded interactively and inherited by default
module purge
# Load needed modules
module load gcc-11.2.0
module load libxc/gcc-11.2.0/6.2.2
module load siesta/gcc-11.2.0/5.0.0
# Unset the Intel MPI PMI library
unset I_MPI_PMI_LIBRARY
# Set Intel MPI environment variables
export I_MPI_FALLBACK=0
export I_MPI_FABRICS=shm:tcp
# Set OpenMP and MKL environment variables (one thread per MPI task)
export OMP_PROC_BIND=true
export MKL_NUM_THREADS=1
export MKL_DOMAIN_NUM_THREADS="MKL_BLAS=1"
export OMP_NUM_THREADS=1
export MKL_DYNAMIC=FALSE
export OMP_DYNAMIC=FALSE
# Copy everything from the submission directory to the working directory
cp "$SLURM_SUBMIT_DIR"/* "$WORKDIR"
# Change to the working directory
cd "$WORKDIR" || exit
echo "Started $(date)"
# Run the Siesta code
mpirun -n "$SLURM_NTASKS" siesta < relax.fdf > relax.out
echo "Finished $(date)"
# Create an output directory in the submission directory
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR"
# Copy all files back to the output directory
cp "$WORKDIR"/* "$OUTDIR"
# Clean the working directory
rm -rf "$WORKDIR"
echo ""
echo " JOB finished: $(date) "
echo ""