SIESTA

Latest version installed on the cluster: 5.4.1
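
Below is an example SLURM submission script for a parallel SIESTA run. To check which SIESTA builds are currently available on the cluster, you can list the matching modules first (the module loaded in the script below should appear in this listing):

module avail siesta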

#!/bin/bash -x

#SBATCH --account=<group>
#SBATCH --partition=std
#SBATCH --job-name=TestSiesta
#SBATCH --output=SLURM-%x.%N.%j.out
#SBATCH --error=SLURM-%x.%N.%j.err
#SBATCH --nodes=1
#SBATCH --ntasks=16
#SBATCH --ntasks-per-node=16
#SBATCH --cpus-per-task=1
#SBATCH --time=0-10:00:00

# Display SLURM environment variables
env | grep -i slurm

# Change to the submission directory
cd "$SLURM_SUBMIT_DIR"

# Create a temporary working directory
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR"

# Purge any modules loaded in the interactive session and inherited by default
module purge

# Load needed modules
module load siesta/5.4.1-mpi

# Copy everything from the submission directory to the working directory
cp "$SLURM_SUBMIT_DIR"/* "$WORKDIR"

# Change to the working directory
cd "$WORKDIR"

fname="relax"

# Run the Siesta code
date
mpirun -n "$SLURM_NTASKS" siesta < "${fname}.fdf" > "${fname}.out"

# Create the output directory
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR"

# Copy all files to the output directory
cp "$WORKDIR"/* "$OUTDIR"

# Clean up the working directory (optional)
#rm -rf "$WORKDIR"

Last updated: December 11, 2025