LAMMPS
Available LAMMPS versions
Two main builds of LAMMPS are provided on the mésocentre:
Intel oneAPI build (Intel CPUs only)
- Module: lammps/mpi/29Aug2024, compiled with Intel oneAPI + MKL.
- Optimized for Intel nodes.
- Not supported on AMD nodes (Intel-specific AVX2/AVX-512 instruction sets).
GCC 11.2.0 build (all nodes)
- Module: lammps/mpi/29Aug2024-gcc, compiled with gcc/11.2.0 + OpenBLAS / internal FFT.
- Compatible with all AMD and Intel nodes.
- Recommended default version.
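Before writing a job script, you can check from a login node that the modules are visible and that the binary resolves; a minimal sketch (module names as listed above, lmp being the LAMMPS executable these modules provide):

module avail lammps                   # list the installed LAMMPS builds
module purge
module load lammps/mpi/29Aug2024-gcc  # recommended default build
which lmp                             # path to the LAMMPS binary
lmp -h | head                         # print version and enabled packages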
Example 1 – Multi-node job (GCC, all CPU nodes)
#!/bin/bash
#SBATCH -p std
#SBATCH -N 2 # adjust as needed
#SBATCH -J Lammps_multi
#SBATCH -t 2-00:00:00
#SBATCH --ntasks-per-node=8 # adjust as needed
#SBATCH --cpus-per-task=1 # OpenMP threads per task
#SBATCH --output=slurm-%x.%N.%j.out
#SBATCH --error=slurm-%x.%N.%j.err
#SBATCH --exclude=cnd[01-04,06-12] # exclude IVY nodes (AVX only)
# ==== SLURM info ====
env | grep -i slurm
# ==== Working directory and LAMMPS input ====
cd "$SLURM_SUBMIT_DIR"
input=input # LAMMPS input file
# ==== Scratch ====
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR"
cp -f "$SLURM_SUBMIT_DIR"/system.lmp \
      "$SLURM_SUBMIT_DIR"/ff.lmp \
      "$SLURM_SUBMIT_DIR"/input \
      "$WORKDIR"/
cd "$WORKDIR"
# ==== LAMMPS modules ====
module purge
module load lammps/mpi/29Aug2024-gcc
source "$LAMMPS_PROF/lammps.sh"
# ==== OpenMP & MPI ====
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
NTASKS=$SLURM_NTASKS
echo "SLURM_NNODES = $SLURM_NNODES"
echo "SLURM_NTASKS = $SLURM_NTASKS"
echo "SLURM_CPUS_PER_TASK = $SLURM_CPUS_PER_TASK"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"
ulimit -s unlimited # unlimited stack size
unset I_MPI_PMI_LIBRARY # avoid PMI conflicts between Intel MPI and Slurm when launching with mpirun
# ==== Run LAMMPS ====
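# -sf omp     : switch styles to their OpenMP-threaded variants
# -pk omp <N> : set OPENMP package options, <N> threads per MPI rank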
mpirun -np "$NTASKS" lmp -sf omp -pk omp "$OMP_NUM_THREADS" -in "$input"
# ==== Retrieve results ====
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR"
cp -f "$WORKDIR"/* "$OUTDIR"/
rm -rf "$WORKDIR"
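Submission works as for any Slurm script; a minimal usage example, assuming the script above is saved under the hypothetical name lammps_gcc.slurm:

sbatch lammps_gcc.slurm # submit the job
squeue -u $USER         # follow its state in the queue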
Example 2 – Multi-node job (Intel oneAPI, Intel nodes)
#!/bin/bash
#SBATCH -p std
#SBATCH -N 2 # adjust as needed
#SBATCH -J Lammps_multi_intel
#SBATCH -t 2-00:00:00
#SBATCH --ntasks-per-node=8 # adjust as needed
#SBATCH --cpus-per-task=1 # OpenMP threads per task
#SBATCH --output=slurm-%x.%N.%j.out
#SBATCH --error=slurm-%x.%N.%j.err
#SBATCH --exclude=cnl[02-04],cnj[01-64],cnd[01-04,06-12] # exclude nodes incompatible with the Intel LAMMPS build
# ==== SLURM info ====
env | grep -i slurm
# ==== Working directory and LAMMPS input ====
cd "$SLURM_SUBMIT_DIR"
input=input # LAMMPS input file
# ==== Scratch ====
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR"
cp -f "$SLURM_SUBMIT_DIR"/system.lmp \
      "$SLURM_SUBMIT_DIR"/ff.lmp \
      "$SLURM_SUBMIT_DIR"/input \
      "$WORKDIR"/
cd "$WORKDIR"
# ==== LAMMPS modules ====
module purge
module load lammps/mpi/29Aug2024 # Intel oneAPI build
source "$LAMMPS_PROF/lammps.sh"
# ==== OpenMP & MPI ====
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
NTASKS=$SLURM_NTASKS
echo "SLURM_NNODES = $SLURM_NNODES"
echo "SLURM_NTASKS = $SLURM_NTASKS"
echo "SLURM_CPUS_PER_TASK = $SLURM_CPUS_PER_TASK"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"
ulimit -s unlimited
unset I_MPI_PMI_LIBRARY
# ==== Run LAMMPS ====
mpirun -np "$NTASKS" lmp -sf omp -pk omp "$OMP_NUM_THREADS" -in "$input"
# ==== Retrieve results ====
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR"
cp -f "$WORKDIR"/* "$OUTDIR"/
rm -rf "$WORKDIR"
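The --exclude lists above hard-code node names. If the cluster layout changes, the nodes of the partition and their CPU feature tags can be inspected with standard Slurm tooling:

sinfo -p std -N -o "%N %c %m %f" # node name, CPUs, memory, feature tags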
Example 3 – Single node, automatic core selection (GCC build as an example)
#!/bin/bash
#SBATCH -p std
#SBATCH -N 1
#SBATCH -J Lammps_1node
#SBATCH -t 1-00:00:00
#SBATCH --cpus-per-task=1 # OpenMP threads per task
#SBATCH --output=slurm-%x.%N.%j.out
#SBATCH --error=slurm-%x.%N.%j.err
#SBATCH --exclude=cnd[01-04,06-12] # exclude IVY nodes (AVX only)
# ==== SLURM info ====
env | grep -i slurm
# ==== Working directory and LAMMPS input ====
cd "$SLURM_SUBMIT_DIR"
input=in.melt.big
# ==== Scratch ====
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR"
cp -f "$input" "$WORKDIR"/
cd "$WORKDIR"
# ==== LAMMPS modules ====
module purge
module load lammps/mpi/29Aug2024-gcc
source "$LAMMPS_PROF/lammps.sh"
# ==== OpenMP & MPI ====
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
CPUS_ON_NODE=${SLURM_CPUS_ON_NODE%%,*} # keep the first value (single node)
NTASKS=$(( CPUS_ON_NODE / SLURM_CPUS_PER_TASK )) # MPI ranks = cores on node / threads per rank
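# Example (illustrative core count): on a 64-core node with --cpus-per-task=1,
# NTASKS = 64 / 1 = 64 MPI ranks, each running a single OpenMP thread.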
echo "CPUS_ON_NODE = $CPUS_ON_NODE"
echo "SLURM_CPUS_PER_TASK = $SLURM_CPUS_PER_TASK"
echo "NTASKS (MPI ranks) = $NTASKS"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"
ulimit -s unlimited
unset I_MPI_PMI_LIBRARY
# ==== Run LAMMPS ====
mpirun -np "$NTASKS" lmp -sf omp -pk omp "$OMP_NUM_THREADS" -in "$input"
# ==== Retrieve results ====
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR"
cp -f "$WORKDIR"/* "$OUTDIR"/
rm -rf "$WORKDIR"
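In all three examples the results end up in outdir.$SLURM_JOB_ID inside the submission directory. A quick post-run sanity check, assuming LAMMPS's default log file name log.lammps and an illustrative job ID:

grep -i "OpenMP" outdir.<jobid>/log.lammps    # threads actually used per MPI rank
grep "Performance:" outdir.<jobid>/log.lammps # timing summary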