LAMMPS
Available LAMMPS versions
Two main LAMMPS builds are provided on the mesocenter:
Intel oneAPI build (Intel CPU nodes only)
- Module:
lammps/mpi/29Aug2024 - Compiled with Intel oneAPI + MKL.
- Optimized for Intel compute nodes.
- Not supported on some AMD nodes (the build requires AVX2/AVX‑512 instructions that those nodes lack).
GCC 11.2.0 build (all nodes)
- Module:
lammps/mpi/29Aug2024-gcc - Compiled with gcc/11.2.0 + OpenBLAS / internal FFT.
- Compatible with all AMD and Intel nodes.
- This is the recommended default version.
Example 1 – Multi‑node job (GCC build, all nodes)
#!/bin/bash
#
# Example 1 - Multi-node LAMMPS job using the GCC build (runs on all nodes).
# Stages inputs to scratch, runs LAMMPS with MPI + OpenMP, copies results back.
#
#SBATCH -p std
#SBATCH -N 2 # adjust
#SBATCH -J Lammps_multi
#SBATCH -t 2-00:00:00
#SBATCH --ntasks-per-node=8 # adjust
#SBATCH --cpus-per-task=1 # OpenMP threads per task
#SBATCH --output=slurm-%x.%N.%j.out
#SBATCH --error=slurm-%x.%N.%j.err
#SBATCH --exclude=cnd[01-04,06-12] # exclude old IVY nodes (AVX only)

# Abort on use of unset variables; critical steps are checked explicitly below.
set -u

# ==== SLURM info ====
env | grep -i '^SLURM_'

# ==== LAMMPS working directory and input ====
cd "$SLURM_SUBMIT_DIR" || exit 1
input=input # LAMMPS input file

# ==== Scratch ====
# Refuse to run if SCRATCHDIR is unset/empty: WORKDIR is removed with rm -rf later.
: "${SCRATCHDIR:?SCRATCHDIR must be set}"
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR" || exit 1
# Stage inputs into scratch; reference "$input" so the copy list cannot drift
# from the variable used on the mpirun command line. Abort if staging fails.
cp -f "$SLURM_SUBMIT_DIR"/system.lmp \
      "$SLURM_SUBMIT_DIR"/ff.lmp \
      "$SLURM_SUBMIT_DIR/$input" \
      "$WORKDIR"/ || exit 1
cd "$WORKDIR" || exit 1

# ==== LAMMPS modules ====
module purge
module load lammps/mpi/29Aug2024-gcc
source "$LAMMPS_PROF/lammps.sh"

# ==== OpenMP & MPI ====
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
NTASKS=$SLURM_NTASKS
echo "SLURM_NNODES = $SLURM_NNODES"
echo "SLURM_NTASKS = $SLURM_NTASKS"
echo "SLURM_CPUS_PER_TASK = $SLURM_CPUS_PER_TASK"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"
ulimit -s unlimited
unset I_MPI_PMI_LIBRARY # let mpirun manage startup instead of Slurm's PMI library

# ==== Run LAMMPS ====
mpirun -np "$NTASKS" lmp -sf omp -pk omp "$OMP_NUM_THREADS" -in "$input"
status=$? # preserve the LAMMPS exit code as the job's exit code

# ==== Collect results ====
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR" || exit 1
# Remove scratch only after the results were copied back successfully;
# "${WORKDIR:?}" guards rm -rf against ever expanding to an empty path.
cp -f "$WORKDIR"/* "$OUTDIR"/ && rm -rf -- "${WORKDIR:?}"
exit "$status"
Example 2 – Multi‑node job (Intel oneAPI build, Intel nodes only)
#!/bin/bash
#
# Example 2 - Multi-node LAMMPS job using the Intel oneAPI build (Intel nodes only).
# Stages inputs to scratch, runs LAMMPS with MPI + OpenMP, copies results back.
#
#SBATCH -p std
#SBATCH -N 2 # adjust
#SBATCH -J Lammps_multi_intel
#SBATCH -t 2-00:00:00
#SBATCH --ntasks-per-node=8 # adjust
#SBATCH --cpus-per-task=1 # OpenMP threads per task
#SBATCH --output=slurm-%x.%N.%j.out
#SBATCH --error=slurm-%x.%N.%j.err
#SBATCH --exclude=cnl[02-04],cnj[01-64],cnd[01-04,06-12] # exclude nodes incompatible with Intel LAMMPS

# Abort on use of unset variables; critical steps are checked explicitly below.
set -u

# ==== SLURM info ====
env | grep -i '^SLURM_'

# ==== LAMMPS working directory and input ====
cd "$SLURM_SUBMIT_DIR" || exit 1
input=input # LAMMPS input file

# ==== Scratch ====
# Refuse to run if SCRATCHDIR is unset/empty: WORKDIR is removed with rm -rf later.
: "${SCRATCHDIR:?SCRATCHDIR must be set}"
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR" || exit 1
# Stage inputs into scratch; reference "$input" so the copy list cannot drift
# from the variable used on the mpirun command line. Abort if staging fails.
cp -f "$SLURM_SUBMIT_DIR"/system.lmp \
      "$SLURM_SUBMIT_DIR"/ff.lmp \
      "$SLURM_SUBMIT_DIR/$input" \
      "$WORKDIR"/ || exit 1
cd "$WORKDIR" || exit 1

# ==== LAMMPS modules ====
module purge
module load lammps/mpi/29Aug2024 # Intel oneAPI build
source "$LAMMPS_PROF/lammps.sh"

# ==== OpenMP & MPI ====
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
NTASKS=$SLURM_NTASKS
echo "SLURM_NNODES = $SLURM_NNODES"
echo "SLURM_NTASKS = $SLURM_NTASKS"
echo "SLURM_CPUS_PER_TASK = $SLURM_CPUS_PER_TASK"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"
ulimit -s unlimited
unset I_MPI_PMI_LIBRARY # let mpirun manage startup instead of Slurm's PMI library

# ==== Run LAMMPS ====
mpirun -np "$NTASKS" lmp -sf omp -pk omp "$OMP_NUM_THREADS" -in "$input"
status=$? # preserve the LAMMPS exit code as the job's exit code

# ==== Collect results ====
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR" || exit 1
# Remove scratch only after the results were copied back successfully;
# "${WORKDIR:?}" guards rm -rf against ever expanding to an empty path.
cp -f "$WORKDIR"/* "$OUTDIR"/ && rm -rf -- "${WORKDIR:?}"
exit "$status"
Example 3 – Single‑node job, automatic core selection (GCC build)
#!/bin/bash
#
# Example 3 - Single-node LAMMPS job (GCC build) that derives the MPI rank
# count automatically from the cores Slurm allocated on the node.
#
#SBATCH -p std
#SBATCH -N 1
#SBATCH -J Lammps_1node
#SBATCH -t 1-00:00:00
#SBATCH --cpus-per-task=1 # OpenMP threads per task
#SBATCH --output=slurm-%x.%N.%j.out
#SBATCH --error=slurm-%x.%N.%j.err
#SBATCH --exclude=cnd[01-04,06-12] # exclude old IVY nodes (AVX only)

# Abort on use of unset variables; critical steps are checked explicitly below.
set -u

# ==== SLURM info ====
env | grep -i '^SLURM_'

# ==== LAMMPS working directory and input ====
cd "$SLURM_SUBMIT_DIR" || exit 1
input=in.melt.big

# ==== Scratch ====
# Refuse to run if SCRATCHDIR is unset/empty: WORKDIR is removed with rm -rf later.
: "${SCRATCHDIR:?SCRATCHDIR must be set}"
WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
mkdir -p "$WORKDIR" || exit 1
cp -f "$input" "$WORKDIR"/ || exit 1
cd "$WORKDIR" || exit 1

# ==== LAMMPS modules ====
module purge
module load lammps/mpi/29Aug2024-gcc
source "$LAMMPS_PROF/lammps.sh"

# ==== OpenMP & MPI ====
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# SLURM_CPUS_ON_NODE can be a comma-separated per-node list; with -N 1 the
# first entry is the only one, so strip everything from the first comma on.
CPUS_ON_NODE=${SLURM_CPUS_ON_NODE%%,*}
NTASKS=$(( CPUS_ON_NODE / SLURM_CPUS_PER_TASK ))
echo "CPUS_ON_NODE = $CPUS_ON_NODE"
echo "SLURM_CPUS_PER_TASK = $SLURM_CPUS_PER_TASK"
echo "NTASKS (MPI ranks) = $NTASKS"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"
ulimit -s unlimited
unset I_MPI_PMI_LIBRARY # let mpirun manage startup instead of Slurm's PMI library

# ==== Run LAMMPS ====
mpirun -np "$NTASKS" lmp -sf omp -pk omp "$OMP_NUM_THREADS" -in "$input"
status=$? # preserve the LAMMPS exit code as the job's exit code

# ==== Collect results ====
OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
mkdir -p "$OUTDIR" || exit 1
# Remove scratch only after the results were copied back successfully;
# "${WORKDIR:?}" guards rm -rf against ever expanding to an empty path.
cp -f "$WORKDIR"/* "$OUTDIR"/ && rm -rf -- "${WORKDIR:?}"
exit "$status"