Script for VASP-5
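
This script stages the VASP input files into a scratch work directory, runs the VASP 5.4.4 binary with Intel MPI through srun, and copies the main result files back. Submit it with sbatch (the file name vasp5.slurm is only an example):

  sbatch vasp5.slurm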

#!/bin/bash -x 
#SBATCH --account=<id-projet>
#SBATCH --partition=std
#SBATCH --job-name=Test-vasp
#SBATCH --output=slurm-%x.%N.%j.out 
#SBATCH --error=slurm-%x.%N.%j.err 
#SBATCH --nodes=2
#SBATCH --ntasks=16
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=1 # always 1 thread
#SBATCH --time=0-04:00:00
#SBATCH --exclude=cnd[01-04,06-12] # IVY nodes are showing issues with VASP

  version='std'   # alternative builds: gam | ncl

  env | grep -i slurm 

  cd "$SLURM_SUBMIT_DIR"

  ## Create the work directory in $SCRATCHDIR ##########
  WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
  mkdir -p "$WORKDIR"

  if [ -d "$WORKDIR" ] ; then

    cd "$WORKDIR"

    module purge
    module load intel/2017.4.196 mkl/2017.4.196 intelmpi/2017.4.196
    export PATH="/opt/soft/std/vasp.5.4.4:$PATH"
    export VASP=$( which vasp_$version )
    export OMP_NUM_THREADS=1    # one thread per MPI rank,
    export MKL_NUM_THREADS=1    # matching --cpus-per-task=1
    ulimit -s unlimited         # VASP needs a large stack
    unset I_MPI_PMI_LIBRARY

    # Fine-tune the interconnect per node type, based on the SLURM
    # topology address
    echo "$SLURM_TOPOLOGY_ADDR" > toptype.txt
    if grep -qi ib toptype.txt ; then
      export I_MPI_FABRICS=dapl      # DAPL fabric on the InfiniBand nodes
    fi
    if grep -qi cnj toptype.txt ; then
      export SLURM_CPU_BIND=none     # no SLURM CPU binding on the cnj nodes
    fi

    # Stage the input files; WAVECAR and CHGCAR are optional restart files
    ffile=(INCAR KPOINTS POSCAR POTCAR WAVECAR CHGCAR)
    for ifile in "${ffile[@]}"; do
      if [ -e "$SLURM_SUBMIT_DIR/$ifile" ]; then
        cp "$SLURM_SUBMIT_DIR/$ifile" "$WORKDIR/$ifile"
      fi
    done

    # Launch VASP through SLURM's PMI-2 interface
    srun --mpi=pmi2 $VASP > vasp.out 2>&1

    # Copy the main result files back to the submission directory
    OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
    mkdir -p "$OUTDIR"

    cp "$WORKDIR/OUTCAR"  "$OUTDIR"
    cp "$WORKDIR/CONTCAR" "$OUTDIR"
    cp "$WORKDIR/IBZKPT"  "$OUTDIR"

  else
    echo "ERROR: Cannot create the WRK=$WORKDIR"
  fi 
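
Note that vasp.out is written in the scratch work directory and is not copied back by the script; to keep it, add a line such as

  cp "$WORKDIR/vasp.out" "$OUTDIR"

next to the other cp commands.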

Script for VASP-6*

* see the EXPLOR hardware page ("Matériel EXPLOR") to identify the nodes

  • For AMD processors
#!/bin/bash -x 
#SBATCH --account=support
#SBATCH --partition=std
#SBATCH --job-name=vaspAMD
#SBATCH -C EPYC4   # or EPYC3
#SBATCH --output=slurm-%x.%N.%j.out 
#SBATCH --error=slurm-%x.%N.%j.err 
#SBATCH --nodes=2
#SBATCH --ntasks=16
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=1 # always 1 thread
#SBATCH --time=2-04:00:00

  version='std'   # alternative builds: gam | ncl

  env | grep -i slurm 

  echo ""
  echo "  JOB started: $(date) "
  echo ""

  cd "$SLURM_SUBMIT_DIR"

  ## Create the work directory in $SCRATCHDIR ##########
  WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
  mkdir -p "$WORKDIR"

  if [ -d "$WORKDIR" ] ; then

    cd "$WORKDIR"

    module purge
    module load mcs_mod/softwares/vasp6/gcc-11.2/6.4.3
    export VASP=$( which vasp_$version )
    export OMP_NUM_THREADS=1    # one thread per MPI rank,
    export MKL_NUM_THREADS=1    # matching --cpus-per-task=1
    ulimit -s unlimited         # VASP needs a large stack
    unset I_MPI_PMI_LIBRARY

    # Fine-tune the interconnect per node type, based on the SLURM
    # topology address
    echo "$SLURM_TOPOLOGY_ADDR" > toptype.txt
    if grep -qi ib toptype.txt ; then
      export I_MPI_FABRICS=dapl      # DAPL fabric on the InfiniBand nodes
    fi
    if grep -qi cnj toptype.txt ; then
      export SLURM_CPU_BIND=none     # no SLURM CPU binding on the cnj nodes
    fi

    # Stage the input files; WAVECAR and CHGCAR are optional restart files
    ffile=(INCAR KPOINTS POSCAR POTCAR WAVECAR CHGCAR)
    for ifile in "${ffile[@]}"; do
      if [ -e "$SLURM_SUBMIT_DIR/$ifile" ]; then
        cp "$SLURM_SUBMIT_DIR/$ifile" "$WORKDIR/$ifile"
      fi
    done

    # Launch VASP with one MPI rank per SLURM task
    mpirun -np $SLURM_NTASKS $VASP > vasp.out 2>&1

    # Copy the main result files back to the submission directory
    OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
    mkdir -p "$OUTDIR"

    cp "$WORKDIR/OUTCAR"  "$OUTDIR"
    cp "$WORKDIR/CONTCAR" "$OUTDIR"
    cp "$WORKDIR/IBZKPT"  "$OUTDIR"

  else
    echo "ERROR: Cannot create the WRK=$WORKDIR"
  fi 

echo ""
echo "  JOB finished: $(date) "
echo ""
  • For INTEL processors
#!/bin/bash -x 
#SBATCH --account=support
#SBATCH --partition=std
#SBATCH --job-name=vaspINTEL
#SBATCH --output=slurm-%x.%N.%j.out 
#SBATCH --error=slurm-%x.%N.%j.err 
#SBATCH --nodes=4
#SBATCH --ntasks=32
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=1 # always 1 thread
#SBATCH --time=2-04:00:00
#SBATCH --exclude=cnd[01-04,06-12],cnl[01-18],cnj[01-64] # to exclude IVY and AMD nodes

  version='std'   # alternative builds: gam | ncl

  env | grep -i slurm 

  echo ""
  echo "  JOB started: $(date) "
  echo ""

  cd "$SLURM_SUBMIT_DIR"

  ## Create the work directory in $SCRATCHDIR ##########
  WORKDIR="$SCRATCHDIR/job.$SLURM_JOB_ID.$USER"
  mkdir -p "$WORKDIR"

  if [ -d "$WORKDIR" ] ; then

    cd "$WORKDIR"

    module purge
    module load mcs_mod/softwares/vasp6/oneapi-2023.0.0/6.4.3
    export VASP=$( which vasp_$version )
    export OMP_NUM_THREADS=1    # one thread per MPI rank,
    export MKL_NUM_THREADS=1    # matching --cpus-per-task=1
    ulimit -s unlimited         # VASP needs a large stack
    unset I_MPI_PMI_LIBRARY

    # Fine-tune the interconnect per node type, based on the SLURM
    # topology address
    echo "$SLURM_TOPOLOGY_ADDR" > toptype.txt
    if grep -qi ib toptype.txt ; then
      export I_MPI_FABRICS=dapl      # DAPL fabric on the InfiniBand nodes
    fi
    if grep -qi cnj toptype.txt ; then
      export SLURM_CPU_BIND=none     # no SLURM CPU binding on the cnj nodes
    fi

    # Stage the input files; WAVECAR and CHGCAR are optional restart files
    ffile=(INCAR KPOINTS POSCAR POTCAR WAVECAR CHGCAR)
    for ifile in "${ffile[@]}"; do
      if [ -e "$SLURM_SUBMIT_DIR/$ifile" ]; then
        cp "$SLURM_SUBMIT_DIR/$ifile" "$WORKDIR/$ifile"
      fi
    done

    # Launch VASP with one MPI rank per SLURM task
    mpirun -np $SLURM_NTASKS $VASP > vasp.out 2>&1

    # Copy the main result files back to the submission directory
    OUTDIR="$SLURM_SUBMIT_DIR/outdir.$SLURM_JOB_ID"
    mkdir -p "$OUTDIR"

    cp "$WORKDIR/OUTCAR"  "$OUTDIR"
    cp "$WORKDIR/CONTCAR" "$OUTDIR"
    cp "$WORKDIR/IBZKPT"  "$OUTDIR"

  else
    echo "ERROR: Cannot create the WRK=$WORKDIR"
  fi 

echo ""
echo "  JOB finished: $(date) "
echo ""