#!/bin/bash
# SLURM job: run a MATLAB script (example.m) in batch mode on one task.
#SBATCH -J helloMatlab
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
#SBATCH -n 1
#SBATCH -p allgroups
#SBATCH --mem 10G

# Your working directory. Quoted (paths may contain spaces) and checked:
# abort instead of running MATLAB from the wrong directory if cd fails.
cd "$WORKING_DIR" || exit 1

# -nodisplay/-nosplash: run headless — compute nodes have no X display.
srun matlab -nodisplay -nosplash < example.m
#!/bin/bash
# SLURM job: run a CUDA executable on one RTX-class GPU.
#SBATCH -J helloGPU
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
#SBATCH -n 1
#SBATCH -p allgroups
# NOTE: fixed a typographic en-dash ("–-mem") that sbatch would not parse.
#SBATCH --mem 640G
# requesting 1 GPU from the group RTX; set --gres=gpu:rtx:2 to use for example two RTX GPUs
#SBATCH --gres=gpu:rtx:1

# Your working directory. Quoted and checked so a bad/unset path aborts the job.
cd "$WORKING_DIR" || exit 1
srun ./GPUhello
#!/bin/bash
# SLURM job: run an MPI executable with 32 tasks using the system Open MPI.
#SBATCH -J hellompi
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
# request 32 MPI tasks
#SBATCH -n 32
#SBATCH -p allgroups
#SBATCH --mem 640G

# Your working directory. Quoted and checked so a bad/unset path aborts the job.
cd "$WORKING_DIR" || exit 1

# add openmpi executables (mpirun etc.) and its shared libraries to the environment
export PATH=$PATH:/usr/lib64/openmpi/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64/openmpi/lib

mpirun ./mphello
Please note
Alternatively, you can create a Python virtualenv (see next).
SETUP (one-time only)
#start an interactive shell and setup
user@login$ interactive
....
user@nodename-XX$ python3 -m venv openmpivenv
user@nodename-XX$ source openmpivenv/bin/activate
(openmpivenv) user@nodename-XX$ pip install openmpi mpi4py
(openmpivenv) user@nodename-XX$ pip install numpy matplotlib etc.
JOB FILE EXAMPLE
#!/bin/bash
# SLURM job: run an mpi4py Python program with 4 MPI tasks from a virtualenv.
#SBATCH --job-name OpenMPI
#SBATCH --ntasks 4
#SBATCH --cpus-per-task 1
#SBATCH --partition allgroups
#SBATCH --time 02:00:00
#SBATCH --mem 10G
# NOTE: was "--output .out", which creates a hidden file with no name stem;
# give it a proper name matching the error file below.
#SBATCH --output output_OPmpi.out
#SBATCH --error errors_OPmpi.err

# activate virtualenv (puts its python/mpirun first in PATH)
source openmpivenv/bin/activate

export PMIX_MCA_gds=hash

# mpirun is in openmpivenv/bin/; -n must match --ntasks above
mpirun -n 4 python -m mpi4py.futures mycode.py
#!/bin/bash
# SLURM job: run an OpenMP executable with 32 threads on a single task.
#SBATCH -J helloopenmp
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
# notice we're using the '-c' option
# to request for OMP threads
#SBATCH -c 32
#SBATCH -p allgroups
# NOTE: fixed a typographic en-dash ("-–mem") that sbatch would not parse.
#SBATCH --mem 640G

# Your working directory. Quoted and checked so a bad/unset path aborts the job.
cd "$WORKING_DIR" || exit 1

# Derive the thread count from the '-c' allocation (SLURM exports
# SLURM_CPUS_PER_TASK at run time) so the two values can never drift apart.
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun ./omphello
Please note
OMP_NUM_THREADS must be set to the same number as the -c parameter. Set
-n to 1 if the program uses OpenMP only. If using both MPI and OpenMP, set -n to the number of MPI tasks. The total number of slots requested, in any case, will be n*c.