#!/bin/bash
#SBATCH -J helloMatlab
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
#SBATCH -n 1
#SBATCH -p allgroups
#SBATCH --mem 10G
cd $WORKING_DIR
#your working directory
srun matlab < example.m
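The script can be submitted with sbatch and monitored with squeue, as sketched below (the file name matlab_job.sh is only an example):

sbatch matlab_job.sh     # submit the job; prints "Submitted batch job <jobid>"
squeue -u $USER          # check the state of your jobs (PD = pending, R = running)
cat output_<jobid>.txt   # read the MATLAB output once the job has finished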
#!/bin/bash
#SBATCH -J helloGPU
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
#SBATCH -n 1
#SBATCH -p allgroups
#SBATCH --mem 640G
# request 1 GPU from the RTX group; set --gres=gpu:rtx:2 to request, for example, two RTX GPUs
#SBATCH --gres=gpu:rtx:1
cd $WORKING_DIR
#your working directory
srun ./GPUhello
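On most SLURM installations, requesting --gres=gpu makes the scheduler export CUDA_VISIBLE_DEVICES inside the job, so a quick sanity check can be added to the job script right before launching the program (a sketch; it assumes nvidia-smi is installed on the GPU nodes):

echo "Allocated GPU(s): $CUDA_VISIBLE_DEVICES"   # set by SLURM for --gres=gpu requests
srun nvidia-smi                                  # list the GPUs visible to this job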
#!/bin/bash
#SBATCH -J hellompi
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
# request 32 MPI tasks
#SBATCH -n 32
#SBATCH -p allgroups
#SBATCH --mem 640G
cd $WORKING_DIR
#your working directory
#spack load intel-parallel-studio@professional.2019.4 (work in progress)
srun ./mphello
Please note: spack load ... initializes the Intel MPI environment and is equivalent to module load intel-parallel-studio-professional.2019.4-gcc-8.2.1-fnvratt.
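As a sketch, assuming the MPI source is in mphello.c and the job script above is saved as mpi_job.sh (both names are only examples), compilation and submission could look like this:

module load intel-parallel-studio-professional.2019.4-gcc-8.2.1-fnvratt   # or the spack load command above
mpicc -O2 -o mphello mphello.c   # compile with the MPI wrapper compiler
sbatch mpi_job.sh                # submit the job script shown above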
#!/bin/bash
#SBATCH -J helloopenmp
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
# notice we're using the '-c' option
# to request OMP threads
#SBATCH -c 32
#SBATCH -p allgroups
#SBATCH --mem 640G
cd $WORKING_DIR
#your working directory
# set this to what you asked with '-c'
export OMP_NUM_THREADS=32
srun ./omphello
Please note: OMP_NUM_THREADS must be set to the same number as the -c parameter. Set -n to 1 if the program uses OpenMP only; if using both MPI and OpenMP, set -n to the number of MPI tasks. The total number of slots requested, in any case, will be n*c.
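For a hybrid MPI+OpenMP job, the two options are combined as in the following sketch, which assumes a hypothetical executable ./hybridhello and asks for 4 MPI tasks with 8 threads each, i.e. 4*8 = 32 slots in total:

#!/bin/bash
#SBATCH -J hellohybrid
#SBATCH -o output_%j.txt
#SBATCH -e errors_%j.txt
#SBATCH -t 01:30:00
#SBATCH -n 4
# 4 MPI tasks with 8 OpenMP threads each: 4*8 = 32 slots
#SBATCH -c 8
#SBATCH -p allgroups
#SBATCH --mem 640G
cd $WORKING_DIR
#your working directory
# must match the value given with '-c'
export OMP_NUM_THREADS=8
srun ./hybridhello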