#!/bin/bash
#SBATCH --job-name=gputest              # Name of job
#SBATCH --output=out/%x_%j.out          # stdout (%x = job name, %j = job ID)
#SBATCH --error=out/%x_%j.err           # stderr
#SBATCH --partition=gpu                 # Partition to use (check with sinfo)
#SBATCH --gres=gpu:a100:1               # Request one A100 GPU
#SBATCH --nodes=1                       # Number of nodes
#SBATCH --ntasks=1                      # Number of tasks | Alternative: --ntasks-per-node
#SBATCH --threads-per-core=1            # Ensure we only get one logical CPU per core
#SBATCH --cpus-per-task=1               # Number of cores per task
#SBATCH --mem=16G                       # Memory per node | Alternative: --mem-per-cpu
#SBATCH --time=36:00:00                 # Wall time limit (HH:MM:SS)
#SBATCH --mail-type=ALL                 # Email on job begin, end, and failure
#SBATCH --mail-user=mohammadali.sadraeijavaheri@helmholtz-hzi.de
#SBATCH --clusters=bioinf               # Submit to the bioinf cluster

# Python interpreter from the conda environment and the project root
export SAD_PYTHON=/home/msadraei/miniconda3/envs/deep/bin/python
export SAD_PRJ_PATH=/home/msadraei/developer/Thesis/09_Cluster

# Run training with the given config file
$SAD_PYTHON "$SAD_PRJ_PATH/train.py" "$SAD_PRJ_PATH/config2.yaml"
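
# Usage sketch (assumes this file is saved as gputest.sbatch; adjust the filename to yours):
#   mkdir -p out            # SLURM does not create the out/ directory for the %x_%j log files
#   sbatch gputest.sbatch   # submit the job
#   squeue -u $USER         # check job status in the queue
#   scancel <jobid>         # cancel the job if needed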