diff --git a/code/submit_train.sh b/code/submit_train.sh
index 02ab7fbb490d53887ca9e28080f8ec4c5567bfb8..533a7f90dad39ad82e8aecd9beb6e46a473a47af 100644
--- a/code/submit_train.sh
+++ b/code/submit_train.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 #SBATCH --job-name=train-nn-gpu
 #SBATCH -t 00:20:00 # estimated time # TODO: adapt to your needs/ the full training run will take approx. 5 h on an A100
-#SBATCH -p grete:shared # the partition you are training on (i.e., which nodes), for nodes see sinfo -o "%25N %5c %10m %32f %10G %18P " | grep gpu
+#SBATCH -p kisski # the partition you are training on (i.e., which nodes), for nodes see sinfo -o "%25N %5c %10m %32f %10G %18P " | grep gpu
 #SBATCH -G A100:1 # requesting GPU slices, see https://docs.hpc.gwdg.de/usage_guide/slurm/gpu_usage/index.html for more options
 #SBATCH --nodes=1 # total number of nodes
 #SBATCH --ntasks=1 # total number of tasks
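
For reference, a minimal sketch of how this partition change would typically be exercised, assuming standard Slurm tooling on the GWDG cluster (sbatch, squeue, and the sinfo invocation are taken from standard Slurm and the in-script comment; the script path is the one in the diff):

    sbatch code/submit_train.sh                            # submit the job to the kisski partition set above
    squeue -u $USER                                        # confirm the job is pending/running
    sinfo -o "%25N %5c %10m %32f %10G %18P " | grep gpu    # inspect GPU nodes/partitions, as the in-script comment suggests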