From 69115d2d81648508a0ff2ef7a2444345349bf7d5 Mon Sep 17 00:00:00 2001 From: Hauke Kirchner <hauke.gronenberg@gwdg.de> Date: Thu, 15 Aug 2024 12:44:43 +0000 Subject: [PATCH] Update submit_train.sh to fit kisski training platform --- code/submit_train.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/code/submit_train.sh b/code/submit_train.sh index 3f68aa4..02ab7fb 100644 --- a/code/submit_train.sh +++ b/code/submit_train.sh @@ -1,8 +1,8 @@ #!/bin/bash #SBATCH --job-name=train-nn-gpu -#SBATCH -t 05:00:00 # estimated time # TODO: adapt to your needs -#SBATCH -p grete:shared # the partition you are training on (i.e., which nodes), for nodes see sinfo -p grete:shared --format=%N,%G -#SBATCH -G 1g.5gb:1 #A100:1 # requesting GPU slices, see https://docs.hpc.gwdg.de/usage_guide/slurm/gpu_usage/index.html for more options +#SBATCH -t 00:20:00 # estimated time # TODO: adapt to your needs/ the full training run will take approx. 5 h on an A100 +#SBATCH -p grete:shared # the partition you are training on (i.e., which nodes), for nodes see sinfo -o "%25N %5c %10m %32f %10G %18P " | grep gpu +#SBATCH -G A100:1 # requesting one full A100 GPU (not a MIG slice), see https://docs.hpc.gwdg.de/usage_guide/slurm/gpu_usage/index.html for more options #SBATCH --nodes=1 # total number of nodes #SBATCH --ntasks=1 # total number of tasks #SBATCH --cpus-per-task 4 # number of CPU cores per task -- GitLab