diff --git a/code/submit_test.sh b/code/submit_test.sh
index e76ece158f4fad272624eaa112bffb3d466d4e9b..a8a587e8cfc297ca11c402e19fbc42c8b837d060 100644
--- a/code/submit_test.sh
+++ b/code/submit_test.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 #SBATCH --job-name=test-nn-gpu
 #SBATCH -t 00:10:00                  # estimated time # TODO: adapt to your needs
-#SBATCH -p grete:shared              # the partition you are training on (i.e., which nodes), for nodes see sinfo -p grete:interactive --format=%N,%G
-#SBATCH -G 1g.5gb:1 #A100:1          # requesting GPU slices, see https://docs.hpc.gwdg.de/usage_guide/slurm/gpu_usage/index.html for more options
+#SBATCH -p kisski                    # the partition you are training on (i.e., which nodes), for nodes see sinfo -o "%25N  %5c  %10m  %32f  %10G %18P " | grep gpu
+#SBATCH -G A100:1                    # requesting one full A100 GPU, see https://docs.hpc.gwdg.de/usage_guide/slurm/gpu_usage/index.html for more options
 #SBATCH --nodes=1                    # total number of nodes
 #SBATCH --ntasks=1                   # total number of tasks
 #SBATCH --cpus-per-task 4            # number of CPU cores per task