#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Test --gpus-per-task option with --overcommit
#
# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
#          "FAILURE: ..." otherwise with an explanation of the failure, OR
#          anything else indicates a failure mode that must be investigated.
############################################################################
# Copyright (C) 2018 SchedMD LLC
# Written by Morris Jette
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
############################################################################
source ./globals

set test_id       "39.15"
set exit_code     0
set file_in       "test$test_id.input"
set number_commas "\[0-9_,\]+"

print_header $test_id

if {[test_cons_tres]} {
	send_user "\nValid configuration, using select/cons_tres\n"
} else {
	send_user "\nWARNING: This test is only compatible with select/cons_tres\n"
	exit 0
}
if {[test_front_end]} {
	send_user "\nWARNING: This test is incompatible with front-end systems\n"
	exit $exit_code
}

set def_part_name [default_partition]
set nb_nodes [get_node_cnt_in_part $def_part_name]
if {$nb_nodes < 2} {
	send_user "\nWARNING: This test requires 2 or more nodes in the default partition\n"
	exit 0
}
set nb_nodes 2
set nb_tasks 3
set gpu_cnt [get_gpu_count $nb_nodes]
if {$gpu_cnt < 0} {
	send_user "\nFAILURE: Error getting GPU count\n"
	exit 1
}
if {$gpu_cnt < 2} {
	send_user "\nWARNING: This test requires 2 or more GPUs in the default partition\n"
	exit 0
}

get_node_config
send_user "\nGPU count is $gpu_cnt\n"
send_user "Sockets per node is $sockets_per_node\n"
send_user "CPUs per socket is $cpus_per_socket\n"
set cpus_per_node [expr $sockets_per_node * $cpus_per_socket]
if {$cpus_per_node < 2} {
	send_user "\nWARNING: This test requires 2 or more CPUs per node in the default partition\n"
	exit 0
}
# Cap the GPU count at the CPU count, since --cpus-per-gpu=1 is used below
if {$gpu_cnt > $cpus_per_node} {
	set gpu_cnt $cpus_per_node
}

#
# Build input script file
# Only local task 0 on each node reports its CUDA_VISIBLE_DEVICES
#
make_bash_script $file_in "if \[ \$SLURM_LOCALID -eq 0 \]; then
	echo HOST:\$SLURMD_NODENAME CUDA_VISIBLE_DEVICES:\$CUDA_VISIBLE_DEVICES
fi
if \[ \$SLURM_PROCID -eq 0 \]; then
	$scontrol -dd show job \$SLURM_JOB_ID | grep \"GRES=\"
fi
exit 0"

#
# Run 3 tasks on 2 nodes with 1 GPU per task, relying on --overcommit
# to place 2 of the tasks on the same node
#
set match 0
set timeout $max_job_delay
set srun_pid [spawn $srun --cpus-per-gpu=1 --gpus-per-task=1 --nodes=$nb_nodes --overcommit --ntasks=$nb_tasks -t1 -J "test$test_id" -l ./$file_in]
expect {
	-re "CUDA_VISIBLE_DEVICES:($number_commas)" {
		incr match [cuda_count $expect_out(1,string)]
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: srun not responding\n"
		slow_kill $srun_pid
		set exit_code 1
	}
	eof {
		wait
	}
}
# NOTE: 2 tasks run on one node and 1 task on the other. Since only local
# task 0 on each node reports, we expect one report listing 2 GPUs and one
# report listing 1 GPU, for a total of $nb_tasks GPU IDs
if {$match != $nb_tasks} {
	send_user "\nFAILURE: srun --gpus-per-task with --overcommit failure ($match != $nb_tasks)\n"
	set exit_code 1
}

if {$exit_code == 0} {
	exec $bin_rm -f $file_in
	send_user "\nSUCCESS\n"
}
exit $exit_code