#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Validate salloc --exclusive with -n will give all cpus on node
#
# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
#          "FAILURE: ..." otherwise with an explanation of the failure, OR
#          anything else indicates a failure mode that must be investigated.
############################################################################
# Copyright (C) 2011-2014 SchedMD LLC
# Written by Nathan Yee
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
source ./globals

set test_id      15.27
set job_id       0
set nodes        ""
set cputot       0
set scontrol_cpu 0
set sacct_cpu    0
set gpu_tot      0
set hostname     ""
set job_tres_cnt 0
set exit_code    0

print_header $test_id

#
# Compare the CPU counts that scontrol and sacct report for the current
# job against the total CPU count of the allocated node.
#
proc check_alloc { } {
	global scontrol salloc sacct cputot scontrol_cpu sacct_cpu nodes
	global job_id number alpha_numeric_nodelist exit_code

	set node_name $nodes

	spawn $scontrol show job $job_id
	expect {
		-re "NodeList=($alpha_numeric_nodelist)" {
			set node_name $expect_out(1,string)
			exp_continue
		}
		-re "NumCPUs=($number)" {
			set scontrol_cpu $expect_out(1,string)
			exp_continue
		}
		timeout {
			send_user "\nFAILURE: scontrol is not responding\n"
			set exit_code 1
		}
		eof {
			wait
		}
	}
	if {$scontrol_cpu == 0} {
		send_user "\nFAILURE: number of cpus is invalid\n"
		exit 1
	}

	# Wait a bit for sacct to populate
	sleep 10

	spawn $sacct --job=$job_id --allocation -oalloccpus --noheader
	expect {
		-re "($number)" {
			set sacct_cpu $expect_out(1,string)
			exp_continue
		}
		timeout {
			send_user "\nFAILURE: sacct is not responding\n"
			set exit_code 1
		}
		eof {
			wait
		}
	}
	if {$sacct_cpu == 0} {
		send_user "\nFAILURE: number of cpus is invalid\n"
		exit 1
	}

	spawn $scontrol show node $node_name
	expect {
		-re "CPUTot=($number)" {
			set cputot $expect_out(1,string)
			exp_continue
		}
		timeout {
			send_user "\nFAILURE: scontrol is not responding\n"
			set exit_code 1
		}
		eof {
			wait
		}
	}

	if {$cputot != $scontrol_cpu} {
		send_user "\nFAILURE: scontrol reported that $scontrol_cpu cpus "
		send_user "were used by job $job_id when it should have used "
		send_user "$cputot cpus\n"
		set exit_code 1
	}
	if {$cputot != $sacct_cpu} {
		send_user "\nFAILURE: sacct reported that $sacct_cpu cpus were "
		send_user "used by job $job_id when it should have used "
		send_user "$cputot cpus\n"
		set exit_code 1
	}
}

if {[test_using_slurmdbd] == 0} {
	send_user "\nWARNING: Test invalid without slurmdbd accounting\n"
	exit 0
}

# get_node_config is expected to populate the global hostname and gpu_tot
if {![get_node_config 2]} {
	log_warn "This test requires 2 or more GPUs in a node of the default partition. Skipping."
	exit $exit_code
}

#
# Verify that an exclusive one-task job is given every CPU on its node
#
spawn $salloc -t1 -n1 --exclusive $srun -l $bin_printenv SLURMD_NODENAME
expect {
	-re "Granted job allocation ($number)" {
		set job_id $expect_out(1,string)
		exp_continue
	}
	-re "($number): ($alpha_numeric_nodelist)" {
		set nodes $expect_out(2,string)
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: salloc is not responding\n"
		set exit_code 1
	}
	eof {
		wait
	}
}
check_alloc

#
# Repeat the check with an explicit memory request
#
spawn $salloc -t1 -n1 --mem=100 --exclusive $srun -l $bin_printenv SLURMD_NODENAME
expect {
	-re "Granted job allocation ($number)" {
		set job_id $expect_out(1,string)
		exp_continue
	}
	-re "($number): ($alpha_numeric_nodelist)" {
		set nodes $expect_out(2,string)
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: salloc is not responding\n"
		set exit_code 1
	}
	eof {
		wait
	}
}
check_alloc

#
# Verify that all GPUs are allocated with the --exclusive flag
#
set job_id 0
spawn $salloc -t1 -n1 -w $hostname --gres=gpu --exclusive $srun -l $bin_printenv SLURMD_NODENAME
expect {
	-re "Granted job allocation ($number)" {
		set job_id $expect_out(1,string)
		exp_continue
	}
	timeout {
		log_error "salloc not responding"
		set exit_code 1
	}
	eof {
		wait
	}
}

set job_gpu_cnt [get_job_gpu_cnt $job_id]
if {$gpu_tot != $job_gpu_cnt} {
	log_error "Job did not allocate all GPUs ($gpu_tot != $job_gpu_cnt)"
	set exit_code 1
} else {
	log_info "Node GPU count matches job GPU count ($gpu_tot == $job_gpu_cnt)"
}

if {[wait_for_job $job_id "DONE"] != 0} {
	log_error "Job was not able to complete"
	cancel_job $job_id
	set exit_code 1
}

if {$exit_code == 0} {
	send_user "\nSUCCESS\n"
}
exit $exit_code
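
#
# A minimal sketch of how this test is typically invoked by hand (assumes
# the standard Slurm testsuite/expect layout, with this file named after
# its test id; adjust the path and file name to your checkout):
#
#   cd testsuite/expect
#   ./test15.27
#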