#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Confirm that node sharing flags are respected (--nodelist and
#          --oversubscribe options).
#
# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
#          "WARNING: ..." with an explanation of why the test can't be made, OR
#          "FAILURE: ..." otherwise with an explanation of the failure, OR
#          anything else indicates a failure mode that must be investigated.
############################################################################
# Copyright (C) 2002-2006 The Regents of the University of California.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
############################################################################
source ./globals

set test_id       "17.17"
set exit_code     0
set file_err      "test$test_id.error"
set file_in       "test$test_id.input"
set file_out      "test$test_id.output"
set job_id1       0
set job_id2       0
set nodelist_name ""
set gpu_tot       0

print_header $test_id

# Skip unless a node in the default partition has at least 2 GPUs
if {![get_node_config 2]} {
	log_warn "This test requires 2 or more GPUs in a node of the default partition. Skipping."
	exit $exit_code
}

#
# Submit a job and get the node's NodeName from the nodelist
#
set timeout $max_job_delay
set srun_pid [spawn $srun -v -N1 -l -t1 $bin_printenv SLURMD_NODENAME]
expect {
	-re "on host ($alpha_numeric_under)," {
		set nodelist_name $expect_out(1,string)
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: srun not responding\n"
		slow_kill $srun_pid
		set exit_code 1
	}
	eof {
		wait
	}
}
if {[string compare $nodelist_name ""] == 0} {
	send_user "\nFAILURE: Did not get hostname of task 0\n"
	exit 1
}
if {[test_front_end] != 0} {
	send_user "\nWARNING: Additional testing is incompatible with front-end systems\n"
	exit $exit_code
}

#
# Delete left-over input script
# Build input script file
#
exec $bin_rm -f $file_in
make_bash_script $file_in "$srun $bin_sleep 5"
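#
# Record how many GPUs the selected node has so the --exclusive GPU
# check later in this test can compare against it. This is a minimal
# sketch: it parses the "Gres=" line reported by scontrol and assumes
# the node chosen above is the GPU node found by the get_node_config
# check, with a gres specification of the form "gpu[:type]:<count>".
#
spawn $scontrol show node $nodelist_name
expect {
	-re "Gres=gpu\[^0-9\]*($number)" {
		set gpu_tot $expect_out(1,string)
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: scontrol not responding\n"
		set exit_code 1
	}
	eof {
		wait
	}
}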
#
# Submit two jobs to the same node, one with no sharing, the other
# with sharing permitted. Ensure the first job completes before the
# second job is started.
#
set sbatch_pid [spawn $sbatch -N1 --exclusive --nodelist=$nodelist_name -t1 --output=$file_out --error=$file_err $file_in]
expect {
	-re "Submitted batch job ($number)" {
		set job_id1 $expect_out(1,string)
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: sbatch not responding\n"
		slow_kill $sbatch_pid
		exit 1
	}
	eof {
		wait
	}
}
if {$job_id1 == 0} {
	send_user "\nFAILURE: sbatch failed to report jobid\n"
	exit 1
}

set partition "dummy"
set waited 1
set timeout [expr $max_job_delay + 5]
set srun_pid [spawn $srun -N1 --nodelist=$nodelist_name -t1 --oversubscribe $scontrol -o show job $job_id1]
expect {
	-re "JobState=RUN" {
		set waited 0
		exp_continue
	}
	-re "Partition=($alpha_numeric_under)" {
		set partition $expect_out(1,string)
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: srun not responding\n"
		slow_kill $srun_pid
		cancel_job $job_id1
		exit 1
	}
	eof {
		wait
	}
}
if {$waited == 0} {
	spawn $scontrol show partition $partition
	expect {
		-re "OverSubscribe=FORCE" {
			send_user "\nWARNING: Test incompatible with OverSubscribe=FORCE\n"
			set waited 1
			exp_continue
		}
		eof {
			wait
		}
	}
}
if {$waited == 0} {
	send_user "\nFAILURE: srun failed to wait for non-sharing job to complete\n"
	set exit_code 1
}

#
# Verify that all GPUs are allocated with the --exclusive flag,
# targeting the same node used above
#
set sbatch_pid2 [spawn $sbatch -t1 -N1 -w $nodelist_name --gres=gpu --exclusive --output=$file_out $file_in]
expect {
	-re "Submitted batch job ($number)" {
		set job_id2 $expect_out(1,string)
		exp_continue
	}
	timeout {
		log_error "sbatch not responding"
		set exit_code 1
	}
	eof {
		wait
	}
}
if {$job_id2 == 0} {
	log_error "sbatch failed to report jobid"
	set exit_code 1
}

if {[wait_for_job $job_id2 "RUNNING"] != 0} {
	log_error "waiting for job to start running"
	set exit_code 1
}

set job_gpu_cnt [get_job_gpu_cnt $job_id2]
if {$gpu_tot != $job_gpu_cnt} {
	log_error "Job didn't allocate all GPUs ($gpu_tot != $job_gpu_cnt)"
	set exit_code 1
} else {
	log_info "Node GPU count matches Job GPU count ($gpu_tot == $job_gpu_cnt)"
}

if {[wait_for_job $job_id2 "DONE"] != 0} {
	log_error "job wasn't able to complete"
	cancel_job $job_id2
	set exit_code 1
}

cancel_job $job_id1
if {$exit_code == 0} {
	exec $bin_rm -f $file_err $file_in $file_out
	send_user "\nSUCCESS\n"
}
exit $exit_code