#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Test of task distribution support on multi-core systems.
#
# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
#          "WARNING: ..." with an explanation of why the test can't be made, OR
#          "FAILURE: ..." otherwise with an explanation of the failure, OR
#          anything else indicates a failure mode that must be investigated.
############################################################################
# Copyright (C) 2005-2007 The Regents of the University of California.
# Copyright (C) 2008-2010 Lawrence Livermore National Security.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see .
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
############################################################################
source ./globals

set test_id     "1.92"
set exit_code   0
set file_bash   "test$test_id.bash"
set job_id      0
set prompt      "PROMPT: "

print_header $test_id

if {[test_front_end]} {
	send_user "\nWARNING: This test is incompatible with front-end systems\n"
	exit $exit_code
}

# Find out if we have enough nodes to test functionality
set partition  [default_partition]
set node_count [get_node_cnt_in_part $partition]
if {$node_count < 2} {
	send_user "WARNING: Insufficient nodes in default partition ($node_count < 2)\n"
	exit $exit_code
}

make_bash_script $file_bash {
	echo nodeid:$SLURM_NODEID taskid:$SLURM_PROCID localid:$SLURM_LOCALID
	exit 0
}

#
# Create an allocation
#
set timeout $max_job_delay
set salloc_pid [spawn $salloc -N2 --ntasks-per-node=2 --verbose -t2 $bin_bash]
expect {
	-re "salloc: Granted job allocation ($number)" {
		send "export PS1=\"$prompt\"\r"
		set job_id $expect_out(1,string)
		exp_continue
	}
	-re "(configuration is not available|Unable to submit batch job|Node count specification invalid|More processors requested than permitted)" {
		send_user "\nWARNING: can't test srun task distribution\n"
		file delete $file_bash
		exit 0
	}
	-re "\"$prompt" {
		# Skip this, just the echo of setting the prompt
		exp_continue
	}
	-re $prompt {
		send "$srun -l -c1 ./$file_bash | $bin_sort -V\r"
	}
	timeout {
		send_user "\nFAILURE: salloc not responding\n"
		slow_kill $salloc_pid
		exit 1
	}
}
if {$job_id == 0} {
	send_user "\nFAILURE: salloc failure\n"
	exit 1
}
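#
# Each task launched below runs test$test_id.bash, which reports its
# relative node index (SLURM_NODEID), global task rank (SLURM_PROCID)
# and node-local task ID (SLURM_LOCALID). Because the srun output is
# rank-prefixed (-l) and sorted (-V), the checks that follow can walk
# the tasks in rank order: block distribution should keep node IDs
# non-decreasing, while cyclic distribution should change node on
# every task.
#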
#############################################################################
#
# Run a job step to get allocated processor count
#
set mask 0
set task_cnt 0
set prev_node -1
set node_cnt 0
expect {
	-re "nodeid:($number) taskid:($number)" {
		set this_node $expect_out(1,string)
		set this_tid  $expect_out(2,string)
		incr task_cnt 1
		if {$this_node != $prev_node} {
			incr node_cnt 1
			set prev_node $this_node
		}
		exp_continue
	}
	-re "error" {
		send_user "\nFAILURE: some error occurred\n"
		set exit_code 1
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: salloc not responding "
		send_user "or failure to recognize prompt\n"
		slow_kill $salloc_pid
		exit 1
	}
	-re $prompt
}

if {$node_cnt != 2} {
	send_user "\nWARNING: need 2 nodes to perform test\n"
	file delete $file_bash
	exit $exit_code
}
if {$task_cnt < (2 * $node_cnt)} {
	send_user "\nWARNING: need at least 2 CPUs per node, test is not applicable\n"
	file delete $file_bash
	exit $exit_code
}

#############################################################################
#
# Run a job step with block distribution
#
set this_cnt  0
set prev_node -1
set this_node -1
send "$srun -l -n $task_cnt -m block ./$file_bash | $bin_sort -V\r"
expect {
	-re "nodeid:($number) taskid:($number) localid:($number)" {
		set this_node $expect_out(1,string)
		set this_tid  $expect_out(2,string)
		set this_lid  $expect_out(3,string)
		incr this_cnt 1
		if {$prev_node != $this_node} {
			if {$prev_node > $this_node} {
				send_user "\nFAILURE: incorrect distribution "
				send_user " $this_node, $prev_node\n"
				set exit_code 1
			}
			set prev_node $this_node
			set prev_cnt 1
		} else {
			incr prev_cnt 1
		}
		exp_continue
	}
	-re "error" {
		send_user "\nFAILURE: some error occurred\n"
		set exit_code 1
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: srun not responding "
		send_user "or failure to recognize prompt\n"
		set exit_code 1
	}
	-re $prompt
}
if {$prev_node > $this_node} {
	send_user "\nFAILURE: incorrect final distribution\n"
	set exit_code 1
}
if {$this_cnt != $task_cnt} {
	send_user "\nFAILURE: task count inconsistency ($this_cnt,$task_cnt)\n"
	set exit_code 1
}

#############################################################################
#
# Run a job step with cyclic distribution
#
set block_size 1
set this_cnt  0
set prev_node -1
set this_node -1
set prev_cnt  $block_size
send "$srun -l -n $task_cnt -m cyclic ./$file_bash | $bin_sort -V\r"
expect {
	-re "nodeid:($number) taskid:($number) localid:($number)" {
		set this_node $expect_out(1,string)
		set this_tid  $expect_out(2,string)
		set this_lid  $expect_out(3,string)
		incr this_cnt 1
		if {$prev_node != $this_node} {
			if {$prev_cnt != $block_size} {
				send_user "\nFAILURE: incorrect distribution "
				send_user " $this_node, $prev_node, $prev_cnt\n"
				set exit_code 1
			}
			set prev_node $this_node
			set prev_cnt 1
		} else {
			incr prev_cnt 1
		}
		exp_continue
	}
	-re "error" {
		send_user "\nFAILURE: some error occurred\n"
		set exit_code 1
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: srun not responding "
		send_user "or failure to recognize prompt\n"
		set exit_code 1
	}
	-re $prompt
}
if {$prev_cnt != $block_size} {
	send_user "\nFAILURE: incorrect final distribution\n"
	set exit_code 1
}
if {$this_cnt != $task_cnt} {
	send_user "\nFAILURE: task count inconsistency ($this_cnt,$task_cnt)\n"
	set exit_code 1
}

#############################################################################
#
# Run a job step with plane distribution
#
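# With "-m plane=<size>", Slurm distributes tasks in blocks of <size>
# per node, cycling round-robin over the nodes until all tasks are
# placed. With a plane size of 2, consecutive task ranks should land
# on the same node in runs of at most 2, which is what the prev_cnt
# check below verifies.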
set block_size 2
set this_cnt  0
set prev_node -1
set this_node -1
set prev_cnt  $block_size
send "$srun -l -n $task_cnt -m plane=$block_size ./$file_bash | $bin_sort -V\r"
expect {
	-re "nodeid:($number) taskid:($number) localid:($number)" {
		set this_node $expect_out(1,string)
		set this_tid  $expect_out(2,string)
		set this_lid  $expect_out(3,string)
		incr this_cnt 1
		if {$prev_node == $this_node} {
			incr prev_cnt 1
		} else {
			# Ignore the initial sentinel value (-1) of prev_node
			if {$prev_node != -1 && $prev_cnt > $block_size} {
				send_user "\nFAILURE: incorrect distribution "
				send_user "(nodeid:$prev_node task_cnt:$prev_cnt)\n"
				set exit_code 1
			}
			set prev_node $this_node
			set prev_cnt 1
		}
		exp_continue
	}
	-re "error" {
		send_user "\nFAILURE: some error occurred\n"
		set exit_code 1
		exp_continue
	}
	timeout {
		send_user "\nFAILURE: srun not responding "
		send_user "or failure to recognize prompt\n"
		set exit_code 1
	}
	-re $prompt
}
if {$prev_cnt > $block_size} {
	send_user "\nFAILURE: incorrect final distribution "
	send_user "(nodeid:$prev_node task_cnt:$prev_cnt)\n"
	set exit_code 1
}
if {$this_cnt != $task_cnt} {
	send_user "\nFAILURE: task count inconsistency ($this_cnt,$task_cnt)\n"
	set exit_code 1
}

#############################################################################
#
# Terminate the job, free the allocation
#
send "exit\r"
expect {
	-re "error" {
		send_user "\nFAILURE: some error occurred\n"
		set exit_code 1
	}
	timeout {
		send_user "\nFAILURE: salloc not responding "
		send_user "or failure to recognize prompt\n"
		slow_kill $salloc_pid
		set exit_code 1
	}
	eof {
		wait
	}
}

if {$exit_code == 0} {
	send_user "\nSUCCESS\n"
} else {
	send_user "\nNOTE: This test can fail if the node configuration in slurm.conf\n"
	send_user "      (sockets, cores, threads) differs from the actual configuration\n"
}
file delete $file_bash
exit $exit_code