#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Test of batch job with multiple concurrent job steps
############################################################################
# Copyright (C) 2002-2006 The Regents of the University of California.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette
# Rewritten by Albert Gil
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
source ./globals

set file_in       "test$test_id.input"
set file_out      "test$test_id.output"
set file_err      "test$test_id.error"
set exit_code     0
set job_id        0
set steps_started 30
set job_mem_opt   "--mem-per-cpu=128M"
set step_mem_opt  "--mem-per-cpu=4M"

proc cleanup {} {
	global job_id bin_rm file_in file_out file_err

	if {$job_id} {
		cancel_job $job_id
	}
	exec $bin_rm -f $file_in $file_out $file_err
}

log_info "Initial cleanup/setup"
cleanup

# Build input script file
#
# NOTE: Explicitly set a small memory limit. Without explicitly setting the step
# memory limit, it will use the system default (same as the job) which may
# prevent the level of parallelism desired.
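#
# NOTE: The srun commands in the generated script below also use --overlap,
# which lets each step share the job's resources with the other steps;
# together with the small per-step memory limit this helps all of the
# background steps run at the same time.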
#
make_bash_script $file_in "
for ((i = 0; i < $steps_started; i++)); do
	$srun --overlap -N1 -n1 $step_mem_opt -J$test_name printenv SLURM_JOB_ID
	$srun --overlap -N1 -n1 $step_mem_opt -J$test_name $bin_sleep 60 &
done
wait
"

#
# Spawn a batch job with multiple steps in the background
#
log_info "Start main test"
set sbatch_pid [eval spawn $sbatch $job_mem_opt -n$steps_started -t1 --output=$file_out --error=$file_err -J$test_name $file_in]
expect {
	-re "Requested node configuration is not available" {
		skip "This test needs to be able to run a batch job with \"$job_mem_opt -n$steps_started -t1\""
	}
	-re "Submitted batch job ($number)" {
		set job_id $expect_out(1,string)
		exp_continue
	}
	timeout {
		slow_kill $sbatch_pid
		fail "sbatch not responding"
	}
	eof {
		wait
	}
}
if {$job_id == 0} {
	fail "Batch submit failure"
}

# Wait for job to start
if {[wait_for_job $job_id "RUNNING"]} {
	fail "Job was not able to start running"
}

#
# Check that all the background steps are reported by squeue
#
set step_cnt 0
wait_for {$step_cnt == $steps_started} {
	set step_cnt 0
	spawn $squeue -s --state=RUNNING
	expect {
		-re "($job_id)\\.($number)" {
			incr step_cnt
			exp_continue
		}
		eof {
			wait
		}
	}
}
subtest {$step_cnt == $steps_started} "All steps ($steps_started) should be reported by squeue" "($step_cnt != $steps_started)"

#
# Check that the output of all srun commands was written to the sbatch output file
#
cancel_job $job_id
if {[wait_for_file $file_out]} {
	fail "Output file not present ($file_out)"
}
set step_cnt 0
spawn $bin_cat $file_out
expect {
	-re "($job_id)" {
		incr step_cnt
		exp_continue
	}
	eof {
		wait
	}
}
subtest {$step_cnt == $steps_started} "All steps ($steps_started) should be reported in the output file: $file_out" "($step_cnt != $steps_started)"

if {$exit_code} {
	fail "Test failed due to previous errors (\$exit_code = $exit_code)"
}