#!/usr/bin/env @PYTHON_SHEBANG@
# SPDX-License-Identifier: CDDL-1.0

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2012, 2018 by Delphix. All rights reserved.
# Copyright (c) 2019 Datto Inc.
# Copyright (c) 2025, Klara, Inc.
#
# This script must remain compatible with Python 3.6+.
#

import os
import sys
import ctypes
import re
import configparser
import traceback

from datetime import datetime
from optparse import OptionParser
from pwd import getpwnam
from pwd import getpwuid
from select import select
from subprocess import PIPE
from subprocess import Popen
from subprocess import check_output
from subprocess import run
from threading import Timer
from time import time, CLOCK_MONOTONIC
from os.path import exists

BASEDIR = '/var/tmp/test_results'
TESTDIR = '/usr/share/zfs/'
KMEMLEAK_FILE = '/sys/kernel/debug/kmemleak'
KILL = 'kill'
TRUE = 'true'
SUDO = 'sudo'
LOG_FILE = 'LOG_FILE'
LOG_OUT = 'LOG_OUT'
LOG_ERR = 'LOG_ERR'
LOG_FILE_OBJ = None

try:
    from time import monotonic as monotonic_time
except ImportError:
    class timespec(ctypes.Structure):
        _fields_ = [
            ('tv_sec', ctypes.c_long),
            ('tv_nsec', ctypes.c_long)
        ]

    librt = ctypes.CDLL('librt.so.1', use_errno=True)
    clock_gettime = librt.clock_gettime
    clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)]

    def monotonic_time():
        t = timespec()
        if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
            errno_ = ctypes.get_errno()
            raise OSError(errno_, os.strerror(errno_))
        return t.tv_sec + t.tv_nsec * 1e-9


class Result(object):
    total = 0
    runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0, 'RERAN': 0}

    def __init__(self):
        self.starttime = None
        self.returncode = None
        self.runtime = ''
        self.stdout = []
        self.stderr = []
        self.kmemleak = ''
        self.result = ''

    def done(self, proc, killed, reran):
        """
        Finalize the results of this Cmd.
        """
        Result.total += 1
        m, s = divmod(monotonic_time() - self.starttime, 60)
        self.runtime = '%02d:%02d' % (m, s)
        self.returncode = proc.returncode
        if reran is True:
            Result.runresults['RERAN'] += 1
        if killed:
            self.result = 'KILLED'
            Result.runresults['KILLED'] += 1
        elif len(self.kmemleak) > 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1
        elif self.returncode == 0:
            self.result = 'PASS'
            Result.runresults['PASS'] += 1
        elif self.returncode == 4:
            self.result = 'SKIP'
            Result.runresults['SKIP'] += 1
        elif self.returncode != 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1


class Output(object):
    """
    This class is a slightly modified version of the 'Stream' class found
    here: https://stackoverflow.com/q/4984549/
    """
    def __init__(self, stream, debug=False):
        self.stream = stream
        self.debug = debug
        self._buf = b''
        self.lines = []

    def fileno(self):
        return self.stream.fileno()

    def read(self, drain=0):
        """
        Read from the file descriptor. If 'drain' set, read until EOF.
        """
        while self._read() is not None:
            if not drain:
                break

    def _read(self):
        """
        Read up to 4k of data from this output stream. Collect the output
        up to the last newline, and append it to any leftover data from a
        previous call. The lines are stored as a (timestamp, data) tuple
        for easy sorting/merging later.
        """
        fd = self.fileno()
        buf = os.read(fd, 4096)
        if not buf:
            return None
        if self.debug:
            os.write(sys.stderr.fileno(), buf)
        if b'\n' not in buf:
            self._buf += buf
            return []

        buf = self._buf + buf
        tmp, rest = buf.rsplit(b'\n', 1)
        self._buf = rest
        now = datetime.now()
        rows = tmp.split(b'\n')
        self.lines += [(now, r) for r in rows]


class Cmd(object):
    verified_users = []

    def __init__(self, pathname, identifier=None, outputdir=None,
                 timeout=None, user=None, tags=None):
        self.pathname = pathname
        self.identifier = identifier
        self.outputdir = outputdir or BASEDIR
        """
        The timeout for tests is measured in wall-clock time
        """
        self.timeout = timeout
        self.user = user or ''
        self.killed = False
        self.reran = None
        self.result = Result()

        if self.timeout is None:
            self.timeout = 60

    def __str__(self):
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Timeout: %d
User: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user)

    def kill_cmd(self, proc, options, kmemleak, keyboard_interrupt=False):
        """
        We're about to kill a command due to a timeout.
        If we're running with the -O option, then dump debug info about the
        process with the highest CPU usage to /dev/kmsg (Linux only). This can
        help debug the timeout.

        Debug info includes:
          - 30 lines from 'top'
          - /proc/<PID>/stack output of process with highest CPU usage
          - Last lines strace-ing process with highest CPU usage
        """
        if options.timeout_debug and exists("/dev/kmsg"):
            c = """
TOP_OUT="$(COLUMNS=160 top -b -n 1 | head -n 30)"
read -r PID CMD <<< $(echo "$TOP_OUT" | /usr/bin/awk \
"/COMMAND/{
        print_next=1
        next
}
{
        if (print_next == 1) {
                print \\$1\\" \\"\\$12
                exit
        }
}")
echo "##### ZTS timeout debug #####"
echo "----- top -----"
echo "$TOP_OUT"
echo "----- /proc/$PID/stack ($CMD) -----"
cat /proc/$PID/stack
echo "----- strace ($CMD) -----"
TMPFILE="$(mktemp --suffix=ZTS)"
/usr/bin/strace -k --stack-traces -p $PID &> "$TMPFILE" &
sleep 0.1
killall strace
tail -n 30 $TMPFILE
rm "$TMPFILE"
echo "##### /proc/sysrq-trigger stack #####"
"""
            c = "sudo bash -c '" + c + "'"
            data = run(c, capture_output=True, shell=True, text=True)
            out = data.stdout
            try:
                kp = Popen([SUDO, "sh", "-c",
                            "echo '" + out + "' > /dev/kmsg"])
                kp.wait()

                """
                Trigger kernel stack traces
                """
                kp = Popen([SUDO, "sh", "-c",
                            "echo l > /proc/sysrq-trigger"])
                kp.wait()
            except Exception:
                pass

        """
        Kill a running command due to timeout, or ^C from the keyboard. If
        sudo is required, this user was verified previously.
        """
        self.killed = True
        do_sudo = len(self.user) != 0
        signal = '-TERM'

        cmd = [SUDO, KILL, signal, str(proc.pid)]
        if not do_sudo:
            del cmd[0]

        try:
            kp = Popen(cmd)
            kp.wait()
        except Exception:
            pass

        """
        If this is not a user-initiated kill and the test has not already
        been rerun, consider whether it needs to be rerun: if the test spent
        some time hibernating and didn't run for its whole timeout before
        being killed, rerun it.
        """
        if keyboard_interrupt is False and self.reran is None:
            runtime = monotonic_time() - self.result.starttime
            if int(self.timeout) > runtime:
                self.killed = False
                self.reran = False
                self.run(options, dryrun=False, kmemleak=kmemleak)
                self.reran = True

    def update_cmd_privs(self, cmd, user):
        """
        If a user has been specified to run this Cmd and we're not already
        running as that user, prepend the appropriate sudo command to run
        as that user.
        """
        me = getpwuid(os.getuid())

        if not user or user == me.pw_name:
            if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
                cmd += '.ksh'
            if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
                cmd += '.sh'
            return cmd

        if not os.path.isfile(cmd):
            if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK):
                cmd += '.ksh'
            if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK):
                cmd += '.sh'

        # glibc (at least) will not pass TMPDIR through to setuid programs.
        # if set, arrange for it to be reset before running the target cmd
        tmpdir = os.getenv('TMPDIR')
        if tmpdir:
            tmpdirarg = 'env TMPDIR=%s' % tmpdir
        else:
            tmpdirarg = ''

        ret = '%s -E -u %s %s %s' % (SUDO, user, tmpdirarg, cmd)
        return ret.split()

    def collect_output(self, proc, debug=False):
        """
        Read from stdout/stderr as data becomes available, until the
        process is no longer running. Return the lines from the stdout and
        stderr Output objects.
        """
        out = Output(proc.stdout, debug)
        err = Output(proc.stderr, debug)
        res = []
        while proc.returncode is None:
            proc.poll()
            res = select([out, err], [], [], .1)
            for fd in res[0]:
                fd.read()
        for fd in res[0]:
            fd.read(drain=1)

        return out.lines, err.lines

    def run(self, options, dryrun=None, kmemleak=None):
        """
        This is the main function that runs each individual test.
        Determine whether or not the command requires sudo, and modify it
        if needed. Run the command, and update the result object.
        """
        if dryrun is None:
            dryrun = options.dryrun
        if dryrun is True:
            print(self)
            return
        if kmemleak is None:
            kmemleak = options.kmemleak

        privcmd = self.update_cmd_privs(self.pathname, self.user)
        try:
            old = os.umask(0)
            if not os.path.isdir(self.outputdir):
                os.makedirs(self.outputdir, mode=0o777)
            os.umask(old)
        except OSError as e:
            fail('%s' % e)

        """
        Log each test we run to /dev/kmsg (on Linux), so if there's a kernel
        warning we'll be able to match it up to a particular test.
        """
        if options.kmsg is True and exists("/dev/kmsg"):
            try:
                kp = Popen([SUDO, "sh", "-c",
                            f"echo ZTS run {self.pathname} > /dev/kmsg"])
                kp.wait()
            except Exception:
                pass

        """
        Log each test we run to /dev/ttyu0 (on FreeBSD), so if there's a
        kernel warning we'll be able to match it up to a particular test.
        """
        if options.kmsg is True and exists("/dev/ttyu0"):
            try:
                kp = Popen([SUDO, "sh", "-c",
                            f"echo ZTS run {self.pathname} > /dev/ttyu0"])
                kp.wait()
            except Exception:
                pass

        self.result.starttime = monotonic_time()

        if kmemleak:
            cmd = f'{SUDO} sh -c "echo clear > {KMEMLEAK_FILE}"'
            check_output(cmd, shell=True)

        proc = Popen(privcmd, stdout=PIPE, stderr=PIPE)
        # Allow a special timeout value of 0 to mean infinity
        if int(self.timeout) == 0:
            self.timeout = sys.maxsize / (10 ** 9)
        t = Timer(
            int(self.timeout), self.kill_cmd, [proc, options, kmemleak]
        )

        try:
            t.start()

            out, err = self.collect_output(proc, options.debug)
            self.result.stdout = out
            self.result.stderr = err

            if kmemleak:
                cmd = f'{SUDO} sh -c "echo scan > {KMEMLEAK_FILE}"'
                check_output(cmd, shell=True)
                cmd = f'{SUDO} cat {KMEMLEAK_FILE}'
                self.result.kmemleak = check_output(cmd, shell=True)
        except KeyboardInterrupt:
            self.kill_cmd(proc, options, kmemleak, True)
            fail('\nRun terminated at user request.')
        finally:
            t.cancel()

        if self.reran is not False:
            self.result.done(proc, self.killed, self.reran)

    def skip(self):
        """
        Initialize enough of the test result that we can log a skipped
        command.
        """
        Result.total += 1
        Result.runresults['SKIP'] += 1
        self.result.stdout = self.result.stderr = []
        self.result.starttime = monotonic_time()
        m, s = divmod(monotonic_time() - self.result.starttime, 60)
        self.result.runtime = '%02d:%02d' % (m, s)
        self.result.result = 'SKIP'

    def log(self, options, suppress_console=False):
        """
        This function is responsible for writing all output. This includes
        the console output, the logfile of all results (with timestamped
        merged stdout and stderr), and for each test, the unmodified
        stdout/stderr/merged in its own file.
        """

        timeprefix = datetime.now().strftime('[%FT%T.%f] ')

        logname = getpwuid(os.getuid()).pw_name
        rer = ''
        if self.reran is True:
            rer = ' (RERAN)'
        user = ' (run as %s)' % (self.user if len(self.user) else logname)
        if self.identifier:
            msga = 'Test (%s): %s%s ' % (self.identifier, self.pathname, user)
        else:
            msga = 'Test: %s%s ' % (self.pathname, user)
        msgb = '[%s] [%s]%s\n' % (self.result.runtime, self.result.result, rer)
        pad = ' ' * (80 - (len(msga) + len(msgb)))
        result_line = timeprefix + msga + pad + msgb

        # The result line is always written to the log file. If -q was
        # specified only failures are written to the console, otherwise
        # the result line is written to the console. The console output
        # may be suppressed by calling log() with suppress_console=True.
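        # Note: write_log() expects a bytes-like object when the target is
        # LOG_FILE, so that raw test output in unknown encodings can pass
        # through untouched; hence the explicit utf-8 encoding of the
        # result line below.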
        write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE)
        if not suppress_console:
            if not options.quiet:
                write_log(result_line, LOG_OUT)
            elif options.quiet and self.result.result != 'PASS':
                write_log(result_line, LOG_OUT)

        lines = sorted(self.result.stdout + self.result.stderr,
                       key=lambda x: x[0])

        # Write timestamped output (stdout and stderr) to the logfile
        for dt, line in lines:
            timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11],
                                  encoding='utf-8')
            write_log(b'%s %s\n' % (timestamp, line), LOG_FILE)

        # Write the separate stdout/stderr/merged files, if the data exists
        if len(self.result.stdout):
            with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out:
                for _, line in self.result.stdout:
                    os.write(out.fileno(), b'%s\n' % line)
        if len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err:
                for _, line in self.result.stderr:
                    os.write(err.fileno(), b'%s\n' % line)
        if len(self.result.stdout) and len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged:
                for _, line in lines:
                    os.write(merged.fileno(), b'%s\n' % line)
        if len(self.result.kmemleak):
            with open(os.path.join(self.outputdir, 'kmemleak'), 'wb') as kmem:
                kmem.write(self.result.kmemleak)


class Test(Cmd):
    props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post',
             'post_user', 'failsafe', 'failsafe_user', 'tags']

    def __init__(self, pathname,
                 pre=None, pre_user=None, post=None, post_user=None,
                 failsafe=None, failsafe_user=None, tags=None, **kwargs):
        super(Test, self).__init__(pathname, **kwargs)
        self.pre = pre or ''
        self.pre_user = pre_user or ''
        self.post = post or ''
        self.post_user = post_user or ''
        self.failsafe = failsafe or ''
        self.failsafe_user = failsafe_user or ''
        self.tags = tags or []

    def __str__(self):
        post_user = pre_user = failsafe_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        if len(self.failsafe_user):
            failsafe_user = ' (as %s)' % (self.failsafe_user)
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Timeout: %d
User: %s
Pre: %s%s
Post: %s%s
Failsafe: %s%s
Tags: %s
''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user,
       self.pre, pre_user, self.post, post_user, self.failsafe,
       failsafe_user, self.tags)

    def verify(self):
        """
        Check the pre/post/failsafe scripts, user and Test. Omit the Test from
        this run if there are any problems.
        """
        files = [self.pre, self.pathname, self.post, self.failsafe]
        users = [self.pre_user, self.user, self.post_user, self.failsafe_user]

        for f in [f for f in files if len(f)]:
            if not verify_file(f):
                write_log("Warning: Test '%s' not added to this run because"
                          " it failed verification.\n" % f, LOG_ERR)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user):
                write_log("Not adding Test '%s' to this run.\n" %
                          self.pathname, LOG_ERR)
                return False

        return True

    def run(self, options, dryrun=None, kmemleak=None):
        """
        Create Cmd instances for the pre/post/failsafe scripts. If the pre
        script doesn't pass, skip this Test. Run the post script regardless.
        If the Test is killed, also run the failsafe script.
        """
        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, identifier=self.identifier, outputdir=odir,
                      timeout=self.timeout, user=self.pre_user)
        test = Cmd(self.pathname, identifier=self.identifier,
                   outputdir=self.outputdir, timeout=self.timeout,
                   user=self.user)
        odir = os.path.join(self.outputdir, os.path.basename(self.failsafe))
        failsafe = Cmd(self.failsafe, identifier=self.identifier,
                       outputdir=odir, timeout=self.timeout,
                       user=self.failsafe_user)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, identifier=self.identifier, outputdir=odir,
                       timeout=self.timeout, user=self.post_user)

        cont = True
        if len(pretest.pathname):
            pretest.run(options, kmemleak=False)
            cont = pretest.result.result == 'PASS'
            pretest.log(options)

        if cont:
            test.run(options, kmemleak=kmemleak)
            if test.result.result == 'KILLED' and len(failsafe.pathname):
                failsafe.run(options, kmemleak=False)
                failsafe.log(options, suppress_console=True)
        else:
            test.skip()

        test.log(options)

        if len(posttest.pathname):
            posttest.run(options, kmemleak=False)
            posttest.log(options)


class TestGroup(Test):
    props = Test.props + ['tests']

    def __init__(self, pathname, tests=None, **kwargs):
        super(TestGroup, self).__init__(pathname, **kwargs)
        self.tests = tests or []

    def __str__(self):
        post_user = pre_user = failsafe_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        if len(self.failsafe_user):
            failsafe_user = ' (as %s)' % (self.failsafe_user)
        return '''\
Pathname: %s
Identifier: %s
Outputdir: %s
Tests: %s
Timeout: %s
User: %s
Pre: %s%s
Post: %s%s
Failsafe: %s%s
Tags: %s
''' % (self.pathname, self.identifier, self.outputdir, self.tests,
       self.timeout, self.user, self.pre, pre_user, self.post, post_user,
       self.failsafe, failsafe_user, self.tags)

    def filter(self, keeplist):
        self.tests = [x for x in self.tests if x in keeplist]

    def verify(self):
        """
        Check the pre/post/failsafe scripts, user and tests in this TestGroup.
        Omit the TestGroup entirely, or simply delete the relevant tests in the
        group, if that's all that's required.
        """
        # If the pre/post/failsafe scripts are relative pathnames, convert to
        # absolute, so they stand a chance of passing verification.
        if len(self.pre) and not os.path.isabs(self.pre):
            self.pre = os.path.join(self.pathname, self.pre)
        if len(self.post) and not os.path.isabs(self.post):
            self.post = os.path.join(self.pathname, self.post)
        if len(self.failsafe) and not os.path.isabs(self.failsafe):
            self.failsafe = os.path.join(self.pathname, self.failsafe)

        auxfiles = [self.pre, self.post, self.failsafe]
        users = [self.pre_user, self.user, self.post_user, self.failsafe_user]

        for f in [f for f in auxfiles if len(f)]:
            if f != self.failsafe and self.pathname != os.path.dirname(f):
                write_log("Warning: TestGroup '%s' not added to this run. "
                          "Auxiliary script '%s' exists in a different "
                          "directory.\n" % (self.pathname, f), LOG_ERR)
                return False

            if not verify_file(f):
                write_log("Warning: TestGroup '%s' not added to this run. "
                          "Auxiliary script '%s' failed verification.\n" %
                          (self.pathname, f), LOG_ERR)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user):
                write_log("Not adding TestGroup '%s' to this run.\n" %
                          self.pathname, LOG_ERR)
                return False

        # If one of the tests is invalid, delete it, log it, and drive on.
        for test in self.tests:
            if not verify_file(os.path.join(self.pathname, test)):
                del self.tests[self.tests.index(test)]
                write_log("Warning: Test '%s' removed from TestGroup '%s' "
                          "because it failed verification.\n" %
                          (test, self.pathname), LOG_ERR)

        return len(self.tests) != 0

    def run(self, options, dryrun=None, kmemleak=None):
        """
        Create Cmd instances for the pre/post/failsafe scripts. If the pre
        script doesn't pass, skip all the tests in this TestGroup. Run the
        post script regardless. Run the failsafe script when a test is killed.
        """
        # tags assigned to this test group also include the test names
        if options.tags and not set(self.tags).intersection(set(options.tags)):
            return

        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, outputdir=odir, timeout=self.timeout,
                      user=self.pre_user, identifier=self.identifier)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, outputdir=odir, timeout=self.timeout,
                       user=self.post_user, identifier=self.identifier)

        cont = True
        if len(pretest.pathname):
            pretest.run(options, dryrun=dryrun, kmemleak=False)
            cont = pretest.result.result == 'PASS'
            pretest.log(options)

        for fname in self.tests:
            odir = os.path.join(self.outputdir, fname)
            test = Cmd(os.path.join(self.pathname, fname), outputdir=odir,
                       timeout=self.timeout, user=self.user,
                       identifier=self.identifier)
            odir = os.path.join(odir, os.path.basename(self.failsafe))
            failsafe = Cmd(self.failsafe, outputdir=odir, timeout=self.timeout,
                           user=self.failsafe_user, identifier=self.identifier)
            if cont:
                test.run(options, dryrun=dryrun, kmemleak=kmemleak)
                if test.result.result == 'KILLED' and len(failsafe.pathname):
                    failsafe.run(options, dryrun=dryrun, kmemleak=False)
                    failsafe.log(options, suppress_console=True)
            else:
                test.skip()

            test.log(options)

        if len(posttest.pathname):
            posttest.run(options, dryrun=dryrun, kmemleak=False)
            posttest.log(options)


class TestRun(object):
    props = ['quiet', 'outputdir', 'debug']

    def __init__(self, options):
        self.tests = {}
        self.testgroups = {}
        self.starttime = time()
        self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
        self.outputdir = os.path.join(options.outputdir, self.timestamp)
        self.setup_logging(options)
        self.defaults = [
            ('outputdir', BASEDIR),
            ('quiet', False),
            ('timeout', 60),
            ('user', ''),
            ('pre', ''),
            ('pre_user', ''),
            ('post', ''),
            ('post_user', ''),
            ('failsafe', ''),
            ('failsafe_user', ''),
            ('tags', []),
            ('debug', False)
        ]

    def __str__(self):
        s = 'TestRun:\n outputdir: %s\n' % self.outputdir
        s += 'TESTS:\n'
        for key in sorted(self.tests.keys()):
            s += '%s%s' % (self.tests[key].__str__(), '\n')
        s += 'TESTGROUPS:\n'
        for key in sorted(self.testgroups.keys()):
            s += '%s%s' % (self.testgroups[key].__str__(), '\n')
        return s

    def addtest(self, pathname, options):
        """
        Create a new Test, and apply any properties that were passed in
        from the command line. If it passes verification, add it to the
        TestRun.
        """
        test = Test(pathname)
        for prop in Test.props:
            setattr(test, prop, getattr(options, prop))

        if test.verify():
            self.tests[pathname] = test

    def addtestgroup(self, dirname, filenames, options):
        """
        Create a new TestGroup, and apply any properties that were passed
        in from the command line. If it passes verification, add it to the
        TestRun.
        """
        if dirname not in self.testgroups:
            testgroup = TestGroup(dirname)
            for prop in Test.props:
                setattr(testgroup, prop, getattr(options, prop))

            # Prevent pre/post/failsafe scripts from running as regular tests
            for f in [testgroup.pre, testgroup.post, testgroup.failsafe]:
                if f in filenames:
                    del filenames[filenames.index(f)]

            self.testgroups[dirname] = testgroup
            self.testgroups[dirname].tests = sorted(filenames)

            testgroup.verify()

    def filter(self, keeplist):
        for group in list(self.testgroups.keys()):
            if group not in keeplist:
                del self.testgroups[group]
                continue

            g = self.testgroups[group]

            if g.pre and os.path.basename(g.pre) in keeplist[group]:
                continue

            g.filter(keeplist[group])

        for test in list(self.tests.keys()):
            directory, base = os.path.split(test)
            if directory not in keeplist or base not in keeplist[directory]:
                del self.tests[test]

    def read(self, options):
        """
        Read in the specified runfiles, and apply the TestRun properties
        listed in the 'DEFAULT' section to our TestRun. Then read each
        section, and apply the appropriate properties to the Test or
        TestGroup. Properties from individual sections override those set
        in the 'DEFAULT' section. If the Test or TestGroup passes
        verification, add it to the TestRun.
        """
        config = configparser.RawConfigParser()
        parsed = config.read(options.runfiles)
        failed = options.runfiles - set(parsed)
        if len(failed):
            files = ' '.join(sorted(failed))
            fail("Couldn't read config files: %s" % files)

        for opt in TestRun.props:
            if config.has_option('DEFAULT', opt):
                if opt == 'outputdir':
                    outputdir = config.get('DEFAULT', opt)
                    setattr(self, opt, os.path.join(outputdir, self.timestamp))
                else:
                    setattr(self, opt, config.get('DEFAULT', opt))

        testdir = options.testdir

        for section in config.sections():
            if 'tests' in config.options(section):
                parts = section.split(':', 1)
                sectiondir = parts[0]
                identifier = parts[1] if len(parts) == 2 else None
                if os.path.isdir(sectiondir):
                    pathname = sectiondir
                elif os.path.isdir(os.path.join(testdir, sectiondir)):
                    pathname = os.path.join(testdir, sectiondir)
                else:
                    pathname = sectiondir

                testgroup = TestGroup(os.path.abspath(pathname),
                                      identifier=identifier)
                for prop in TestGroup.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            if prop == 'tags':
                                setattr(testgroup, prop,
                                        eval(config.get(sect, prop)))
                            elif prop == 'failsafe':
                                failsafe = config.get(sect, prop)
                                setattr(testgroup, prop,
                                        os.path.join(testdir, failsafe))
                            elif prop == 'outputdir':
                                outputdir = config.get(sect, prop)
                                setattr(testgroup, prop,
                                        os.path.join(outputdir,
                                                     self.timestamp))
                            else:
                                setattr(testgroup, prop,
                                        config.get(sect, prop))

                # Repopulate tests using eval to convert the string to a list
                testgroup.tests = eval(config.get(section, 'tests'))

                if testgroup.verify():
                    self.testgroups[section] = testgroup
            else:
                test = Test(section)
                for prop in Test.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            if prop == 'failsafe':
                                failsafe = config.get(sect, prop)
                                setattr(test, prop,
                                        os.path.join(testdir, failsafe))
                            elif prop == 'outputdir':
                                outputdir = config.get(sect, prop)
                                setattr(test, prop,
                                        os.path.join(outputdir,
                                                     self.timestamp))
                            else:
                                setattr(test, prop, config.get(sect, prop))

                if test.verify():
                    self.tests[section] = test

    def write(self, options):
        """
        Create a configuration file for editing and later use. The
        'DEFAULT' section of the config file is created from the
        properties that were specified on the command line. Tests are
        simply added as sections that inherit everything from the
        'DEFAULT' section. TestGroups are the same, except they get an
        option including all the tests to run in that directory.
        """

        defaults = dict([(prop, getattr(options, prop)) for prop, _ in
                         self.defaults])
        config = configparser.RawConfigParser(defaults)

        for test in sorted(self.tests.keys()):
            config.add_section(test)
            for prop in Test.props:
                if prop not in self.props:
                    config.set(test, prop,
                               getattr(self.tests[test], prop))

        for testgroup in sorted(self.testgroups.keys()):
            config.add_section(testgroup)
            config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
            for prop in TestGroup.props:
                if prop not in self.props:
                    config.set(testgroup, prop,
                               getattr(self.testgroups[testgroup], prop))

        try:
            with open(options.template, 'w') as f:
                return config.write(f)
        except IOError:
            fail('Could not open \'%s\' for writing.' % options.template)

    def complete_outputdirs(self):
        """
        Collect all the pathnames for Tests, and TestGroups. Strip off all
        common leading path components, and append what remains to the top
        "output" dir, to create a tree of output directories that match
        the test and group names in structure. Tests will be able
        to write output files directly in the newly modified outputdir.
        TestGroups will be able to create one subdirectory per test in the
        outputdir, and are guaranteed uniqueness because a group can only
        contain files in one directory. Pre and post tests will create a
        directory rooted at the outputdir of the Test or TestGroup in
        question for their output. Failsafe scripts will create a directory
        rooted at the outputdir of each Test for their output.
        """

        alltests = dict(list(self.tests.items()) +
                        list(self.testgroups.items()))
        base = os.path.join(self.outputdir, 'output')

        seen = []

        for path in list(alltests.keys()):
            frag = path.split('/')
            for i in range(0, len(frag)):
                if len(seen) == i:
                    seen.append({})
                seen[i][frag[i]] = 1

        cut = 0
        for i in range(0, len(seen)):
            if len(list(seen[i].keys())) == 1:
                cut += 1
            else:
                break

        for path in list(alltests.keys()):
            uniq = path.split('/', cut)[-1]
            alltests[path].outputdir = os.path.join(base, uniq)

    def setup_logging(self, options):
        """
        This function creates the output directory and gets a file object
        for the logfile. This function must be called before write_log()
        can be used.
        """
        if options.dryrun is True:
            return

        global LOG_FILE_OBJ
        if not options.template:
            try:
                old = os.umask(0)
                os.makedirs(self.outputdir, mode=0o777)
                os.umask(old)
                filename = os.path.join(self.outputdir, 'log')
                LOG_FILE_OBJ = open(filename, buffering=0, mode='wb')
            except OSError as e:
                fail('%s' % e)

    def run(self, options):
        """
        Walk through all the Tests and TestGroups, calling run().
        """
        try:
            os.chdir(self.outputdir)
        except OSError:
            fail('Could not change to directory %s' % self.outputdir)
        # make a symlink to the output for the currently running test
        logsymlink = os.path.join(self.outputdir, '../current')
        if os.path.islink(logsymlink):
            os.unlink(logsymlink)
        if not os.path.exists(logsymlink):
            os.symlink(self.outputdir, logsymlink)
        else:
            write_log('Could not make a symlink to directory %s\n' %
                      self.outputdir, LOG_ERR)

        if options.kmemleak:
            cmd = f'{SUDO} sh -c "echo scan=0 > {KMEMLEAK_FILE}"'
            check_output(cmd, shell=True)

        iteration = 0
        while iteration < options.iterations:
            for test in sorted(self.tests.keys()):
                self.tests[test].run(options)
            for testgroup in sorted(self.testgroups.keys()):
                self.testgroups[testgroup].run(options)
            iteration += 1

    def summary(self):
        if Result.total == 0:
            return 2

        print('\nResults Summary')
        for key in list(Result.runresults.keys()):
            if Result.runresults[key] != 0:
                print('%s\t% 4d' % (key, Result.runresults[key]))

        m, s = divmod(time() - self.starttime, 60)
        h, m = divmod(m, 60)
        print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s))
        print('Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
                                            float(Result.total)) * 100))
        print('Log directory:\t%s' % self.outputdir)

        if Result.runresults['FAIL'] > 0:
            return 1

        if Result.runresults['KILLED'] > 0:
            return 1

        if Result.runresults['RERAN'] > 0:
            return 3

        return 0


def write_log(msg, target):
    """
    Write the provided message to standard out, standard error or
    the logfile. If specifying LOG_FILE, then `msg` must be a bytes
    like object. This way we can still handle output from tests that
    may be in unexpected encodings.
    """
    if target == LOG_OUT:
        os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8'))
    elif target == LOG_ERR:
        os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8'))
    elif target == LOG_FILE:
        os.write(LOG_FILE_OBJ.fileno(), msg)
    else:
        fail('log_msg called with unknown target "%s"' % target)


def verify_file(pathname):
    """
    Verify that the supplied pathname is an executable regular file.
    """
    if os.path.isdir(pathname) or os.path.islink(pathname):
        return False

    for ext in '', '.ksh', '.sh':
        script_path = pathname + ext
        if os.path.isfile(script_path) and os.access(script_path, os.X_OK):
            return True

    return False


def verify_user(user):
    """
    Verify that the specified user exists on this system, and can execute
    sudo without being prompted for a password.
    """
    testcmd = [SUDO, '-n', '-u', user, TRUE]

    if user in Cmd.verified_users:
        return True

    try:
        getpwnam(user)
    except KeyError:
        write_log("Warning: user '%s' does not exist.\n" % user,
                  LOG_ERR)
        return False

    p = Popen(testcmd)
    p.wait()
    if p.returncode != 0:
        write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user,
                  LOG_ERR)
        return False
    else:
        Cmd.verified_users.append(user)

    return True


def find_tests(testrun, options):
    """
    For the given list of pathnames, add files as Tests. For directories,
    if do_groups is True, add the directory as a TestGroup. If False,
    recursively search for executable files.
    """

    for p in sorted(options.pathnames):
        if os.path.isdir(p):
            for dirname, _, filenames in os.walk(p):
                if options.do_groups:
                    testrun.addtestgroup(dirname, filenames, options)
                else:
                    for f in sorted(filenames):
                        testrun.addtest(os.path.join(dirname, f), options)
        else:
            testrun.addtest(p, options)


def filter_tests(testrun, options):
    try:
        fh = open(options.logfile, "r")
    except Exception as e:
        fail('%s' % e)

    failed = {}
    while True:
        line = fh.readline()
        if not line:
            break
        m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
        if not m:
            continue
        group, test = m.group(1, 2)
        try:
            failed[group].append(test)
        except KeyError:
            failed[group] = [test]
    fh.close()

    testrun.filter(failed)


def fail(retstr, ret=255):
    print('%s: %s' % (sys.argv[0], retstr))
    exit(ret)


def kmemleak_cb(option, opt_str, value, parser):
    if not os.path.exists(KMEMLEAK_FILE):
        fail(f"File '{KMEMLEAK_FILE}' doesn't exist. " +
             "Enable CONFIG_DEBUG_KMEMLEAK in kernel configuration.")

    setattr(parser.values, option.dest, True)


def options_cb(option, opt_str, value, parser):
    path_options = ['outputdir', 'template', 'testdir', 'logfile']

    if opt_str in parser.rargs:
        fail('%s may only be specified once.' % opt_str)

    if option.dest == 'runfiles':
        parser.values.cmd = 'rdconfig'
        value = set(os.path.abspath(p) for p in value.split(','))
    if option.dest == 'tags':
        value = [x.strip() for x in value.split(',')]

    if option.dest in path_options:
        setattr(parser.values, option.dest, os.path.abspath(value))
    else:
        setattr(parser.values, option.dest, value)


def parse_args():
    parser = OptionParser()
    parser.add_option('-c', action='callback', callback=options_cb,
                      type='string', dest='runfiles', metavar='runfiles',
                      help='Specify tests to run via config files.')
    parser.add_option('-d', action='store_true', default=False, dest='dryrun',
                      help='Dry run. Print tests, but take no other action.')
    parser.add_option('-D', action='store_true', default=False, dest='debug',
                      help='Write all test output to stdout as it arrives.')
    parser.add_option('-l', action='callback', callback=options_cb,
                      default=None, dest='logfile', metavar='logfile',
                      type='string',
                      help='Read logfile and re-run tests which failed.')
    parser.add_option('-g', action='store_true', default=False,
                      dest='do_groups', help='Make directories TestGroups.')
    parser.add_option('-o', action='callback', callback=options_cb,
                      default=BASEDIR, dest='outputdir', type='string',
                      metavar='outputdir', help='Specify an output directory.')
    parser.add_option('-O', action='store_true', default=False,
                      dest='timeout_debug',
                      help='Dump debugging info to /dev/kmsg on test timeout')
    parser.add_option('-i', action='callback', callback=options_cb,
                      default=TESTDIR, dest='testdir', type='string',
                      metavar='testdir', help='Specify a test directory.')
    parser.add_option('-K', action='store_true', default=False, dest='kmsg',
                      help='Log test names to /dev/kmsg')
    parser.add_option('-m', action='callback', callback=kmemleak_cb,
                      default=False, dest='kmemleak',
                      help='Enable kmemleak reporting (Linux only)')
    parser.add_option('-p', action='callback', callback=options_cb,
                      default='', dest='pre', metavar='script',
                      type='string', help='Specify a pre script.')
    parser.add_option('-P', action='callback', callback=options_cb,
                      default='', dest='post', metavar='script',
                      type='string', help='Specify a post script.')
    parser.add_option('-q', action='store_true', default=False, dest='quiet',
                      help='Silence on the console during a test run.')
    parser.add_option('-s', action='callback', callback=options_cb,
                      default='', dest='failsafe', metavar='script',
                      type='string', help='Specify a failsafe script.')
    parser.add_option('-S', action='callback', callback=options_cb,
                      default='', dest='failsafe_user',
                      metavar='failsafe_user', type='string',
                      help='Specify a user to execute the failsafe script.')
    parser.add_option('-t', action='callback', callback=options_cb, default=60,
                      dest='timeout', metavar='seconds', type='int',
                      help='Timeout (in seconds) for an individual test.')
    parser.add_option('-u', action='callback', callback=options_cb,
                      default='', dest='user', metavar='user', type='string',
                      help='Specify a different user name to run as.')
    parser.add_option('-w', action='callback', callback=options_cb,
                      default=None, dest='template', metavar='template',
                      type='string', help='Create a new config file.')
    parser.add_option('-x', action='callback', callback=options_cb, default='',
                      dest='pre_user', metavar='pre_user', type='string',
                      help='Specify a user to execute the pre script.')
    parser.add_option('-X', action='callback', callback=options_cb, default='',
                      dest='post_user', metavar='post_user', type='string',
                      help='Specify a user to execute the post script.')
    parser.add_option('-T', action='callback', callback=options_cb, default='',
                      dest='tags', metavar='tags', type='string',
                      help='Specify tags to execute specific test groups.')
    parser.add_option('-I', action='callback', callback=options_cb, default=1,
                      dest='iterations', metavar='iterations', type='int',
                      help='Number of times to run the test run.')
    (options, pathnames) = parser.parse_args()

    if options.runfiles and len(pathnames):
        fail('Extraneous arguments.')

    options.pathnames = [os.path.abspath(path) for path in pathnames]

    return options


def main():
    options = parse_args()

    try:
        testrun = TestRun(options)

        if options.runfiles:
            testrun.read(options)
        else:
            find_tests(testrun, options)

        if options.logfile:
            filter_tests(testrun, options)

        if options.template:
            testrun.write(options)
            exit(0)

        testrun.complete_outputdirs()
        testrun.run(options)
        exit(testrun.summary())

    except Exception:
        fail("Uncaught exception in test runner:\n" + traceback.format_exc())


if __name__ == '__main__':
    main()
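
# For reference, TestRun.read() consumes configparser-style runfiles passed
# via -c. A minimal sketch of the expected layout is shown below; the section,
# script and test names are illustrative only (real runfiles ship with the
# ZFS Test Suite), and the 'tests'/'tags' values are eval()'d into Python
# lists:
#
#   [DEFAULT]
#   outputdir = /var/tmp/test_results
#   user = root
#   timeout = 600
#   pre = setup
#   post = cleanup
#   tags = ['functional']
#
#   [tests/functional/example]
#   tests = ['example_001_pos', 'example_002_neg']
#   tags = ['functional', 'example']
#
# A section that defines a 'tests' option is treated as a TestGroup; any
# other section is treated as a single Test. Section-level options override
# those in [DEFAULT].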