# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results with reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
# Author: Rae Moar <rmoar@google.com>

from __future__ import annotations
import re
import sys

import datetime
from enum import Enum, auto
from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple

class Test(object):
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : Optional[int] - expected number of subtests (0 if
		single test case and None if unknown expected number of
		subtests)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0 # type: Optional[int]
		self.subtests = [] # type: List[Test]
		self.log = [] # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return ('Test(' + str(self.status) + ', ' + self.name +
			', ' + str(self.expected_count) + ', ' +
			str(self.subtests) + ', ' + str(self.log) + ', ' +
			str(self.counts) + ')')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		print_error('Test ' + self.name + ': ' + error_message)

class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.

	Attributes:
	passed : int - the number of tests that have passed
	failed : int - the number of tests that have failed
	crashed : int - the number of tests that have crashed
	skipped : int - the number of tests that have skipped
	errors : int - the number of errors in the test and subtests
	"""
	def __init__(self) -> None:
		"""Creates TestCounts object with counts of all test
		statuses and test errors set to 0.
		"""
		self.passed = 0
		self.failed = 0
		self.crashed = 0
		self.skipped = 0
		self.errors = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object.
		"""
		statuses = [('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed), ('skipped', self.skipped),
			('errors', self.errors)]
		return f'Ran {self.total()} tests: ' + \
			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		elif self.crashed:
			# If one of the subtests crash, the expected status
			# of the Test is crashed.
			return TestStatus.TEST_CRASHED
		elif self.failed:
			# Otherwise if one of the subtests fail, the
			# expected status of the Test is failed.
			return TestStatus.FAILURE
		elif self.passed:
			# Otherwise if one of the subtests pass, the
			# expected status of the Test is passed.
			return TestStatus.SUCCESS
		else:
			# Finally, if none of the subtests have failed,
			# crashed, or passed, the expected status of the
			# Test is skipped.
			return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""
		Increments count of inputted status.

		Parameters:
		status - status to be added to the TestCounts object
		"""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1

class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Empties all lines stored in LineStream object into
		Iterator object and returns the Iterator object.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]

# Parsing helper methods:

KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
TAP_START = re.compile(r'TAP version ([0-9]+)$')
KTAP_END = re.compile('(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')

def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip()  # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove prefix and any indention and yield
				# line with line number
				line = line[prefix_len:].lstrip()
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))

KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds error to test object if version number is too high or too
	low.

	Parameters:
	version_num - The inputted version number from the parsed KTAP or TAP
		header line
	accepted_version - List of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(version_type +
			' version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(
			version_type + ' version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if fails to parse KTAP/TAP header line.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	test.log.append(lines.pop())
	return True

TEST_HEADER = re.compile(r'^# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if fails to parse test header line.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.log.append(lines.pop())
	test.name = match.group(1)
	return True

TEST_PLAN = re.compile(r'1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses test plan line and stores the expected number of subtests in
	test object.
	Returns False and sets expected_count to None if there is no valid test
	plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	test.log.append(lines.pop())
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	return True

TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if fails to match format or name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if matched a test result line and the name matching the
	expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return (name == test.name)

def parse_test_result(lines: LineStream, test: Test,
			expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match expected
	test number.
	Returns False if fails to parse test result line.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	test.log.append(lines.pop())

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error('Expected test number ' +
			str(expected_num) + ' but found ' + str(num))

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True

def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parse lines that do not match the format of a test result line or
	test header line and returns them in list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = [] # type: List[str]
	while lines and not TEST_RESULT.match(lines.peek()) and not \
			TEST_HEADER.match(lines.peek()):
		log.append(lines.pop())
	return log


# Printing helper methods:

DIVIDER = '=' * 60

RESET = '\033[0;0m'

def red(text: str) -> str:
	"""Returns inputted string with red color code."""
	if not sys.stdout.isatty():
		return text
	return '\033[1;31m' + text + RESET

def yellow(text: str) -> str:
	"""Returns inputted string with yellow color code."""
	if not sys.stdout.isatty():
		return text
	return '\033[1;33m' + text + RESET

def green(text: str) -> str:
	"""Returns inputted string with green color code."""
	if not sys.stdout.isatty():
		return text
	return '\033[1;32m' + text + RESET

ANSI_LEN = len(red(''))

def print_with_timestamp(message: str) -> None:
	"""Prints message with timestamp at beginning."""
	print('[%s] %s' % (datetime.datetime.now().strftime('%H:%M:%S'), message))

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed such that
		any characters of the color codes are not counted

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3  # default number of dashes
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2  # 2 spaces added
	if difference > 0:
		# calculate number of dashes for each side of the divider
		len_1 = int(difference / 2)
		len_2 = difference - len_1
	return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)

def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if test.expected_count:
		if test.expected_count == 1:
			message += (' (' + str(test.expected_count) +
				' subtest)')
		else:
			message += (' (' + str(test.expected_count) +
				' subtests)')
	print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""
	Prints all strings in saved log for test in yellow.

	Parameters:
	log - Iterable object with all strings saved in log for test
	"""
	for m in log:
		print_with_timestamp(yellow(m))

def format_test_result(test: Test) -> str:
	"""
	Returns string with formatted test result with colored status and test
	name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return (green('[PASSED] ') + test.name)
	elif test.status == TestStatus.SKIPPED:
		return (yellow('[SKIPPED] ') + test.name)
	elif test.status == TestStatus.NO_TESTS:
		return (yellow('[NO TESTS RUN] ') + test.name)
	elif test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return (red('[CRASHED] ') + test.name)
	else:
		print_log(test.log)
		return (red('[FAILED] ') + test.name)

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	print_with_timestamp(format_test_divider(message,
		len(message) - ANSI_LEN))

def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the tests subtests or the test itself if it
	has no subtests.

	Example:
	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
	Errors: 0"

	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = green
	elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS:
		color = yellow
	else:
		color = red
	counts = test.counts
	print_with_timestamp(color('Testing complete. ' + str(counts)))

def print_error(error_message: str) -> None:
	"""
	Prints error message with error format.

	Example:
	"[ERROR] Test example: missing test plan!"

	Parameters:
	error_message - message describing error
	"""
	print_with_timestamp(red('[ERROR] ') + error_message)

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, add the test counts of the subtests to the
	test and check if any of the tests crashed and if so set the test
	status to crashed. Otherwise if the test has no subtests add the
	status of the test to the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED

def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
	"""
	Finds next test to parse in LineStream, creates new Test object,
	parses any subtests of the test, populates Test object with all
	information (status, name) about the test and the Test objects for
	any subtests, and then returns the Test object. The method accepts
	three formats of tests:

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header line

	Example:

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)
	parent_test = False
	main = parse_ktap_header(lines, test)
	if main:
		# If KTAP/TAP header is found, attempt to parse
		# test plan
		test.name = "main"
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If KTAP/TAP header is not found, test must be subtest
		# header or test result line so attempt to parse
		# subtest header
		parent_test = parse_test_header(lines, test)
		if parent_test:
			# If subtest header is found, attempt to parse
			# test plan and print header
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing expected number of tests or
		# if expected number of tests is unknown break when test
		# result line with matching name to subtest header is found
		# or no more lines in stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				not main):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
				break
		else:
			sub_test = parse_test(lines, test_num, sub_log)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if not main:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if (parent_test and peek_test_name_match(lines, test)) or \
				not parent_test:
			parse_test_result(lines, test, expected_num)
		else:
			test.add_error('missing subtest result line!')

	# Check for there being no tests
	if parent_test and len(subtests) == 0:
		test.status = TestStatus.NO_TESTS
		test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and not main:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif not main:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Using kernel output, extract KTAP lines, parse the lines for test
	results and print condensed test results and summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('could not find any KTAP output!')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [])
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test