1# SPDX-License-Identifier: GPL-2.0 2# 3# Parses KTAP test results from a kernel dmesg log and incrementally prints 4# results with reader-friendly format. Stores and returns test results in a 5# Test object. 6# 7# Copyright (C) 2019, Google LLC. 8# Author: Felix Guo <felixguoxiuping@gmail.com> 9# Author: Brendan Higgins <brendanhiggins@google.com> 10# Author: Rae Moar <rmoar@google.com> 11 12from __future__ import annotations 13from dataclasses import dataclass 14import re 15import sys 16 17from enum import Enum, auto 18from typing import Iterable, Iterator, List, Optional, Tuple 19 20from kunit_printer import stdout 21 22class Test: 23 """ 24 A class to represent a test parsed from KTAP results. All KTAP 25 results within a test log are stored in a main Test object as 26 subtests. 27 28 Attributes: 29 status : TestStatus - status of the test 30 name : str - name of the test 31 expected_count : int - expected number of subtests (0 if single 32 test case and None if unknown expected number of subtests) 33 subtests : List[Test] - list of subtests 34 log : List[str] - log of KTAP lines that correspond to the test 35 counts : TestCounts - counts of the test statuses and errors of 36 subtests or of the test itself if the test is a single 37 test case. 38 """ 39 def __init__(self) -> None: 40 """Creates Test object with default attributes.""" 41 self.status = TestStatus.TEST_CRASHED 42 self.name = '' 43 self.expected_count = 0 # type: Optional[int] 44 self.subtests = [] # type: List[Test] 45 self.log = [] # type: List[str] 46 self.counts = TestCounts() 47 48 def __str__(self) -> str: 49 """Returns string representation of a Test class object.""" 50 return (f'Test({self.status}, {self.name}, {self.expected_count}, ' 51 f'{self.subtests}, {self.log}, {self.counts})') 52 53 def __repr__(self) -> str: 54 """Returns string representation of a Test class object.""" 55 return str(self) 56 57 def add_error(self, error_message: str) -> None: 58 """Records an error that occurred while parsing this test.""" 59 self.counts.errors += 1 60 stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}') 61 62 def ok_status(self) -> bool: 63 """Returns true if the status was ok, i.e. passed or skipped.""" 64 return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED) 65 66class TestStatus(Enum): 67 """An enumeration class to represent the status of a test.""" 68 SUCCESS = auto() 69 FAILURE = auto() 70 SKIPPED = auto() 71 TEST_CRASHED = auto() 72 NO_TESTS = auto() 73 FAILURE_TO_PARSE_TESTS = auto() 74 75@dataclass 76class TestCounts: 77 """ 78 Tracks the counts of statuses of all test cases and any errors within 79 a Test. 80 """ 81 passed: int = 0 82 failed: int = 0 83 crashed: int = 0 84 skipped: int = 0 85 errors: int = 0 86 87 def __str__(self) -> str: 88 """Returns the string representation of a TestCounts object.""" 89 statuses = [('passed', self.passed), ('failed', self.failed), 90 ('crashed', self.crashed), ('skipped', self.skipped), 91 ('errors', self.errors)] 92 return f'Ran {self.total()} tests: ' + \ 93 ', '.join(f'{s}: {n}' for s, n in statuses if n > 0) 94 95 def total(self) -> int: 96 """Returns the total number of test cases within a test 97 object, where a test case is a test with no subtests. 98 """ 99 return (self.passed + self.failed + self.crashed + 100 self.skipped) 101 102 def add_subtest_counts(self, counts: TestCounts) -> None: 103 """ 104 Adds the counts of another TestCounts object to the current 105 TestCounts object. Used to add the counts of a subtest to the 106 parent test. 107 108 Parameters: 109 counts - a different TestCounts object whose counts 110 will be added to the counts of the TestCounts object 111 """ 112 self.passed += counts.passed 113 self.failed += counts.failed 114 self.crashed += counts.crashed 115 self.skipped += counts.skipped 116 self.errors += counts.errors 117 118 def get_status(self) -> TestStatus: 119 """Returns the aggregated status of a Test using test 120 counts. 121 """ 122 if self.total() == 0: 123 return TestStatus.NO_TESTS 124 if self.crashed: 125 # Crashes should take priority. 126 return TestStatus.TEST_CRASHED 127 if self.failed: 128 return TestStatus.FAILURE 129 if self.passed: 130 # No failures or crashes, looks good! 131 return TestStatus.SUCCESS 132 # We have only skipped tests. 133 return TestStatus.SKIPPED 134 135 def add_status(self, status: TestStatus) -> None: 136 """Increments the count for `status`.""" 137 if status == TestStatus.SUCCESS: 138 self.passed += 1 139 elif status == TestStatus.FAILURE: 140 self.failed += 1 141 elif status == TestStatus.SKIPPED: 142 self.skipped += 1 143 elif status != TestStatus.NO_TESTS: 144 self.crashed += 1 145 146class LineStream: 147 """ 148 A class to represent the lines of kernel output. 149 Provides a lazy peek()/pop() interface over an iterator of 150 (line#, text). 151 """ 152 _lines: Iterator[Tuple[int, str]] 153 _next: Tuple[int, str] 154 _need_next: bool 155 _done: bool 156 157 def __init__(self, lines: Iterator[Tuple[int, str]]): 158 """Creates a new LineStream that wraps the given iterator.""" 159 self._lines = lines 160 self._done = False 161 self._need_next = True 162 self._next = (0, '') 163 164 def _get_next(self) -> None: 165 """Advances the LineSteam to the next line, if necessary.""" 166 if not self._need_next: 167 return 168 try: 169 self._next = next(self._lines) 170 except StopIteration: 171 self._done = True 172 finally: 173 self._need_next = False 174 175 def peek(self) -> str: 176 """Returns the current line, without advancing the LineStream. 177 """ 178 self._get_next() 179 return self._next[1] 180 181 def pop(self) -> str: 182 """Returns the current line and advances the LineStream to 183 the next line. 184 """ 185 s = self.peek() 186 if self._done: 187 raise ValueError(f'LineStream: going past EOF, last line was {s}') 188 self._need_next = True 189 return s 190 191 def __bool__(self) -> bool: 192 """Returns True if stream has more lines.""" 193 self._get_next() 194 return not self._done 195 196 # Only used by kunit_tool_test.py. 197 def __iter__(self) -> Iterator[str]: 198 """Empties all lines stored in LineStream object into 199 Iterator object and returns the Iterator object. 200 """ 201 while bool(self): 202 yield self.pop() 203 204 def line_number(self) -> int: 205 """Returns the line number of the current line.""" 206 self._get_next() 207 return self._next[0] 208 209# Parsing helper methods: 210 211KTAP_START = re.compile(r'KTAP version ([0-9]+)$') 212TAP_START = re.compile(r'TAP version ([0-9]+)$') 213KTAP_END = re.compile('(List of all partitions:|' 214 'Kernel panic - not syncing: VFS:|reboot: System halted)') 215 216def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream: 217 """Extracts KTAP lines from the kernel output.""" 218 def isolate_ktap_output(kernel_output: Iterable[str]) \ 219 -> Iterator[Tuple[int, str]]: 220 line_num = 0 221 started = False 222 for line in kernel_output: 223 line_num += 1 224 line = line.rstrip() # remove trailing \n 225 if not started and KTAP_START.search(line): 226 # start extracting KTAP lines and set prefix 227 # to number of characters before version line 228 prefix_len = len( 229 line.split('KTAP version')[0]) 230 started = True 231 yield line_num, line[prefix_len:] 232 elif not started and TAP_START.search(line): 233 # start extracting KTAP lines and set prefix 234 # to number of characters before version line 235 prefix_len = len(line.split('TAP version')[0]) 236 started = True 237 yield line_num, line[prefix_len:] 238 elif started and KTAP_END.search(line): 239 # stop extracting KTAP lines 240 break 241 elif started: 242 # remove the prefix and optionally any leading 243 # whitespace. Our parsing logic relies on this. 244 line = line[prefix_len:] 245 if lstrip: 246 line = line.lstrip() 247 yield line_num, line 248 return LineStream(lines=isolate_ktap_output(kernel_output)) 249 250KTAP_VERSIONS = [1] 251TAP_VERSIONS = [13, 14] 252 253def check_version(version_num: int, accepted_versions: List[int], 254 version_type: str, test: Test) -> None: 255 """ 256 Adds error to test object if version number is too high or too 257 low. 258 259 Parameters: 260 version_num - The inputted version number from the parsed KTAP or TAP 261 header line 262 accepted_version - List of accepted KTAP or TAP versions 263 version_type - 'KTAP' or 'TAP' depending on the type of 264 version line. 265 test - Test object for current test being parsed 266 """ 267 if version_num < min(accepted_versions): 268 test.add_error(f'{version_type} version lower than expected!') 269 elif version_num > max(accepted_versions): 270 test.add_error(f'{version_type} version higer than expected!') 271 272def parse_ktap_header(lines: LineStream, test: Test) -> bool: 273 """ 274 Parses KTAP/TAP header line and checks version number. 275 Returns False if fails to parse KTAP/TAP header line. 276 277 Accepted formats: 278 - 'KTAP version [version number]' 279 - 'TAP version [version number]' 280 281 Parameters: 282 lines - LineStream of KTAP output to parse 283 test - Test object for current test being parsed 284 285 Return: 286 True if successfully parsed KTAP/TAP header line 287 """ 288 ktap_match = KTAP_START.match(lines.peek()) 289 tap_match = TAP_START.match(lines.peek()) 290 if ktap_match: 291 version_num = int(ktap_match.group(1)) 292 check_version(version_num, KTAP_VERSIONS, 'KTAP', test) 293 elif tap_match: 294 version_num = int(tap_match.group(1)) 295 check_version(version_num, TAP_VERSIONS, 'TAP', test) 296 else: 297 return False 298 test.log.append(lines.pop()) 299 return True 300 301TEST_HEADER = re.compile(r'^# Subtest: (.*)$') 302 303def parse_test_header(lines: LineStream, test: Test) -> bool: 304 """ 305 Parses test header and stores test name in test object. 306 Returns False if fails to parse test header line. 307 308 Accepted format: 309 - '# Subtest: [test name]' 310 311 Parameters: 312 lines - LineStream of KTAP output to parse 313 test - Test object for current test being parsed 314 315 Return: 316 True if successfully parsed test header line 317 """ 318 match = TEST_HEADER.match(lines.peek()) 319 if not match: 320 return False 321 test.log.append(lines.pop()) 322 test.name = match.group(1) 323 return True 324 325TEST_PLAN = re.compile(r'1\.\.([0-9]+)') 326 327def parse_test_plan(lines: LineStream, test: Test) -> bool: 328 """ 329 Parses test plan line and stores the expected number of subtests in 330 test object. Reports an error if expected count is 0. 331 Returns False and sets expected_count to None if there is no valid test 332 plan. 333 334 Accepted format: 335 - '1..[number of subtests]' 336 337 Parameters: 338 lines - LineStream of KTAP output to parse 339 test - Test object for current test being parsed 340 341 Return: 342 True if successfully parsed test plan line 343 """ 344 match = TEST_PLAN.match(lines.peek()) 345 if not match: 346 test.expected_count = None 347 return False 348 test.log.append(lines.pop()) 349 expected_count = int(match.group(1)) 350 test.expected_count = expected_count 351 return True 352 353TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') 354 355TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$') 356 357def peek_test_name_match(lines: LineStream, test: Test) -> bool: 358 """ 359 Matches current line with the format of a test result line and checks 360 if the name matches the name of the current test. 361 Returns False if fails to match format or name. 362 363 Accepted format: 364 - '[ok|not ok] [test number] [-] [test name] [optional skip 365 directive]' 366 367 Parameters: 368 lines - LineStream of KTAP output to parse 369 test - Test object for current test being parsed 370 371 Return: 372 True if matched a test result line and the name matching the 373 expected test name 374 """ 375 line = lines.peek() 376 match = TEST_RESULT.match(line) 377 if not match: 378 return False 379 name = match.group(4) 380 return name == test.name 381 382def parse_test_result(lines: LineStream, test: Test, 383 expected_num: int) -> bool: 384 """ 385 Parses test result line and stores the status and name in the test 386 object. Reports an error if the test number does not match expected 387 test number. 388 Returns False if fails to parse test result line. 389 390 Note that the SKIP directive is the only direction that causes a 391 change in status. 392 393 Accepted format: 394 - '[ok|not ok] [test number] [-] [test name] [optional skip 395 directive]' 396 397 Parameters: 398 lines - LineStream of KTAP output to parse 399 test - Test object for current test being parsed 400 expected_num - expected test number for current test 401 402 Return: 403 True if successfully parsed a test result line. 404 """ 405 line = lines.peek() 406 match = TEST_RESULT.match(line) 407 skip_match = TEST_RESULT_SKIP.match(line) 408 409 # Check if line matches test result line format 410 if not match: 411 return False 412 test.log.append(lines.pop()) 413 414 # Set name of test object 415 if skip_match: 416 test.name = skip_match.group(4) 417 else: 418 test.name = match.group(4) 419 420 # Check test num 421 num = int(match.group(2)) 422 if num != expected_num: 423 test.add_error(f'Expected test number {expected_num} but found {num}') 424 425 # Set status of test object 426 status = match.group(1) 427 if skip_match: 428 test.status = TestStatus.SKIPPED 429 elif status == 'ok': 430 test.status = TestStatus.SUCCESS 431 else: 432 test.status = TestStatus.FAILURE 433 return True 434 435def parse_diagnostic(lines: LineStream) -> List[str]: 436 """ 437 Parse lines that do not match the format of a test result line or 438 test header line and returns them in list. 439 440 Line formats that are not parsed: 441 - '# Subtest: [test name]' 442 - '[ok|not ok] [test number] [-] [test name] [optional skip 443 directive]' 444 - 'KTAP version [version number]' 445 446 Parameters: 447 lines - LineStream of KTAP output to parse 448 449 Return: 450 Log of diagnostic lines 451 """ 452 log = [] # type: List[str] 453 non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START] 454 while lines and not any(re.match(lines.peek()) 455 for re in non_diagnostic_lines): 456 log.append(lines.pop()) 457 return log 458 459 460# Printing helper methods: 461 462DIVIDER = '=' * 60 463 464def format_test_divider(message: str, len_message: int) -> str: 465 """ 466 Returns string with message centered in fixed width divider. 467 468 Example: 469 '===================== message example =====================' 470 471 Parameters: 472 message - message to be centered in divider line 473 len_message - length of the message to be printed such that 474 any characters of the color codes are not counted 475 476 Return: 477 String containing message centered in fixed width divider 478 """ 479 default_count = 3 # default number of dashes 480 len_1 = default_count 481 len_2 = default_count 482 difference = len(DIVIDER) - len_message - 2 # 2 spaces added 483 if difference > 0: 484 # calculate number of dashes for each side of the divider 485 len_1 = int(difference / 2) 486 len_2 = difference - len_1 487 return ('=' * len_1) + f' {message} ' + ('=' * len_2) 488 489def print_test_header(test: Test) -> None: 490 """ 491 Prints test header with test name and optionally the expected number 492 of subtests. 493 494 Example: 495 '=================== example (2 subtests) ===================' 496 497 Parameters: 498 test - Test object representing current test being printed 499 """ 500 message = test.name 501 if message != "": 502 # Add a leading space before the subtest counts only if a test name 503 # is provided using a "# Subtest" header line. 504 message += " " 505 if test.expected_count: 506 if test.expected_count == 1: 507 message += '(1 subtest)' 508 else: 509 message += f'({test.expected_count} subtests)' 510 stdout.print_with_timestamp(format_test_divider(message, len(message))) 511 512def print_log(log: Iterable[str]) -> None: 513 """Prints all strings in saved log for test in yellow.""" 514 for m in log: 515 stdout.print_with_timestamp(stdout.yellow(m)) 516 517def format_test_result(test: Test) -> str: 518 """ 519 Returns string with formatted test result with colored status and test 520 name. 521 522 Example: 523 '[PASSED] example' 524 525 Parameters: 526 test - Test object representing current test being printed 527 528 Return: 529 String containing formatted test result 530 """ 531 if test.status == TestStatus.SUCCESS: 532 return stdout.green('[PASSED] ') + test.name 533 if test.status == TestStatus.SKIPPED: 534 return stdout.yellow('[SKIPPED] ') + test.name 535 if test.status == TestStatus.NO_TESTS: 536 return stdout.yellow('[NO TESTS RUN] ') + test.name 537 if test.status == TestStatus.TEST_CRASHED: 538 print_log(test.log) 539 return stdout.red('[CRASHED] ') + test.name 540 print_log(test.log) 541 return stdout.red('[FAILED] ') + test.name 542 543def print_test_result(test: Test) -> None: 544 """ 545 Prints result line with status of test. 546 547 Example: 548 '[PASSED] example' 549 550 Parameters: 551 test - Test object representing current test being printed 552 """ 553 stdout.print_with_timestamp(format_test_result(test)) 554 555def print_test_footer(test: Test) -> None: 556 """ 557 Prints test footer with status of test. 558 559 Example: 560 '===================== [PASSED] example =====================' 561 562 Parameters: 563 test - Test object representing current test being printed 564 """ 565 message = format_test_result(test) 566 stdout.print_with_timestamp(format_test_divider(message, 567 len(message) - stdout.color_len())) 568 569 570 571def _summarize_failed_tests(test: Test) -> str: 572 """Tries to summarize all the failing subtests in `test`.""" 573 574 def failed_names(test: Test, parent_name: str) -> List[str]: 575 # Note: we use 'main' internally for the top-level test. 576 if not parent_name or parent_name == 'main': 577 full_name = test.name 578 else: 579 full_name = parent_name + '.' + test.name 580 581 if not test.subtests: # this is a leaf node 582 return [full_name] 583 584 # If all the children failed, just say this subtest failed. 585 # Don't summarize it down "the top-level test failed", though. 586 failed_subtests = [sub for sub in test.subtests if not sub.ok_status()] 587 if parent_name and len(failed_subtests) == len(test.subtests): 588 return [full_name] 589 590 all_failures = [] # type: List[str] 591 for t in failed_subtests: 592 all_failures.extend(failed_names(t, full_name)) 593 return all_failures 594 595 failures = failed_names(test, '') 596 # If there are too many failures, printing them out will just be noisy. 597 if len(failures) > 10: # this is an arbitrary limit 598 return '' 599 600 return 'Failures: ' + ', '.join(failures) 601 602 603def print_summary_line(test: Test) -> None: 604 """ 605 Prints summary line of test object. Color of line is dependent on 606 status of test. Color is green if test passes, yellow if test is 607 skipped, and red if the test fails or crashes. Summary line contains 608 counts of the statuses of the tests subtests or the test itself if it 609 has no subtests. 610 611 Example: 612 "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0, 613 Errors: 0" 614 615 test - Test object representing current test being printed 616 """ 617 if test.status == TestStatus.SUCCESS: 618 color = stdout.green 619 elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS): 620 color = stdout.yellow 621 else: 622 color = stdout.red 623 stdout.print_with_timestamp(color(f'Testing complete. {test.counts}')) 624 625 # Summarize failures that might have gone off-screen since we had a lot 626 # of tests (arbitrarily defined as >=100 for now). 627 if test.ok_status() or test.counts.total() < 100: 628 return 629 summarized = _summarize_failed_tests(test) 630 if not summarized: 631 return 632 stdout.print_with_timestamp(color(summarized)) 633 634# Other methods: 635 636def bubble_up_test_results(test: Test) -> None: 637 """ 638 If the test has subtests, add the test counts of the subtests to the 639 test and check if any of the tests crashed and if so set the test 640 status to crashed. Otherwise if the test has no subtests add the 641 status of the test to the test counts. 642 643 Parameters: 644 test - Test object for current test being parsed 645 """ 646 subtests = test.subtests 647 counts = test.counts 648 status = test.status 649 for t in subtests: 650 counts.add_subtest_counts(t.counts) 651 if counts.total() == 0: 652 counts.add_status(status) 653 elif test.counts.get_status() == TestStatus.TEST_CRASHED: 654 test.status = TestStatus.TEST_CRASHED 655 656def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test: 657 """ 658 Finds next test to parse in LineStream, creates new Test object, 659 parses any subtests of the test, populates Test object with all 660 information (status, name) about the test and the Test objects for 661 any subtests, and then returns the Test object. The method accepts 662 three formats of tests: 663 664 Accepted test formats: 665 666 - Main KTAP/TAP header 667 668 Example: 669 670 KTAP version 1 671 1..4 672 [subtests] 673 674 - Subtest header (must include either the KTAP version line or 675 "# Subtest" header line) 676 677 Example (preferred format with both KTAP version line and 678 "# Subtest" line): 679 680 KTAP version 1 681 # Subtest: name 682 1..3 683 [subtests] 684 ok 1 name 685 686 Example (only "# Subtest" line): 687 688 # Subtest: name 689 1..3 690 [subtests] 691 ok 1 name 692 693 Example (only KTAP version line, compliant with KTAP v1 spec): 694 695 KTAP version 1 696 1..3 697 [subtests] 698 ok 1 name 699 700 - Test result line 701 702 Example: 703 704 ok 1 - test 705 706 Parameters: 707 lines - LineStream of KTAP output to parse 708 expected_num - expected test number for test to be parsed 709 log - list of strings containing any preceding diagnostic lines 710 corresponding to the current test 711 is_subtest - boolean indicating whether test is a subtest 712 713 Return: 714 Test object populated with characteristics and any subtests 715 """ 716 test = Test() 717 test.log.extend(log) 718 if not is_subtest: 719 # If parsing the main/top-level test, parse KTAP version line and 720 # test plan 721 test.name = "main" 722 ktap_line = parse_ktap_header(lines, test) 723 parse_test_plan(lines, test) 724 parent_test = True 725 else: 726 # If not the main test, attempt to parse a test header containing 727 # the KTAP version line and/or subtest header line 728 ktap_line = parse_ktap_header(lines, test) 729 subtest_line = parse_test_header(lines, test) 730 parent_test = (ktap_line or subtest_line) 731 if parent_test: 732 # If KTAP version line and/or subtest header is found, attempt 733 # to parse test plan and print test header 734 parse_test_plan(lines, test) 735 print_test_header(test) 736 expected_count = test.expected_count 737 subtests = [] 738 test_num = 1 739 while parent_test and (expected_count is None or test_num <= expected_count): 740 # Loop to parse any subtests. 741 # Break after parsing expected number of tests or 742 # if expected number of tests is unknown break when test 743 # result line with matching name to subtest header is found 744 # or no more lines in stream. 745 sub_log = parse_diagnostic(lines) 746 sub_test = Test() 747 if not lines or (peek_test_name_match(lines, test) and 748 is_subtest): 749 if expected_count and test_num <= expected_count: 750 # If parser reaches end of test before 751 # parsing expected number of subtests, print 752 # crashed subtest and record error 753 test.add_error('missing expected subtest!') 754 sub_test.log.extend(sub_log) 755 test.counts.add_status( 756 TestStatus.TEST_CRASHED) 757 print_test_result(sub_test) 758 else: 759 test.log.extend(sub_log) 760 break 761 else: 762 sub_test = parse_test(lines, test_num, sub_log, True) 763 subtests.append(sub_test) 764 test_num += 1 765 test.subtests = subtests 766 if is_subtest: 767 # If not main test, look for test result line 768 test.log.extend(parse_diagnostic(lines)) 769 if test.name != "" and not peek_test_name_match(lines, test): 770 test.add_error('missing subtest result line!') 771 else: 772 parse_test_result(lines, test, expected_num) 773 774 # Check for there being no subtests within parent test 775 if parent_test and len(subtests) == 0: 776 # Don't override a bad status if this test had one reported. 777 # Assumption: no subtests means CRASHED is from Test.__init__() 778 if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS): 779 test.status = TestStatus.NO_TESTS 780 test.add_error('0 tests run!') 781 782 # Add statuses to TestCounts attribute in Test object 783 bubble_up_test_results(test) 784 if parent_test and is_subtest: 785 # If test has subtests and is not the main test object, print 786 # footer. 787 print_test_footer(test) 788 elif is_subtest: 789 print_test_result(test) 790 return test 791 792def parse_run_tests(kernel_output: Iterable[str]) -> Test: 793 """ 794 Using kernel output, extract KTAP lines, parse the lines for test 795 results and print condensed test results and summary line. 796 797 Parameters: 798 kernel_output - Iterable object contains lines of kernel output 799 800 Return: 801 Test - the main test object with all subtests. 802 """ 803 stdout.print_with_timestamp(DIVIDER) 804 lines = extract_tap_lines(kernel_output) 805 test = Test() 806 if not lines: 807 test.name = '<missing>' 808 test.add_error('Could not find any KTAP output. Did any KUnit tests run?') 809 test.status = TestStatus.FAILURE_TO_PARSE_TESTS 810 else: 811 test = parse_test(lines, 0, [], False) 812 if test.status != TestStatus.NO_TESTS: 813 test.status = test.counts.get_status() 814 stdout.print_with_timestamp(DIVIDER) 815 print_summary_line(test) 816 return test 817