// Copyright 2014 The Kyua Authors.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors
//   may be used to endorse or promote products derived from this software
//   without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "engine/scheduler.hpp"

extern "C" {
#include <unistd.h>
}

#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <memory>
#include <stdexcept>

#include "engine/config.hpp"
#include "engine/debugger.hpp"
#include "engine/exceptions.hpp"
#include "engine/execenv/execenv.hpp"
#include "engine/requirements.hpp"
#include "model/context.hpp"
#include "model/metadata.hpp"
#include "model/test_case.hpp"
#include "model/test_program.hpp"
#include "model/test_result.hpp"
#include "utils/config/tree.ipp"
#include "utils/datetime.hpp"
#include "utils/defs.hpp"
#include "utils/env.hpp"
#include "utils/format/macros.hpp"
#include "utils/fs/directory.hpp"
#include "utils/fs/exceptions.hpp"
#include "utils/fs/operations.hpp"
#include "utils/fs/path.hpp"
#include "utils/logging/macros.hpp"
#include "utils/noncopyable.hpp"
#include "utils/optional.ipp"
#include "utils/passwd.hpp"
#include "utils/process/executor.ipp"
#include "utils/process/status.hpp"
#include "utils/sanity.hpp"
#include "utils/stacktrace.hpp"
#include "utils/stream.hpp"
#include "utils/text/operations.ipp"

namespace config = utils::config;
namespace datetime = utils::datetime;
namespace execenv = engine::execenv;
namespace executor = utils::process::executor;
namespace fs = utils::fs;
namespace logging = utils::logging;
namespace passwd = utils::passwd;
namespace process = utils::process;
namespace scheduler = engine::scheduler;
namespace text = utils::text;

using utils::none;
using utils::optional;


/// Timeout for the test case cleanup operation.
///
/// TODO(jmmv): This is here only for testing purposes. Maybe we should expose
/// this setting as part of the user_config.
datetime::delta scheduler::cleanup_timeout(300, 0);


/// Timeout for the test case execenv cleanup operation.
datetime::delta scheduler::execenv_cleanup_timeout(300, 0);


/// Timeout for the test case listing operation.
///
/// TODO(jmmv): This is here only for testing purposes. Maybe we should expose
/// this setting as part of the user_config.
datetime::delta scheduler::list_timeout(300, 0);


namespace {


/// Magic exit status to indicate that the test case was probably skipped.
///
/// The test case was skipped if and only if we get this exit code and we find
/// the skipped_cookie file on disk.
static const int exit_skipped = 84;


/// Text file containing the skip reason for the test case.
///
/// This will only be present within unique_work_directory if the test case
/// exited with the exit_skipped code. However, there is no guarantee that the
/// file is there (say if the test really decided to exit with code exit_skipped
/// on its own).
static const char* skipped_cookie = "skipped.txt";


/// Mapping of interface names to interface definitions.
typedef std::map< std::string, std::shared_ptr< scheduler::interface > >
    interfaces_map;


/// Mapping of interface names to interface definitions.
///
/// Use register_interface() to add an entry to this global table.
static interfaces_map interfaces;


/// Scans the contents of a directory and appends the file listing to a file.
///
/// \param dir_path The directory to scan.
/// \param output_file The file to which to append the listing.
///
/// \throw engine::error If there are problems listing the files.
static void
append_files_listing(const fs::path& dir_path, const fs::path& output_file)
{
    std::ofstream output(output_file.c_str(), std::ios::app);
    if (!output)
        throw engine::error(F("Failed to open output file %s for append")
                            % output_file);
    try {
        std::set< std::string > names;

        const fs::directory dir(dir_path);
        for (fs::directory::const_iterator iter = dir.begin();
             iter != dir.end(); ++iter) {
            if (iter->name != "." && iter->name != "..")
                names.insert(iter->name);
        }

        if (!names.empty()) {
            output << "Files left in work directory after failure: "
                   << text::join(names, ", ") << '\n';
        }
    } catch (const fs::error& e) {
        throw engine::error(F("Cannot append files listing to %s: %s")
                            % output_file % e.what());
    }
}


/// Maintenance data held while a test is being executed.
///
/// This data structure exists from the moment when a test is executed via
/// scheduler::spawn_test() or scheduler::impl::spawn_cleanup() to when it is
/// cleaned up with result_handle::cleanup().
///
/// This is a base data type intended to be extended for the test and cleanup
/// cases so that each contains only the relevant data.
struct exec_data : utils::noncopyable {
    /// Test program data for this test case.
    const model::test_program_ptr test_program;

    /// Name of the test case.
    const std::string test_case_name;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    exec_data(const model::test_program_ptr test_program_,
              const std::string& test_case_name_) :
        test_program(test_program_), test_case_name(test_case_name_)
    {
    }

    /// Destructor.
    virtual ~exec_data(void)
    {
    }
};


/// Maintenance data held while a test is being executed.
struct test_exec_data : public exec_data {
    /// Test program-specific execution interface.
    const std::shared_ptr< scheduler::interface > interface;

    /// User configuration passed to the execution of the test. We need this
    /// here to recover it later when chaining the execution of a cleanup
    /// routine (if any).
    const config::tree user_config;

    /// Whether this test case still needs to have its cleanup routine executed.
    ///
    /// This is set externally when the cleanup routine is actually invoked to
    /// denote that no further attempts shall be made at cleaning this up.
    bool needs_cleanup;

    /// Whether this test case still needs to have its execenv cleanup executed.
    ///
    /// This is set externally when the cleanup routine is actually invoked to
    /// denote that no further attempts shall be made at cleaning this up.
    bool needs_execenv_cleanup;

    /// Original PID of the test case subprocess.
    ///
    /// This is used for the cleanup upon termination by a signal, to reap the
    /// leftovers and form the missing exit_handle.
    pid_t pid;

    /// The exit_handle for this test once it has completed.
    ///
    /// This is set externally when the test case has finished, as we need this
    /// information to invoke the followup cleanup routine in the right context,
    /// as indicated by needs_cleanup.
    optional< executor::exit_handle > exit_handle;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param interface_ Test program-specific execution interface.
    /// \param user_config_ User configuration passed to the test.
    /// \param pid_ Original PID of the test case subprocess.
    test_exec_data(const model::test_program_ptr test_program_,
                   const std::string& test_case_name_,
                   const std::shared_ptr< scheduler::interface > interface_,
                   const config::tree& user_config_,
                   const pid_t pid_) :
        exec_data(test_program_, test_case_name_),
        interface(interface_), user_config(user_config_), pid(pid_)
    {
        const model::test_case& test_case = test_program->find(test_case_name);
        needs_cleanup = test_case.get_metadata().has_cleanup();
        needs_execenv_cleanup = test_case.get_metadata().has_execenv();
    }
};


/// Maintenance data held while a test cleanup routine is being executed.
///
/// Instances of this object are related to a previous test_exec_data, as
/// cleanup routines can only exist once the test has been run.
struct cleanup_exec_data : public exec_data {
    /// The exit handle of the test. This is necessary so that we can return
    /// the correct exit_handle to the user of the scheduler.
    executor::exit_handle body_exit_handle;

    /// The final result of the test's body. This is necessary to compute the
    /// right return value for a test with a cleanup routine: the body result is
    /// respected if it is a "bad" result; else the result of the cleanup
    /// routine is used if it has failed.
    model::test_result body_result;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param body_exit_handle_ If not none, exit handle of the body
    ///     corresponding to the cleanup routine represented by this exec_data.
    /// \param body_result_ If not none, result of the body corresponding to the
    ///     cleanup routine represented by this exec_data.
    cleanup_exec_data(const model::test_program_ptr test_program_,
                      const std::string& test_case_name_,
                      const executor::exit_handle& body_exit_handle_,
                      const model::test_result& body_result_) :
        exec_data(test_program_, test_case_name_),
        body_exit_handle(body_exit_handle_), body_result(body_result_)
    {
    }
};


/// Maintenance data held while a test execenv cleanup is being executed.
///
/// Instances of this object are related to a previous test_exec_data, as
/// cleanup routines can only exist once the test has been run.
struct execenv_exec_data : public exec_data {
    /// The exit handle of the test. This is necessary so that we can return
    /// the correct exit_handle to the user of the scheduler.
    executor::exit_handle body_exit_handle;

    /// The final result of the test's body. This is necessary to compute the
    /// right return value for a test with a cleanup routine: the body result is
    /// respected if it is a "bad" result; else the result of the cleanup
    /// routine is used if it has failed.
    model::test_result body_result;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param body_exit_handle_ If not none, exit handle of the body
    ///     corresponding to the cleanup routine represented by this exec_data.
    /// \param body_result_ If not none, result of the body corresponding to the
    ///     cleanup routine represented by this exec_data.
    execenv_exec_data(const model::test_program_ptr test_program_,
                      const std::string& test_case_name_,
                      const executor::exit_handle& body_exit_handle_,
                      const model::test_result& body_result_) :
        exec_data(test_program_, test_case_name_),
        body_exit_handle(body_exit_handle_), body_result(body_result_)
    {
    }
};


/// Shared pointer to exec_data.
///
/// We require this because we want exec_data to not be copyable, and thus we
/// cannot just store it in the map without move constructors.
typedef std::shared_ptr< exec_data > exec_data_ptr;


/// Mapping of active PIDs to their maintenance data.
typedef std::map< int, exec_data_ptr > exec_data_map;


/// Enforces a test program to hold an absolute path.
///
/// TODO(jmmv): This function (which is a pretty ugly hack) exists because we
/// want the interface hooks to receive a test_program as their argument.
/// However, those hooks run after the test program has been isolated, which
/// means that the current directory has changed since when the test_program
/// objects were created. This causes the absolute_path() method of
/// test_program to return bogus values if the internal representation of their
/// path is relative. We should fix this somehow: maybe making the fs module
/// grab its "current_path" view at program startup time; or maybe by grabbing
/// the current path at test_program creation time; or maybe something else.
///
/// \param program The test program to modify.
///
/// \return A new test program whose internal paths are absolute.
static model::test_program
force_absolute_paths(const model::test_program program)
{
    const std::string& relative = program.relative_path().str();
    const std::string absolute = program.absolute_path().str();

    const std::string root = absolute.substr(
        0, absolute.length() - relative.length());

    return model::test_program(
        program.interface_name(),
        program.relative_path(), fs::path(root),
        program.test_suite_name(),
        program.get_metadata(), program.test_cases());
}


/// Functor to list the test cases of a test program.
class list_test_cases {
    /// Interface of the test program to execute.
    std::shared_ptr< scheduler::interface > _interface;

    /// Test program to execute.
    const model::test_program _test_program;

    /// User-provided configuration variables.
    const config::tree& _user_config;

public:
    /// Constructor.
    ///
    /// \param interface Interface of the test program to execute.
    /// \param test_program Test program to execute.
    /// \param user_config User-provided configuration variables.
    list_test_cases(
        const std::shared_ptr< scheduler::interface > interface,
        const model::test_program* test_program,
        const config::tree& user_config) :
        _interface(interface),
        _test_program(force_absolute_paths(*test_program)),
        _user_config(user_config)
    {
    }

    /// Body of the subprocess.
    void
    operator()(const fs::path& /* control_directory */)
    {
        const config::properties_map vars = scheduler::generate_config(
            _user_config, _test_program.test_suite_name());
        _interface->exec_list(_test_program, vars);
    }
};


/// Functor to execute a test program in a child process.
class run_test_program {
    /// Interface of the test program to execute.
    std::shared_ptr< scheduler::interface > _interface;

    /// Test program to execute.
    const model::test_program _test_program;

    /// Name of the test case to execute.
    const std::string& _test_case_name;

    /// User-provided configuration variables.
    const config::tree& _user_config;

    /// Verifies if the test case needs to be skipped or not.
    ///
    /// We could very well run this on the scheduler parent process before
    /// issuing the fork. However, doing this here in the child process is
    /// better for two reasons: first, it allows us to continue using the simple
    /// spawn/wait abstraction of the scheduler; and, second, we parallelize the
    /// requirements checks among tests.
    ///
    /// \post If the test's preconditions are not met, the caller process is
    /// terminated with a special exit code and a "skipped cookie" is written to
    /// the disk with the reason for the failure.
    ///
    /// \param skipped_cookie_path File to create with the skip reason details
    ///     if this test is skipped.
    void
    do_requirements_check(const fs::path& skipped_cookie_path)
    {
        const model::test_case& test_case = _test_program.find(
            _test_case_name);

        const std::string skip_reason = engine::check_reqs(
            test_case.get_metadata(), _user_config,
            _test_program.test_suite_name(),
            fs::current_path());
        if (skip_reason.empty())
            return;

        std::ofstream output(skipped_cookie_path.c_str());
        if (!output) {
            std::perror((F("Failed to open %s for write") %
                         skipped_cookie_path).str().c_str());
            std::abort();
        }
        output << skip_reason;
        output.close();

        // Abruptly terminate the process. We don't want to run any destructors
        // inherited from the parent process by mistake, which could, for
        // example, delete our own control files!
        ::_exit(exit_skipped);
    }

public:
    /// Constructor.
    ///
    /// \param interface Interface of the test program to execute.
    /// \param test_program Test program to execute.
    /// \param test_case_name Name of the test case to execute.
    /// \param user_config User-provided configuration variables.
    run_test_program(
        const std::shared_ptr< scheduler::interface > interface,
        const model::test_program_ptr test_program,
        const std::string& test_case_name,
        const config::tree& user_config) :
        _interface(interface),
        _test_program(force_absolute_paths(*test_program)),
        _test_case_name(test_case_name),
        _user_config(user_config)
    {
    }

    /// Body of the subprocess.
    ///
    /// \param control_directory The testcase directory where files will be
    ///     read from.
    void
    operator()(const fs::path& control_directory)
    {
        const model::test_case& test_case = _test_program.find(
            _test_case_name);
        if (test_case.fake_result())
            ::_exit(EXIT_SUCCESS);

        do_requirements_check(control_directory / skipped_cookie);

        const config::properties_map vars = scheduler::generate_config(
            _user_config, _test_program.test_suite_name());
        _interface->exec_test(_test_program, _test_case_name, vars,
                              control_directory);
    }
};


/// Functor to execute a test cleanup routine in a child process.
class run_test_cleanup {
    /// Interface of the test program to execute.
    std::shared_ptr< scheduler::interface > _interface;

    /// Test program to execute.
    const model::test_program _test_program;

    /// Name of the test case to execute.
    const std::string& _test_case_name;

    /// User-provided configuration variables.
    const config::tree& _user_config;

public:
    /// Constructor.
    ///
    /// \param interface Interface of the test program to execute.
    /// \param test_program Test program to execute.
    /// \param test_case_name Name of the test case to execute.
    /// \param user_config User-provided configuration variables.
    run_test_cleanup(
        const std::shared_ptr< scheduler::interface > interface,
        const model::test_program_ptr test_program,
        const std::string& test_case_name,
        const config::tree& user_config) :
        _interface(interface),
        _test_program(force_absolute_paths(*test_program)),
        _test_case_name(test_case_name),
        _user_config(user_config)
    {
    }

    /// Body of the subprocess.
    ///
    /// \param control_directory The testcase directory where cleanup will be
    ///     run from.
    void
    operator()(const fs::path& control_directory)
    {
        const config::properties_map vars = scheduler::generate_config(
            _user_config, _test_program.test_suite_name());
        _interface->exec_cleanup(_test_program, _test_case_name, vars,
                                 control_directory);
    }
};


/// Functor to execute a test execenv cleanup in a child process.
class run_execenv_cleanup {
    /// Test program to execute.
    const model::test_program _test_program;

    /// Name of the test case to execute.
    const std::string& _test_case_name;

public:
    /// Constructor.
    ///
    /// \param test_program Test program to execute.
    /// \param test_case_name Name of the test case to execute.
    run_execenv_cleanup(
        const model::test_program_ptr test_program,
        const std::string& test_case_name) :
        _test_program(force_absolute_paths(*test_program)),
        _test_case_name(test_case_name)
    {
    }

    /// Body of the subprocess.
    ///
    /// \param control_directory The testcase directory where cleanup will be
    ///     run from.
    void
    operator()(const fs::path& /* control_directory */)
    {
        auto e = execenv::get(_test_program, _test_case_name);
        e->cleanup();
    }
};


/// Obtains the right scheduler interface for a given test program.
///
/// \param name The name of the interface of the test program.
///
/// \return A scheduler interface.
std::shared_ptr< scheduler::interface >
find_interface(const std::string& name)
{
    const interfaces_map::const_iterator iter = interfaces.find(name);
    PRE(interfaces.find(name) != interfaces.end());
    return (*iter).second;
}


}  // anonymous namespace


void
scheduler::interface::exec_cleanup(
    const model::test_program& /* test_program */,
    const std::string& /* test_case_name */,
    const config::properties_map& /* vars */,
    const utils::fs::path& /* control_directory */) const
{
    // Most test interfaces do not support standalone cleanup routines, so we
    // provide this default implementation to spare them from overriding the
    // hook. Interfaces that do support standalone cleanups must override it,
    // hence reaching this code indicates a programming error.
    UNREACHABLE_MSG("exec_cleanup not implemented for an interface that "
                    "supports standalone cleanup routines");
}


/// Internal implementation of a lazy_test_program.
struct engine::scheduler::lazy_test_program::impl : utils::noncopyable {
    /// Whether the test cases list has been yet loaded or not.
    bool _loaded;

    /// User configuration to pass to the test program list operation.
    config::tree _user_config;

    /// Scheduler context to use to load test cases.
    scheduler::scheduler_handle& _scheduler_handle;

    /// Constructor.
    ///
    /// \param user_config_ User configuration to pass to the test program list
    ///     operation.
    /// \param scheduler_handle_ Scheduler context to use when loading test
    ///     cases.
    impl(const config::tree& user_config_,
         scheduler::scheduler_handle& scheduler_handle_) :
        _loaded(false), _user_config(user_config_),
        _scheduler_handle(scheduler_handle_)
    {
    }
};


/// Constructs a new test program.
///
/// \param interface_name_ Name of the test program interface.
/// \param binary_ The name of the test program binary relative to root_.
/// \param root_ The root of the test suite containing the test program.
/// \param test_suite_name_ The name of the test suite this program belongs to.
/// \param md_ Metadata of the test program.
/// \param user_config_ User configuration to pass to the scheduler.
/// \param scheduler_handle_ Scheduler context to use to load test cases.
scheduler::lazy_test_program::lazy_test_program(
    const std::string& interface_name_,
    const fs::path& binary_,
    const fs::path& root_,
    const std::string& test_suite_name_,
    const model::metadata& md_,
    const config::tree& user_config_,
    scheduler::scheduler_handle& scheduler_handle_) :
    test_program(interface_name_, binary_, root_, test_suite_name_, md_,
                 model::test_cases_map()),
    _pimpl(new impl(user_config_, scheduler_handle_))
{
}


/// Gets or loads the list of test cases from the test program.
///
/// \return The list of test cases provided by the test program.
const model::test_cases_map&
scheduler::lazy_test_program::test_cases(void) const
{
    _pimpl->_scheduler_handle.check_interrupt();

    if (!_pimpl->_loaded) {
        const model::test_cases_map tcs = _pimpl->_scheduler_handle.list_tests(
            this, _pimpl->_user_config);

        // Due to the restrictions on when set_test_cases() may be called (as a
        // way to lazily initialize the test cases list before it is ever
        // returned), this cast is valid.
        const_cast< scheduler::lazy_test_program* >(this)->set_test_cases(tcs);

        _pimpl->_loaded = true;

        _pimpl->_scheduler_handle.check_interrupt();
    }

    INV(_pimpl->_loaded);
    return test_program::test_cases();
}


/// Internal implementation for the result_handle class.
struct engine::scheduler::result_handle::bimpl : utils::noncopyable {
    /// Generic executor exit handle for this result handle.
    executor::exit_handle generic;

    /// Mutable pointer to the corresponding scheduler state.
    ///
    /// This object references a member of the scheduler_handle that yielded
    /// this result_handle instance. We need this direct access to clean up
    /// after ourselves when the result is destroyed.
    exec_data_map& all_exec_data;

    /// Constructor.
    ///
    /// \param generic_ Generic executor exit handle for this result handle.
    /// \param [in,out] all_exec_data_ Global object keeping track of all active
    ///     executions for a scheduler. This is a pointer to a member of the
    ///     scheduler_handle object.
    bimpl(const executor::exit_handle generic_, exec_data_map& all_exec_data_) :
        generic(generic_), all_exec_data(all_exec_data_)
    {
    }

    /// Destructor.
    ~bimpl(void)
    {
        LD(F("Removing %s from all_exec_data") % generic.original_pid());
        all_exec_data.erase(generic.original_pid());
    }
};


/// Constructor.
///
/// \param pbimpl Constructed internal implementation.
scheduler::result_handle::result_handle(std::shared_ptr< bimpl > pbimpl) :
    _pbimpl(pbimpl)
{
}


/// Destructor.
scheduler::result_handle::~result_handle(void)
{
}


/// Cleans up the test case results.
///
/// This function should be called explicitly as it provides the means to
/// control any exceptions raised during cleanup. Do not rely on the destructor
/// to clean things up.
///
/// \throw engine::error If the cleanup fails, especially due to the inability
///     to remove the work directory.
void
scheduler::result_handle::cleanup(void)
{
    _pbimpl->generic.cleanup();
}


/// Returns the original PID corresponding to this result.
///
/// \return An exec_handle.
int
scheduler::result_handle::original_pid(void) const
{
    return _pbimpl->generic.original_pid();
}


/// Returns the timestamp of when spawn_test was called.
///
/// \return A timestamp.
const datetime::timestamp&
scheduler::result_handle::start_time(void) const
{
    return _pbimpl->generic.start_time();
}


/// Returns the timestamp of when wait_any() returned this object.
///
/// \return A timestamp.
const datetime::timestamp&
scheduler::result_handle::end_time(void) const
{
    return _pbimpl->generic.end_time();
}


/// Returns the path to the test-specific work directory.
///
/// This is guaranteed to be clear of files created by the scheduler.
///
/// \return The path to a directory that exists until cleanup() is called.
fs::path
scheduler::result_handle::work_directory(void) const
{
    return _pbimpl->generic.work_directory();
}


/// Returns the path to the test's stdout file.
///
/// \return The path to a file that exists until cleanup() is called.
const fs::path&
scheduler::result_handle::stdout_file(void) const
{
    return _pbimpl->generic.stdout_file();
}


/// Returns the path to the test's stderr file.
///
/// \return The path to a file that exists until cleanup() is called.
const fs::path&
scheduler::result_handle::stderr_file(void) const
{
    return _pbimpl->generic.stderr_file();
}


/// Internal implementation for the test_result_handle class.
struct engine::scheduler::test_result_handle::impl : utils::noncopyable {
    /// Test program data for this test case.
    model::test_program_ptr test_program;

    /// Name of the test case.
    std::string test_case_name;

    /// The actual result of the test execution.
    const model::test_result test_result;

    /// Constructor.
    ///
    /// \param test_program_ Test program data for this test case.
    /// \param test_case_name_ Name of the test case.
    /// \param test_result_ The actual result of the test execution.
    impl(const model::test_program_ptr test_program_,
         const std::string& test_case_name_,
         const model::test_result& test_result_) :
        test_program(test_program_),
        test_case_name(test_case_name_),
        test_result(test_result_)
    {
    }
};


/// Constructor.
///
/// \param pbimpl Constructed internal implementation for the base object.
/// \param pimpl Constructed internal implementation.
scheduler::test_result_handle::test_result_handle(
    std::shared_ptr< bimpl > pbimpl, std::shared_ptr< impl > pimpl) :
    result_handle(pbimpl), _pimpl(pimpl)
{
}


/// Destructor.
scheduler::test_result_handle::~test_result_handle(void)
{
}


/// Returns the test program that yielded this result.
///
/// \return A test program.
const model::test_program_ptr
scheduler::test_result_handle::test_program(void) const
{
    return _pimpl->test_program;
}


/// Returns the name of the test case that yielded this result.
///
/// \return A test case name.
const std::string&
scheduler::test_result_handle::test_case_name(void) const
{
    return _pimpl->test_case_name;
}


/// Returns the actual result of the test execution.
///
/// \return A test result.
const model::test_result&
scheduler::test_result_handle::test_result(void) const
{
    return _pimpl->test_result;
}


/// Internal implementation for the scheduler_handle.
struct engine::scheduler::scheduler_handle::impl : utils::noncopyable {
    /// Generic executor instance encapsulated by this one.
    executor::executor_handle generic;

    /// Mapping of exec handles to the data required at run time.
    exec_data_map all_exec_data;

    /// Collection of test_exec_data objects.
    typedef std::vector< const test_exec_data* > test_exec_data_vector;

    /// Constructor.
    impl(void) : generic(executor::setup())
    {
    }

    /// Destructor.
    ///
    /// This runs any pending cleanup routines, which should only happen if the
    /// scheduler is abruptly terminated (e.g. if a signal is received).
    ~impl(void)
    {
        const test_exec_data_vector tests_data = tests_needing_cleanup();

        for (test_exec_data_vector::const_iterator iter = tests_data.begin();
             iter != tests_data.end(); ++iter) {
            const test_exec_data* test_data = *iter;

            try {
                sync_cleanup(test_data);
            } catch (const std::runtime_error& e) {
                LW(F("Failed to run cleanup routine for %s:%s on abrupt "
                     "termination")
                   % test_data->test_program->relative_path()
                   % test_data->test_case_name);
            }
        }

        const test_exec_data_vector td = tests_needing_execenv_cleanup();

        for (test_exec_data_vector::const_iterator iter = td.begin();
             iter != td.end(); ++iter) {
            const test_exec_data* test_data = *iter;

            try {
                sync_execenv_cleanup(test_data);
            } catch (const std::runtime_error& e) {
                LW(F("Failed to run execenv cleanup routine for %s:%s on "
                     "abrupt termination")
                   % test_data->test_program->relative_path()
                   % test_data->test_case_name);
            }
        }
    }

    /// Finds any pending exec_datas that correspond to tests needing cleanup.
    ///
    /// \return The collection of test_exec_data objects that have their
    ///     needs_cleanup property set to true.
    test_exec_data_vector
    tests_needing_cleanup(void)
    {
        test_exec_data_vector tests_data;

        for (exec_data_map::const_iterator iter = all_exec_data.begin();
             iter != all_exec_data.end(); ++iter) {
            const exec_data_ptr data = (*iter).second;

            try {
                test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
                    *data.get());
                if (test_data->needs_cleanup) {
                    tests_data.push_back(test_data);
                    test_data->needs_cleanup = false;
                    if (!test_data->exit_handle)
                        test_data->exit_handle = generic.reap(test_data->pid);
                }
            } catch (const std::bad_cast& e) {
                // Do nothing for cleanup_exec_data objects.
            }
        }

        return tests_data;
    }

    /// Finds any pending exec_datas that correspond to tests needing execenv
    /// cleanup.
    ///
    /// \return The collection of test_exec_data objects that have their
    ///     specific execenv property set.
    test_exec_data_vector
    tests_needing_execenv_cleanup(void)
    {
        test_exec_data_vector tests_data;

        for (exec_data_map::const_iterator iter = all_exec_data.begin();
             iter != all_exec_data.end(); ++iter) {
            const exec_data_ptr data = (*iter).second;

            try {
                test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
                    *data.get());
                if (test_data->needs_execenv_cleanup) {
                    tests_data.push_back(test_data);
                    test_data->needs_execenv_cleanup = false;
                    if (!test_data->exit_handle)
                        test_data->exit_handle = generic.reap(test_data->pid);
                }
            } catch (const std::bad_cast& e) {
                // Do nothing for other objects.
            }
        }

        return tests_data;
    }

    /// Cleans up a single test case synchronously.
    ///
    /// \param test_data The data of the previously executed test case to be
    ///     cleaned up.
    void
    sync_cleanup(const test_exec_data* test_data)
    {
        // The message in this result should never be seen by the user, but use
        // something reasonable just in case it leaks and we need to pinpoint
        // the call site.
        model::test_result result(model::test_result_broken,
                                  "Test case died abruptly");

        const executor::exec_handle cleanup_handle = spawn_cleanup(
            test_data->test_program, test_data->test_case_name,
            test_data->user_config, test_data->exit_handle.get(),
            result);
        generic.wait(cleanup_handle);
    }

    /// Forks and executes a test case cleanup routine asynchronously.
    ///
    /// \param test_program The container test program.
    /// \param test_case_name The name of the test case to run.
    /// \param user_config User-provided configuration variables.
    /// \param body_handle The exit handle of the test case's corresponding
    ///     body. The cleanup will be executed in the same context.
    /// \param body_result The result of the test case's corresponding body.
    ///
    /// \return A handle for the background operation. Used to match the result
    ///     of the execution returned by wait_any() with this invocation.
    executor::exec_handle
    spawn_cleanup(const model::test_program_ptr test_program,
                  const std::string& test_case_name,
                  const config::tree& user_config,
                  const executor::exit_handle& body_handle,
                  const model::test_result& body_result)
    {
        generic.check_interrupt();

        const std::shared_ptr< scheduler::interface > interface =
            find_interface(test_program->interface_name());

        LI(F("Spawning %s:%s (cleanup)") % test_program->absolute_path() %
           test_case_name);

        const executor::exec_handle handle = generic.spawn_followup(
            run_test_cleanup(interface, test_program, test_case_name,
                             user_config),
            body_handle, cleanup_timeout);

        const exec_data_ptr data(new cleanup_exec_data(
            test_program, test_case_name, body_handle, body_result));
        LD(F("Inserting %s into all_exec_data (cleanup)") % handle.pid());
        INV_MSG(all_exec_data.find(handle.pid()) == all_exec_data.end(),
                F("PID %s already in all_exec_data; not properly cleaned "
                  "up or reused too fast") % handle.pid());
        all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));

        return handle;
    }

    /// Cleans up a single test case execenv synchronously.
    ///
    /// \param test_data The data of the previously executed test case to be
    ///     cleaned up.
    void
    sync_execenv_cleanup(const test_exec_data* test_data)
    {
        // The message in this result should never be seen by the user, but use
        // something reasonable just in case it leaks and we need to pinpoint
        // the call site.
        model::test_result result(model::test_result_broken,
                                  "Test case died abruptly");

        const executor::exec_handle cleanup_handle = spawn_execenv_cleanup(
            test_data->test_program, test_data->test_case_name,
            test_data->exit_handle.get(), result);
        generic.wait(cleanup_handle);
    }

    /// Forks and executes a test case execenv cleanup asynchronously.
    ///
    /// \param test_program The container test program.
    /// \param test_case_name The name of the test case to run.
    /// \param body_handle The exit handle of the test case's corresponding
    ///     body. The cleanup will be executed in the same context.
    /// \param body_result The result of the test case's corresponding body.
    ///
    /// \return A handle for the background operation. Used to match the result
    ///     of the execution returned by wait_any() with this invocation.
    executor::exec_handle
    spawn_execenv_cleanup(const model::test_program_ptr test_program,
                          const std::string& test_case_name,
                          const executor::exit_handle& body_handle,
                          const model::test_result& body_result)
    {
        generic.check_interrupt();

        LI(F("Spawning %s:%s (execenv cleanup)")
           % test_program->absolute_path() % test_case_name);

        const executor::exec_handle handle = generic.spawn_followup(
            run_execenv_cleanup(test_program, test_case_name),
            body_handle, execenv_cleanup_timeout);

        const exec_data_ptr data(new execenv_exec_data(
            test_program, test_case_name, body_handle, body_result));
        LD(F("Inserting %s into all_exec_data (execenv cleanup)")
           % handle.pid());
        INV_MSG(all_exec_data.find(handle.pid()) == all_exec_data.end(),
                F("PID %s already in all_exec_data; not properly cleaned "
                  "up or reused too fast") % handle.pid());
        all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));

        return handle;
    }
};


/// Constructor.
scheduler::scheduler_handle::scheduler_handle(void) : _pimpl(new impl())
{
}


/// Destructor.
scheduler::scheduler_handle::~scheduler_handle(void)
{
}


/// Queries the path to the root of the work directory for all tests.
///
/// \return A path.
const fs::path&
scheduler::scheduler_handle::root_work_directory(void) const
{
    return _pimpl->generic.root_work_directory();
}


/// Cleans up the scheduler state.
///
/// This function should be called explicitly as it provides the means to
/// control any exceptions raised during cleanup. Do not rely on the destructor
/// to clean things up.
///
/// \throw engine::error If there are problems cleaning up the scheduler.
void
scheduler::scheduler_handle::cleanup(void)
{
    _pimpl->generic.cleanup();
}


/// Checks if the given interface name is valid.
///
/// \param name The name of the interface to validate.
///
/// \throw engine::error If the given interface is not supported.
void
scheduler::ensure_valid_interface(const std::string& name)
{
    if (interfaces.find(name) == interfaces.end())
        throw engine::error(F("Unsupported test interface '%s'") % name);
}


/// Registers a new interface.
///
/// \param name The name of the interface. Must not have yet been registered.
/// \param spec Interface specification.
void
scheduler::register_interface(const std::string& name,
                              const std::shared_ptr< interface > spec)
{
    PRE(interfaces.find(name) == interfaces.end());
    interfaces.insert(interfaces_map::value_type(name, spec));
}


/// Returns the names of all registered interfaces.
///
/// \return A collection of interface names.
std::set< std::string >
scheduler::registered_interface_names(void)
{
    std::set< std::string > names;
    for (interfaces_map::const_iterator iter = interfaces.begin();
         iter != interfaces.end(); ++iter) {
        names.insert((*iter).first);
    }
    return names;
}


/// Initializes the scheduler.
///
/// \pre This function can only be called if there is no other scheduler_handle
/// object alive.
///
/// \return A handle to the operations of the scheduler.
scheduler::scheduler_handle
scheduler::setup(void)
{
    return scheduler_handle();
}


/// Retrieves the list of test cases from a test program.
///
/// This operation is currently synchronous.
///
/// This operation should never throw. Any errors during the processing of the
/// test case list are subsumed into a single test case in the return value that
/// represents the failed retrieval.
///
/// \param test_program The test program from which to obtain the list of test
///     cases.
/// \param user_config User-provided configuration variables.
///
/// \return The list of test cases.
model::test_cases_map
scheduler::scheduler_handle::list_tests(
    const model::test_program* test_program,
    const config::tree& user_config)
{
    _pimpl->generic.check_interrupt();

    const std::shared_ptr< scheduler::interface > interface = find_interface(
        test_program->interface_name());

    try {
        const executor::exec_handle exec_handle = _pimpl->generic.spawn(
            list_test_cases(interface, test_program, user_config),
            list_timeout, none);
        executor::exit_handle exit_handle = _pimpl->generic.wait(exec_handle);

        const model::test_cases_map test_cases = interface->parse_list(
            exit_handle.status(),
            exit_handle.stdout_file(),
            exit_handle.stderr_file());

        exit_handle.cleanup();

        if (test_cases.empty())
            throw std::runtime_error("Empty test cases list");

        return test_cases;
    } catch (const std::runtime_error& e) {
        // TODO(jmmv): This is a very ugly workaround for the fact that we
        // cannot report failures at the test-program level.
        LW(F("Failed to load test cases list: %s") % e.what());
        model::test_cases_map fake_test_cases;
        fake_test_cases.insert(model::test_cases_map::value_type(
            "__test_cases_list__",
            model::test_case(
                "__test_cases_list__",
                "Represents the correct processing of the test cases list",
                model::test_result(model::test_result_broken, e.what()))));
        return fake_test_cases;
    }
}


/// Forks and executes a test case asynchronously.
///
/// Note that the caller needn't know if the test has a cleanup routine or not.
/// If there indeed is a cleanup routine, we trigger it at wait_any() time.
///
/// \param test_program The container test program.
/// \param test_case_name The name of the test case to run.
/// \param user_config User-provided configuration variables.
///
/// \return A handle for the background operation. Used to match the result of
///     the execution returned by wait_any() with this invocation.
scheduler::exec_handle
scheduler::scheduler_handle::spawn_test(
    const model::test_program_ptr test_program,
    const std::string& test_case_name,
    const config::tree& user_config)
{
    _pimpl->generic.check_interrupt();

    const std::shared_ptr< scheduler::interface > interface = find_interface(
        test_program->interface_name());

    LI(F("Spawning %s:%s") % test_program->absolute_path() % test_case_name);

    const model::test_case& test_case = test_program->find(test_case_name);

    optional< passwd::user > unprivileged_user;
    if (user_config.is_set("unprivileged_user") &&
        test_case.get_metadata().required_user() == "unprivileged") {
        unprivileged_user = user_config.lookup< engine::user_node >(
            "unprivileged_user");
    }

    const executor::exec_handle handle = _pimpl->generic.spawn(
        run_test_program(interface, test_program, test_case_name,
                         user_config),
        test_case.get_metadata().timeout(),
        unprivileged_user);

    const exec_data_ptr data(new test_exec_data(
        test_program, test_case_name, interface, user_config, handle.pid()));
    LD(F("Inserting %s into all_exec_data") % handle.pid());
    INV_MSG(
        _pimpl->all_exec_data.find(handle.pid()) == _pimpl->all_exec_data.end(),
        F("PID %s already in all_exec_data; not cleaned up or reused too fast")
        % handle.pid());
    _pimpl->all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));

    return handle.pid();
}


/// Waits for completion of any forked test case.
///
/// Note that if the terminated test case has a cleanup routine, this function
/// is the one in charge of spawning the cleanup routine asynchronously.
///
/// \return The result of the execution of a subprocess. This is a dynamically
///     allocated object because the scheduler can spawn subprocesses of various
///     types and, at wait time, we don't know upfront what we are going to get.
scheduler::result_handle_ptr
scheduler::scheduler_handle::wait_any(void)
{
    _pimpl->generic.check_interrupt();

    executor::exit_handle handle = _pimpl->generic.wait_any();

    const exec_data_map::iterator iter = _pimpl->all_exec_data.find(
        handle.original_pid());
    exec_data_ptr data = (*iter).second;

    utils::dump_stacktrace_if_available(data->test_program->absolute_path(),
                                        _pimpl->generic, handle);

    optional< model::test_result > result;

    // test itself
    try {
        test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
            *data.get());
        LD(F("Got %s from all_exec_data") % handle.original_pid());

        test_data->exit_handle = handle;

        const model::test_case& test_case = test_data->test_program->find(
            test_data->test_case_name);

        result = test_case.fake_result();

        if (!result && handle.status() && handle.status().get().exited() &&
            handle.status().get().exitstatus() == exit_skipped) {
            // If the test's process terminated with our magic "exit_skipped"
            // status, there are two cases to handle. The first is the case
            // where the "skipped cookie" exists, in which case we never got to
            // actually invoke the test program; if that's the case, handle it
            // here. The second case is where the test case actually decided to
            // exit with the "exit_skipped" status; in that case, just fall back
            // to the regular status handling.
            const fs::path skipped_cookie_path = handle.control_directory() /
                skipped_cookie;
            std::ifstream input(skipped_cookie_path.c_str());
            if (input) {
                result = model::test_result(model::test_result_skipped,
                                            utils::read_stream(input));
                input.close();

                // If we determined that the test needs to be skipped, we do not
                // want to run the cleanup routine because doing so could result
                // in errors. However, we still want to run the cleanup routine
                // if the test's body reports a skip (because actions could have
                // already been taken).
                test_data->needs_cleanup = false;
                test_data->needs_execenv_cleanup = false;
            }
        }
        if (!result) {
            result = test_data->interface->compute_result(
                handle.status(),
                handle.control_directory(),
                handle.stdout_file(),
                handle.stderr_file());
        }
        INV(result);

        if (!result.get().good()) {
            append_files_listing(handle.work_directory(),
                                 handle.stderr_file());
        }

        std::shared_ptr< debugger > debugger = test_case.get_debugger();
        if (debugger) {
            debugger->before_cleanup(test_data->test_program, test_case,
                                     result, handle);
        }

        if (test_data->needs_cleanup) {
            INV(test_case.get_metadata().has_cleanup());

            // The test body has completed and we have processed it. If there
            // is a cleanup routine, trigger it now and wait for any other test
            // completion. The caller never knows about cleanup routines.
            _pimpl->spawn_cleanup(test_data->test_program,
                                  test_data->test_case_name,
                                  test_data->user_config, handle, result.get());

            // TODO(jmmv): Chaining this call is ugly. We'd be better off by
            // looping over terminated processes until we got a result suitable
            // for user consumption. For the time being this is good enough and
            // not a problem because the call chain won't get big: the majority
            // of test cases do not have cleanup routines.
            return wait_any();
        }

        if (test_data->needs_execenv_cleanup) {
            INV(test_case.get_metadata().has_execenv());
            _pimpl->spawn_execenv_cleanup(test_data->test_program,
                                          test_data->test_case_name,
                                          handle, result.get());
            test_data->needs_execenv_cleanup = false;
            return wait_any();
        }
    } catch (const std::bad_cast& e) {
        // ok, let's check for another type
    }

    // test cleanup
    try {
        const cleanup_exec_data* cleanup_data =
            &dynamic_cast< const cleanup_exec_data& >(*data.get());
        LD(F("Got %s from all_exec_data (cleanup)") % handle.original_pid());

        // Handle the completion of cleanup subprocesses internally: the caller
        // is not aware that these exist so, when we return, we must return the
        // data for the original test that triggered this routine. This is
        // because, for example, the caller wants to see the exact same
        // exec_handle that was returned by spawn_test.

        const model::test_result& body_result = cleanup_data->body_result;
        if (body_result.good()) {
            if (!handle.status()) {
                result = model::test_result(model::test_result_broken,
                                            "Test case cleanup timed out");
            } else {
                if (!handle.status().get().exited() ||
                    handle.status().get().exitstatus() != EXIT_SUCCESS) {
                    result = model::test_result(
                        model::test_result_broken,
                        "Test case cleanup did not terminate successfully");
                } else {
                    result = body_result;
                }
            }
        } else {
            result = body_result;
        }

        // Untrack the cleanup process. This must be done explicitly because we
        // do not create a result_handle object for the cleanup, and that is the
        // one in charge of doing so in the regular (non-cleanup) case.
        LD(F("Removing %s from all_exec_data (cleanup) in favor of %s")
           % handle.original_pid()
           % cleanup_data->body_exit_handle.original_pid());
        _pimpl->all_exec_data.erase(handle.original_pid());

        handle = cleanup_data->body_exit_handle;

        const exec_data_map::iterator it = _pimpl->all_exec_data.find(
            handle.original_pid());
        if (it != _pimpl->all_exec_data.end()) {
            exec_data_ptr d = (*it).second;
            test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
                *d.get());
            const model::test_case& test_case =
                cleanup_data->test_program->find(cleanup_data->test_case_name);
            test_data->needs_cleanup = false;

            if (test_data->needs_execenv_cleanup) {
                INV(test_case.get_metadata().has_execenv());
                _pimpl->spawn_execenv_cleanup(cleanup_data->test_program,
                                              cleanup_data->test_case_name,
                                              handle, result.get());
                test_data->needs_execenv_cleanup = false;
                return wait_any();
            }
        }
    } catch (const std::bad_cast& e) {
        // ok, let's check for another type
    }

    // execenv cleanup
    try {
        const execenv_exec_data* execenv_data =
            &dynamic_cast< const execenv_exec_data& >(*data.get());
        LD(F("Got %s from all_exec_data (execenv cleanup)")
           % handle.original_pid());

        const model::test_result& body_result = execenv_data->body_result;
        if (body_result.good()) {
            if (!handle.status()) {
                result = model::test_result(
                    model::test_result_broken,
                    "Test case execenv cleanup timed out");
            } else {
                if (!handle.status().get().exited() ||
                    handle.status().get().exitstatus() != EXIT_SUCCESS) {
                    result = model::test_result(
                        model::test_result_broken,
                        "Test case execenv cleanup did not terminate "
                        "successfully");
                } else {
                    result = body_result;
                }
            }
        } else {
            result = body_result;
        }

        LD(F("Removing %s from all_exec_data (execenv cleanup) in favor of %s")
           % handle.original_pid()
           % execenv_data->body_exit_handle.original_pid());
        _pimpl->all_exec_data.erase(handle.original_pid());

        handle = execenv_data->body_exit_handle;
    } catch (const std::bad_cast& e) {
        // ok, it was one of the types above
    }

    INV(result);

    std::shared_ptr< result_handle::bimpl > result_handle_bimpl(
        new result_handle::bimpl(handle, _pimpl->all_exec_data));
    std::shared_ptr< test_result_handle::impl > test_result_handle_impl(
        new test_result_handle::impl(
            data->test_program, data->test_case_name, result.get()));
    return result_handle_ptr(new test_result_handle(result_handle_bimpl,
                                                    test_result_handle_impl));
}


/// Forks and executes a test case synchronously for debugging.
///
/// \pre No other processes should be in execution by the scheduler.
///
/// \param test_program The container test program.
/// \param test_case_name The name of the test case to run.
/// \param user_config User-provided configuration variables.
/// \param stdout_target File to which to write the stdout of the test case.
/// \param stderr_target File to which to write the stderr of the test case.
///
/// \return The result of the execution of the test.
scheduler::result_handle_ptr
scheduler::scheduler_handle::debug_test(
    const model::test_program_ptr test_program,
    const std::string& test_case_name,
    const config::tree& user_config,
    const fs::path& stdout_target,
    const fs::path& stderr_target)
{
    const exec_handle exec_handle = spawn_test(
        test_program, test_case_name, user_config);
    result_handle_ptr result_handle = wait_any();

    // TODO(jmmv): We need to do this while the subprocess is alive. This is
    // important for debugging purposes, as we should see the contents of stdout
    // or stderr as they come in.
    //
    // Unfortunately, we cannot do so. We cannot just read and block from a
    // file, waiting for further output to appear... as this only works on pipes
    // or sockets. We need a better interface for this whole thing.
    {
        std::unique_ptr< std::ostream > output = utils::open_ostream(
            stdout_target);
        *output << utils::read_file(result_handle->stdout_file());
    }
    {
        std::unique_ptr< std::ostream > output = utils::open_ostream(
            stderr_target);
        *output << utils::read_file(result_handle->stderr_file());
    }

    INV(result_handle->original_pid() == exec_handle);
    return result_handle;
}


/// Checks if an interrupt has fired.
///
/// Calls to this function should be sprinkled in strategic places through the
/// code protected by an interrupts_handler object.
///
/// This is just a wrapper over signals::check_interrupt() to avoid leaking this
/// dependency to the caller.
///
/// \throw signals::interrupted_error If there has been an interrupt.
void
scheduler::scheduler_handle::check_interrupt(void) const
{
    _pimpl->generic.check_interrupt();
}


/// Queries the current execution context.
///
/// \return The queried context.
model::context
scheduler::current_context(void)
{
    return model::context(fs::current_path(), utils::getallenv());
}


/// Generates the set of configuration variables for a test program.
///
/// \param user_config The configuration variables provided by the user.
/// \param test_suite The name of the test suite.
///
/// \return The mapping of configuration variables for the test program.
config::properties_map
scheduler::generate_config(const config::tree& user_config,
                           const std::string& test_suite)
{
    config::properties_map props;

    try {
        props = user_config.all_properties(F("test_suites.%s") % test_suite,
                                           true);
    } catch (const config::unknown_key_error& unused_error) {
        // Ignore: not all test suites have entries in the configuration.
    }

    // TODO(jmmv): This is a hack that exists for the ATF interface only, so it
    // should be moved there.
    if (user_config.is_set("unprivileged_user")) {
        const passwd::user& user =
            user_config.lookup< engine::user_node >("unprivileged_user");
        // The property is duplicated using both ATF and Kyua naming styles
        // for better UX.
        props["unprivileged-user"] = user.name;
        props["unprivileged_user"] = user.name;
    }

    return props;
}
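

// Usage sketch (kept as a comment so it does not affect the build): a rough
// outline of how a driver can use the public API defined in this module. It
// only relies on entities declared above (setup(), spawn_test(), wait_any(),
// result_handle/test_result_handle, cleanup()); the 'program',
// 'test_case_name' and 'user_config' values are placeholders for data the
// caller obtains elsewhere (e.g. from the Kyuafile loader), and the
// dynamic_cast reflects that wait_any() currently always yields a
// test_result_handle, not a contract promised by this file.
//
//     scheduler::scheduler_handle handle = scheduler::setup();
//
//     // Spawn the test cases first; the scheduler tracks each one by the PID
//     // of its subprocess.
//     for (/* each (program, test_case_name) pair to run */)
//         handle.spawn_test(program, test_case_name, user_config);
//
//     // Collect results in completion order. Cleanup and execenv cleanup
//     // routines, if any, are chained transparently inside wait_any().
//     for (/* as many iterations as tests were spawned */) {
//         scheduler::result_handle_ptr result = handle.wait_any();
//         const scheduler::test_result_handle* test_result =
//             dynamic_cast< const scheduler::test_result_handle* >(
//                 result.get());
//         /* ... record test_result->test_result() ... */
//         result->cleanup();  // Removes the test's work directory.
//     }
//
//     handle.cleanup();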