xref: /freebsd/contrib/kyua/engine/scheduler.cpp (revision 96190b4fef3b4a0cc3ca0606b0c4e3e69a5e6717)
1 // Copyright 2014 The Kyua Authors.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 //   notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above copyright
11 //   notice, this list of conditions and the following disclaimer in the
12 //   documentation and/or other materials provided with the distribution.
13 // * Neither the name of Google Inc. nor the names of its contributors
14 //   may be used to endorse or promote products derived from this software
15 //   without specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 
29 #include "engine/scheduler.hpp"
30 
31 extern "C" {
32 #include <unistd.h>
33 }
34 
35 #include <cstdio>
36 #include <cstdlib>
37 #include <fstream>
38 #include <memory>
39 #include <stdexcept>
40 
41 #include "engine/config.hpp"
42 #include "engine/exceptions.hpp"
43 #include "engine/execenv/execenv.hpp"
44 #include "engine/requirements.hpp"
45 #include "model/context.hpp"
46 #include "model/metadata.hpp"
47 #include "model/test_case.hpp"
48 #include "model/test_program.hpp"
49 #include "model/test_result.hpp"
50 #include "utils/config/tree.ipp"
51 #include "utils/datetime.hpp"
52 #include "utils/defs.hpp"
53 #include "utils/env.hpp"
54 #include "utils/format/macros.hpp"
55 #include "utils/fs/directory.hpp"
56 #include "utils/fs/exceptions.hpp"
57 #include "utils/fs/operations.hpp"
58 #include "utils/fs/path.hpp"
59 #include "utils/logging/macros.hpp"
60 #include "utils/noncopyable.hpp"
61 #include "utils/optional.ipp"
62 #include "utils/passwd.hpp"
63 #include "utils/process/executor.ipp"
64 #include "utils/process/status.hpp"
65 #include "utils/sanity.hpp"
66 #include "utils/stacktrace.hpp"
67 #include "utils/stream.hpp"
68 #include "utils/text/operations.ipp"
69 
70 namespace config = utils::config;
71 namespace datetime = utils::datetime;
72 namespace execenv = engine::execenv;
73 namespace executor = utils::process::executor;
74 namespace fs = utils::fs;
75 namespace logging = utils::logging;
76 namespace passwd = utils::passwd;
77 namespace process = utils::process;
78 namespace scheduler = engine::scheduler;
79 namespace text = utils::text;
80 
81 using utils::none;
82 using utils::optional;
83 
84 
85 /// Timeout for the test case cleanup operation.
86 ///
87 /// TODO(jmmv): This is here only for testing purposes.  Maybe we should expose
88 /// this setting as part of the user_config.
89 datetime::delta scheduler::cleanup_timeout(60, 0);
90 
91 
92 /// Timeout for the test case execenv cleanup operation.
93 datetime::delta scheduler::execenv_cleanup_timeout(60, 0);
94 
95 
96 /// Timeout for the test case listing operation.
97 ///
98 /// TODO(jmmv): This is here only for testing purposes.  Maybe we should expose
99 /// this setting as part of the user_config.
100 datetime::delta scheduler::list_timeout(300, 0);
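
// These timeouts are mutable on purpose ("for testing purposes" above): a
// test for the scheduler itself may shorten them before exercising timeout
// handling.  A minimal sketch, assuming such a test exists:
//
//     scheduler::cleanup_timeout = datetime::delta(1, 0);
//     scheduler::list_timeout = datetime::delta(5, 0);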
101 
102 
103 namespace {
104 
105 
106 /// Magic exit status to indicate that the test case was probably skipped.
107 ///
108 /// The test case was skipped if and only if this exit code is returned and
109 /// the skipped_cookie file is found on disk.
110 static const int exit_skipped = 84;
111 
112 
113 /// Text file containing the skip reason for the test case.
114 ///
115 /// This will only be present within the test case's control directory if the
116 /// test case exited with the exit_skipped code.  However, there is no guarantee
117 /// that the file is there (e.g. if the test itself decided to exit with code
118 /// exit_skipped on its own).
119 static const char* skipped_cookie = "skipped.txt";
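
// Illustrative summary of the skip protocol implemented below: the child
// writes the skip reason to skipped_cookie inside its control directory and
// then calls ::_exit(exit_skipped); wait_any() later maps that combination
// to a model::test_result_skipped result.  See
// run_test_program::do_requirements_check() and scheduler_handle::wait_any().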
120 
121 
122 /// Mapping of interface names to interface definitions.
123 typedef std::map< std::string, std::shared_ptr< scheduler::interface > >
124     interfaces_map;
125 
126 
127 /// Mapping of interface names to interface definitions.
128 ///
129 /// Use register_interface() to add an entry to this global table.
130 static interfaces_map interfaces;
131 
132 
133 /// Scans the contents of a directory and appends the file listing to a file.
134 ///
135 /// \param dir_path The directory to scan.
136 /// \param output_file The file to which to append the listing.
137 ///
138 /// \throw engine::error If there are problems listing the files.
139 static void
140 append_files_listing(const fs::path& dir_path, const fs::path& output_file)
141 {
142     std::ofstream output(output_file.c_str(), std::ios::app);
143     if (!output)
144         throw engine::error(F("Failed to open output file %s for append")
145                             % output_file);
146     try {
147         std::set< std::string > names;
148 
149         const fs::directory dir(dir_path);
150         for (fs::directory::const_iterator iter = dir.begin();
151              iter != dir.end(); ++iter) {
152             if (iter->name != "." && iter->name != "..")
153                 names.insert(iter->name);
154         }
155 
156         if (!names.empty()) {
157             output << "Files left in work directory after failure: "
158                    << text::join(names, ", ") << '\n';
159         }
160     } catch (const fs::error& e) {
161         throw engine::error(F("Cannot append files listing to %s: %s")
162                             % output_file % e.what());
163     }
164 }
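
// Illustrative example of the output: if a failed test leaves "core" and
// "junk.txt" behind in its work directory, the function above appends a
// single line of this form to the test's stderr file:
//
//     Files left in work directory after failure: core, junk.txt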
165 
166 
167 /// Maintenance data held while a test is being executed.
168 ///
169 /// This data structure exists from the moment when a test is executed via
170 /// scheduler::spawn_test() or scheduler::impl::spawn_cleanup() to when it is
171 /// cleaned up with result_handle::cleanup().
172 ///
173 /// This is a base data type intended to be extended for the test and cleanup
174 /// cases so that each contains only the relevant data.
175 struct exec_data : utils::noncopyable {
176     /// Test program data for this test case.
177     const model::test_program_ptr test_program;
178 
179     /// Name of the test case.
180     const std::string test_case_name;
181 
182     /// Constructor.
183     ///
184     /// \param test_program_ Test program data for this test case.
185     /// \param test_case_name_ Name of the test case.
186     exec_data(const model::test_program_ptr test_program_,
187               const std::string& test_case_name_) :
188         test_program(test_program_), test_case_name(test_case_name_)
189     {
190     }
191 
192     /// Destructor.
193     virtual ~exec_data(void)
194     {
195     }
196 };
197 
198 
199 /// Maintenance data held while a test is being executed.
200 struct test_exec_data : public exec_data {
201     /// Test program-specific execution interface.
202     const std::shared_ptr< scheduler::interface > interface;
203 
204     /// User configuration passed to the execution of the test.  We need this
205     /// here to recover it later when chaining the execution of a cleanup
206     /// routine (if any).
207     const config::tree user_config;
208 
209     /// Whether this test case still needs to have its cleanup routine executed.
210     ///
211     /// This is set externally when the cleanup routine is actually invoked to
212     /// denote that no further attempts shall be made at cleaning this up.
213     bool needs_cleanup;
214 
215     /// Whether this test case still needs to have its execenv cleanup executed.
216     ///
217     /// This is set externally when the cleanup routine is actually invoked to
218     /// denote that no further attempts shall be made at cleaning this up.
219     bool needs_execenv_cleanup;
220 
221     /// Original PID of the test case subprocess.
222     ///
223     /// This is used for the cleanup upon termination by a signal, to reap the
224     /// leftover process and construct the missing exit_handle.
225     pid_t pid;
226 
227     /// The exit_handle for this test once it has completed.
228     ///
229     /// This is set externally when the test case has finished, as we need this
230     /// information to invoke the followup cleanup routine in the right context,
231     /// as indicated by needs_cleanup.
232     optional< executor::exit_handle > exit_handle;
233 
234     /// Constructor.
235     ///
236     /// \param test_program_ Test program data for this test case.
237     /// \param test_case_name_ Name of the test case.
238     /// \param interface_ Test program-specific execution interface.
239     /// \param user_config_ User configuration passed to the test.
240     test_exec_data(const model::test_program_ptr test_program_,
241                    const std::string& test_case_name_,
242                    const std::shared_ptr< scheduler::interface > interface_,
243                    const config::tree& user_config_,
244                    const pid_t pid_) :
245         exec_data(test_program_, test_case_name_),
246         interface(interface_), user_config(user_config_), pid(pid_)
247     {
248         const model::test_case& test_case = test_program->find(test_case_name);
249         needs_cleanup = test_case.get_metadata().has_cleanup();
250         needs_execenv_cleanup = test_case.get_metadata().has_execenv();
251     }
252 };
253 
254 
255 /// Maintenance data held while a test cleanup routine is being executed.
256 ///
257 /// Instances of this object are related to a previous test_exec_data, as
258 /// cleanup routines can only exist once the test has been run.
259 struct cleanup_exec_data : public exec_data {
260     /// The exit handle of the test.  This is necessary so that we can return
261     /// the correct exit_handle to the user of the scheduler.
262     executor::exit_handle body_exit_handle;
263 
264     /// The final result of the test's body.  This is necessary to compute the
265     /// right return value for a test with a cleanup routine: the body result is
266     /// respected if it is a "bad" result; else the result of the cleanup
267     /// routine is used if it has failed.
268     model::test_result body_result;
269 
270     /// Constructor.
271     ///
272     /// \param test_program_ Test program data for this test case.
273     /// \param test_case_name_ Name of the test case.
274     /// \param body_exit_handle_ Exit handle of the body corresponding to the
275     ///     cleanup routine represented by this exec_data.
276     /// \param body_result_ Result of the body corresponding to the cleanup
277     ///     routine represented by this exec_data.
278     cleanup_exec_data(const model::test_program_ptr test_program_,
279                       const std::string& test_case_name_,
280                       const executor::exit_handle& body_exit_handle_,
281                       const model::test_result& body_result_) :
282         exec_data(test_program_, test_case_name_),
283         body_exit_handle(body_exit_handle_), body_result(body_result_)
284     {
285     }
286 };
287 
288 
289 /// Maintenance data held while a test execenv cleanup is being executed.
290 ///
291 /// Instances of this object are related to a previous test_exec_data, as
292 /// cleanup routines can only exist once the test has been run.
293 struct execenv_exec_data : public exec_data {
294     /// The exit handle of the test.  This is necessary so that we can return
295     /// the correct exit_handle to the user of the scheduler.
296     executor::exit_handle body_exit_handle;
297 
298     /// The final result of the test's body.  This is necessary to compute the
299     /// right return value for a test with a cleanup routine: the body result is
300     /// respected if it is a "bad" result; else the result of the cleanup
301     /// routine is used if it has failed.
302     model::test_result body_result;
303 
304     /// Constructor.
305     ///
306     /// \param test_program_ Test program data for this test case.
307     /// \param test_case_name_ Name of the test case.
308     /// \param body_exit_handle_ Exit handle of the body corresponding to the
309     ///     execenv cleanup represented by this exec_data.
310     /// \param body_result_ Result of the body corresponding to the execenv
311     ///     cleanup represented by this exec_data.
312     execenv_exec_data(const model::test_program_ptr test_program_,
313                       const std::string& test_case_name_,
314                       const executor::exit_handle& body_exit_handle_,
315                       const model::test_result& body_result_) :
316         exec_data(test_program_, test_case_name_),
317         body_exit_handle(body_exit_handle_), body_result(body_result_)
318     {
319     }
320 };
321 
322 
323 /// Shared pointer to exec_data.
324 ///
325 /// We require this because exec_data is intentionally not copyable, and thus
326 /// we cannot just store it in the map by value (there are no move constructors).
327 typedef std::shared_ptr< exec_data > exec_data_ptr;
328 
329 
330 /// Mapping of active PIDs to their maintenance data.
331 typedef std::map< int, exec_data_ptr > exec_data_map;
332 
333 
334 /// Forces a test program to hold an absolute path.
335 ///
336 /// TODO(jmmv): This function (which is a pretty ugly hack) exists because we
337 /// want the interface hooks to receive a test_program as their argument.
338 /// However, those hooks run after the test program has been isolated, which
339 /// means that the current directory has changed since when the test_program
340 /// objects were created.  This causes the absolute_path() method of
341 /// test_program to return bogus values if the internal representation of their
342 /// path is relative.  We should fix this somehow: maybe by making the fs module
343 /// grab its "current_path" view at program startup time; or by grabbing the
344 /// current path at test_program creation time; or maybe something else.
345 ///
346 /// \param program The test program to modify.
347 ///
348 /// \return A new test program whose internal paths are absolute.
349 static model::test_program
350 force_absolute_paths(const model::test_program program)
351 {
352     const std::string& relative = program.relative_path().str();
353     const std::string absolute = program.absolute_path().str();
354 
355     const std::string root = absolute.substr(
356         0, absolute.length() - relative.length());
357 
358     return model::test_program(
359         program.interface_name(),
360         program.relative_path(), fs::path(root),
361         program.test_suite_name(),
362         program.get_metadata(), program.test_cases());
363 }
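
// Worked example of the path arithmetic above: if relative_path() is
// "bin/the_test" and absolute_path() was computed as
// "/usr/tests/bin/the_test" before any directory change, the substring
// operation yields the root "/usr/tests/", which is then used to rebuild the
// test program with an absolute root.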
364 
365 
366 /// Functor to list the test cases of a test program.
367 class list_test_cases {
368     /// Interface of the test program to execute.
369     std::shared_ptr< scheduler::interface > _interface;
370 
371     /// Test program to execute.
372     const model::test_program _test_program;
373 
374     /// User-provided configuration variables.
375     const config::tree& _user_config;
376 
377 public:
378     /// Constructor.
379     ///
380     /// \param interface Interface of the test program to execute.
381     /// \param test_program Test program to execute.
382     /// \param user_config User-provided configuration variables.
383     list_test_cases(
384         const std::shared_ptr< scheduler::interface > interface,
385         const model::test_program* test_program,
386         const config::tree& user_config) :
387         _interface(interface),
388         _test_program(force_absolute_paths(*test_program)),
389         _user_config(user_config)
390     {
391     }
392 
393     /// Body of the subprocess.
394     void
395     operator()(const fs::path& /* control_directory */)
396     {
397         const config::properties_map vars = scheduler::generate_config(
398             _user_config, _test_program.test_suite_name());
399         _interface->exec_list(_test_program, vars);
400     }
401 };
402 
403 
404 /// Functor to execute a test program in a child process.
405 class run_test_program {
406     /// Interface of the test program to execute.
407     std::shared_ptr< scheduler::interface > _interface;
408 
409     /// Test program to execute.
410     const model::test_program _test_program;
411 
412     /// Name of the test case to execute.
413     const std::string& _test_case_name;
414 
415     /// User-provided configuration variables.
416     const config::tree& _user_config;
417 
418     /// Checks whether the test case needs to be skipped.
419     ///
420     /// We could very well run this on the scheduler parent process before
421     /// issuing the fork.  However, doing this here in the child process is
422     /// better for two reasons: first, it allows us to continue using the simple
423     /// spawn/wait abstraction of the scheduler; and, second, we parallelize the
424     /// requirements checks among tests.
425     ///
426     /// \post If the test's preconditions are not met, the calling process is
427     /// terminated with a special exit code and a "skipped cookie" is written to
428     /// disk with the reason for the skip.
429     ///
430     /// \param skipped_cookie_path File to create with the skip reason details
431     ///     if this test is skipped.
432     void
433     do_requirements_check(const fs::path& skipped_cookie_path)
434     {
435         const model::test_case& test_case = _test_program.find(
436             _test_case_name);
437 
438         const std::string skip_reason = engine::check_reqs(
439             test_case.get_metadata(), _user_config,
440             _test_program.test_suite_name(),
441             fs::current_path());
442         if (skip_reason.empty())
443             return;
444 
445         std::ofstream output(skipped_cookie_path.c_str());
446         if (!output) {
447             std::perror((F("Failed to open %s for write") %
448                          skipped_cookie_path).str().c_str());
449             std::abort();
450         }
451         output << skip_reason;
452         output.close();
453 
454         // Abruptly terminate the process.  We don't want to run any destructors
455         // inherited from the parent process by mistake, which could, for
456         // example, delete our own control files!
457         ::_exit(exit_skipped);
458     }
459 
460 public:
461     /// Constructor.
462     ///
463     /// \param interface Interface of the test program to execute.
464     /// \param test_program Test program to execute.
465     /// \param test_case_name Name of the test case to execute.
466     /// \param user_config User-provided configuration variables.
467     run_test_program(
468         const std::shared_ptr< scheduler::interface > interface,
469         const model::test_program_ptr test_program,
470         const std::string& test_case_name,
471         const config::tree& user_config) :
472         _interface(interface),
473         _test_program(force_absolute_paths(*test_program)),
474         _test_case_name(test_case_name),
475         _user_config(user_config)
476     {
477     }
478 
479     /// Body of the subprocess.
480     ///
481     /// \param control_directory The test case's control directory, where the
482     ///     scheduler places control files such as the skipped cookie.
483     void
484     operator()(const fs::path& control_directory)
485     {
486         const model::test_case& test_case = _test_program.find(
487             _test_case_name);
488         if (test_case.fake_result())
489             ::_exit(EXIT_SUCCESS);
490 
491         do_requirements_check(control_directory / skipped_cookie);
492 
493         const config::properties_map vars = scheduler::generate_config(
494             _user_config, _test_program.test_suite_name());
495         _interface->exec_test(_test_program, _test_case_name, vars,
496                               control_directory);
497     }
498 };
499 
500 
501 /// Functor to execute a test case's cleanup routine in a child process.
502 class run_test_cleanup {
503     /// Interface of the test program to execute.
504     std::shared_ptr< scheduler::interface > _interface;
505 
506     /// Test program to execute.
507     const model::test_program _test_program;
508 
509     /// Name of the test case to execute.
510     const std::string& _test_case_name;
511 
512     /// User-provided configuration variables.
513     const config::tree& _user_config;
514 
515 public:
516     /// Constructor.
517     ///
518     /// \param interface Interface of the test program to execute.
519     /// \param test_program Test program to execute.
520     /// \param test_case_name Name of the test case to execute.
521     /// \param user_config User-provided configuration variables.
522     run_test_cleanup(
523         const std::shared_ptr< scheduler::interface > interface,
524         const model::test_program_ptr test_program,
525         const std::string& test_case_name,
526         const config::tree& user_config) :
527         _interface(interface),
528         _test_program(force_absolute_paths(*test_program)),
529         _test_case_name(test_case_name),
530         _user_config(user_config)
531     {
532     }
533 
534     /// Body of the subprocess.
535     ///
536     /// \param control_directory The testcase directory where cleanup will be
537     ///     run from.
538     void
539     operator()(const fs::path& control_directory)
540     {
541         const config::properties_map vars = scheduler::generate_config(
542             _user_config, _test_program.test_suite_name());
543         _interface->exec_cleanup(_test_program, _test_case_name, vars,
544                                  control_directory);
545     }
546 };
547 
548 
549 /// Functor to execute a test execenv cleanup in a child process.
550 class run_execenv_cleanup {
551     /// Test program to execute.
552     const model::test_program _test_program;
553 
554     /// Name of the test case to execute.
555     const std::string& _test_case_name;
556 
557 public:
558     /// Constructor.
559     ///
560     /// \param test_program Test program to execute.
561     /// \param test_case_name Name of the test case to execute.
562     run_execenv_cleanup(
563         const model::test_program_ptr test_program,
564         const std::string& test_case_name) :
565         _test_program(force_absolute_paths(*test_program)),
566         _test_case_name(test_case_name)
567     {
568     }
569 
570     /// Body of the subprocess.
571     ///
572     /// \param control_directory The testcase directory where cleanup will be
573     ///     run from.
574     void
575     operator()(const fs::path& /* control_directory */)
576     {
577         auto e = execenv::get(_test_program, _test_case_name);
578         e->cleanup();
579     }
580 };
581 
582 
583 /// Obtains the right scheduler interface for a given test program.
584 ///
585 /// \param name The name of the interface of the test program.
586 ///
587 /// \return A scheduler interface.
588 std::shared_ptr< scheduler::interface >
589 find_interface(const std::string& name)
590 {
591     const interfaces_map::const_iterator iter = interfaces.find(name);
592     PRE(iter != interfaces.end());
593     return (*iter).second;
594 }
595 
596 
597 }  // anonymous namespace
598 
599 
600 void
601 scheduler::interface::exec_cleanup(
602     const model::test_program& /* test_program */,
603     const std::string& /* test_case_name */,
604     const config::properties_map& /* vars */,
605     const utils::fs::path& /* control_directory */) const
606 {
607     // Most test interfaces do not support standalone cleanup routines; any
608     // interface that does must override this hook, so reaching it is a bug.
609     UNREACHABLE_MSG("exec_cleanup not implemented for an interface that "
610                     "supports standalone cleanup routines");
611 }
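
// Illustrative sketch of how an interface that does support standalone
// cleanup routines overrides this hook (my_interface is hypothetical):
//
//     void
//     my_interface::exec_cleanup(const model::test_program& test_program,
//                                const std::string& test_case_name,
//                                const config::properties_map& vars,
//                                const fs::path& control_directory) const
//     {
//         // Re-exec the test program asking it to run the cleanup part of
//         // test_case_name; this call must not return on success.
//     }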
612 
613 
614 /// Internal implementation of a lazy_test_program.
615 struct engine::scheduler::lazy_test_program::impl : utils::noncopyable {
616     /// Whether the test cases list has been loaded yet.
617     bool _loaded;
618 
619     /// User configuration to pass to the test program list operation.
620     config::tree _user_config;
621 
622     /// Scheduler context to use to load test cases.
623     scheduler::scheduler_handle& _scheduler_handle;
624 
625     /// Constructor.
626     ///
627     /// \param user_config_ User configuration to pass to the test program list
628     ///     operation.
629     /// \param scheduler_handle_ Scheduler context to use when loading test
630     ///     cases.
631     impl(const config::tree& user_config_,
632          scheduler::scheduler_handle& scheduler_handle_) :
633         _loaded(false), _user_config(user_config_),
634         _scheduler_handle(scheduler_handle_)
635     {
636     }
637 };
638 
639 
640 /// Constructs a new test program.
641 ///
642 /// \param interface_name_ Name of the test program interface.
643 /// \param binary_ The name of the test program binary relative to root_.
644 /// \param root_ The root of the test suite containing the test program.
645 /// \param test_suite_name_ The name of the test suite this program belongs to.
646 /// \param md_ Metadata of the test program.
647 /// \param user_config_ User configuration to pass to the scheduler.
648 /// \param scheduler_handle_ Scheduler context to use to load test cases.
649 scheduler::lazy_test_program::lazy_test_program(
650     const std::string& interface_name_,
651     const fs::path& binary_,
652     const fs::path& root_,
653     const std::string& test_suite_name_,
654     const model::metadata& md_,
655     const config::tree& user_config_,
656     scheduler::scheduler_handle& scheduler_handle_) :
657     test_program(interface_name_, binary_, root_, test_suite_name_, md_,
658                  model::test_cases_map()),
659     _pimpl(new impl(user_config_, scheduler_handle_))
660 {
661 }
662 
663 
664 /// Gets or loads the list of test cases from the test program.
665 ///
666 /// \return The list of test cases provided by the test program.
667 const model::test_cases_map&
668 scheduler::lazy_test_program::test_cases(void) const
669 {
670     _pimpl->_scheduler_handle.check_interrupt();
671 
672     if (!_pimpl->_loaded) {
673         const model::test_cases_map tcs = _pimpl->_scheduler_handle.list_tests(
674             this, _pimpl->_user_config);
675 
676         // Due to the restrictions on when set_test_cases() may be called (as a
677         // way to lazily initialize the test cases list before it is ever
678         // returned), this cast is valid.
679         const_cast< scheduler::lazy_test_program* >(this)->set_test_cases(tcs);
680 
681         _pimpl->_loaded = true;
682 
683         _pimpl->_scheduler_handle.check_interrupt();
684     }
685 
686     INV(_pimpl->_loaded);
687     return test_program::test_cases();
688 }
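
// Illustrative usage sketch: the first call to test_cases() spawns a
// subprocess to list the test cases and caches the result; later calls are
// cheap.  The names 'handle', 'user_conf' and 'md' are hypothetical and the
// "plain" interface is assumed to be registered:
//
//     scheduler::lazy_test_program program(
//         "plain", fs::path("the_test"), fs::path("/usr/tests"), "the_suite",
//         md, user_conf, handle);
//     program.test_cases();  // Runs the listing subprocess.
//     program.test_cases();  // Returns the cached list.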
689 
690 
691 /// Internal implementation for the result_handle class.
692 struct engine::scheduler::result_handle::bimpl : utils::noncopyable {
693     /// Generic executor exit handle for this result handle.
694     executor::exit_handle generic;
695 
696     /// Mutable reference to the corresponding scheduler state.
697     ///
698     /// This object references a member of the scheduler_handle that yielded
699     /// this result_handle instance.  We need this direct access to clean up
700     /// after ourselves when the result is destroyed.
701     exec_data_map& all_exec_data;
702 
703     /// Constructor.
704     ///
705     /// \param generic_ Generic executor exit handle for this result handle.
706     /// \param [in,out] all_exec_data_ Global object keeping track of all active
707     ///     executions for a scheduler.  This is a reference to a member of the
708     ///     scheduler_handle object.
709     bimpl(const executor::exit_handle generic_, exec_data_map& all_exec_data_) :
710         generic(generic_), all_exec_data(all_exec_data_)
711     {
712     }
713 
714     /// Destructor.
715     ~bimpl(void)
716     {
717         LD(F("Removing %s from all_exec_data") % generic.original_pid());
718         all_exec_data.erase(generic.original_pid());
719     }
720 };
721 
722 
723 /// Constructor.
724 ///
725 /// \param pbimpl Constructed internal implementation.
726 scheduler::result_handle::result_handle(std::shared_ptr< bimpl > pbimpl) :
727     _pbimpl(pbimpl)
728 {
729 }
730 
731 
732 /// Destructor.
733 scheduler::result_handle::~result_handle(void)
734 {
735 }
736 
737 
738 /// Cleans up the test case results.
739 ///
740 /// This function should be called explicitly as it provides the means to
741 /// control any exceptions raised during cleanup.  Do not rely on the destructor
742 /// to clean things up.
743 ///
744 /// \throw engine::error If the cleanup fails, especially due to the inability
745 ///     to remove the work directory.
746 void
747 scheduler::result_handle::cleanup(void)
748 {
749     _pbimpl->generic.cleanup();
750 }
751 
752 
753 /// Returns the original PID corresponding to this result.
754 ///
755 /// \return The original PID of the test subprocess.
756 int
757 scheduler::result_handle::original_pid(void) const
758 {
759     return _pbimpl->generic.original_pid();
760 }
761 
762 
763 /// Returns the timestamp of when spawn_test was called.
764 ///
765 /// \return A timestamp.
766 const datetime::timestamp&
767 scheduler::result_handle::start_time(void) const
768 {
769     return _pbimpl->generic.start_time();
770 }
771 
772 
773 /// Returns the timestamp of when wait_any_test returned this object.
774 ///
775 /// \return A timestamp.
776 const datetime::timestamp&
777 scheduler::result_handle::end_time(void) const
778 {
779     return _pbimpl->generic.end_time();
780 }
781 
782 
783 /// Returns the path to the test-specific work directory.
784 ///
785 /// This is guaranteed to be clear of files created by the scheduler.
786 ///
787 /// \return The path to a directory that exists until cleanup() is called.
788 fs::path
789 scheduler::result_handle::work_directory(void) const
790 {
791     return _pbimpl->generic.work_directory();
792 }
793 
794 
795 /// Returns the path to the test's stdout file.
796 ///
797 /// \return The path to a file that exists until cleanup() is called.
798 const fs::path&
799 scheduler::result_handle::stdout_file(void) const
800 {
801     return _pbimpl->generic.stdout_file();
802 }
803 
804 
805 /// Returns the path to the test's stderr file.
806 ///
807 /// \return The path to a file that exists until cleanup() is called.
808 const fs::path&
809 scheduler::result_handle::stderr_file(void) const
810 {
811     return _pbimpl->generic.stderr_file();
812 }
813 
814 
815 /// Internal implementation for the test_result_handle class.
816 struct engine::scheduler::test_result_handle::impl : utils::noncopyable {
817     /// Test program data for this test case.
818     model::test_program_ptr test_program;
819 
820     /// Name of the test case.
821     std::string test_case_name;
822 
823     /// The actual result of the test execution.
824     const model::test_result test_result;
825 
826     /// Constructor.
827     ///
828     /// \param test_program_ Test program data for this test case.
829     /// \param test_case_name_ Name of the test case.
830     /// \param test_result_ The actual result of the test execution.
831     impl(const model::test_program_ptr test_program_,
832          const std::string& test_case_name_,
833          const model::test_result& test_result_) :
834         test_program(test_program_),
835         test_case_name(test_case_name_),
836         test_result(test_result_)
837     {
838     }
839 };
840 
841 
842 /// Constructor.
843 ///
844 /// \param pbimpl Constructed internal implementation for the base object.
845 /// \param pimpl Constructed internal implementation.
846 scheduler::test_result_handle::test_result_handle(
847     std::shared_ptr< bimpl > pbimpl, std::shared_ptr< impl > pimpl) :
848     result_handle(pbimpl), _pimpl(pimpl)
849 {
850 }
851 
852 
853 /// Destructor.
854 scheduler::test_result_handle::~test_result_handle(void)
855 {
856 }
857 
858 
859 /// Returns the test program that yielded this result.
860 ///
861 /// \return A test program.
862 const model::test_program_ptr
863 scheduler::test_result_handle::test_program(void) const
864 {
865     return _pimpl->test_program;
866 }
867 
868 
869 /// Returns the name of the test case that yielded this result.
870 ///
871 /// \return A test case name.
872 const std::string&
873 scheduler::test_result_handle::test_case_name(void) const
874 {
875     return _pimpl->test_case_name;
876 }
877 
878 
879 /// Returns the actual result of the test execution.
880 ///
881 /// \return A test result.
882 const model::test_result&
883 scheduler::test_result_handle::test_result(void) const
884 {
885     return _pimpl->test_result;
886 }
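
// Illustrative usage sketch: callers that only spawn tests can downcast the
// generic result yielded by wait_any() to reach these accessors (the
// scheduler_handle 'handle' is hypothetical):
//
//     scheduler::result_handle_ptr result = handle.wait_any();
//     const scheduler::test_result_handle* as_test =
//         dynamic_cast< const scheduler::test_result_handle* >(result.get());
//     if (as_test != NULL && !as_test->test_result().good()) {
//         // Report as_test->test_program() and as_test->test_case_name().
//     }
//     result->cleanup();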
887 
888 
889 /// Internal implementation for the scheduler_handle.
890 struct engine::scheduler::scheduler_handle::impl : utils::noncopyable {
891     /// Generic executor instance encapsulated by this one.
892     executor::executor_handle generic;
893 
894     /// Mapping of exec handles to the data required at run time.
895     exec_data_map all_exec_data;
896 
897     /// Collection of test_exec_data objects.
898     typedef std::vector< const test_exec_data* > test_exec_data_vector;
899 
900     /// Constructor.
901     impl(void) : generic(executor::setup())
902     {
903     }
904 
905     /// Destructor.
906     ///
907     /// This runs any pending cleanup routines, which should only happen if the
908     /// scheduler is abruptly terminated (i.e. if a signal is received).
909     ~impl(void)
910     {
911         const test_exec_data_vector tests_data = tests_needing_cleanup();
912 
913         for (test_exec_data_vector::const_iterator iter = tests_data.begin();
914              iter != tests_data.end(); ++iter) {
915             const test_exec_data* test_data = *iter;
916 
917             try {
918                 sync_cleanup(test_data);
919             } catch (const std::runtime_error& e) {
920                 LW(F("Failed to run cleanup routine for %s:%s on abrupt "
921                      "termination")
922                    % test_data->test_program->relative_path()
923                    % test_data->test_case_name);
924             }
925         }
926 
927         const test_exec_data_vector td = tests_needing_execenv_cleanup();
928 
929         for (test_exec_data_vector::const_iterator iter = td.begin();
930              iter != td.end(); ++iter) {
931             const test_exec_data* test_data = *iter;
932 
933             try {
934                 sync_execenv_cleanup(test_data);
935             } catch (const std::runtime_error& e) {
936                 LW(F("Failed to run execenv cleanup routine for %s:%s on abrupt "
937                      "termination")
938                    % test_data->test_program->relative_path()
939                    % test_data->test_case_name);
940             }
941         }
942     }
943 
944     /// Finds any pending exec_datas that correspond to tests needing cleanup.
945     ///
946     /// \return The collection of test_exec_data objects that have their
947     /// needs_cleanup property set to true.
948     test_exec_data_vector
949     tests_needing_cleanup(void)
950     {
951         test_exec_data_vector tests_data;
952 
953         for (exec_data_map::const_iterator iter = all_exec_data.begin();
954              iter != all_exec_data.end(); ++iter) {
955             const exec_data_ptr data = (*iter).second;
956 
957             try {
958                 test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
959                     *data.get());
960                 if (test_data->needs_cleanup) {
961                     tests_data.push_back(test_data);
962                     test_data->needs_cleanup = false;
963                     if (!test_data->exit_handle)
964                         test_data->exit_handle = generic.reap(test_data->pid);
965                 }
966             } catch (const std::bad_cast& e) {
967                 // Do nothing for cleanup_exec_data objects.
968             }
969         }
970 
971         return tests_data;
972     }
973 
974     /// Finds any pending exec_datas that correspond to tests needing execenv
975     /// cleanup.
976     ///
977     /// \return The collection of test_exec_data objects that have their
978     /// needs_execenv_cleanup property set to true.
979     test_exec_data_vector
980     tests_needing_execenv_cleanup(void)
981     {
982         test_exec_data_vector tests_data;
983 
984         for (exec_data_map::const_iterator iter = all_exec_data.begin();
985              iter != all_exec_data.end(); ++iter) {
986             const exec_data_ptr data = (*iter).second;
987 
988             try {
989                 test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
990                     *data.get());
991                 if (test_data->needs_execenv_cleanup) {
992                     tests_data.push_back(test_data);
993                     test_data->needs_execenv_cleanup = false;
994                     if (!test_data->exit_handle)
995                         test_data->exit_handle = generic.reap(test_data->pid);
996                 }
997             } catch (const std::bad_cast& e) {
998                 // Do nothing for other objects.
999             }
1000         }
1001 
1002         return tests_data;
1003     }
1004 
1005     /// Cleans up a single test case synchronously.
1006     ///
1007     /// \param test_data The data of the previously executed test case to be
1008     ///     cleaned up.
1009     void
1010     sync_cleanup(const test_exec_data* test_data)
1011     {
1012         // The message in this result should never be seen by the user, but use
1013         // something reasonable just in case it leaks and we need to pinpoint
1014         // the call site.
1015         model::test_result result(model::test_result_broken,
1016                                   "Test case died abruptly");
1017 
1018         const executor::exec_handle cleanup_handle = spawn_cleanup(
1019             test_data->test_program, test_data->test_case_name,
1020             test_data->user_config, test_data->exit_handle.get(),
1021             result);
1022         generic.wait(cleanup_handle);
1023     }
1024 
1025     /// Forks and executes a test case cleanup routine asynchronously.
1026     ///
1027     /// \param test_program The container test program.
1028     /// \param test_case_name The name of the test case to run.
1029     /// \param user_config User-provided configuration variables.
1030     /// \param body_handle The exit handle of the test case's corresponding
1031     ///     body.  The cleanup will be executed in the same context.
1032     /// \param body_result The result of the test case's corresponding body.
1033     ///
1034     /// \return A handle for the background operation.  Used to match the result
1035     /// of the execution returned by wait_any() with this invocation.
1036     executor::exec_handle
1037     spawn_cleanup(const model::test_program_ptr test_program,
1038                   const std::string& test_case_name,
1039                   const config::tree& user_config,
1040                   const executor::exit_handle& body_handle,
1041                   const model::test_result& body_result)
1042     {
1043         generic.check_interrupt();
1044 
1045         const std::shared_ptr< scheduler::interface > interface =
1046             find_interface(test_program->interface_name());
1047 
1048         LI(F("Spawning %s:%s (cleanup)") % test_program->absolute_path() %
1049            test_case_name);
1050 
1051         const executor::exec_handle handle = generic.spawn_followup(
1052             run_test_cleanup(interface, test_program, test_case_name,
1053                              user_config),
1054             body_handle, cleanup_timeout);
1055 
1056         const exec_data_ptr data(new cleanup_exec_data(
1057             test_program, test_case_name, body_handle, body_result));
1058         LD(F("Inserting %s into all_exec_data (cleanup)") % handle.pid());
1059         INV_MSG(all_exec_data.find(handle.pid()) == all_exec_data.end(),
1060                 F("PID %s already in all_exec_data; not properly cleaned "
1061                   "up or reused too fast") % handle.pid());
1062         all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));
1063 
1064         return handle;
1065     }
1066 
1067     /// Cleans up a single test case execenv synchronously.
1068     ///
1069     /// \param test_data The data of the previously executed test case to be
1070     ///     cleaned up.
1071     void
1072     sync_execenv_cleanup(const test_exec_data* test_data)
1073     {
1074         // The message in this result should never be seen by the user, but use
1075         // something reasonable just in case it leaks and we need to pinpoint
1076         // the call site.
1077         model::test_result result(model::test_result_broken,
1078                                   "Test case died abruptly");
1079 
1080         const executor::exec_handle cleanup_handle = spawn_execenv_cleanup(
1081             test_data->test_program, test_data->test_case_name,
1082             test_data->exit_handle.get(), result);
1083         generic.wait(cleanup_handle);
1084     }
1085 
1086     /// Forks and executes a test case execenv cleanup asynchronously.
1087     ///
1088     /// \param test_program The container test program.
1089     /// \param test_case_name The name of the test case to run.
1090     /// \param body_handle The exit handle of the test case's corresponding
1091     ///     body.  The cleanup will be executed in the same context.
1092     /// \param body_result The result of the test case's corresponding body.
1093     ///
1094     /// \return A handle for the background operation.  Used to match the result
1095     /// of the execution returned by wait_any() with this invocation.
1096     executor::exec_handle
1097     spawn_execenv_cleanup(const model::test_program_ptr test_program,
1098                           const std::string& test_case_name,
1099                           const executor::exit_handle& body_handle,
1100                           const model::test_result& body_result)
1101     {
1102         generic.check_interrupt();
1103 
1104         LI(F("Spawning %s:%s (execenv cleanup)")
1105             % test_program->absolute_path() % test_case_name);
1106 
1107         const executor::exec_handle handle = generic.spawn_followup(
1108             run_execenv_cleanup(test_program, test_case_name),
1109             body_handle, execenv_cleanup_timeout);
1110 
1111         const exec_data_ptr data(new execenv_exec_data(
1112             test_program, test_case_name, body_handle, body_result));
1113         LD(F("Inserting %s into all_exec_data (execenv cleanup)") % handle.pid());
1114         INV_MSG(all_exec_data.find(handle.pid()) == all_exec_data.end(),
1115                 F("PID %s already in all_exec_data; not properly cleaned "
1116                   "up or reused too fast") % handle.pid());
1117         all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));
1118 
1119         return handle;
1120     }
1121 };
1122 
1123 
1124 /// Constructor.
1125 scheduler::scheduler_handle::scheduler_handle(void) : _pimpl(new impl())
1126 {
1127 }
1128 
1129 
1130 /// Destructor.
1131 scheduler::scheduler_handle::~scheduler_handle(void)
1132 {
1133 }
1134 
1135 
1136 /// Queries the path to the root of the work directory for all tests.
1137 ///
1138 /// \return A path.
1139 const fs::path&
1140 scheduler::scheduler_handle::root_work_directory(void) const
1141 {
1142     return _pimpl->generic.root_work_directory();
1143 }
1144 
1145 
1146 /// Cleans up the scheduler state.
1147 ///
1148 /// This function should be called explicitly as it provides the means to
1149 /// control any exceptions raised during cleanup.  Do not rely on the destructor
1150 /// to clean things up.
1151 ///
1152 /// \throw engine::error If there are problems cleaning up the scheduler.
1153 void
1154 scheduler::scheduler_handle::cleanup(void)
1155 {
1156     _pimpl->generic.cleanup();
1157 }
1158 
1159 
1160 /// Checks if the given interface name is valid.
1161 ///
1162 /// \param name The name of the interface to validate.
1163 ///
1164 /// \throw engine::error If the given interface is not supported.
1165 void
1166 scheduler::ensure_valid_interface(const std::string& name)
1167 {
1168     if (interfaces.find(name) == interfaces.end())
1169         throw engine::error(F("Unsupported test interface '%s'") % name);
1170 }
1171 
1172 
1173 /// Registers a new interface.
1174 ///
1175 /// \param name The name of the interface.  Must not have been registered yet.
1176 /// \param spec Interface specification.
1177 void
1178 scheduler::register_interface(const std::string& name,
1179                               const std::shared_ptr< interface > spec)
1180 {
1181     PRE(interfaces.find(name) == interfaces.end());
1182     interfaces.insert(interfaces_map::value_type(name, spec));
1183 }
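
// Illustrative usage sketch: registration is expected to happen once during
// program initialization, before any scheduler_handle exists (my_interface
// is hypothetical):
//
//     scheduler::register_interface(
//         "my", std::shared_ptr< scheduler::interface >(new my_interface()));
//     scheduler::ensure_valid_interface("my");  // Now succeeds.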
1184 
1185 
1186 /// Returns the names of all registered interfaces.
1187 ///
1188 /// \return A collection of interface names.
1189 std::set< std::string >
1190 scheduler::registered_interface_names(void)
1191 {
1192     std::set< std::string > names;
1193     for (interfaces_map::const_iterator iter = interfaces.begin();
1194          iter != interfaces.end(); ++iter) {
1195         names.insert((*iter).first);
1196     }
1197     return names;
1198 }
1199 
1200 
1201 /// Initializes the scheduler.
1202 ///
1203 /// \pre This function can only be called if there is no other scheduler_handle
1204 /// object alive.
1205 ///
1206 /// \return A handle to the operations of the scheduler.
1207 scheduler::scheduler_handle
1208 scheduler::setup(void)
1209 {
1210     return scheduler_handle();
1211 }
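
// Illustrative usage sketch of the overall scheduler_handle lifecycle; the
// test program 'prog' and the configuration 'user_conf' are hypothetical:
//
//     scheduler::scheduler_handle handle = scheduler::setup();
//     handle.spawn_test(prog, "first_test", user_conf);
//     scheduler::result_handle_ptr result = handle.wait_any();
//     // ... inspect *result ...
//     result->cleanup();  // Explicit; do not rely on the destructor.
//     handle.cleanup();   // Likewise for the scheduler itself.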
1212 
1213 
1214 /// Retrieves the list of test cases from a test program.
1215 ///
1216 /// This operation is currently synchronous.
1217 ///
1218 /// This operation should never throw.  Any errors during the processing of the
1219 /// test case list are subsumed into a single test case in the return value that
1220 /// represents the failed retrieval.
1221 ///
1222 /// \param test_program The test program from which to obtain the list of test
1223 /// cases.
1224 /// \param user_config User-provided configuration variables.
1225 ///
1226 /// \return The list of test cases.
1227 model::test_cases_map
1228 scheduler::scheduler_handle::list_tests(
1229     const model::test_program* test_program,
1230     const config::tree& user_config)
1231 {
1232     _pimpl->generic.check_interrupt();
1233 
1234     const std::shared_ptr< scheduler::interface > interface = find_interface(
1235         test_program->interface_name());
1236 
1237     try {
1238         const executor::exec_handle exec_handle = _pimpl->generic.spawn(
1239             list_test_cases(interface, test_program, user_config),
1240             list_timeout, none);
1241         executor::exit_handle exit_handle = _pimpl->generic.wait(exec_handle);
1242 
1243         const model::test_cases_map test_cases = interface->parse_list(
1244             exit_handle.status(),
1245             exit_handle.stdout_file(),
1246             exit_handle.stderr_file());
1247 
1248         exit_handle.cleanup();
1249 
1250         if (test_cases.empty())
1251             throw std::runtime_error("Empty test cases list");
1252 
1253         return test_cases;
1254     } catch (const std::runtime_error& e) {
1255         // TODO(jmmv): This is a very ugly workaround for the fact that we
1256         // cannot report failures at the test-program level.
1257         LW(F("Failed to load test cases list: %s") % e.what());
1258         model::test_cases_map fake_test_cases;
1259         fake_test_cases.insert(model::test_cases_map::value_type(
1260             "__test_cases_list__",
1261             model::test_case(
1262                 "__test_cases_list__",
1263                 "Represents the correct processing of the test cases list",
1264                 model::test_result(model::test_result_broken, e.what()))));
1265         return fake_test_cases;
1266     }
1267 }
1268 
1269 
1270 /// Forks and executes a test case asynchronously.
1271 ///
1272 /// Note that the caller need not know whether the test has a cleanup routine.
1273 /// If there is one, we trigger it at wait_any() time.
1274 ///
1275 /// \param test_program The container test program.
1276 /// \param test_case_name The name of the test case to run.
1277 /// \param user_config User-provided configuration variables.
1278 ///
1279 /// \return A handle for the background operation.  Used to match the result of
1280 /// the execution returned by wait_any() with this invocation.
1281 scheduler::exec_handle
1282 scheduler::scheduler_handle::spawn_test(
1283     const model::test_program_ptr test_program,
1284     const std::string& test_case_name,
1285     const config::tree& user_config)
1286 {
1287     _pimpl->generic.check_interrupt();
1288 
1289     const std::shared_ptr< scheduler::interface > interface = find_interface(
1290         test_program->interface_name());
1291 
1292     LI(F("Spawning %s:%s") % test_program->absolute_path() % test_case_name);
1293 
1294     const model::test_case& test_case = test_program->find(test_case_name);
1295 
1296     optional< passwd::user > unprivileged_user;
1297     if (user_config.is_set("unprivileged_user") &&
1298         test_case.get_metadata().required_user() == "unprivileged") {
1299         unprivileged_user = user_config.lookup< engine::user_node >(
1300             "unprivileged_user");
1301     }
1302 
1303     const executor::exec_handle handle = _pimpl->generic.spawn(
1304         run_test_program(interface, test_program, test_case_name,
1305                          user_config),
1306         test_case.get_metadata().timeout(),
1307         unprivileged_user);
1308 
1309     const exec_data_ptr data(new test_exec_data(
1310         test_program, test_case_name, interface, user_config, handle.pid()));
1311     LD(F("Inserting %s into all_exec_data") % handle.pid());
1312     INV_MSG(
1313         _pimpl->all_exec_data.find(handle.pid()) == _pimpl->all_exec_data.end(),
1314         F("PID %s already in all_exec_data; not cleaned up or reused too fast")
1315         % handle.pid());
1316     _pimpl->all_exec_data.insert(exec_data_map::value_type(handle.pid(), data));
1317 
1318     return handle.pid();
1319 }
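
// Illustrative usage sketch: spawn_test() only starts the subprocess and
// results are collected separately, so a typical caller spawns a batch of
// tests and then drains the scheduler (reusing the hypothetical 'handle',
// 'prog' and 'user_conf' from the sketch after setup() above):
//
//     const model::test_cases_map& cases = prog->test_cases();
//     std::size_t pending = 0;
//     for (model::test_cases_map::const_iterator it = cases.begin();
//          it != cases.end(); ++it) {
//         handle.spawn_test(prog, (*it).first, user_conf);
//         ++pending;
//     }
//     while (pending > 0) {
//         scheduler::result_handle_ptr result = handle.wait_any();
//         // ... consume *result ...
//         result->cleanup();
//         --pending;
//     }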
1320 
1321 
1322 /// Waits for completion of any forked test case.
1323 ///
1324 /// Note that if the terminated test case has a cleanup routine, this function
1325 /// is the one in charge of spawning the cleanup routine asynchronously.
1326 ///
1327 /// \return The result of the execution of a subprocess.  This is a dynamically
1328 /// allocated object because the scheduler can spawn subprocesses of various
1329 /// types and, at wait time, we don't know upfront what we are going to get.
1330 scheduler::result_handle_ptr
1331 scheduler::scheduler_handle::wait_any(void)
1332 {
1333     _pimpl->generic.check_interrupt();
1334 
1335     executor::exit_handle handle = _pimpl->generic.wait_any();
1336 
1337     const exec_data_map::iterator iter = _pimpl->all_exec_data.find(
1338         handle.original_pid());
1339     exec_data_ptr data = (*iter).second;
1340 
1341     utils::dump_stacktrace_if_available(data->test_program->absolute_path(),
1342                                         _pimpl->generic, handle);
1343 
1344     optional< model::test_result > result;
1345 
1346     // Case 1: the subprocess was the test case's body itself.
1347     try {
1348         test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
1349             *data.get());
1350         LD(F("Got %s from all_exec_data") % handle.original_pid());
1351 
1352         test_data->exit_handle = handle;
1353 
1354         const model::test_case& test_case = test_data->test_program->find(
1355             test_data->test_case_name);
1356 
1357         result = test_case.fake_result();
1358 
1359         if (!result && handle.status() && handle.status().get().exited() &&
1360             handle.status().get().exitstatus() == exit_skipped) {
1361             // If the test's process terminated with our magic "exit_skipped"
1362             // status, there are two cases to handle.  The first is the case
1363             // where the "skipped cookie" exists, in which case we never got to
1364             // actually invoke the test program; if that's the case, handle it
1365             // here.  The second case is where the test case actually decided to
1366             // exit with the "exit_skipped" status; in that case, just fall back
1367             // to the regular status handling.
1368             const fs::path skipped_cookie_path = handle.control_directory() /
1369                 skipped_cookie;
1370             std::ifstream input(skipped_cookie_path.c_str());
1371             if (input) {
1372                 result = model::test_result(model::test_result_skipped,
1373                                             utils::read_stream(input));
1374                 input.close();
1375 
1376                 // If we determined that the test needs to be skipped, we do not
1377                 // want to run the cleanup routine because doing so could result
1378                 // in errors.  However, we still want to run the cleanup routine
1379                 // if the test's body reports a skip (because actions could have
1380                 // already been taken).
1381                 test_data->needs_cleanup = false;
1382                 test_data->needs_execenv_cleanup = false;
1383             }
1384         }
1385         if (!result) {
1386             result = test_data->interface->compute_result(
1387                 handle.status(),
1388                 handle.control_directory(),
1389                 handle.stdout_file(),
1390                 handle.stderr_file());
1391         }
1392         INV(result);
1393 
1394         if (!result.get().good()) {
1395             append_files_listing(handle.work_directory(),
1396                                  handle.stderr_file());
1397         }
1398 
1399         if (test_data->needs_cleanup) {
1400             INV(test_case.get_metadata().has_cleanup());
1401             // The test body has completed and we have processed it.  If there
1402             // is a cleanup routine, trigger it now and wait for any other test
1403             // completion.  The caller never knows about cleanup routines.
1404             _pimpl->spawn_cleanup(test_data->test_program,
1405                                   test_data->test_case_name,
1406                                   test_data->user_config, handle, result.get());
1407 
1408             // TODO(jmmv): Chaining this call is ugly.  We'd be better off by
1409             // looping over terminated processes until we got a result suitable
1410             // for user consumption.  For the time being this is good enough and
1411             // not a problem because the call chain won't get big: the majority
1412             // of test cases do not have cleanup routines.
1413             return wait_any();
1414         }
1415 
1416         if (test_data->needs_execenv_cleanup) {
1417             INV(test_case.get_metadata().has_execenv());
1418             _pimpl->spawn_execenv_cleanup(test_data->test_program,
1419                                           test_data->test_case_name,
1420                                           handle, result.get());
1421             test_data->needs_execenv_cleanup = false;
1422             return wait_any();
1423         }
1424     } catch (const std::bad_cast& e) {
1425         // ok, let's check for another type
1426     }
1427 
1428     // test cleanup
1429     try {
1430         const cleanup_exec_data* cleanup_data =
1431             &dynamic_cast< const cleanup_exec_data& >(*data.get());
1432         LD(F("Got %s from all_exec_data (cleanup)") % handle.original_pid());
1433 
1434         // Handle the completion of cleanup subprocesses internally: the caller
1435         // is not aware that these exist so, when we return, we must return the
1436         // data for the original test that triggered this routine.  This is,
1437         // for example, because the caller wants to see the exact same
1438         // exec_handle that was returned by spawn_test.
1439 
1440         const model::test_result& body_result = cleanup_data->body_result;
1441         if (body_result.good()) {
1442             if (!handle.status()) {
1443                 result = model::test_result(model::test_result_broken,
1444                                             "Test case cleanup timed out");
1445             } else {
1446                 if (!handle.status().get().exited() ||
1447                     handle.status().get().exitstatus() != EXIT_SUCCESS) {
1448                     result = model::test_result(
1449                         model::test_result_broken,
1450                         "Test case cleanup did not terminate successfully");
1451                 } else {
1452                     result = body_result;
1453                 }
1454             }
1455         } else {
1456             result = body_result;
1457         }
1458 
1459         // Untrack the cleanup process.  We must do this explicitly because
1460         // we do not create a result_handle object for the cleanup, and that
1461         // object is what untracks it in the regular (non-cleanup) case.
1462         LD(F("Removing %s from all_exec_data (cleanup) in favor of %s")
1463            % handle.original_pid()
1464            % cleanup_data->body_exit_handle.original_pid());
1465         _pimpl->all_exec_data.erase(handle.original_pid());
1466 
1467         handle = cleanup_data->body_exit_handle;
1468 
1469         const exec_data_map::iterator it = _pimpl->all_exec_data.find(
1470             handle.original_pid());
1471         if (it != _pimpl->all_exec_data.end()) {
1472             exec_data_ptr d = (*it).second;
1473             test_exec_data* test_data = &dynamic_cast< test_exec_data& >(
1474                 *d.get());
1475             const model::test_case& test_case =
1476                 cleanup_data->test_program->find(cleanup_data->test_case_name);
1477             test_data->needs_cleanup = false;
1478 
1479             if (test_data->needs_execenv_cleanup) {
1480                 INV(test_case.get_metadata().has_execenv());
1481                 _pimpl->spawn_execenv_cleanup(cleanup_data->test_program,
1482                                               cleanup_data->test_case_name,
1483                                               handle, result.get());
1484                 test_data->needs_execenv_cleanup = false;
1485                 return wait_any();
1486             }
1487         }
1488     } catch (const std::bad_cast& e) {
1489         // Not a cleanup_exec_data; fall through and check the next type.
1490     }
1491 
1492     // execenv cleanup
1493     try {
1494         const execenv_exec_data* execenv_data =
1495             &dynamic_cast< const execenv_exec_data& >(*data.get());
1496         LD(F("Got %s from all_exec_data (execenv cleanup)") % handle.original_pid());
1497 
1498         const model::test_result& body_result = execenv_data->body_result;
1499         if (body_result.good()) {
1500             if (!handle.status()) {
1501                 result = model::test_result(model::test_result_broken,
1502                                             "Test case execenv cleanup timed out");
1503             } else {
1504                 if (!handle.status().get().exited() ||
1505                     handle.status().get().exitstatus() != EXIT_SUCCESS) {
1506                     result = model::test_result(
1507                         model::test_result_broken,
1508                         "Test case execenv cleanup did not terminate successfully");
1509                 } else {
1510                     result = body_result;
1511                 }
1512             }
1513         } else {
1514             result = body_result;
1515         }
1516 
1517         LD(F("Removing %s from all_exec_data (execenv cleanup) in favor of %s")
1518            % handle.original_pid()
1519            % execenv_data->body_exit_handle.original_pid());
1520         _pimpl->all_exec_data.erase(handle.original_pid());
1521 
1522         handle = execenv_data->body_exit_handle;
1523     } catch (const std::bad_cast& e) {
1524         // Not an execenv_exec_data; the data was one of the types handled above.
1525     }
1526 
1527     INV(result);
1528 
1529     std::shared_ptr< result_handle::bimpl > result_handle_bimpl(
1530         new result_handle::bimpl(handle, _pimpl->all_exec_data));
1531     std::shared_ptr< test_result_handle::impl > test_result_handle_impl(
1532         new test_result_handle::impl(
1533             data->test_program, data->test_case_name, result.get()));
1534     return result_handle_ptr(new test_result_handle(result_handle_bimpl,
1535                                                     test_result_handle_impl));
1536 }
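
// Illustrative call flow (editor's sketch, not part of the original sources):
// for a test case that defines a cleanup routine, the chaining implemented
// above behaves roughly as follows for a single spawned test:
//
//     spawn_test()  -> returns the exec_handle for the test body
//     wait_any()    -> body exits; spawn_cleanup() runs and the call recurses
//                      into wait_any()
//     wait_any()    -> cleanup exits; its status is folded into the body's
//                      result and the body's original exec_handle is returned
//
// The caller therefore only ever sees the exec_handle produced by
// spawn_test(), as the comments above describe.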
1537 
1538 
1539 /// Forks and executes a test case synchronously for debugging.
1540 ///
1541 /// \pre No other processes should be in execution by the scheduler.
1542 ///
1543 /// \param test_program The container test program.
1544 /// \param test_case_name The name of the test case to run.
1545 /// \param user_config User-provided configuration variables.
1546 /// \param stdout_target File to which to write the stdout of the test case.
1547 /// \param stderr_target File to which to write the stderr of the test case.
1548 ///
1549 /// \return The result of the execution of the test.
1550 scheduler::result_handle_ptr
1551 scheduler::scheduler_handle::debug_test(
1552     const model::test_program_ptr test_program,
1553     const std::string& test_case_name,
1554     const config::tree& user_config,
1555     const fs::path& stdout_target,
1556     const fs::path& stderr_target)
1557 {
1558     const exec_handle exec_handle = spawn_test(
1559         test_program, test_case_name, user_config);
1560     result_handle_ptr result_handle = wait_any();
1561 
1562     // TODO(jmmv): We need to do this while the subprocess is alive.  This is
1563     // important for debugging purposes, as we should see the contents of stdout
1564     // or stderr as they come in.
1565     //
1566     // Unfortunately, we cannot do so.  We cannot just read and block from a
1567     // file, waiting for further output to appear... as this only works on pipes
1568     // or sockets.  We need a better interface for this whole thing.
1569     {
1570         auto output = utils::open_ostream(
1571             stdout_target);
1572         *output << utils::read_file(result_handle->stdout_file());
1573     }
1574     {
1575         auto output = utils::open_ostream(
1576             stderr_target);
1577         *output << utils::read_file(result_handle->stderr_file());
1578     }
1579 
1580     INV(result_handle->original_pid() == exec_handle);
1581     return result_handle;
1582 }
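
// Illustrative usage sketch (editor's addition, not part of the original
// sources): a debugging front-end could drive debug_test() roughly as shown
// below.  "handle" is assumed to be a scheduler_handle obtained from
// scheduler::setup() and "program" a model::test_program_ptr; both names are
// placeholders for this example.
//
//     scheduler::result_handle_ptr rh = handle.debug_test(
//         program, "my_test_case", user_config,
//         fs::path("debug.out"), fs::path("debug.err"));
//     // rh references the captured output files and the final result;
//     // debug.out and debug.err now hold copies of the test's output.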
1583 
1584 
1585 /// Checks if an interrupt has fired.
1586 ///
1587 /// Calls to this function should be sprinkled in strategic places through the
1588 /// code protected by an interrupts_handler object.
1589 ///
1590 /// This is just a wrapper over signals::check_interrupt() to avoid leaking this
1591 /// dependency to the caller.
1592 ///
1593 /// \throw signals::interrupted_error If there has been an interrupt.
1594 void
1595 scheduler::scheduler_handle::check_interrupt(void) const
1596 {
1597     _pimpl->generic.check_interrupt();
1598 }
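
// Illustrative usage sketch (editor's addition, not part of the original
// sources): a driver loop protected by an interrupts_handler would typically
// call check_interrupt() between scheduling steps, for example:
//
//     while (have_pending_tests()) {  // have_pending_tests() is hypothetical
//         handle.check_interrupt();
//         // ... spawn more tests or wait for completed ones ...
//     }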
1599 
1600 
1601 /// Queries the current execution context.
1602 ///
1603 /// \return The queried context.
1604 model::context
1605 scheduler::current_context(void)
1606 {
1607     return model::context(fs::current_path(), utils::getallenv());
1608 }
1609 
1610 
1611 /// Generates the set of configuration variables for a test program.
1612 ///
1613 /// \param user_config The configuration variables provided by the user.
1614 /// \param test_suite The name of the test suite.
1615 ///
1616 /// \return The mapping of configuration variables for the test program.
1617 config::properties_map
1618 scheduler::generate_config(const config::tree& user_config,
1619                            const std::string& test_suite)
1620 {
1621     config::properties_map props;
1622 
1623     try {
1624         props = user_config.all_properties(F("test_suites.%s") % test_suite,
1625                                            true);
1626     } catch (const config::unknown_key_error& unused_error) {
1627         // Ignore: not all test suites have entries in the configuration.
1628     }
1629 
1630     // TODO(jmmv): This is a hack that exists for the ATF interface only, so it
1631     // should be moved there.
1632     if (user_config.is_set("unprivileged_user")) {
1633         const passwd::user& user =
1634             user_config.lookup< engine::user_node >("unprivileged_user");
1635         props["unprivileged-user"] = user.name;
1636     }
1637 
1638     return props;
1639 }
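
// Illustrative sketch (editor's addition, not part of the original sources):
// given a user_config tree that defines "test_suites.foo.timeout = 10" and
// sets "unprivileged_user" to "nobody", a call such as
//
//     const config::properties_map props =
//         scheduler::generate_config(user_config, "foo");
//
// would, per the lookups above, return a map holding
// { "timeout" => "10", "unprivileged-user" => "nobody" }.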
1640