#!@PYTHON@

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright (c) 2017, Chris Fraire <cfraire@me.com>.
# Copyright 2018 Joyent, Inc.
#

import ConfigParser
import os
import logging
import platform
from logging.handlers import WatchedFileHandler
from datetime import datetime
from optparse import OptionParser
from pwd import getpwnam
from pwd import getpwuid
from select import select
from subprocess import PIPE
from subprocess import Popen
from sys import argv
from sys import maxint
from threading import Timer
from time import time

BASEDIR = '/var/tmp/test_results'
KILL = '/usr/bin/kill'
TRUE = '/usr/bin/true'
SUDO = '/usr/bin/sudo'

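# Typical invocations (illustrative examples; the runfile and test paths
# below are hypothetical):
#
#   run -c my_tests.run            # run the tests listed in a runfile
#   run -g /opt/tests/functional   # treat each directory as a TestGroup
#   run -w my_tests.run /opt/tests/functional
#                                  # write a runfile template, but run nothing
#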
# Custom class to reopen the log file in case it is forcibly closed by a test.
class WatchedFileHandlerClosed(WatchedFileHandler):
    """Watch files, including closed files.
    Similar to (and inherits from) logging.handlers.WatchedFileHandler,
    except that IOErrors are handled by reopening the stream and retrying.
    This will be retried up to a configurable number of times before
    giving up, default 5.
    """

    def __init__(self, filename, mode='a', encoding=None, delay=0, max_tries=5):
        self.max_tries = max_tries
        self.tries = 0
        WatchedFileHandler.__init__(self, filename, mode, encoding, delay)

    def emit(self, record):
        while True:
            try:
                WatchedFileHandler.emit(self, record)
                self.tries = 0
                return
            except IOError as err:
                if self.tries == self.max_tries:
                    raise
                self.stream.close()
                self.stream = self._open()
                self.tries += 1

class Result(object):
    total = 0
    runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0}

    def __init__(self):
        self.starttime = None
        self.returncode = None
        self.runtime = ''
        self.stdout = []
        self.stderr = []
        self.result = ''

    def done(self, proc, killed):
        """
        Finalize the results of this Cmd.
        """
        Result.total += 1
        m, s = divmod(time() - self.starttime, 60)
        self.runtime = '%02d:%02d' % (m, s)
        self.returncode = proc.returncode
        if killed:
            self.result = 'KILLED'
            Result.runresults['KILLED'] += 1
        elif self.returncode == 0:
            self.result = 'PASS'
            Result.runresults['PASS'] += 1
        elif self.returncode != 0:
            self.result = 'FAIL'
            Result.runresults['FAIL'] += 1


class Output(object):
    """
    This class is a slightly modified version of the 'Stream' class found
    here: http://goo.gl/aSGfv
    """
    def __init__(self, stream):
        self.stream = stream
        self._buf = ''
        self.lines = []

    def fileno(self):
        return self.stream.fileno()

    def read(self, drain=0):
        """
        Read from the file descriptor. If 'drain' is set, read until EOF.
        """
        while self._read() is not None:
            if not drain:
                break

    def _read(self):
        """
        Read up to 4k of data from this output stream. Collect the output
        up to the last newline, and append it to any leftover data from a
        previous call. The lines are stored as a (timestamp, data) tuple
        for easy sorting/merging later.
        """
        fd = self.fileno()
        buf = os.read(fd, 4096)
        if not buf:
            return None
        if '\n' not in buf:
            self._buf += buf
            return []

        buf = self._buf + buf
        tmp, rest = buf.rsplit('\n', 1)
        self._buf = rest
        now = datetime.now()
        rows = tmp.split('\n')
        self.lines += [(now, r) for r in rows]


class Cmd(object):
    verified_users = []

    def __init__(self, pathname, outputdir=None, timeout=None, user=None):
        self.pathname = pathname
        self.outputdir = outputdir or BASEDIR
        self.timeout = timeout
        self.user = user or ''
        self.killed = False
        self.result = Result()

        if self.timeout is None:
            self.timeout = 60

    def __str__(self):
        return "Pathname: %s\nOutputdir: %s\nTimeout: %s\nUser: %s\n" % \
            (self.pathname, self.outputdir, self.timeout, self.user)

    def kill_cmd(self, proc):
        """
        Kill a running command due to timeout, or ^C from the keyboard. If
        sudo is required, this user was verified previously.
        """
        self.killed = True
        do_sudo = len(self.user) != 0
        signal = '-TERM'

        cmd = [SUDO, KILL, signal, str(proc.pid)]
        if not do_sudo:
            del cmd[0]

        try:
            kp = Popen(cmd)
            kp.wait()
        except:
            pass

    def update_cmd_privs(self, cmd, user):
        """
        If a user has been specified to run this Cmd and we're not already
        running as that user, prepend the appropriate sudo command to run
        as that user.
        """
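        # Illustrative example (hypothetical path and user): running
        # '/opt/tests/tst.foo' as user 'staff' yields the command list
        # ['/usr/bin/sudo', '-E', '-u', 'staff', '/opt/tests/tst.foo'].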
        me = getpwuid(os.getuid())

        if not user or user == me.pw_name:
            return cmd

        ret = '%s -E -u %s %s' % (SUDO, user, cmd)
        return ret.split(' ')

    def collect_output(self, proc):
        """
        Read from stdout/stderr as data becomes available, until the
        process is no longer running. Return the lines from the stdout and
        stderr Output objects.
        """
        out = Output(proc.stdout)
        err = Output(proc.stderr)
        res = []
        while proc.returncode is None:
            proc.poll()
            res = select([out, err], [], [], .1)
            for fd in res[0]:
                fd.read()
        for fd in res[0]:
            fd.read(drain=1)

        return out.lines, err.lines

    def run(self, options):
        """
        This is the main function that runs each individual test.
        Determine whether or not the command requires sudo, and modify it
        if needed. Run the command, and update the result object.
        """
        if options.dryrun is True:
            print self
            return

        privcmd = self.update_cmd_privs(self.pathname, self.user)
        try:
            old = os.umask(0)
            if not os.path.isdir(self.outputdir):
                os.makedirs(self.outputdir, mode=0777)
            os.umask(old)
        except OSError, e:
            fail('%s' % e)

        try:
            self.result.starttime = time()
            proc = Popen(privcmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
            proc.stdin.close()

            # Allow a special timeout value of 0 to mean infinity
            if int(self.timeout) == 0:
                self.timeout = maxint
            t = Timer(int(self.timeout), self.kill_cmd, [proc])
            t.start()
            self.result.stdout, self.result.stderr = self.collect_output(proc)
        except KeyboardInterrupt:
            self.kill_cmd(proc)
            fail('\nRun terminated at user request.')
        finally:
            t.cancel()

        self.result.done(proc, self.killed)

    def skip(self):
        """
        Initialize enough of the test result that we can log a skipped
        command.
        """
        Result.total += 1
        Result.runresults['SKIP'] += 1
        self.result.stdout = self.result.stderr = []
        self.result.starttime = time()
        m, s = divmod(time() - self.result.starttime, 60)
        self.result.runtime = '%02d:%02d' % (m, s)
        self.result.result = 'SKIP'

    def log(self, logger, options):
        """
        This function is responsible for writing all output. This includes
        the console output, the logfile of all results (with timestamped
        merged stdout and stderr), and for each test, the unmodified
        stdout/stderr/merged in its own file.
        """
        if logger is None:
            return

        logname = getpwuid(os.getuid()).pw_name
        user = ' (run as %s)' % (self.user if len(self.user) else logname)
        msga = 'Test: %s%s ' % (self.pathname, user)
        msgb = '[%s] [%s]' % (self.result.runtime, self.result.result)
        pad = ' ' * (80 - (len(msga) + len(msgb)))

        # If -q is specified, only print a line for tests that didn't pass.
        # This means passing tests need to be logged as DEBUG, or the one
        # line summary will only be printed in the logfile for failures.
        if not options.quiet:
            logger.info('%s%s%s' % (msga, pad, msgb))
        elif self.result.result != 'PASS':
            logger.info('%s%s%s' % (msga, pad, msgb))
        else:
            logger.debug('%s%s%s' % (msga, pad, msgb))

        lines = sorted(self.result.stdout + self.result.stderr,
                       key=lambda x: x[0])

        for dt, line in lines:
            logger.debug('%s %s' % (dt.strftime("%H:%M:%S.%f ")[:11], line))

        if len(self.result.stdout):
            with open(os.path.join(self.outputdir, 'stdout'), 'w') as out:
                for _, line in self.result.stdout:
                    os.write(out.fileno(), '%s\n' % line)
        if len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'stderr'), 'w') as err:
                for _, line in self.result.stderr:
                    os.write(err.fileno(), '%s\n' % line)
        if len(self.result.stdout) and len(self.result.stderr):
            with open(os.path.join(self.outputdir, 'merged'), 'w') as merged:
                for _, line in lines:
                    os.write(merged.fileno(), '%s\n' % line)


class Test(Cmd):
    props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post',
             'post_user']

    def __init__(self, pathname, outputdir=None, timeout=None, user=None,
                 pre=None, pre_user=None, post=None, post_user=None):
        super(Test, self).__init__(pathname, outputdir, timeout, user)
        self.pre = pre or ''
        self.pre_user = pre_user or ''
        self.post = post or ''
        self.post_user = post_user or ''

    def __str__(self):
        post_user = pre_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        return "Pathname: %s\nOutputdir: %s\nTimeout: %d\nPre: %s%s\nPost: " \
               "%s%s\nUser: %s\n" % \
               (self.pathname, self.outputdir, self.timeout, self.pre,
                pre_user, self.post, post_user, self.user)

    def verify(self, logger):
        """
        Check the pre/post scripts, user and Test. Omit the Test from this
        run if there are any problems.
        """
        files = [self.pre, self.pathname, self.post]
        users = [self.pre_user, self.user, self.post_user]

        for f in [f for f in files if len(f)]:
            if not verify_file(f):
                logger.info("Warning: Test '%s' not added to this run because"
                            " it failed verification." % f)
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user, logger):
                logger.info("Not adding Test '%s' to this run." %
                            self.pathname)
                return False

        return True

    def run(self, logger, options):
        """
        Create Cmd instances for the pre/post scripts. If the pre script
        doesn't pass, skip this Test. Run the post script regardless.
        """
        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, outputdir=odir, timeout=self.timeout,
                      user=self.pre_user)
        test = Cmd(self.pathname, outputdir=self.outputdir,
                   timeout=self.timeout, user=self.user)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, outputdir=odir, timeout=self.timeout,
                       user=self.post_user)

        cont = True
        if len(pretest.pathname):
            pretest.run(options)
            cont = pretest.result.result == 'PASS'
            pretest.log(logger, options)

        if cont:
            test.run(options)
        else:
            test.skip()

        test.log(logger, options)

        if len(posttest.pathname):
            posttest.run(options)
            posttest.log(logger, options)


class TestGroup(Test):
    props = Test.props + ['tests']

    def __init__(self, pathname, outputdir=None, timeout=None, user=None,
                 pre=None, pre_user=None, post=None, post_user=None,
                 tests=None):
        super(TestGroup, self).__init__(pathname, outputdir, timeout, user,
                                        pre, pre_user, post, post_user)
        self.tests = tests or []

    def __str__(self):
        post_user = pre_user = ''
        if len(self.pre_user):
            pre_user = ' (as %s)' % (self.pre_user)
        if len(self.post_user):
            post_user = ' (as %s)' % (self.post_user)
        return "Pathname: %s\nOutputdir: %s\nTests: %s\nTimeout: %d\n" \
               "Pre: %s%s\nPost: %s%s\nUser: %s\n" % \
               (self.pathname, self.outputdir, self.tests, self.timeout,
                self.pre, pre_user, self.post, post_user, self.user)

    def verify(self, logger):
        """
        Check the pre/post scripts, user and tests in this TestGroup. Omit
        the TestGroup entirely, or simply delete the relevant tests in the
        group, if that's all that's required.
        """
        # If the pre or post scripts are relative pathnames, convert to
        # absolute, so they stand a chance of passing verification.
        if len(self.pre) and not os.path.isabs(self.pre):
            self.pre = os.path.join(self.pathname, self.pre)
        if len(self.post) and not os.path.isabs(self.post):
            self.post = os.path.join(self.pathname, self.post)

        auxfiles = [self.pre, self.post]
        users = [self.pre_user, self.user, self.post_user]

        for f in [f for f in auxfiles if len(f)]:
            if self.pathname != os.path.dirname(f):
                logger.info("Warning: TestGroup '%s' not added to this run. "
                            "Auxiliary script '%s' exists in a different "
                            "directory." % (self.pathname, f))
                return False

            if not verify_file(f):
                logger.info("Warning: TestGroup '%s' not added to this run. "
                            "Auxiliary script '%s' failed verification." %
                            (self.pathname, f))
                return False

        for user in [user for user in users if len(user)]:
            if not verify_user(user, logger):
                logger.info("Not adding TestGroup '%s' to this run." %
                            self.pathname)
                return False

        # If one of the tests is invalid, delete it, log it, and drive on.
        self.tests[:] = [f for f in self.tests if
          verify_file(os.path.join(self.pathname, f))]

        return len(self.tests) != 0

    def run(self, logger, options):
        """
        Create Cmd instances for the pre/post scripts. If the pre script
        doesn't pass, skip all the tests in this TestGroup. Run the post
        script regardless.
        """
        odir = os.path.join(self.outputdir, os.path.basename(self.pre))
        pretest = Cmd(self.pre, outputdir=odir, timeout=self.timeout,
                      user=self.pre_user)
        odir = os.path.join(self.outputdir, os.path.basename(self.post))
        posttest = Cmd(self.post, outputdir=odir, timeout=self.timeout,
                       user=self.post_user)

        cont = True
        if len(pretest.pathname):
            pretest.run(options)
            cont = pretest.result.result == 'PASS'
            pretest.log(logger, options)

        for fname in self.tests:
            test = Cmd(os.path.join(self.pathname, fname),
                       outputdir=os.path.join(self.outputdir, fname),
                       timeout=self.timeout, user=self.user)
            if cont:
                test.run(options)
            else:
                test.skip()

            test.log(logger, options)

        if len(posttest.pathname):
            posttest.run(options)
            posttest.log(logger, options)


class TestRun(object):
    props = ['quiet', 'outputdir']

    def __init__(self, options):
        self.tests = {}
        self.testgroups = {}
        self.starttime = time()
        self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S')
        self.outputdir = os.path.join(options.outputdir, self.timestamp)
        self.logger = self.setup_logging(options)
        self.defaults = [
            ('outputdir', BASEDIR),
            ('quiet', False),
            ('timeout', 60),
            ('user', ''),
            ('pre', ''),
            ('pre_user', ''),
            ('post', ''),
            ('post_user', '')
        ]

    def __str__(self):
        s = 'TestRun:\n    outputdir: %s\n' % self.outputdir
        s += 'TESTS:\n'
        for key in sorted(self.tests.keys()):
            s += '%s%s' % (self.tests[key].__str__(), '\n')
        s += 'TESTGROUPS:\n'
        for key in sorted(self.testgroups.keys()):
            s += '%s%s' % (self.testgroups[key].__str__(), '\n')
        return s

    def addtest(self, pathname, options):
        """
        Create a new Test, and apply any properties that were passed in
        from the command line. If it passes verification, add it to the
        TestRun.
        """
        test = Test(pathname)
        for prop in Test.props:
            setattr(test, prop, getattr(options, prop))

        if test.verify(self.logger):
            self.tests[pathname] = test

    def addtestgroup(self, dirname, filenames, options):
        """
        Create a new TestGroup, and apply any properties that were passed
        in from the command line. If it passes verification, add it to the
        TestRun.
        """
        if dirname not in self.testgroups:
            testgroup = TestGroup(dirname)
            for prop in Test.props:
                setattr(testgroup, prop, getattr(options, prop))

            # Prevent pre/post scripts from running as regular tests
            for f in [testgroup.pre, testgroup.post]:
                if f in filenames:
                    del filenames[filenames.index(f)]

            self.testgroups[dirname] = testgroup
            self.testgroups[dirname].tests = sorted(filenames)

            testgroup.verify(self.logger)

    def read(self, logger, options):
        """
        Read in the specified runfile, and apply the TestRun properties
        listed in the 'DEFAULT' section to our TestRun. Then read each
        section, and apply the appropriate properties to the Test or
        TestGroup. Properties from individual sections override those set
        in the 'DEFAULT' section. If the Test or TestGroup passes
        verification, add it to the TestRun.
        """
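        # A minimal runfile might look like this (illustrative example; the
        # section name and test names are hypothetical):
        #
        #   [DEFAULT]
        #   quiet = False
        #   timeout = 120
        #   outputdir = /var/tmp/test_results
        #
        #   [/opt/my-tests/tests/functional]
        #   tests = ['tst.first', 'tst.second']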
        config = ConfigParser.RawConfigParser()
        if not len(config.read(options.runfile)):
            fail("Couldn't read config file %s" % options.runfile)

        for opt in TestRun.props:
            if config.has_option('DEFAULT', opt):
                setattr(self, opt, config.get('DEFAULT', opt))
        self.outputdir = os.path.join(self.outputdir, self.timestamp)

        for section in config.sections():
            if ('arch' in config.options(section) and
                platform.machine() != config.get(section, 'arch')):
                continue

            if 'tests' in config.options(section):
                testgroup = TestGroup(section)
                for prop in TestGroup.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            setattr(testgroup, prop, config.get(sect, prop))

                # Repopulate tests using eval to convert the string to a list
                testgroup.tests = eval(config.get(section, 'tests'))

                if testgroup.verify(logger):
                    self.testgroups[section] = testgroup

            elif 'autotests' in config.options(section):
                testgroup = TestGroup(section)
                for prop in TestGroup.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            setattr(testgroup, prop, config.get(sect, prop))

                filenames = os.listdir(section)
                # only files starting with "tst." are considered tests
                filenames = [f for f in filenames if f.startswith("tst.")]
                testgroup.tests = sorted(filenames)

                if testgroup.verify(logger):
                    self.testgroups[section] = testgroup

            else:
                test = Test(section)
                for prop in Test.props:
                    for sect in ['DEFAULT', section]:
                        if config.has_option(sect, prop):
                            setattr(test, prop, config.get(sect, prop))

                if test.verify(logger):
                    self.tests[section] = test

    def write(self, options):
        """
        Create a configuration file for editing and later use. The
        'DEFAULT' section of the config file is created from the
        properties that were specified on the command line. Tests are
        simply added as sections that inherit everything from the
        'DEFAULT' section. TestGroups are the same, except they get an
        option including all the tests to run in that directory.
        """

        defaults = dict([(prop, getattr(options, prop)) for prop, _ in
                         self.defaults])
        config = ConfigParser.RawConfigParser(defaults)

        for test in sorted(self.tests.keys()):
            config.add_section(test)

        for testgroup in sorted(self.testgroups.keys()):
            config.add_section(testgroup)
            config.set(testgroup, 'tests', self.testgroups[testgroup].tests)

        try:
            with open(options.template, 'w') as f:
                return config.write(f)
        except IOError:
            fail('Could not open \'%s\' for writing.' % options.template)

    def complete_outputdirs(self):
        """
        Collect all the pathnames for Tests, and TestGroups. Work
        backwards one pathname component at a time, to create a unique
        directory name in which to deposit test output. Tests will be able
        to write output files directly in the newly modified outputdir.
        TestGroups will be able to create one subdirectory per test in the
        outputdir, and are guaranteed uniqueness because a group can only
        contain files in one directory. Pre and post tests will create a
        directory rooted at the outputdir of the Test or TestGroup in
        question for their output.
        """
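        # Illustrative example (hypothetical paths): for the tests
        # /opt/a/tests/tst.foo and /opt/b/tests/tst.foo, the last component
        # ('tst.foo') collides, so the loop widens the suffix until
        # 'a/tests/tst.foo' and 'b/tests/tst.foo' are unique outputdirs
        # under the base output directory.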
        done = False
        components = 0
        tmp_dict = dict(self.tests.items() + self.testgroups.items())
        total = len(tmp_dict)
        base = self.outputdir

        while not done:
            l = []
            components -= 1
            for testfile in tmp_dict.keys():
                uniq = '/'.join(testfile.split('/')[components:]).lstrip('/')
                if uniq not in l:
                    l.append(uniq)
                    tmp_dict[testfile].outputdir = os.path.join(base, uniq)
                else:
                    break
            done = total == len(l)

    def setup_logging(self, options):
        """
        Two log handlers are set up here on a single logger. The first is
        for the logfile, which will contain one line summarizing the test,
        including the test name, result, and running time. This handler
        will also capture the timestamped combined stdout and stderr of
        each run. The second handler provides optional console output,
        which will contain only the one line summary. The handlers are
        initialized at two different levels to facilitate segregating the
        output.
        """
        if options.dryrun is True:
            return

        testlogger = logging.getLogger(__name__)
        testlogger.setLevel(logging.DEBUG)

        if options.cmd != 'wrconfig':
            try:
                old = os.umask(0)
                os.makedirs(self.outputdir, mode=0777)
                os.umask(old)
            except OSError, e:
                fail('%s' % e)
            filename = os.path.join(self.outputdir, 'log')

            logfile = WatchedFileHandlerClosed(filename)
            logfile.setLevel(logging.DEBUG)
            logfilefmt = logging.Formatter('%(message)s')
            logfile.setFormatter(logfilefmt)
            testlogger.addHandler(logfile)

        cons = logging.StreamHandler()
        cons.setLevel(logging.INFO)
        consfmt = logging.Formatter('%(message)s')
        cons.setFormatter(consfmt)
        testlogger.addHandler(cons)

        return testlogger

    def run(self, options):
        """
        Walk through all the Tests and TestGroups, calling run().
        """
        if not options.dryrun:
            try:
                os.chdir(self.outputdir)
            except OSError:
                fail('Could not change to directory %s' % self.outputdir)
        for test in sorted(self.tests.keys()):
            self.tests[test].run(self.logger, options)
        for testgroup in sorted(self.testgroups.keys()):
            self.testgroups[testgroup].run(self.logger, options)

    def summary(self):
        if Result.total == 0:
            return

        print '\nResults Summary'
        for key in Result.runresults.keys():
            if Result.runresults[key] != 0:
                print '%s\t% 4d' % (key, Result.runresults[key])

        m, s = divmod(time() - self.starttime, 60)
        h, m = divmod(m, 60)
        print '\nRunning Time:\t%02d:%02d:%02d' % (h, m, s)
        print 'Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) /
                                            float(Result.total)) * 100)
        print 'Log directory:\t%s' % self.outputdir


def verify_file(pathname):
    """
    Verify that the supplied pathname is an executable regular file.
    """
    if os.path.isdir(pathname) or os.path.islink(pathname):
        return False

    if os.path.isfile(pathname) and os.access(pathname, os.X_OK):
        return True

    return False


def verify_user(user, logger):
    """
    Verify that the specified user exists on this system, and can execute
    sudo without being prompted for a password.
    """
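    # 'sudo -n' fails rather than prompting for a password, so this check can
    # confirm passwordless sudo access without hanging the run.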
    testcmd = [SUDO, '-n', '-u', user, TRUE]

    if user in Cmd.verified_users:
        return True

    try:
        _ = getpwnam(user)
    except KeyError:
        logger.info("Warning: user '%s' does not exist.", user)
        return False

    p = Popen(testcmd)
    p.wait()
    if p.returncode != 0:
        logger.info("Warning: user '%s' cannot use passwordless sudo.", user)
        return False
    else:
        Cmd.verified_users.append(user)

    return True


def find_tests(testrun, options):
    """
    For the given list of pathnames, add files as Tests. For directories,
    if do_groups is True, add the directory as a TestGroup. If False,
    recursively search for executable files.
    """

    for p in sorted(options.pathnames):
        if os.path.isdir(p):
            for dirname, _, filenames in os.walk(p):
                if options.do_groups:
                    testrun.addtestgroup(dirname, filenames, options)
                else:
                    for f in sorted(filenames):
                        testrun.addtest(os.path.join(dirname, f), options)
        else:
            testrun.addtest(p, options)


def fail(retstr, ret=1):
    print '%s: %s' % (argv[0], retstr)
    exit(ret)


def options_cb(option, opt_str, value, parser):
    path_options = ['runfile', 'outputdir', 'template']

    if option.dest == 'runfile' and '-w' in parser.rargs or \
            option.dest == 'template' and '-c' in parser.rargs:
        fail('-c and -w are mutually exclusive.')

    if opt_str in parser.rargs:
        fail('%s may only be specified once.' % opt_str)

    if option.dest == 'runfile':
        parser.values.cmd = 'rdconfig'
    if option.dest == 'template':
        parser.values.cmd = 'wrconfig'

    setattr(parser.values, option.dest, value)
    if option.dest in path_options:
        setattr(parser.values, option.dest, os.path.abspath(value))


def parse_args():
    parser = OptionParser()
    parser.add_option('-c', action='callback', callback=options_cb,
                      type='string', dest='runfile', metavar='runfile',
                      help='Specify tests to run via config file.')
    parser.add_option('-d', action='store_true', default=False, dest='dryrun',
                      help='Dry run. Print tests, but take no other action.')
    parser.add_option('-g', action='store_true', default=False,
                      dest='do_groups', help='Make directories TestGroups.')
    parser.add_option('-o', action='callback', callback=options_cb,
                      default=BASEDIR, dest='outputdir', type='string',
                      metavar='outputdir', help='Specify an output directory.')
    parser.add_option('-p', action='callback', callback=options_cb,
                      default='', dest='pre', metavar='script',
                      type='string', help='Specify a pre script.')
    parser.add_option('-P', action='callback', callback=options_cb,
                      default='', dest='post', metavar='script',
                      type='string', help='Specify a post script.')
    parser.add_option('-q', action='store_true', default=False, dest='quiet',
                      help='Silence on the console during a test run.')
    parser.add_option('-t', action='callback', callback=options_cb, default=60,
                      dest='timeout', metavar='seconds', type='int',
                      help='Timeout (in seconds) for an individual test.')
    parser.add_option('-u', action='callback', callback=options_cb,
                      default='', dest='user', metavar='user', type='string',
                      help='Specify a different user name to run as.')
    parser.add_option('-w', action='callback', callback=options_cb,
                      default=None, dest='template', metavar='template',
                      type='string', help='Create a new config file.')
    parser.add_option('-x', action='callback', callback=options_cb, default='',
                      dest='pre_user', metavar='pre_user', type='string',
                      help='Specify a user to execute the pre script.')
    parser.add_option('-X', action='callback', callback=options_cb, default='',
                      dest='post_user', metavar='post_user', type='string',
                      help='Specify a user to execute the post script.')
    (options, pathnames) = parser.parse_args()

    if not options.runfile and not options.template:
        options.cmd = 'runtests'

    if options.runfile and len(pathnames):
        fail('Extraneous arguments.')

    options.pathnames = [os.path.abspath(path) for path in pathnames]

    return options


def main():
    options = parse_args()
    testrun = TestRun(options)

    if options.cmd == 'runtests':
        find_tests(testrun, options)
    elif options.cmd == 'rdconfig':
        testrun.read(testrun.logger, options)
    elif options.cmd == 'wrconfig':
        find_tests(testrun, options)
        testrun.write(options)
        exit(0)
    else:
        fail('Unknown command specified')


    testrun.complete_outputdirs()
    testrun.run(options)
    testrun.summary()
    exit(0)


if __name__ == '__main__':
    main()