Diffstat (limited to 'test/py/conftest.py')
-rw-r--r--  test/py/conftest.py  |  69
1 file changed, 43 insertions(+), 26 deletions(-)
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 3e162cafcc..3012c8e495 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -71,6 +71,9 @@ def pytest_addoption(parser):
         help='U-Boot board identity/instance')
     parser.addoption('--build', default=False, action='store_true',
         help='Compile U-Boot before running tests')
+    parser.addoption('--gdbserver', default=None,
+        help='Run sandbox under gdbserver. The argument is the channel '+
+        'over which gdbserver should communicate, e.g. localhost:1234')
 
 def pytest_configure(config):
     """pytest hook: Perform custom initialization at startup time.
@@ -110,6 +113,10 @@ def pytest_configure(config):
     persistent_data_dir = build_dir + '/persistent-data'
     mkdir_p(persistent_data_dir)
 
+    gdbserver = config.getoption('gdbserver')
+    if gdbserver and board_type != 'sandbox':
+        raise Exception('--gdbserver only supported with sandbox')
+
     import multiplexed_log
     log = multiplexed_log.Logfile(result_dir + '/test-log.html')
 
@@ -122,10 +129,12 @@ def pytest_configure(config):
             ['make', o_opt, '-s', board_type + '_defconfig'],
             ['make', o_opt, '-s', '-j8'],
         )
-        runner = log.get_runner('make', sys.stdout)
-        for cmd in cmds:
-            runner.run(cmd, cwd=source_dir)
-        runner.close()
+        with log.section('make'):
+            runner = log.get_runner('make', sys.stdout)
+            for cmd in cmds:
+                runner.run(cmd, cwd=source_dir)
+            runner.close()
+            log.status_pass('OK')
 
     class ArbitraryAttributeContainer(object):
         pass
@@ -169,6 +178,7 @@ def pytest_configure(config):
     ubconfig.persistent_data_dir = persistent_data_dir
     ubconfig.board_type = board_type
     ubconfig.board_identity = board_identity
+    ubconfig.gdbserver = gdbserver
 
     env_vars = (
         'board_type',
@@ -247,6 +257,7 @@ def u_boot_console(request):
     console.ensure_spawned()
     return console
 
+anchors = {}
 tests_not_run = set()
 tests_failed = set()
 tests_xpassed = set()
@@ -286,27 +297,33 @@ def cleanup():
     if console:
         console.close()
     if log:
-        log.status_pass('%d passed' % len(tests_passed))
-        if tests_skipped:
-            log.status_skipped('%d skipped' % len(tests_skipped))
-            for test in tests_skipped:
-                log.status_skipped('... ' + test)
-        if tests_xpassed:
-            log.status_xpass('%d xpass' % len(tests_xpassed))
-            for test in tests_xpassed:
-                log.status_xpass('... ' + test)
-        if tests_xfailed:
-            log.status_xfail('%d xfail' % len(tests_xfailed))
-            for test in tests_xfailed:
-                log.status_xfail('... ' + test)
-        if tests_failed:
-            log.status_fail('%d failed' % len(tests_failed))
-            for test in tests_failed:
-                log.status_fail('... ' + test)
-        if tests_not_run:
-            log.status_fail('%d not run' % len(tests_not_run))
-            for test in tests_not_run:
-                log.status_fail('... ' + test)
+        with log.section('Status Report', 'status_report'):
+            log.status_pass('%d passed' % len(tests_passed))
+            if tests_skipped:
+                log.status_skipped('%d skipped' % len(tests_skipped))
+                for test in tests_skipped:
+                    anchor = anchors.get(test, None)
+                    log.status_skipped('... ' + test, anchor)
+            if tests_xpassed:
+                log.status_xpass('%d xpass' % len(tests_xpassed))
+                for test in tests_xpassed:
+                    anchor = anchors.get(test, None)
+                    log.status_xpass('... ' + test, anchor)
+            if tests_xfailed:
+                log.status_xfail('%d xfail' % len(tests_xfailed))
+                for test in tests_xfailed:
+                    anchor = anchors.get(test, None)
+                    log.status_xfail('... ' + test, anchor)
+            if tests_failed:
+                log.status_fail('%d failed' % len(tests_failed))
+                for test in tests_failed:
+                    anchor = anchors.get(test, None)
+                    log.status_fail('... ' + test, anchor)
+            if tests_not_run:
+                log.status_fail('%d not run' % len(tests_not_run))
+                for test in tests_not_run:
+                    anchor = anchors.get(test, None)
+                    log.status_fail('... ' + test, anchor)
     log.close()
 
 atexit.register(cleanup)
@@ -372,7 +389,7 @@ def pytest_runtest_setup(item):
        Nothing.
    """

-    log.start_section(item.name)
+    anchors[item.name] = log.start_section(item.name)
     setup_boardspec(item)
     setup_buildconfigspec(item)
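
For context beyond this diff: conftest.py only plumbs the new option through to
ubconfig.gdbserver; the sandbox console driver is the natural consumer when it
spawns the U-Boot binary. A minimal sketch of that spawn logic, assuming
get_spawn()/Spawn() helpers like those used elsewhere in test/py (neither
appears in this diff):

    # Hypothetical excerpt from the sandbox console driver.
    def get_spawn(self):
        cmd = []
        if self.config.gdbserver:
            # Wrap the sandbox binary in gdbserver, listening on the
            # channel passed via --gdbserver, e.g. localhost:1234.
            cmd += ['gdbserver', self.config.gdbserver]
        cmd += [self.config.build_dir + '/u-boot']
        return Spawn(cmd)

A debugging session would then look something like this (build directory path
assumed to be the default build-<board_type>):

    ./test/py/test.py --bd sandbox --gdbserver localhost:1234
    # ... and in a second terminal:
    gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'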