Diffstat (limited to 'test/py')
-rw-r--r--  test/py/README.md                      |  60
-rw-r--r--  test/py/conftest.py                    | 189
-rw-r--r--  test/py/multiplexed_log.css            |  41
-rw-r--r--  test/py/multiplexed_log.py             | 142
-rwxr-xr-x  test/py/test.py                        |   1
-rw-r--r--  test/py/tests/test_sleep.py            |   7
-rw-r--r--  test/py/tests/test_ut.py               |  29
-rw-r--r--  test/py/u_boot_console_base.py         |  20
-rw-r--r--  test/py/u_boot_console_exec_attach.py  |  26
-rw-r--r--  test/py/u_boot_console_sandbox.py      |   5
-rw-r--r--  test/py/u_boot_spawn.py                |  22
11 files changed, 400 insertions, 142 deletions
diff --git a/test/py/README.md b/test/py/README.md
index 8036299d07..ba1674cb1d 100644
--- a/test/py/README.md
+++ b/test/py/README.md
@@ -76,6 +76,43 @@ will be written to `${build_dir}/test-log.html`. This is best viewed in a web
 browser, but may be read directly as plain text, perhaps with the aid of the
 `html2text` utility.
 
+### Testing under a debugger
+
+If you need to run sandbox under a debugger, you may pass the command-line
+option `--gdbserver COMM`. This causes two things to happen:
+
+- Instead of running U-Boot directly, it will be run under gdbserver, with
+  debug communication via the channel `COMM`. You can attach a debugger to the
+  sandbox process in order to debug it. See `man gdbserver` and the example
+  below for details of valid values for `COMM`.
+- All timeouts in tests are disabled, allowing U-Boot an arbitrary amount of
+  time to execute commands. This is useful if U-Boot is stopped at a breakpoint
+  during debugging.
+
+A usage example is:
+
+Window 1:
+```shell
+./test/py/test.py --bd sandbox --gdbserver localhost:1234
+```
+
+Window 2:
+```shell
+gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'
+```
+
+Alternatively, you could leave off the `-ex` option and type the command
+manually into gdb once it starts.
+
+You can use any debugger you wish, so long as it speaks the gdb remote
+protocol, or any graphical wrapper around gdb.
+
+Some tests deliberately cause the sandbox process to exit, e.g. to test the
+reset command, or sandbox's CTRL-C handling. When this happens, you will need
+to attach the debugger to the new sandbox instance. If these tests are not
+relevant to your debugging session, you can skip them using pytest's -k
+command-line option; see the next section.
+
 ## Command-line options
 
 - `--board-type`, `--bd`, `-B` set the type of the board to be tested. For
@@ -98,10 +135,25 @@ browser, but may be read directly as plain text, perhaps with the aid of the
   data. This is test data that may be re-used across test runs, such as file-
   system images.
 
-`pytest` also implements a number of its own command-line options. Please see
-`pytest` documentation for complete details. Execute `py.test --version` for
-a brief summary. Note that U-Boot's test.py script passes all command-line
-arguments directly to `pytest` for processing.
+`pytest` also implements a number of its own command-line options. Commonly used
+options are mentioned below. Please see `pytest` documentation for complete
+details. Execute `py.test --version` for a brief summary. Note that U-Boot's
+test.py script passes all command-line arguments directly to `pytest` for
+processing.
+
+- `-k` selects which tests to run. The default is to run all known tests. This
+  option takes a single argument which is used to filter test names. Simple
+  logical operators are supported. For example:
+  - `'ums'` runs only tests with "ums" in their name.
+  - `'ut_dm'` runs only tests with "ut_dm" in their name. Note that in this
+    case, "ut_dm" is a parameter to a test rather than the test name. The full
+    test name is e.g. "test_ut[ut_dm_leak]".
+  - `'not reset'` runs everything except tests with "reset" in their name.
+  - `'ut or hush'` runs only tests with "ut" or "hush" in their name.
+  - `'not (ut or hush)'` runs everything except tests with "ut" or "hush" in
+    their name.
+- `-s` prevents pytest from hiding a test's stdout. This allows you to see
+  U-Boot's console log in real time on pytest's stdout.
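Both effects of `--gdbserver` described above are implemented further down in this patch: the sandbox console prepends `gdbserver` to the command it spawns, and the console code stops imposing its usual 30 s expect timeout. A condensed sketch of the shape of those changes (not the literal harness code; `config` stands for the `ubconfig` object that `conftest.py` builds):

```python
# Condensed sketch of the two --gdbserver effects; 'config' models the
# ubconfig object built by conftest.py.

def sandbox_command(config):
    """Build the sandbox command line, optionally wrapped in gdbserver."""
    cmd = []
    if config.gdbserver:
        # e.g. config.gdbserver == 'localhost:1234'
        cmd += ['gdbserver', config.gdbserver]
    cmd += [
        config.build_dir + '/u-boot',
        '-d', config.build_dir + '/arch/sandbox/dts/test.dtb',
    ]
    return cmd

def console_timeout(config):
    """Return the expect timeout in ms, or None to wait indefinitely."""
    # With a debugger attached, U-Boot may legitimately sit at a breakpoint
    # for minutes, so no timeout is applied at all.
    return None if config.gdbserver else 30000

class ExampleConfig(object):
    # Hypothetical values, for illustration only.
    gdbserver = 'localhost:1234'
    build_dir = './build-sandbox'

print(sandbox_command(ExampleConfig()))
print(console_timeout(ExampleConfig()))
```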
## Testing real hardware diff --git a/test/py/conftest.py b/test/py/conftest.py index 3e162cafcc..449f98bee3 100644 --- a/test/py/conftest.py +++ b/test/py/conftest.py @@ -17,10 +17,10 @@ import atexit import errno import os import os.path -import pexpect import pytest from _pytest.runner import runtestprotocol import ConfigParser +import re import StringIO import sys @@ -71,6 +71,9 @@ def pytest_addoption(parser): help='U-Boot board identity/instance') parser.addoption('--build', default=False, action='store_true', help='Compile U-Boot before running tests') + parser.addoption('--gdbserver', default=None, + help='Run sandbox under gdbserver. The argument is the channel '+ + 'over which gdbserver should communicate, e.g. localhost:1234') def pytest_configure(config): """pytest hook: Perform custom initialization at startup time. @@ -110,6 +113,10 @@ def pytest_configure(config): persistent_data_dir = build_dir + '/persistent-data' mkdir_p(persistent_data_dir) + gdbserver = config.getoption('gdbserver') + if gdbserver and board_type != 'sandbox': + raise Exception('--gdbserver only supported with sandbox') + import multiplexed_log log = multiplexed_log.Logfile(result_dir + '/test-log.html') @@ -122,10 +129,12 @@ def pytest_configure(config): ['make', o_opt, '-s', board_type + '_defconfig'], ['make', o_opt, '-s', '-j8'], ) - runner = log.get_runner('make', sys.stdout) - for cmd in cmds: - runner.run(cmd, cwd=source_dir) - runner.close() + with log.section('make'): + runner = log.get_runner('make', sys.stdout) + for cmd in cmds: + runner.run(cmd, cwd=source_dir) + runner.close() + log.status_pass('OK') class ArbitraryAttributeContainer(object): pass @@ -169,6 +178,7 @@ def pytest_configure(config): ubconfig.persistent_data_dir = persistent_data_dir ubconfig.board_type = board_type ubconfig.board_identity = board_identity + ubconfig.gdbserver = gdbserver env_vars = ( 'board_type', @@ -189,8 +199,42 @@ def pytest_configure(config): import u_boot_console_exec_attach console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig) -def pytest_generate_tests(metafunc): - """pytest hook: parameterize test functions based on custom rules. +re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$') +def generate_ut_subtest(metafunc, fixture_name): + """Provide parametrization for a ut_subtest fixture. + + Determines the set of unit tests built into a U-Boot binary by parsing the + list of symbols generated by the build process. Provides this information + to test functions by parameterizing their ut_subtest fixture parameter. + + Args: + metafunc: The pytest test function. + fixture_name: The fixture name to test. + + Returns: + Nothing. + """ + + fn = console.config.build_dir + '/u-boot.sym' + try: + with open(fn, 'rt') as f: + lines = f.readlines() + except: + lines = [] + lines.sort() + + vals = [] + for l in lines: + m = re_ut_test_list.search(l) + if not m: + continue + vals.append(m.group(1) + ' ' + m.group(2)) + + ids = ['ut_' + s.replace(' ', '_') for s in vals] + metafunc.parametrize(fixture_name, vals, ids=ids) + +def generate_config(metafunc, fixture_name): + """Provide parametrization for {env,brd}__ fixtures. If a test function takes parameter(s) (fixture names) of the form brd__xxx or env__xxx, the brd and env configuration dictionaries are consulted to @@ -199,6 +243,7 @@ def pytest_generate_tests(metafunc): Args: metafunc: The pytest test function. + fixture_name: The fixture name to test. Returns: Nothing. 
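The new `generate_ut_subtest()` above, together with the `generate_config()` helper carved out in the following hunks, provides all of the harness's custom parametrization. First, unit-test discovery: the set of `ut` subtests is derived from the symbol table the build writes to `build_dir + '/u-boot.sym'`. A standalone sketch of that parsing, using invented symbol lines:

```python
# Standalone sketch of generate_ut_subtest()'s parsing; the regex is taken
# from the hunk above, but the sample symbol lines are invented.
import re

re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')

def ut_subtests(sym_lines):
    """Return (parametrize values, test ids) for the 'ut' subtests found."""
    vals = []
    for line in sorted(sym_lines):
        m = re_ut_test_list.search(line)
        if m:
            # e.g. 'dm leak', later run as the U-Boot command 'ut dm leak'
            vals.append(m.group(1) + ' ' + m.group(2))
    ids = ['ut_' + v.replace(' ', '_') for v in vals]
    return vals, ids

sample = [
    '000000000004c700 D _u_boot_list_2_dm_test_2_dm_test_leak',
    '000000000004c750 D _u_boot_list_2_env_test_2_env_test_env_get',
]
print(ut_subtests(sample))
# -> (['dm leak', 'env env_get'], ['ut_dm_leak', 'ut_env_env_get'])
```

Second, the board/environment fixtures: `generate_config()` keeps the existing rule that an exact `env__*`/`brd__*` key yields a single parametrization value, while a key with a trailing `s` yields a list, each entry optionally naming its own `fixture_id`. A sketch of that lookup against a made-up board-environment dictionary:

```python
# Sketch of generate_config()'s value lookup; the dictionary contents are
# hypothetical examples of what a u_boot_boardenv_* module might define.
def fixture_values(subconfig, fixture_name):
    """Return (values, ids) used to parametrize fixture_name."""
    val = subconfig.get(fixture_name, [])
    if val:
        # The exact key exists: a single parametrization value.
        vals = (val,)
    else:
        # Otherwise a plural key may hold a list of values.
        vals = subconfig.get(fixture_name + 's', [])

    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            return fixture_name + str(index)

    ids = [fixture_id(i, v) for (i, v) in enumerate(vals)]
    return vals, ids

env = {
    'env__net_dhcp_server': True,
    'env__usb_dev_ports': [{'fixture_id': 'micro_b', 'tgt_usb_ctlr': '0'}],
}
print(fixture_values(env, 'env__net_dhcp_server'))  # ids -> ['env__net_dhcp_server0']
print(fixture_values(env, 'env__usb_dev_port'))     # ids -> ['micro_b']
```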
@@ -208,30 +253,49 @@ def pytest_generate_tests(metafunc): 'brd': console.config.brd, 'env': console.config.env, } + parts = fixture_name.split('__') + if len(parts) < 2: + return + if parts[0] not in subconfigs: + return + subconfig = subconfigs[parts[0]] + vals = [] + val = subconfig.get(fixture_name, []) + # If that exact name is a key in the data source: + if val: + # ... use the dict value as a single parameter value. + vals = (val, ) + else: + # ... otherwise, see if there's a key that contains a list of + # values to use instead. + vals = subconfig.get(fixture_name+ 's', []) + def fixture_id(index, val): + try: + return val['fixture_id'] + except: + return fixture_name + str(index) + ids = [fixture_id(index, val) for (index, val) in enumerate(vals)] + metafunc.parametrize(fixture_name, vals, ids=ids) + +def pytest_generate_tests(metafunc): + """pytest hook: parameterize test functions based on custom rules. + + Check each test function parameter (fixture name) to see if it is one of + our custom names, and if so, provide the correct parametrization for that + parameter. + + Args: + metafunc: The pytest test function. + + Returns: + Nothing. + """ + for fn in metafunc.fixturenames: - parts = fn.split('__') - if len(parts) < 2: + if fn == 'ut_subtest': + generate_ut_subtest(metafunc, fn) continue - if parts[0] not in subconfigs: - continue - subconfig = subconfigs[parts[0]] - vals = [] - val = subconfig.get(fn, []) - # If that exact name is a key in the data source: - if val: - # ... use the dict value as a single parameter value. - vals = (val, ) - else: - # ... otherwise, see if there's a key that contains a list of - # values to use instead. - vals = subconfig.get(fn + 's', []) - def fixture_id(index, val): - try: - return val["fixture_id"] - except: - return fn + str(index) - ids = [fixture_id(index, val) for (index, val) in enumerate(vals)] - metafunc.parametrize(fn, vals, ids=ids) + generate_config(metafunc, fn) @pytest.fixture(scope='function') def u_boot_console(request): @@ -247,12 +311,13 @@ def u_boot_console(request): console.ensure_spawned() return console -tests_not_run = set() -tests_failed = set() -tests_xpassed = set() -tests_xfailed = set() -tests_skipped = set() -tests_passed = set() +anchors = {} +tests_not_run = [] +tests_failed = [] +tests_xpassed = [] +tests_xfailed = [] +tests_skipped = [] +tests_passed = [] def pytest_itemcollected(item): """pytest hook: Called once for each test found during collection. @@ -267,7 +332,7 @@ def pytest_itemcollected(item): Nothing. """ - tests_not_run.add(item.name) + tests_not_run.append(item.name) def cleanup(): """Clean up all global state. @@ -286,27 +351,33 @@ def cleanup(): if console: console.close() if log: - log.status_pass('%d passed' % len(tests_passed)) - if tests_skipped: - log.status_skipped('%d skipped' % len(tests_skipped)) - for test in tests_skipped: - log.status_skipped('... ' + test) - if tests_xpassed: - log.status_xpass('%d xpass' % len(tests_xpassed)) - for test in tests_xpassed: - log.status_xpass('... ' + test) - if tests_xfailed: - log.status_xfail('%d xfail' % len(tests_xfailed)) - for test in tests_xfailed: - log.status_xfail('... ' + test) - if tests_failed: - log.status_fail('%d failed' % len(tests_failed)) - for test in tests_failed: - log.status_fail('... ' + test) - if tests_not_run: - log.status_fail('%d not run' % len(tests_not_run)) - for test in tests_not_run: - log.status_fail('... 
' + test) + with log.section('Status Report', 'status_report'): + log.status_pass('%d passed' % len(tests_passed)) + if tests_skipped: + log.status_skipped('%d skipped' % len(tests_skipped)) + for test in tests_skipped: + anchor = anchors.get(test, None) + log.status_skipped('... ' + test, anchor) + if tests_xpassed: + log.status_xpass('%d xpass' % len(tests_xpassed)) + for test in tests_xpassed: + anchor = anchors.get(test, None) + log.status_xpass('... ' + test, anchor) + if tests_xfailed: + log.status_xfail('%d xfail' % len(tests_xfailed)) + for test in tests_xfailed: + anchor = anchors.get(test, None) + log.status_xfail('... ' + test, anchor) + if tests_failed: + log.status_fail('%d failed' % len(tests_failed)) + for test in tests_failed: + anchor = anchors.get(test, None) + log.status_fail('... ' + test, anchor) + if tests_not_run: + log.status_fail('%d not run' % len(tests_not_run)) + for test in tests_not_run: + anchor = anchors.get(test, None) + log.status_fail('... ' + test, anchor) log.close() atexit.register(cleanup) @@ -372,7 +443,7 @@ def pytest_runtest_setup(item): Nothing. """ - log.start_section(item.name) + anchors[item.name] = log.start_section(item.name) setup_boardspec(item) setup_buildconfigspec(item) @@ -422,7 +493,7 @@ def pytest_runtest_protocol(item, nextitem): if failure_cleanup: console.drain_console() - test_list.add(item.name) + test_list.append(item.name) tests_not_run.remove(item.name) try: diff --git a/test/py/multiplexed_log.css b/test/py/multiplexed_log.css index f6240d52da..f135b10a24 100644 --- a/test/py/multiplexed_log.css +++ b/test/py/multiplexed_log.css @@ -25,37 +25,24 @@ pre { color: #808080; } -.section { +.block { border-style: solid; border-color: #303030; border-width: 0px 0px 0px 5px; padding-left: 5px } -.section-header { +.block-header { background-color: #303030; margin-left: -5px; margin-top: 5px; } -.section-trailer { - display: none; +.block-header:hover { + text-decoration: underline; } -.stream { - border-style: solid; - border-color: #303030; - border-width: 0px 0px 0px 5px; - padding-left: 5px -} - -.stream-header { - background-color: #303030; - margin-left: -5px; - margin-top: 5px; -} - -.stream-trailer { +.block-trailer { display: none; } @@ -94,3 +81,21 @@ pre { .status-fail { color: #ff0000 } + +.hidden { + display: none; +} + +a:link { + text-decoration: inherit; + color: inherit; +} + +a:visited { + text-decoration: inherit; + color: inherit; +} + +a:hover { + text-decoration: underline; +} diff --git a/test/py/multiplexed_log.py b/test/py/multiplexed_log.py index 69a577e577..68917eb0ea 100644 --- a/test/py/multiplexed_log.py +++ b/test/py/multiplexed_log.py @@ -168,12 +168,13 @@ class SectionCtxMgr(object): Objects of this type should be created by factory functions in the Logfile class rather than directly.""" - def __init__(self, log, marker): + def __init__(self, log, marker, anchor): """Initialize a new object. Args: log: The Logfile object to log to. marker: The name of the nested log section. + anchor: The anchor value to pass to start_section(). Returns: Nothing. 
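The `SectionCtxMgr` and `Logfile` changes here reduce to a small protocol: `start_section()` now returns an HTML anchor, and that anchor can be handed back to the `status_*()` helpers so the final status report links to each test's section (this is how the `anchors` dict in `conftest.py` above is filled and consumed). A minimal, self-contained model of the protocol, assuming a much simpler writer than the real HTML log:

```python
# Minimal model of the section/anchor protocol; the real Logfile also
# manages streams, HTML escaping, and the accompanying CSS and JavaScript.
class MiniLog(object):
    def __init__(self):
        self.anchor = 0
        self.html = []

    def start_section(self, marker, anchor=None):
        if not anchor:
            self.anchor += 1          # auto-generate a unique anchor
            anchor = str(self.anchor)
        self.html.append('<div class="section block" id="%s">%s' % (anchor, marker))
        return anchor

    def end_section(self, marker):
        self.html.append('</div> <!-- end %s -->' % marker)

    def status_pass(self, msg, anchor=None):
        if anchor:
            msg = '<a href="#%s">%s</a>' % (anchor, msg)
        self.html.append(msg)

log = MiniLog()
anchor = log.start_section('test_ut[ut_dm_leak]')   # as pytest_runtest_setup does
log.end_section('test_ut[ut_dm_leak]')
log.status_pass('... test_ut[ut_dm_leak]', anchor)  # summary links back here
print('\n'.join(log.html))
```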
@@ -181,9 +182,10 @@ class SectionCtxMgr(object): self.log = log self.marker = marker + self.anchor = anchor def __enter__(self): - self.log.start_section(self.marker) + self.anchor = self.log.start_section(self.marker, self.anchor) def __exit__(self, extype, value, traceback): self.log.end_section(self.marker) @@ -206,11 +208,70 @@ class Logfile(object): self.last_stream = None self.blocks = [] self.cur_evt = 1 + self.anchor = 0 + shutil.copy(mod_dir + '/multiplexed_log.css', os.path.dirname(fn)) self.f.write('''\ <html> <head> <link rel="stylesheet" type="text/css" href="multiplexed_log.css"> +<script src="http://code.jquery.com/jquery.min.js"></script> +<script> +$(document).ready(function () { + // Copy status report HTML to start of log for easy access + sts = $(".block#status_report")[0].outerHTML; + $("tt").prepend(sts); + + // Add expand/contract buttons to all block headers + btns = "<span class=\\\"block-expand hidden\\\">[+] </span>" + + "<span class=\\\"block-contract\\\">[-] </span>"; + $(".block-header").prepend(btns); + + // Pre-contract all blocks which passed, leaving only problem cases + // expanded, to highlight issues the user should look at. + // Only top-level blocks (sections) should have any status + passed_bcs = $(".block-content:has(.status-pass)"); + // Some blocks might have multiple status entries (e.g. the status + // report), so take care not to hide blocks with partial success. + passed_bcs = passed_bcs.not(":has(.status-fail)"); + passed_bcs = passed_bcs.not(":has(.status-xfail)"); + passed_bcs = passed_bcs.not(":has(.status-xpass)"); + passed_bcs = passed_bcs.not(":has(.status-skipped)"); + // Hide the passed blocks + passed_bcs.addClass("hidden"); + // Flip the expand/contract button hiding for those blocks. + bhs = passed_bcs.parent().children(".block-header") + bhs.children(".block-expand").removeClass("hidden"); + bhs.children(".block-contract").addClass("hidden"); + + // Add click handler to block headers. + // The handler expands/contracts the block. + $(".block-header").on("click", function (e) { + var header = $(this); + var content = header.next(".block-content"); + var expanded = !content.hasClass("hidden"); + if (expanded) { + content.addClass("hidden"); + header.children(".block-expand").first().removeClass("hidden"); + header.children(".block-contract").first().addClass("hidden"); + } else { + header.children(".block-contract").first().removeClass("hidden"); + header.children(".block-expand").first().addClass("hidden"); + content.removeClass("hidden"); + } + }); + + // When clicking on a link, expand the target block + $("a").on("click", function (e) { + var block = $($(this).attr("href")); + var header = block.children(".block-header"); + var content = block.children(".block-content").first(); + header.children(".block-contract").first().removeClass("hidden"); + header.children(".block-expand").first().addClass("hidden"); + content.removeClass("hidden"); + }); +}); +</script> </head> <body> <tt> @@ -273,45 +334,60 @@ class Logfile(object): if not self.last_stream: return self.f.write('</pre>\n') - self.f.write('<div class="stream-trailer" id="' + - self.last_stream.name + '">End stream: ' + + self.f.write('<div class="stream-trailer block-trailer">End stream: ' + self.last_stream.name + '</div>\n') self.f.write('</div>\n') + self.f.write('</div>\n') self.last_stream = None - def _note(self, note_type, msg): + def _note(self, note_type, msg, anchor=None): """Write a note or one-off message to the log file. Args: note_type: The type of note. 
This must be a value supported by the accompanying multiplexed_log.css. msg: The note/message to log. + anchor: Optional internal link target. Returns: Nothing. """ self._terminate_stream() - self.f.write('<div class="' + note_type + '">\n<pre>') + self.f.write('<div class="' + note_type + '">\n') + if anchor: + self.f.write('<a href="#%s">\n' % anchor) + self.f.write('<pre>') self.f.write(self._escape(msg)) - self.f.write('\n</pre></div>\n') + self.f.write('\n</pre>\n') + if anchor: + self.f.write('</a>\n') + self.f.write('</div>\n') - def start_section(self, marker): + def start_section(self, marker, anchor=None): """Begin a new nested section in the log file. Args: marker: The name of the section that is starting. + anchor: The value to use for the anchor. If None, a unique value + will be calculated and used Returns: - Nothing. + Name of the HTML anchor emitted before section. """ self._terminate_stream() self.blocks.append(marker) + if not anchor: + self.anchor += 1 + anchor = str(self.anchor) blk_path = '/'.join(self.blocks) - self.f.write('<div class="section" id="' + blk_path + '">\n') - self.f.write('<div class="section-header" id="' + blk_path + - '">Section: ' + blk_path + '</div>\n') + self.f.write('<div class="section block" id="' + anchor + '">\n') + self.f.write('<div class="section-header block-header">Section: ' + + blk_path + '</div>\n') + self.f.write('<div class="section-content block-content">\n') + + return anchor def end_section(self, marker): """Terminate the current nested section in the log file. @@ -331,12 +407,13 @@ class Logfile(object): (marker, '/'.join(self.blocks))) self._terminate_stream() blk_path = '/'.join(self.blocks) - self.f.write('<div class="section-trailer" id="section-trailer-' + - blk_path + '">End section: ' + blk_path + '</div>\n') + self.f.write('<div class="section-trailer block-trailer">' + + 'End section: ' + blk_path + '</div>\n') + self.f.write('</div>\n') self.f.write('</div>\n') self.blocks.pop() - def section(self, marker): + def section(self, marker, anchor=None): """Create a temporary section in the log file. This function creates a context manager for Python's "with" statement, @@ -349,12 +426,13 @@ class Logfile(object): Args: marker: The name of the nested section. + anchor: The anchor value to pass to start_section(). Returns: A context manager object. """ - return SectionCtxMgr(self, marker) + return SectionCtxMgr(self, marker, anchor) def error(self, msg): """Write an error note to the log file. @@ -404,65 +482,70 @@ class Logfile(object): self._note("action", msg) - def status_pass(self, msg): + def status_pass(self, msg, anchor=None): """Write a note to the log file describing test(s) which passed. Args: msg: A message describing the passed test(s). + anchor: Optional internal link target. Returns: Nothing. """ - self._note("status-pass", msg) + self._note("status-pass", msg, anchor) - def status_skipped(self, msg): + def status_skipped(self, msg, anchor=None): """Write a note to the log file describing skipped test(s). Args: msg: A message describing the skipped test(s). + anchor: Optional internal link target. Returns: Nothing. """ - self._note("status-skipped", msg) + self._note("status-skipped", msg, anchor) - def status_xfail(self, msg): + def status_xfail(self, msg, anchor=None): """Write a note to the log file describing xfailed test(s). Args: msg: A message describing the xfailed test(s). + anchor: Optional internal link target. Returns: Nothing. 
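Concretely, when `_note()` is given an anchor, the whole status line in the HTML log becomes a link back to the relevant section. A small sketch of the markup it emits (escaping and stream termination omitted):

```python
# Sketch of the markup an anchored note produces; the real _note() also
# HTML-escapes the message and terminates any open stream first.
def note_html(note_type, msg, anchor=None):
    out = '<div class="%s">\n' % note_type
    if anchor:
        out += '<a href="#%s">\n' % anchor
    out += '<pre>%s\n</pre>\n' % msg
    if anchor:
        out += '</a>\n'
    out += '</div>\n'
    return out

print(note_html('status-fail', '... test_sleep', '12'))
```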
""" - self._note("status-xfail", msg) + self._note("status-xfail", msg, anchor) - def status_xpass(self, msg): + def status_xpass(self, msg, anchor=None): """Write a note to the log file describing xpassed test(s). Args: msg: A message describing the xpassed test(s). + anchor: Optional internal link target. Returns: Nothing. """ - self._note("status-xpass", msg) + self._note("status-xpass", msg, anchor) - def status_fail(self, msg): + def status_fail(self, msg, anchor=None): """Write a note to the log file describing failed test(s). Args: msg: A message describing the failed test(s). + anchor: Optional internal link target. Returns: Nothing. """ - self._note("status-fail", msg) + self._note("status-fail", msg, anchor) def get_stream(self, name, chained_file=None): """Create an object to log a single stream's data into the log file. @@ -519,9 +602,10 @@ class Logfile(object): if stream != self.last_stream: self._terminate_stream() - self.f.write('<div class="stream" id="%s">\n' % stream.name) - self.f.write('<div class="stream-header" id="' + stream.name + - '">Stream: ' + stream.name + '</div>\n') + self.f.write('<div class="stream block">\n') + self.f.write('<div class="stream-header block-header">Stream: ' + + stream.name + '</div>\n') + self.f.write('<div class="stream-content block-content">\n') self.f.write('<pre>') if implicit: self.f.write('<span class="implicit">') diff --git a/test/py/test.py b/test/py/test.py index 95671d4737..74e560a4d3 100755 --- a/test/py/test.py +++ b/test/py/test.py @@ -30,3 +30,4 @@ except: print >>sys.stderr, ''' exec(py.test) failed; perhaps you are missing some dependencies? See test/py/README.md for the list.''' + sys.exit(1) diff --git a/test/py/tests/test_sleep.py b/test/py/tests/test_sleep.py index 74add891c3..5c1a2623fe 100644 --- a/test/py/tests/test_sleep.py +++ b/test/py/tests/test_sleep.py @@ -15,6 +15,7 @@ def test_sleep(u_boot_console): u_boot_console.run_command('sleep %d' % sleep_time) tend = time.time() elapsed = tend - tstart - delta_to_expected = abs(elapsed - sleep_time) - # 0.25s margin is hopefully enough to account for any system overhead. - assert delta_to_expected < 0.25 + assert elapsed >= sleep_time + if not u_boot_console.config.gdbserver: + # 0.25s margin is hopefully enough to account for any system overhead. + assert elapsed < (sleep_time + 0.25) diff --git a/test/py/tests/test_ut.py b/test/py/tests/test_ut.py new file mode 100644 index 0000000000..5c25a2d465 --- /dev/null +++ b/test/py/tests/test_ut.py @@ -0,0 +1,29 @@ +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+# +# SPDX-License-Identifier: GPL-2.0 + +import os.path +import pytest + +@pytest.mark.buildconfigspec('ut_dm') +def test_ut_dm_init(u_boot_console): + """Initialize data for ut dm tests.""" + + fn = u_boot_console.config.source_dir + '/testflash.bin' + if not os.path.exists(fn): + data = 'this is a test' + data += '\x00' * ((4 * 1024 * 1024) - len(data)) + with open(fn, 'wb') as fh: + fh.write(data) + + fn = u_boot_console.config.source_dir + '/spi.bin' + if not os.path.exists(fn): + data = '\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + +def test_ut(u_boot_console, ut_subtest): + """Execute a "ut" subtest.""" + + output = u_boot_console.run_command('ut ' + ut_subtest) + assert output.endswith('Failures: 0') diff --git a/test/py/u_boot_console_base.py b/test/py/u_boot_console_base.py index 392f8cb885..bc2bd767e4 100644 --- a/test/py/u_boot_console_base.py +++ b/test/py/u_boot_console_base.py @@ -17,8 +17,8 @@ import sys import u_boot_spawn # Regexes for text we expect U-Boot to send to the console. -pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}-[^\r\n]*)') -pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}-[^\r\n]*)') +pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))') +pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))') pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ') pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'') pattern_error_notification = re.compile('## Error: ') @@ -293,16 +293,17 @@ class ConsoleBase(object): if self.p: return try: + self.log.start_section('Starting U-Boot') self.at_prompt = False - self.log.action('Starting U-Boot') self.p = self.get_spawn() # Real targets can take a long time to scroll large amounts of # text if LCD is enabled. This value may need tweaking in the # future, possibly per-test to be optimal. This works for 'help' # on board 'seaboard'. - self.p.timeout = 30000 + if not self.config.gdbserver: + self.p.timeout = 30000 self.p.logfile_read = self.logstream - if self.config.buildconfig.get('CONFIG_SPL', False) == 'y': + if self.config.buildconfig.get('config_spl', False) == 'y': m = self.p.expect([pattern_u_boot_spl_signon] + self.bad_patterns) if m != 0: raise Exception('Bad pattern found on console: ' + @@ -311,12 +312,7 @@ class ConsoleBase(object): if m != 0: raise Exception('Bad pattern found on console: ' + self.bad_pattern_ids[m - 1]) - signon = self.p.after - build_idx = signon.find(', Build:') - if build_idx == -1: - self.u_boot_version_string = signon - else: - self.u_boot_version_string = signon[:build_idx] + self.u_boot_version_string = self.p.after while True: m = self.p.expect([self.prompt_escaped, pattern_stop_autoboot_prompt] + self.bad_patterns) @@ -333,6 +329,8 @@ class ConsoleBase(object): self.log.error(str(ex)) self.cleanup_spawn() raise + finally: + self.log.end_section('Starting U-Boot') def cleanup_spawn(self): """Shut down all interaction with the U-Boot instance. diff --git a/test/py/u_boot_console_exec_attach.py b/test/py/u_boot_console_exec_attach.py index 19520cb3b9..445b58dda6 100644 --- a/test/py/u_boot_console_exec_attach.py +++ b/test/py/u_boot_console_exec_attach.py @@ -35,11 +35,13 @@ class ConsoleExecAttach(ConsoleBase): # HW flow control would mean this could be infinite. 
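In `u_boot_console_base.py` above, the sign-on patterns are tightened so the match ends at the closing parenthesis of the build timestamp, letting `u_boot_version_string` use the match directly rather than trimming a ', Build:' suffix afterwards. A quick check of the new main pattern against an invented banner:

```python
# The pattern is copied from the hunk above; the banner text is invented.
import re

pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))')

banner = 'U-Boot 2016.03-00123-gabcdef0 (Mar 01 2016 - 12:34:56 -0700)'
m = pattern_u_boot_main_signon.search(banner)
print(m.group(1))
# Prints the full banner, up to and including the closing ')'.
```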
super(ConsoleExecAttach, self).__init__(log, config, max_fifo_fill=16) - self.log.action('Flashing U-Boot') - cmd = ['u-boot-test-flash', config.board_type, config.board_identity] - runner = self.log.get_runner(cmd[0], sys.stdout) - runner.run(cmd) - runner.close() + with self.log.section('flash'): + self.log.action('Flashing U-Boot') + cmd = ['u-boot-test-flash', config.board_type, config.board_identity] + runner = self.log.get_runner(cmd[0], sys.stdout) + runner.run(cmd) + runner.close() + self.log.status_pass('OK') def get_spawn(self): """Connect to a fresh U-Boot instance. @@ -56,10 +58,14 @@ class ConsoleExecAttach(ConsoleBase): args = [self.config.board_type, self.config.board_identity] s = Spawn(['u-boot-test-console'] + args) - self.log.action('Resetting board') - cmd = ['u-boot-test-reset'] + args - runner = self.log.get_runner(cmd[0], sys.stdout) - runner.run(cmd) - runner.close() + try: + self.log.action('Resetting board') + cmd = ['u-boot-test-reset'] + args + runner = self.log.get_runner(cmd[0], sys.stdout) + runner.run(cmd) + runner.close() + except: + s.close() + raise return s diff --git a/test/py/u_boot_console_sandbox.py b/test/py/u_boot_console_sandbox.py index a7263f30b8..3de0fe4a3b 100644 --- a/test/py/u_boot_console_sandbox.py +++ b/test/py/u_boot_console_sandbox.py @@ -39,7 +39,10 @@ class ConsoleSandbox(ConsoleBase): A u_boot_spawn.Spawn object that is attached to U-Boot. """ - cmd = [ + cmd = [] + if self.config.gdbserver: + cmd += ['gdbserver', self.config.gdbserver] + cmd += [ self.config.build_dir + '/u-boot', '-d', self.config.build_dir + '/arch/sandbox/dts/test.dtb' diff --git a/test/py/u_boot_spawn.py b/test/py/u_boot_spawn.py index 0f52d3e945..a5f4a8e91b 100644 --- a/test/py/u_boot_spawn.py +++ b/test/py/u_boot_spawn.py @@ -56,8 +56,12 @@ class Spawn(object): finally: os._exit(255) - self.poll = select.poll() - self.poll.register(self.fd, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP | select.POLLNVAL) + try: + self.poll = select.poll() + self.poll.register(self.fd, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP | select.POLLNVAL) + except: + self.close() + raise def kill(self, sig): """Send unix signal "sig" to the child process. @@ -142,16 +146,20 @@ class Spawn(object): earliest_pi = pi if earliest_m: pos = earliest_m.start() - posafter = earliest_m.end() + 1 + posafter = earliest_m.end() self.before = self.buf[:pos] self.after = self.buf[pos:posafter] self.buf = self.buf[posafter:] return earliest_pi tnow_s = time.time() - tdelta_ms = (tnow_s - tstart_s) * 1000 - if tdelta_ms > self.timeout: - raise Timeout() - events = self.poll.poll(self.timeout - tdelta_ms) + if self.timeout: + tdelta_ms = (tnow_s - tstart_s) * 1000 + poll_maxwait = self.timeout - tdelta_ms + if tdelta_ms > self.timeout: + raise Timeout() + else: + poll_maxwait = None + events = self.poll.poll(poll_maxwait) if not events: raise Timeout() c = os.read(self.fd, 1024) |
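Finally, `u_boot_spawn.py` above registers the poll object defensively, drops an off-by-one when recording the matched text (`posafter` no longer includes an extra character), and makes the overall expect timeout optional: when the console layer has not set a timeout (the `--gdbserver` case), `poll()` is called with `None` and simply waits until data arrives. A standalone sketch of that timeout handling, with a stand-in for `select.poll()`:

```python
# Sketch of the timeout handling now used in Spawn.expect(); timeout_ms
# plays the role of self.timeout (milliseconds), tstart_s of the loop's
# start time (seconds).
import time

def poll_wait(poll, timeout_ms, tstart_s):
    """Wait for console data, honouring an optional overall timeout."""
    if timeout_ms:
        tdelta_ms = (time.time() - tstart_s) * 1000
        if tdelta_ms > timeout_ms:
            raise Exception('Timeout')          # the real code raises Timeout()
        poll_maxwait = timeout_ms - tdelta_ms   # remaining budget for this wait
    else:
        poll_maxwait = None                     # no timeout: block until data
    return poll.poll(poll_maxwait)

class FakePoll(object):
    """Stand-in for select.poll(), for illustration only."""
    def poll(self, maxwait):
        return [('fd', 'POLLIN')] if maxwait is None else []

print(poll_wait(FakePoll(), None, time.time()))    # unbounded wait
print(poll_wait(FakePoll(), 30000, time.time()))   # bounded wait
```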