Diffstat (limited to 'test')
-rwxr-xr-x  test/cmd_repeat.sh                       29
-rw-r--r--  test/command_ut.c                       136
-rw-r--r--  test/dm/Makefile                          1
-rw-r--r--  test/dm/syscon.c                         17
-rw-r--r--  test/dm/usb.c                             2
-rw-r--r--  test/dm/video.c                         271
-rwxr-xr-x  test/fs/fs-test.sh                       14
-rw-r--r--  test/py/.gitignore                        1
-rw-r--r--  test/py/README.md                       300
-rw-r--r--  test/py/conftest.py                     422
-rw-r--r--  test/py/multiplexed_log.css              88
-rw-r--r--  test/py/multiplexed_log.py              515
-rw-r--r--  test/py/pytest.ini                       11
-rwxr-xr-x  test/py/test.py                          32
-rw-r--r--  test/py/tests/test_000_version.py        20
-rw-r--r--  test/py/tests/test_env.py               221
-rw-r--r--  test/py/tests/test_help.py                9
-rw-r--r--  test/py/tests/test_hush_if_test.py      154
-rw-r--r--  test/py/tests/test_md.py                 36
-rw-r--r--  test/py/tests/test_sandbox_exit.py       24
-rw-r--r--  test/py/tests/test_shell_basics.py       42
-rw-r--r--  test/py/tests/test_sleep.py              24
-rw-r--r--  test/py/tests/test_ums.py                94
-rw-r--r--  test/py/tests/test_unknown_cmd.py        14
-rw-r--r--  test/py/u_boot_console_base.py          360
-rw-r--r--  test/py/u_boot_console_exec_attach.py    65
-rw-r--r--  test/py/u_boot_console_sandbox.py        79
-rw-r--r--  test/py/u_boot_spawn.py                 174
28 files changed, 2988 insertions, 167 deletions
diff --git a/test/cmd_repeat.sh b/test/cmd_repeat.sh
deleted file mode 100755
index 990e79900f..0000000000
--- a/test/cmd_repeat.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-# Test for U-Boot cli including command repeat
-
-BASE="$(dirname $0)"
-. $BASE/common.sh
-
-run_test() {
- ./${OUTPUT_DIR}/u-boot <<END
-setenv ctrlc_ignore y
-md 0
-
-reset
-END
-}
-check_results() {
- echo "Check results"
-
- grep -q 00000100 ${tmp} || fail "Command did not repeat"
-}
-
-echo "Test CLI repeat"
-echo
-tmp="$(tempfile)"
-build_uboot
-run_test >${tmp}
-check_results ${tmp}
-rm ${tmp}
-echo "Test passed"
diff --git a/test/command_ut.c b/test/command_ut.c
index 926573a395..54bf62b9bc 100644
--- a/test/command_ut.c
+++ b/test/command_ut.c
@@ -7,9 +7,6 @@
#define DEBUG
#include <common.h>
-#ifdef CONFIG_SANDBOX
-#include <os.h>
-#endif
static const char test_cmd[] = "setenv list 1\n setenv list ${list}2; "
"setenv list ${list}3\0"
@@ -20,21 +17,6 @@ static int do_ut_cmd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
printf("%s: Testing commands\n", __func__);
run_command("env default -f -a", 0);
- /* run a single command */
- run_command("setenv single 1", 0);
- assert(!strcmp("1", getenv("single")));
-
- /* make sure that compound statements work */
-#ifdef CONFIG_SYS_HUSH_PARSER
- run_command("if test -n ${single} ; then setenv check 1; fi", 0);
- assert(!strcmp("1", getenv("check")));
- run_command("setenv check", 0);
-#endif
-
- /* commands separated by ; */
- run_command_list("setenv list 1; setenv list ${list}1", -1, 0);
- assert(!strcmp("11", getenv("list")));
-
/* commands separated by \n */
run_command_list("setenv list 1\n setenv list ${list}1", -1, 0);
assert(!strcmp("11", getenv("list")));
@@ -43,11 +25,6 @@ static int do_ut_cmd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
run_command_list("setenv list 1${list}\n", -1, 0);
assert(!strcmp("111", getenv("list")));
- /* three commands in a row */
- run_command_list("setenv list 1\n setenv list ${list}2; "
- "setenv list ${list}3", -1, 0);
- assert(!strcmp("123", getenv("list")));
-
/* a command string with \0 in it. Stuff after \0 should be ignored */
run_command("setenv list", 0);
run_command_list(test_cmd, sizeof(test_cmd), 0);
@@ -66,13 +43,6 @@ static int do_ut_cmd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
assert(run_command_list("false", -1, 0) == 1);
assert(run_command_list("echo", -1, 0) == 0);
- run_command("setenv foo 'setenv monty 1; setenv python 2'", 0);
- run_command("run foo", 0);
- assert(getenv("monty") != NULL);
- assert(!strcmp("1", getenv("monty")));
- assert(getenv("python") != NULL);
- assert(!strcmp("2", getenv("python")));
-
#ifdef CONFIG_SYS_HUSH_PARSER
run_command("setenv foo 'setenv black 1\nsetenv adder 2'", 0);
run_command("run foo", 0);
@@ -80,112 +50,6 @@ static int do_ut_cmd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
assert(!strcmp("1", getenv("black")));
assert(getenv("adder") != NULL);
assert(!strcmp("2", getenv("adder")));
-
- /* Test the 'test' command */
-
-#define HUSH_TEST(name, expr, expected_result) \
- run_command("if test " expr " ; then " \
- "setenv " #name "_" #expected_result " y; else " \
- "setenv " #name "_" #expected_result " n; fi", 0); \
- assert(!strcmp(#expected_result, getenv(#name "_" #expected_result))); \
- setenv(#name "_" #expected_result, NULL);
-
- /* Basic operators */
- HUSH_TEST(streq, "aaa = aaa", y);
- HUSH_TEST(streq, "aaa = bbb", n);
-
- HUSH_TEST(strneq, "aaa != bbb", y);
- HUSH_TEST(strneq, "aaa != aaa", n);
-
- HUSH_TEST(strlt, "aaa < bbb", y);
- HUSH_TEST(strlt, "bbb < aaa", n);
-
- HUSH_TEST(strgt, "bbb > aaa", y);
- HUSH_TEST(strgt, "aaa > bbb", n);
-
- HUSH_TEST(eq, "123 -eq 123", y);
- HUSH_TEST(eq, "123 -eq 456", n);
-
- HUSH_TEST(ne, "123 -ne 456", y);
- HUSH_TEST(ne, "123 -ne 123", n);
-
- HUSH_TEST(lt, "123 -lt 456", y);
- HUSH_TEST(lt_eq, "123 -lt 123", n);
- HUSH_TEST(lt, "456 -lt 123", n);
-
- HUSH_TEST(le, "123 -le 456", y);
- HUSH_TEST(le_eq, "123 -le 123", y);
- HUSH_TEST(le, "456 -le 123", n);
-
- HUSH_TEST(gt, "456 -gt 123", y);
- HUSH_TEST(gt_eq, "123 -gt 123", n);
- HUSH_TEST(gt, "123 -gt 456", n);
-
- HUSH_TEST(ge, "456 -ge 123", y);
- HUSH_TEST(ge_eq, "123 -ge 123", y);
- HUSH_TEST(ge, "123 -ge 456", n);
-
- HUSH_TEST(z, "-z \"\"", y);
- HUSH_TEST(z, "-z \"aaa\"", n);
-
- HUSH_TEST(n, "-n \"aaa\"", y);
- HUSH_TEST(n, "-n \"\"", n);
-
- /* Inversion of simple tests */
- HUSH_TEST(streq_inv, "! aaa = aaa", n);
- HUSH_TEST(streq_inv, "! aaa = bbb", y);
-
- HUSH_TEST(streq_inv_inv, "! ! aaa = aaa", y);
- HUSH_TEST(streq_inv_inv, "! ! aaa = bbb", n);
-
- /* Binary operators */
- HUSH_TEST(or_0_0, "aaa != aaa -o bbb != bbb", n);
- HUSH_TEST(or_0_1, "aaa != aaa -o bbb = bbb", y);
- HUSH_TEST(or_1_0, "aaa = aaa -o bbb != bbb", y);
- HUSH_TEST(or_1_1, "aaa = aaa -o bbb = bbb", y);
-
- HUSH_TEST(and_0_0, "aaa != aaa -a bbb != bbb", n);
- HUSH_TEST(and_0_1, "aaa != aaa -a bbb = bbb", n);
- HUSH_TEST(and_1_0, "aaa = aaa -a bbb != bbb", n);
- HUSH_TEST(and_1_1, "aaa = aaa -a bbb = bbb", y);
-
- /* Inversion within binary operators */
- HUSH_TEST(or_0_0_inv, "! aaa != aaa -o ! bbb != bbb", y);
- HUSH_TEST(or_0_1_inv, "! aaa != aaa -o ! bbb = bbb", y);
- HUSH_TEST(or_1_0_inv, "! aaa = aaa -o ! bbb != bbb", y);
- HUSH_TEST(or_1_1_inv, "! aaa = aaa -o ! bbb = bbb", n);
-
- HUSH_TEST(or_0_0_inv_inv, "! ! aaa != aaa -o ! ! bbb != bbb", n);
- HUSH_TEST(or_0_1_inv_inv, "! ! aaa != aaa -o ! ! bbb = bbb", y);
- HUSH_TEST(or_1_0_inv_inv, "! ! aaa = aaa -o ! ! bbb != bbb", y);
- HUSH_TEST(or_1_1_inv_inv, "! ! aaa = aaa -o ! ! bbb = bbb", y);
-
- setenv("ut_var_nonexistent", NULL);
- setenv("ut_var_exists", "1");
- HUSH_TEST(z_varexp_quoted, "-z \"$ut_var_nonexistent\"", y);
- HUSH_TEST(z_varexp_quoted, "-z \"$ut_var_exists\"", n);
- setenv("ut_var_exists", NULL);
-
- run_command("setenv ut_var_space \" \"", 0);
- assert(!strcmp(getenv("ut_var_space"), " "));
- run_command("setenv ut_var_test $ut_var_space", 0);
- assert(!getenv("ut_var_test"));
- run_command("setenv ut_var_test \"$ut_var_space\"", 0);
- assert(!strcmp(getenv("ut_var_test"), " "));
- run_command("setenv ut_var_test \" 1${ut_var_space}${ut_var_space} 2 \"", 0);
- assert(!strcmp(getenv("ut_var_test"), " 1 2 "));
- setenv("ut_var_space", NULL);
- setenv("ut_var_test", NULL);
-
-#ifdef CONFIG_SANDBOX
- /* File existence */
- HUSH_TEST(e, "-e hostfs - creating_this_file_breaks_uboot_unit_test", n);
- run_command("sb save hostfs - creating_this_file_breaks_uboot_unit_test 0 1", 0);
- HUSH_TEST(e, "-e hostfs - creating_this_file_breaks_uboot_unit_test", y);
- /* Perhaps this could be replaced by an "rm" shell command one day */
- assert(!os_unlink("creating_this_file_breaks_uboot_unit_test"));
- HUSH_TEST(e, "-e hostfs - creating_this_file_breaks_uboot_unit_test", n);
-#endif
#endif
assert(run_command("", 0) == 0);
diff --git a/test/dm/Makefile b/test/dm/Makefile
index 3ff1b75e6f..d4f3f22e58 100644
--- a/test/dm/Makefile
+++ b/test/dm/Makefile
@@ -34,5 +34,6 @@ obj-$(CONFIG_DM_USB) += usb.o
obj-$(CONFIG_DM_PMIC) += pmic.o
obj-$(CONFIG_DM_REGULATOR) += regulator.o
obj-$(CONFIG_TIMER) += timer.o
+obj-$(CONFIG_DM_VIDEO) += video.o
obj-$(CONFIG_ADC) += adc.o
endif
diff --git a/test/dm/syscon.c b/test/dm/syscon.c
index 36424816b8..c40f5fc09d 100644
--- a/test/dm/syscon.c
+++ b/test/dm/syscon.c
@@ -29,3 +29,20 @@ static int dm_test_syscon_base(struct unit_test_state *uts)
return 0;
}
DM_TEST(dm_test_syscon_base, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Test system controller finding */
+static int dm_test_syscon_by_driver_data(struct unit_test_state *uts)
+{
+ struct udevice *dev;
+
+ ut_assertok(syscon_get_by_driver_data(SYSCON0, &dev));
+ ut_asserteq(SYSCON0, dev->driver_data);
+
+ ut_assertok(syscon_get_by_driver_data(SYSCON1, &dev));
+ ut_asserteq(SYSCON1, dev->driver_data);
+
+ ut_asserteq(-ENODEV, syscon_get_by_driver_data(2, &dev));
+
+ return 0;
+}
+DM_TEST(dm_test_syscon_by_driver_data, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
diff --git a/test/dm/usb.c b/test/dm/usb.c
index cbc7899ff9..3a2e52b2b5 100644
--- a/test/dm/usb.c
+++ b/test/dm/usb.c
@@ -50,7 +50,7 @@ static int dm_test_usb_flash(struct unit_test_state *uts)
/* Read a few blocks and look for the string we expect */
ut_asserteq(512, dev_desc->blksz);
memset(cmp, '\0', sizeof(cmp));
- ut_asserteq(2, dev_desc->block_read(dev_desc->dev, 0, 2, cmp));
+ ut_asserteq(2, dev_desc->block_read(dev_desc, 0, 2, cmp));
ut_assertok(strcmp(cmp, "this is a test"));
return 0;
diff --git a/test/dm/video.c b/test/dm/video.c
new file mode 100644
index 0000000000..9f5e7fce37
--- /dev/null
+++ b/test/dm/video.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2014 Google, Inc
+ * Written by Simon Glass <sjg@chromium.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <bzlib.h>
+#include <dm.h>
+#include <mapmem.h>
+#include <os.h>
+#include <video.h>
+#include <video_console.h>
+#include <dm/test.h>
+#include <dm/uclass-internal.h>
+#include <test/ut.h>
+
+/*
+ * These tests use the standard sandbox frame buffer, the resolution of which
+ * is defined in the device tree. This only supports 16bpp so the tests only
+ * test that code path. It would be possible to adjust this fairly easily,
+ * by adjusting the bpix value in struct sandbox_sdl_plat. However the code
+ * in sandbox_sdl_sync() would also need to change to handle the different
+ * surface depth.
+ */
+DECLARE_GLOBAL_DATA_PTR;
+
+/* Basic test of the video uclass */
+static int dm_test_video_base(struct unit_test_state *uts)
+{
+ struct video_priv *priv;
+ struct udevice *dev;
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO, 0, &dev));
+ ut_asserteq(1366, video_get_xsize(dev));
+ ut_asserteq(768, video_get_ysize(dev));
+ priv = dev_get_uclass_priv(dev);
+ ut_asserteq(priv->fb_size, 1366 * 768 * 2);
+
+ return 0;
+}
+DM_TEST(dm_test_video_base, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/**
+ * compress_frame_buffer() - Compress the frame buffer and return its size
+ *
+ * We want to write tests which perform operations on the video console and
+ * check that the frame buffer ends up with the correct contents. But it is
+ * painful to store 'known good' images for comparison with the frame
+ * buffer. As an alternative, we can compress the frame buffer and check the
+ * size of the compressed data. This provides a pretty good level of
+ * certainty and the resulting tests need only check a single value.
+ *
+ * @dev: Video device
+ * @return compressed size of the frame buffer, or -ve on error
+ */
+static int compress_frame_buffer(struct udevice *dev)
+{
+ struct video_priv *priv = dev_get_uclass_priv(dev);
+ uint destlen;
+ void *dest;
+ int ret;
+
+ destlen = priv->fb_size;
+ dest = malloc(priv->fb_size);
+ if (!dest)
+ return -ENOMEM;
+ ret = BZ2_bzBuffToBuffCompress(dest, &destlen,
+ priv->fb, priv->fb_size,
+ 3, 0, 0);
+ free(dest);
+ if (ret)
+ return ret;
+
+ return destlen;
+}
+
+/*
+ * Call this function at any point to halt and show the current display. Be
+ * sure to run the test with the -l flag.
+ */
+static void __maybe_unused see_output(void)
+{
+ video_sync_all();
+ while (1);
+}
+
+/* Test text output works on the video console */
+static int dm_test_video_text(struct unit_test_state *uts)
+{
+ struct udevice *dev, *con;
+ int i;
+
+#define WHITE 0xffff
+#define SCROLL_LINES 100
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO, 0, &dev));
+ ut_asserteq(46, compress_frame_buffer(dev));
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO_CONSOLE, 0, &con));
+ vidconsole_putc_xy(con, 0, 0, 'a');
+ ut_asserteq(79, compress_frame_buffer(dev));
+
+ vidconsole_putc_xy(con, 0, 0, ' ');
+ ut_asserteq(46, compress_frame_buffer(dev));
+
+ for (i = 0; i < 20; i++)
+ vidconsole_putc_xy(con, i * 8, 0, ' ' + i);
+ ut_asserteq(273, compress_frame_buffer(dev));
+
+ vidconsole_set_row(con, 0, WHITE);
+ ut_asserteq(46, compress_frame_buffer(dev));
+
+ for (i = 0; i < 20; i++)
+ vidconsole_putc_xy(con, i * 8, 0, ' ' + i);
+ ut_asserteq(273, compress_frame_buffer(dev));
+
+ return 0;
+}
+DM_TEST(dm_test_video_text, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Test handling of special characters in the console */
+static int dm_test_video_chars(struct unit_test_state *uts)
+{
+ struct udevice *dev, *con;
+ const char *test_string = "Well\b\b\b\bxhe is\r \n\ta very modest \bman\n\t\tand Has much to\b\bto be modest about.";
+ const char *s;
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO, 0, &dev));
+ ut_assertok(uclass_get_device(UCLASS_VIDEO_CONSOLE, 0, &con));
+ for (s = test_string; *s; s++)
+ vidconsole_put_char(con, *s);
+ ut_asserteq(466, compress_frame_buffer(dev));
+
+ return 0;
+}
+DM_TEST(dm_test_video_chars, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/**
+ * check_vidconsole_output() - Run a text console test
+ *
+ * @uts: Test state
+ * @rot: Console rotation (0, 90, 180, 270)
+ * @wrap_size: Expected size of compressed frame buffer for the wrap test
+ * @scroll_size: Same for the scroll test
+ * @return 0 on success
+ */
+static int check_vidconsole_output(struct unit_test_state *uts, int rot,
+ int wrap_size, int scroll_size)
+{
+ struct udevice *dev, *con;
+ struct sandbox_sdl_plat *plat;
+ int i;
+
+ ut_assertok(uclass_find_device(UCLASS_VIDEO, 0, &dev));
+ ut_assert(!device_active(dev));
+ plat = dev_get_platdata(dev);
+ plat->rot = rot;
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO, 0, &dev));
+ ut_assertok(uclass_get_device(UCLASS_VIDEO_CONSOLE, 0, &con));
+ ut_asserteq(46, compress_frame_buffer(dev));
+
+ /* Check display wrap */
+ for (i = 0; i < 120; i++)
+ vidconsole_put_char(con, 'A' + i % 50);
+ ut_asserteq(wrap_size, compress_frame_buffer(dev));
+
+ /* Check display scrolling */
+ for (i = 0; i < SCROLL_LINES; i++) {
+ vidconsole_put_char(con, 'A' + i % 50);
+ vidconsole_put_char(con, '\n');
+ }
+ ut_asserteq(scroll_size, compress_frame_buffer(dev));
+
+ /* If we scroll enough, the screen becomes blank again */
+ for (i = 0; i < SCROLL_LINES; i++)
+ vidconsole_put_char(con, '\n');
+ ut_asserteq(46, compress_frame_buffer(dev));
+
+ return 0;
+}
+
+/* Test text output through the console uclass */
+static int dm_test_video_context(struct unit_test_state *uts)
+{
+ return check_vidconsole_output(uts, 0, 788, 453);
+}
+DM_TEST(dm_test_video_context, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Test rotated text output through the console uclass */
+static int dm_test_video_rotation1(struct unit_test_state *uts)
+{
+ ut_assertok(check_vidconsole_output(uts, 1, 1112, 680));
+
+ return 0;
+}
+DM_TEST(dm_test_video_rotation1, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Test rotated text output through the console uclass */
+static int dm_test_video_rotation2(struct unit_test_state *uts)
+{
+ ut_assertok(check_vidconsole_output(uts, 2, 785, 446));
+
+ return 0;
+}
+DM_TEST(dm_test_video_rotation2, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Test rotated text output through the console uclass */
+static int dm_test_video_rotation3(struct unit_test_state *uts)
+{
+ ut_assertok(check_vidconsole_output(uts, 3, 1134, 681));
+
+ return 0;
+}
+DM_TEST(dm_test_video_rotation3, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Read a file into memory and return its address via @addrp */
+static int read_file(struct unit_test_state *uts, const char *fname,
+ ulong *addrp)
+{
+ int buf_size = 100000;
+ ulong addr = 0;
+ int size, fd;
+ char *buf;
+
+ buf = map_sysmem(addr, 0);
+ ut_assert(buf != NULL);
+ fd = os_open(fname, OS_O_RDONLY);
+ ut_assert(fd >= 0);
+ size = os_read(fd, buf, buf_size);
+ ut_assert(size >= 0);
+ ut_assert(size < buf_size);
+ os_close(fd);
+ *addrp = addr;
+
+ return 0;
+}
+
+/* Test drawing a bitmap file */
+static int dm_test_video_bmp(struct unit_test_state *uts)
+{
+ struct udevice *dev;
+ ulong addr;
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO, 0, &dev));
+ ut_assertok(read_file(uts, "tools/logos/denx.bmp", &addr));
+
+ ut_assertok(video_bmp_display(dev, addr, 0, 0, false));
+ ut_asserteq(1368, compress_frame_buffer(dev));
+
+ return 0;
+}
+DM_TEST(dm_test_video_bmp, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Test drawing a compressed bitmap file */
+static int dm_test_video_bmp_comp(struct unit_test_state *uts)
+{
+ struct udevice *dev;
+ ulong addr;
+
+ ut_assertok(uclass_get_device(UCLASS_VIDEO, 0, &dev));
+ ut_assertok(read_file(uts, "tools/logos/denx-comp.bmp", &addr));
+
+ ut_assertok(video_bmp_display(dev, addr, 0, 0, false));
+ ut_asserteq(1368, compress_frame_buffer(dev));
+
+ return 0;
+}
+DM_TEST(dm_test_video_bmp_comp, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
diff --git a/test/fs/fs-test.sh b/test/fs/fs-test.sh
index fc41c04c15..043e5d0c0e 100755
--- a/test/fs/fs-test.sh
+++ b/test/fs/fs-test.sh
@@ -100,7 +100,7 @@ function compile_sandbox() {
# We save time by not deleting and recreating the file system images
function prepare_env() {
rm -f ${MD5_FILE}.* ${OUT}.*
- mkdir ${OUT_DIR}
+ mkdir -p ${OUT_DIR}
}
# 1st parameter is the name of the image file to be created
@@ -115,11 +115,23 @@ function create_image() {
fi
if [ ! -f "$1" ]; then
fallocate -l 3G "$1" &> /dev/null
+ if [ $? -ne 0 ]; then
+ echo fallocate failed - using dd instead
+ dd if=/dev/zero of=$1 bs=1024 count=$((3 * 1024 * 1024))
+ if [ $? -ne 0 ]; then
+ echo Could not create empty disk image
+			exit 1
+ fi
+ fi
mkfs -t "$2" $MKFS_OPTION "$1" &> /dev/null
if [ $? -ne 0 -a "$2" = "fat" ]; then
# If we fail and we did fat, try vfat.
mkfs -t vfat $MKFS_OPTION "$1" &> /dev/null
fi
+ if [ $? -ne 0 ]; then
+ echo Could not create filesystem
+		exit 1
+ fi
fi
}
diff --git a/test/py/.gitignore b/test/py/.gitignore
new file mode 100644
index 0000000000..0d20b6487c
--- /dev/null
+++ b/test/py/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/test/py/README.md b/test/py/README.md
new file mode 100644
index 0000000000..8036299d07
--- /dev/null
+++ b/test/py/README.md
@@ -0,0 +1,300 @@
+# U-Boot pytest suite
+
+## Introduction
+
+This tool aims to test U-Boot by executing U-Boot shell commands using the
+console interface. A single top-level script exists to execute or attach to the
+U-Boot console, run the entire suite of tests against it, and summarize the
+results. Advantages of this approach are:
+
+- Testing is performed in the same way a user or script would interact with
+ U-Boot; there can be no disconnect.
+- There is no need to write or embed test-related code into U-Boot itself.
+ It is asserted that writing test-related code in Python is simpler and more
+  flexible than writing it all in C.
+- It is reasonably simple to interact with U-Boot in this way.
+
+## Requirements
+
+The test suite is implemented using pytest. Interaction with the U-Boot console
+involves executing some binary and interacting with its stdin/stdout. You will
+need to implement various "hook" scripts that are called by the test suite at
+the appropriate time.
+
+On Debian or Debian-like distributions, the following packages are required.
+Similar package names should exist in other distributions.
+
+| Package | Version tested (Ubuntu 14.04) |
+| -------------- | ----------------------------- |
+| python | 2.7.5-5ubuntu3 |
+| python-pytest | 2.5.1-1 |
+
+The test script supports either:
+
+- Executing a sandbox port of U-Boot on the local machine as a sub-process,
+ and interacting with it over stdin/stdout.
+- Executing external "hook" scripts to flash a U-Boot binary onto a
+ physical board, attach to the board's console stream, and reset the board.
+ Further details are described later.
+
+### Using `virtualenv` to provide requirements
+
+Older distributions (e.g. Ubuntu 10.04) may not provide all the required
+packages, or may provide versions that are too old to run the test suite. One
+can use the Python `virtualenv` script to locally install more up-to-date
+versions of the required packages without interfering with the OS installation.
+For example:
+
+```bash
+$ cd /path/to/u-boot
+$ sudo apt-get install python python-virtualenv
+$ virtualenv venv
+$ . ./venv/bin/activate
+$ pip install pytest
+```
+
+## Testing sandbox
+
+To run the test suite on the sandbox port (U-Boot built as a native user-space
+application), simply execute:
+
+```
+./test/py/test.py --bd sandbox --build
+```
+
+The `--bd` option tells the test suite which board type is being tested. This
+lets the test suite know which features the board has, and hence exactly what
+can be tested.
+
+The `--build` option tells the test script to compile U-Boot. Alternatively,
+you may omit this option and build U-Boot yourself, in whatever way you
+choose, before running the test script.
+
+The test script will attach to U-Boot, execute all valid tests for the board,
+then print a summary of the test process. A complete log of the test session
+will be written to `${build_dir}/test-log.html`. This is best viewed in a web
+browser, but may be read directly as plain text, perhaps with the aid of the
+`html2text` utility.
+
+## Command-line options
+
+- `--board-type`, `--bd`, `-B` set the type of the board to be tested. For
+ example, `sandbox` or `seaboard`.
+- `--board-identity`, `--id` set the identity of the board to be tested.
+ This allows differentiation between multiple instances of the same type of
+ physical board that are attached to the same host machine. This parameter is
+ not interpreted by the test script in any way, but rather is simply passed
+ to the hook scripts described below, and may be used in any site-specific
+ way deemed necessary.
+- `--build` indicates that the test script should compile U-Boot itself
+ before running the tests. If using this option, make sure that any
+ environment variables required by the build process are already set, such as
+ `$CROSS_COMPILE`.
+- `--build-dir` sets the directory containing the compiled U-Boot binaries.
+ If omitted, this is `${source_dir}/build-${board_type}`.
+- `--result-dir` sets the directory to write results, such as log files,
+ into. If omitted, the build directory is used.
+- `--persistent-data-dir` sets the directory used to store persistent test
+  data. This is test data that may be re-used across test runs, such as
+  filesystem images.
+
+`pytest` also implements a number of its own command-line options. Please see
+`pytest` documentation for complete details. Execute `py.test --version` for
+a brief summary. Note that U-Boot's test.py script passes all command-line
+arguments directly to `pytest` for processing.
+
+## Testing real hardware
+
+The tools and techniques used to interact with real hardware will vary
+radically between different host and target systems, and the whims of the user.
+For this reason, the test suite does not attempt to directly interact with real
+hardware in any way. Rather, it executes a standardized set of "hook" scripts
+via `$PATH`. These scripts implement certain actions on behalf of the test
+suite. This keeps the test suite simple and isolated from system variances
+unrelated to U-Boot features.
+
+### Hook scripts
+
+#### Environment variables
+
+The following environment variables are set when running hook scripts:
+
+- `UBOOT_BOARD_TYPE` the board type being tested.
+- `UBOOT_BOARD_IDENTITY` the board identity being tested, or `na` if none was
+ specified.
+- `UBOOT_SOURCE_DIR` the U-Boot source directory.
+- `UBOOT_TEST_PY_DIR` the full path to `test/py/` in the source directory.
+- `UBOOT_BUILD_DIR` the U-Boot build directory.
+- `UBOOT_RESULT_DIR` the test result directory.
+- `UBOOT_PERSISTENT_DATA_DIR` the test persistent data directory.
+
+#### `u-boot-test-console`
+
+This script provides access to the U-Boot console. The script's stdin/stdout
+should be connected to the board's console. This process should continue to run
+indefinitely, until killed. The test suite will run this script in parallel
+with all other hooks.
+
+This script may be implemented e.g. by exec()ing `cu`, `kermit`, `conmux`, etc.
+
+If you are able to run U-Boot under a hardware simulator such as qemu, then
+you would likely spawn that simulator from this script. However, note that
+`u-boot-test-reset` may be called multiple times per test script run, and must
+cause U-Boot to start execution from scratch each time. Hopefully your
+simulator includes a virtual reset button! If not, you can launch the
+simulator from `u-boot-test-reset` instead, while arranging for this console
+process to always communicate with the current simulator instance.
+
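+As a very rough sketch only (the `cu` invocation, device path and baud rate
+below are assumptions about a hypothetical setup, not something U-Boot or the
+test suite requires):
+
+```python
+#!/usr/bin/env python
+# Hypothetical u-boot-test-console hook: attach this process to the board's
+# serial console so the test suite can talk to U-Boot via stdin/stdout.
+import os
+
+# Assumed serial device for an imaginary board; adjust for your own site.
+device = '/dev/ttyUSB0'
+
+# Replace this process with cu; it then runs until the test suite kills it.
+os.execvp('cu', ['cu', '-l', device, '-s', '115200'])
+```
+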
+#### `u-boot-test-flash`
+
+Prior to running the test suite against a board, some arrangement must be made
+so that the board executes the particular U-Boot binary to be tested. Often,
+this involves writing the U-Boot binary to the board's flash ROM. The test
+suite calls this hook script for that purpose.
+
+This script should perform the entire flashing process synchronously; the
+script should only exit once flashing is complete, and a board reset will
+cause the newly flashed U-Boot binary to be executed.
+
+It is conceivable that this script will do nothing. This might be useful in
+the following cases:
+
+- Some other process has already written the desired U-Boot binary into the
+ board's flash prior to running the test suite.
+- The board allows U-Boot to be downloaded directly into RAM, and executed
+ from there. Use of this feature will reduce wear on the board's flash, so
+ may be preferable if available, and if cold boot testing of U-Boot is not
+ required. If this feature is used, the `u-boot-test-reset` script should
+  perform this download, since the board could conceivably be reset multiple
+ times in a single test run.
+
+It is up to the user to determine if those situations exist, and to code this
+hook script appropriately.
+
+This script will typically be implemented by calling out to some SoC- or
+board-specific vendor flashing utility.
+
+#### `u-boot-test-reset`
+
+Whenever the test suite needs to reset the target board, this script is
+executed. This is guaranteed to happen at least once, prior to executing the
+first test function. If any test fails, the test infrastructure will execute
+this script again to restore U-Boot to an operational state before running the
+next test function.
+
+This script will likely be implemented by communicating with some form of
+relay or electronic switch attached to the board's reset signal.
+
+The semantics of this script require that when it is executed, U-Boot will
+start running from scratch. If the U-Boot binary to be tested has been written
+to flash, pulsing the board's reset signal is likely all this script need do.
+However, in some scenarios, this script may perform other actions. For
+example, it may call out to some SoC- or board-specific vendor utility in order
+to download the U-Boot binary directly into RAM and execute it. This would
+avoid the need for `u-boot-test-flash` to actually write U-Boot to flash, thus
+saving wear on the flash chip(s).
+
+### Board-type-specific configuration
+
+Each board has a different configuration and behaviour. Many of these
+differences can be automatically detected by parsing the `.config` file in the
+build directory. However, some differences can't yet be handled automatically.
+
+For each board, an optional Python module `u_boot_board_${board_type}` may exist
+to provide board-specific information to the test script. Any global value
+defined in these modules is available for use by any test function. The data
+contained in these scripts must be purely derived from U-Boot source code.
+Hence, these configuration files are part of the U-Boot source tree too.
+
+### Execution environment configuration
+
+Each user's hardware setup may enable testing different subsets of the features
+implemented by a particular board's configuration of U-Boot. For example, a
+U-Boot configuration may support USB device mode and USB Mass Storage, but this
+can only be tested if a USB cable is connected between the board and the host
+machine running the test script.
+
+For each board, optional Python modules `u_boot_boardenv_${board_type}` and
+`u_boot_boardenv_${board_type}_${board_identity}` may exist to provide
+board-specific and board-identity-specific information to the test script. Any
+global value defined in these modules is available for use by any test
+function. The data contained in these is specific to a particular user's
+hardware configuration. Hence, these configuration files are not part of the
+U-Boot source tree, and should be installed outside of the source tree. Users
+should set `$PYTHONPATH` prior to running the test script to allow these
+modules to be loaded.
+
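+As a minimal sketch, assuming a hypothetical `seaboard` setup and invented
+variable names (tests simply look up whatever `env__...` names they declare
+as fixtures, so nothing below is required by the test suite itself):
+
+```python
+# Hypothetical file: $HOME/ubtest/py/u_boot_boardenv_seaboard.py
+# Any global defined here becomes part of the board-environment configuration.
+
+# A single value; a test declaring a fixture named env__ram_scratch_addr
+# receives exactly this value.
+env__ram_scratch_addr = 0x90000000
+
+# A list of values; a test declaring a fixture named env__usb_dev_port is run
+# once per entry (see pytest_generate_tests() in conftest.py).
+env__usb_dev_ports = [
+    {'tgt_usb_ctlr': 0, 'host_ums_dev_node': '/dev/disk/by-id/usb-example'},
+]
+```
+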
+### Board module parameter usage
+
+The test scripts rely on the following variables being defined by the board
+module:
+
+- None at present.
+
+### U-Boot `.config` feature usage
+
+The test scripts rely on various U-Boot `.config` features, either directly in
+order to test those features, or indirectly in order to query information from
+the running U-Boot instance in order to test other features.
+
+One example is that testing of the `md` command requires knowledge of a RAM
+address to use for the test. This data is parsed from the output of the
+`bdinfo` command, and hence relies on CONFIG_CMD_BDI being enabled.
+
+For a complete list of dependencies, please search the test scripts for
+instances of:
+
+- `buildconfig.get(...`
+- `@pytest.mark.buildconfigspec(...`
+
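+As a rough sketch of how a test can express such a dependency (the command and
+option name below are purely illustrative):
+
+```python
+import pytest
+
+# Skip this test unless CONFIG_CMD_MEMORY is enabled in the build under test.
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_md_help_example(u_boot_console):
+    # Build-time values can also be read directly when needed, e.g.
+    # u_boot_console.config.buildconfig.get('config_cmd_memory').
+    response = u_boot_console.run_command('help md')
+    assert 'md' in response
+```
+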
+### Complete invocation example
+
+Assuming that you have installed the hook scripts into $HOME/ubtest/bin, and
+any required environment configuration Python modules into $HOME/ubtest/py,
+then you would likely invoke the test script as follows:
+
+If U-Boot has already been built:
+
+```bash
+PATH=$HOME/ubtest/bin:$PATH \
+ PYTHONPATH=${HOME}/ubtest/py:${PYTHONPATH} \
+ ./test/py/test.py --bd seaboard
+```
+
+If you want the test script to compile U-Boot for you too, then you likely
+need to set `$CROSS_COMPILE` to allow this, and invoke the test script as
+follows:
+
+```bash
+CROSS_COMPILE=arm-none-eabi- \
+ PATH=$HOME/ubtest/bin:$PATH \
+ PYTHONPATH=${HOME}/ubtest/py:${PYTHONPATH} \
+ ./test/py/test.py --bd seaboard --build
+```
+
+## Writing tests
+
+Please refer to the pytest documentation for details of writing pytest tests.
+Details specific to the U-Boot test suite are described below.
+
+A test fixture named `u_boot_console` should be used by each test function. This
+provides the means to interact with the U-Boot console, and retrieve board and
+environment configuration information.
+
+The function `u_boot_console.run_command()` executes a shell command on the
+U-Boot console, and returns all output from that command. This allows
+validation or interpretation of the command output. This function validates
+that certain strings are not seen on the U-Boot console. These include shell
+error messages and the U-Boot sign-on message (in order to detect unexpected
+board resets). See the source of `u_boot_console_base.py` for a complete list of
+"bad" strings. Some test scenarios are expected to trigger these strings. Use
+`u_boot_console.disable_check()` to temporarily disable checking for specific
+strings. See `test_unknown_cmd.py` for an example.
+
+Board- and board-environment configuration values may be accessed as sub-fields
+of the `u_boot_console.config` object, for example
+`u_boot_console.config.ram_base`.
+
+Build configuration values (from `.config`) may be accessed via the dictionary
+`u_boot_console.config.buildconfig`, with keys equal to the Kconfig variable
+names (in lower case).
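+
+As a short illustration pulling the above together, a minimal test might look
+like the sketch below (the command and expected output are only an example):
+
+```python
+# Hypothetical file: test/py/tests/test_echo_example.py
+
+def test_echo_example(u_boot_console):
+    '''Run a trivial shell command and check its output.'''
+
+    response = u_boot_console.run_command('echo hello')
+    assert response.strip() == 'hello'
+```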
diff --git a/test/py/conftest.py b/test/py/conftest.py
new file mode 100644
index 0000000000..e1674dfce0
--- /dev/null
+++ b/test/py/conftest.py
@@ -0,0 +1,422 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Implementation of pytest run-time hook functions. These are invoked by
+# pytest at certain points during operation, e.g. startup, for each executed
+# test, at shutdown etc. These hooks perform functions such as:
+# - Parsing custom command-line options.
+# - Pulling in user-specified board configuration.
+# - Creating the U-Boot console test fixture.
+# - Creating the HTML log file.
+# - Monitoring each test's results.
+# - Implementing custom pytest markers.
+
+import atexit
+import errno
+import os
+import os.path
+import pexpect
+import pytest
+from _pytest.runner import runtestprotocol
+import ConfigParser
+import StringIO
+import sys
+
+# Globals: The HTML log file, and the connection to the U-Boot console.
+log = None
+console = None
+
+def mkdir_p(path):
+ '''Create a directory path.
+
+ This includes creating any intermediate/parent directories. Any errors
+ caused due to already extant directories are ignored.
+
+ Args:
+ path: The directory path to create.
+
+ Returns:
+ Nothing.
+ '''
+
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(path):
+ pass
+ else:
+ raise
+
+def pytest_addoption(parser):
+ '''pytest hook: Add custom command-line options to the cmdline parser.
+
+ Args:
+ parser: The pytest command-line parser.
+
+ Returns:
+ Nothing.
+ '''
+
+ parser.addoption('--build-dir', default=None,
+ help='U-Boot build directory (O=)')
+ parser.addoption('--result-dir', default=None,
+ help='U-Boot test result/tmp directory')
+ parser.addoption('--persistent-data-dir', default=None,
+ help='U-Boot test persistent generated data directory')
+ parser.addoption('--board-type', '--bd', '-B', default='sandbox',
+ help='U-Boot board type')
+ parser.addoption('--board-identity', '--id', default='na',
+ help='U-Boot board identity/instance')
+ parser.addoption('--build', default=False, action='store_true',
+ help='Compile U-Boot before running tests')
+
+def pytest_configure(config):
+ '''pytest hook: Perform custom initialization at startup time.
+
+ Args:
+ config: The pytest configuration.
+
+ Returns:
+ Nothing.
+ '''
+
+ global log
+ global console
+ global ubconfig
+
+ test_py_dir = os.path.dirname(os.path.abspath(__file__))
+ source_dir = os.path.dirname(os.path.dirname(test_py_dir))
+
+ board_type = config.getoption('board_type')
+ board_type_filename = board_type.replace('-', '_')
+
+ board_identity = config.getoption('board_identity')
+ board_identity_filename = board_identity.replace('-', '_')
+
+ build_dir = config.getoption('build_dir')
+ if not build_dir:
+ build_dir = source_dir + '/build-' + board_type
+ mkdir_p(build_dir)
+
+ result_dir = config.getoption('result_dir')
+ if not result_dir:
+ result_dir = build_dir
+ mkdir_p(result_dir)
+
+ persistent_data_dir = config.getoption('persistent_data_dir')
+ if not persistent_data_dir:
+ persistent_data_dir = build_dir + '/persistent-data'
+ mkdir_p(persistent_data_dir)
+
+ import multiplexed_log
+ log = multiplexed_log.Logfile(result_dir + '/test-log.html')
+
+ if config.getoption('build'):
+ if build_dir != source_dir:
+ o_opt = 'O=%s' % build_dir
+ else:
+ o_opt = ''
+ cmds = (
+ ['make', o_opt, '-s', board_type + '_defconfig'],
+ ['make', o_opt, '-s', '-j8'],
+ )
+ runner = log.get_runner('make', sys.stdout)
+ for cmd in cmds:
+ runner.run(cmd, cwd=source_dir)
+ runner.close()
+
+ class ArbitraryAttributeContainer(object):
+ pass
+
+ ubconfig = ArbitraryAttributeContainer()
+ ubconfig.brd = dict()
+ ubconfig.env = dict()
+
+ modules = [
+ (ubconfig.brd, 'u_boot_board_' + board_type_filename),
+ (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
+ (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
+ board_identity_filename),
+ ]
+ for (dict_to_fill, module_name) in modules:
+ try:
+ module = __import__(module_name)
+ except ImportError:
+ continue
+ dict_to_fill.update(module.__dict__)
+
+ ubconfig.buildconfig = dict()
+
+ for conf_file in ('.config', 'include/autoconf.mk'):
+ dot_config = build_dir + '/' + conf_file
+ if not os.path.exists(dot_config):
+ raise Exception(conf_file + ' does not exist; ' +
+ 'try passing --build option?')
+
+ with open(dot_config, 'rt') as f:
+ ini_str = '[root]\n' + f.read()
+ ini_sio = StringIO.StringIO(ini_str)
+ parser = ConfigParser.RawConfigParser()
+ parser.readfp(ini_sio)
+ ubconfig.buildconfig.update(parser.items('root'))
+
+ ubconfig.test_py_dir = test_py_dir
+ ubconfig.source_dir = source_dir
+ ubconfig.build_dir = build_dir
+ ubconfig.result_dir = result_dir
+ ubconfig.persistent_data_dir = persistent_data_dir
+ ubconfig.board_type = board_type
+ ubconfig.board_identity = board_identity
+
+ env_vars = (
+ 'board_type',
+ 'board_identity',
+ 'source_dir',
+ 'test_py_dir',
+ 'build_dir',
+ 'result_dir',
+ 'persistent_data_dir',
+ )
+ for v in env_vars:
+ os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
+
+ if board_type == 'sandbox':
+ import u_boot_console_sandbox
+ console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
+ else:
+ import u_boot_console_exec_attach
+ console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
+
+def pytest_generate_tests(metafunc):
+ '''pytest hook: parameterize test functions based on custom rules.
+
+ If a test function takes parameter(s) (fixture names) of the form brd__xxx
+ or env__xxx, the brd and env configuration dictionaries are consulted to
+ find the list of values to use for those parameters, and the test is
+ parametrized so that it runs once for each combination of values.
+
+ Args:
+ metafunc: The pytest test function.
+
+ Returns:
+ Nothing.
+ '''
+
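+    # Illustrative example only: a test taking a fixture named env__foo is
+    # given the value of env__foo from the boardenv module if that exact name
+    # exists there; otherwise the test is run once per entry of a list named
+    # env__foos.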
+ subconfigs = {
+ 'brd': console.config.brd,
+ 'env': console.config.env,
+ }
+ for fn in metafunc.fixturenames:
+ parts = fn.split('__')
+ if len(parts) < 2:
+ continue
+ if parts[0] not in subconfigs:
+ continue
+ subconfig = subconfigs[parts[0]]
+ vals = []
+ val = subconfig.get(fn, [])
+ # If that exact name is a key in the data source:
+ if val:
+ # ... use the dict value as a single parameter value.
+ vals = (val, )
+ else:
+ # ... otherwise, see if there's a key that contains a list of
+ # values to use instead.
+ vals = subconfig.get(fn + 's', [])
+ metafunc.parametrize(fn, vals)
+
+@pytest.fixture(scope='session')
+def u_boot_console(request):
+ '''Generate the value of a test's u_boot_console fixture.
+
+ Args:
+ request: The pytest request.
+
+ Returns:
+ The fixture value.
+ '''
+
+ return console
+
+tests_not_run = set()
+tests_failed = set()
+tests_skipped = set()
+tests_passed = set()
+
+def pytest_itemcollected(item):
+ '''pytest hook: Called once for each test found during collection.
+
+ This enables our custom result analysis code to see the list of all tests
+ that should eventually be run.
+
+ Args:
+ item: The item that was collected.
+
+ Returns:
+ Nothing.
+ '''
+
+ tests_not_run.add(item.name)
+
+def cleanup():
+ '''Clean up all global state.
+
+ Executed (via atexit) once the entire test process is complete. This
+ includes logging the status of all tests, and the identity of any failed
+ or skipped tests.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ if console:
+ console.close()
+ if log:
+ log.status_pass('%d passed' % len(tests_passed))
+ if tests_skipped:
+ log.status_skipped('%d skipped' % len(tests_skipped))
+ for test in tests_skipped:
+ log.status_skipped('... ' + test)
+ if tests_failed:
+ log.status_fail('%d failed' % len(tests_failed))
+ for test in tests_failed:
+ log.status_fail('... ' + test)
+ if tests_not_run:
+ log.status_fail('%d not run' % len(tests_not_run))
+ for test in tests_not_run:
+ log.status_fail('... ' + test)
+ log.close()
+atexit.register(cleanup)
+
+def setup_boardspec(item):
+ '''Process any 'boardspec' marker for a test.
+
+ Such a marker lists the set of board types that a test does/doesn't
+ support. If tests are being executed on an unsupported board, the test is
+ marked to be skipped.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ '''
+
+ mark = item.get_marker('boardspec')
+ if not mark:
+ return
+ required_boards = []
+ for board in mark.args:
+ if board.startswith('!'):
+ if ubconfig.board_type == board[1:]:
+ pytest.skip('board not supported')
+ return
+ else:
+ required_boards.append(board)
+ if required_boards and ubconfig.board_type not in required_boards:
+ pytest.skip('board not supported')
+
+def setup_buildconfigspec(item):
+ '''Process any 'buildconfigspec' marker for a test.
+
+ Such a marker lists some U-Boot configuration feature that the test
+    requires. If tests are being executed on a U-Boot build that doesn't
+ have the required feature, the test is marked to be skipped.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ '''
+
+ mark = item.get_marker('buildconfigspec')
+ if not mark:
+ return
+ for option in mark.args:
+ if not ubconfig.buildconfig.get('config_' + option.lower(), None):
+ pytest.skip('.config feature not enabled')
+
+def pytest_runtest_setup(item):
+ '''pytest hook: Configure (set up) a test item.
+
+ Called once for each test to perform any custom configuration. This hook
+ is used to skip the test if certain conditions apply.
+
+ Args:
+ item: The pytest test item.
+
+ Returns:
+ Nothing.
+ '''
+
+ log.start_section(item.name)
+ setup_boardspec(item)
+ setup_buildconfigspec(item)
+
+def pytest_runtest_protocol(item, nextitem):
+ '''pytest hook: Called to execute a test.
+
+ This hook wraps the standard pytest runtestprotocol() function in order
+ to acquire visibility into, and record, each test function's result.
+
+ Args:
+ item: The pytest test item to execute.
+ nextitem: The pytest test item that will be executed after this one.
+
+ Returns:
+ A list of pytest reports (test result data).
+ '''
+
+ reports = runtestprotocol(item, nextitem=nextitem)
+ failed = None
+ skipped = None
+ for report in reports:
+ if report.outcome == 'failed':
+ failed = report
+ break
+ if report.outcome == 'skipped':
+ if not skipped:
+ skipped = report
+
+ if failed:
+ tests_failed.add(item.name)
+ elif skipped:
+ tests_skipped.add(item.name)
+ else:
+ tests_passed.add(item.name)
+ tests_not_run.remove(item.name)
+
+ try:
+ if failed:
+ msg = 'FAILED:\n' + str(failed.longrepr)
+ log.status_fail(msg)
+ elif skipped:
+ msg = 'SKIPPED:\n' + str(skipped.longrepr)
+ log.status_skipped(msg)
+ else:
+ log.status_pass('OK')
+ except:
+ # If something went wrong with logging, it's better to let the test
+ # process continue, which may report other exceptions that triggered
+ # the logging issue (e.g. console.log wasn't created). Hence, just
+ # squash the exception. If the test setup failed due to e.g. syntax
+ # error somewhere else, this won't be seen. However, once that issue
+ # is fixed, if this exception still exists, it will then be logged as
+ # part of the test's stdout.
+ import traceback
+ print 'Exception occurred while logging runtest status:'
+ traceback.print_exc()
+ # FIXME: Can we force a test failure here?
+
+ log.end_section(item.name)
+
+ if failed:
+ console.cleanup_spawn()
+
+ return reports
diff --git a/test/py/multiplexed_log.css b/test/py/multiplexed_log.css
new file mode 100644
index 0000000000..50f7b90929
--- /dev/null
+++ b/test/py/multiplexed_log.css
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015 Stephen Warren
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+/*
+ * This provides pretty formatting of the HTML log file, e.g.
+ * - colored bars beside/above log sections for easily parsed delineation.
+ * - color highlighting of various messages.
+ */
+
+body {
+ background-color: black;
+ color: #ffffff;
+}
+
+pre {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+.implicit {
+ color: #808080;
+}
+
+.section {
+ border-style: solid;
+ border-color: #303030;
+ border-width: 0px 0px 0px 5px;
+ padding-left: 5px
+}
+
+.section-header {
+ background-color: #303030;
+ margin-left: -5px;
+ margin-top: 5px;
+}
+
+.section-trailer {
+ display: none;
+}
+
+.stream {
+ border-style: solid;
+ border-color: #303030;
+ border-width: 0px 0px 0px 5px;
+ padding-left: 5px
+}
+
+.stream-header {
+ background-color: #303030;
+ margin-left: -5px;
+ margin-top: 5px;
+}
+
+.stream-trailer {
+ display: none;
+}
+
+.error {
+ color: #ff0000
+}
+
+.warning {
+ color: #ffff00
+}
+
+.info {
+ color: #808080
+}
+
+.action {
+ color: #8080ff
+}
+
+.status-pass {
+ color: #00ff00
+}
+
+.status-skipped {
+ color: #ffff00
+}
+
+.status-fail {
+ color: #ff0000
+}
diff --git a/test/py/multiplexed_log.py b/test/py/multiplexed_log.py
new file mode 100644
index 0000000000..48f2b51de1
--- /dev/null
+++ b/test/py/multiplexed_log.py
@@ -0,0 +1,515 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Generate an HTML-formatted log file containing multiple streams of data,
+# each represented in a well-delineated/-structured fashion.
+
+import cgi
+import os.path
+import shutil
+import subprocess
+
+mod_dir = os.path.dirname(os.path.abspath(__file__))
+
+class LogfileStream(object):
+ '''A file-like object used to write a single logical stream of data into
+ a multiplexed log file. Objects of this type should be created by factory
+ functions in the Logfile class rather than directly.'''
+
+ def __init__(self, logfile, name, chained_file):
+ '''Initialize a new object.
+
+ Args:
+ logfile: The Logfile object to log to.
+ name: The name of this log stream.
+ chained_file: The file-like object to which all stream data should be
+                logged in addition to logfile. Can be None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.logfile = logfile
+ self.name = name
+ self.chained_file = chained_file
+
+ def close(self):
+ '''Dummy function so that this class is "file-like".
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ pass
+
+ def write(self, data, implicit=False):
+ '''Write data to the log stream.
+
+ Args:
+            data: The data to write to the file.
+ implicit: Boolean indicating whether data actually appeared in the
+ stream, or was implicitly generated. A valid use-case is to
+ repeat a shell prompt at the start of each separate log
+ section, which makes the log sections more readable in
+ isolation.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.logfile.write(self, data, implicit)
+ if self.chained_file:
+ self.chained_file.write(data)
+
+ def flush(self):
+ '''Flush the log stream, to ensure correct log interleaving.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.logfile.flush()
+ if self.chained_file:
+ self.chained_file.flush()
+
+class RunAndLog(object):
+ '''A utility object used to execute sub-processes and log their output to
+ a multiplexed log file. Objects of this type should be created by factory
+ functions in the Logfile class rather than directly.'''
+
+ def __init__(self, logfile, name, chained_file):
+ '''Initialize a new object.
+
+ Args:
+ logfile: The Logfile object to log to.
+ name: The name of this log stream or sub-process.
+ chained_file: The file-like object to which all stream data should
+                be logged in addition to logfile. Can be None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.logfile = logfile
+ self.name = name
+ self.chained_file = chained_file
+
+ def close(self):
+ '''Clean up any resources managed by this object.'''
+ pass
+
+ def run(self, cmd, cwd=None):
+ '''Run a command as a sub-process, and log the results.
+
+ Args:
+ cmd: The command to execute.
+ cwd: The directory to run the command in. Can be None to use the
+ current directory.
+
+ Returns:
+ Nothing.
+ '''
+
+ msg = "+" + " ".join(cmd) + "\n"
+ if self.chained_file:
+ self.chained_file.write(msg)
+ self.logfile.write(self, msg)
+
+ try:
+ p = subprocess.Popen(cmd, cwd=cwd,
+ stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ (stdout, stderr) = p.communicate()
+ output = ''
+ if stdout:
+ if stderr:
+ output += 'stdout:\n'
+ output += stdout
+ if stderr:
+ if stdout:
+ output += 'stderr:\n'
+ output += stderr
+ exit_status = p.returncode
+ exception = None
+ except subprocess.CalledProcessError as cpe:
+ output = cpe.output
+ exit_status = cpe.returncode
+ exception = cpe
+ except Exception as e:
+ output = ''
+ exit_status = 0
+ exception = e
+ if output and not output.endswith('\n'):
+ output += '\n'
+ if exit_status and not exception:
+ exception = Exception('Exit code: ' + str(exit_status))
+ if exception:
+ output += str(exception) + '\n'
+ self.logfile.write(self, output)
+ if self.chained_file:
+ self.chained_file.write(output)
+ if exception:
+ raise exception
+
+class SectionCtxMgr(object):
+ '''A context manager for Python's "with" statement, which allows a certain
+ portion of test code to be logged to a separate section of the log file.
+ Objects of this type should be created by factory functions in the Logfile
+ class rather than directly.'''
+
+ def __init__(self, log, marker):
+ '''Initialize a new object.
+
+ Args:
+ log: The Logfile object to log to.
+ marker: The name of the nested log section.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.log = log
+ self.marker = marker
+
+ def __enter__(self):
+ self.log.start_section(self.marker)
+
+ def __exit__(self, extype, value, traceback):
+ self.log.end_section(self.marker)
+
+class Logfile(object):
+ '''Generates an HTML-formatted log file containing multiple streams of
+ data, each represented in a well-delineated/-structured fashion.'''
+
+ def __init__(self, fn):
+ '''Initialize a new object.
+
+ Args:
+ fn: The filename to write to.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.f = open(fn, "wt")
+ self.last_stream = None
+ self.blocks = []
+ self.cur_evt = 1
+ shutil.copy(mod_dir + "/multiplexed_log.css", os.path.dirname(fn))
+ self.f.write("""\
+<html>
+<head>
+<link rel="stylesheet" type="text/css" href="multiplexed_log.css">
+</head>
+<body>
+<tt>
+""")
+
+ def close(self):
+ '''Close the log file.
+
+ After calling this function, no more data may be written to the log.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.f.write("""\
+</tt>
+</body>
+</html>
+""")
+ self.f.close()
+
+ # The set of characters that should be represented as hexadecimal codes in
+ # the log file.
+ _nonprint = ("%" + "".join(chr(c) for c in range(0, 32) if c not in (9, 10)) +
+ "".join(chr(c) for c in range(127, 256)))
+
+ def _escape(self, data):
+        '''Render data in a form suitable for inclusion in an HTML document.
+
+ This includes HTML-escaping certain characters, and translating
+ control characters to a hexadecimal representation.
+
+ Args:
+ data: The raw string data to be escaped.
+
+ Returns:
+ An escaped version of the data.
+ '''
+
+ data = data.replace(chr(13), "")
+ data = "".join((c in self._nonprint) and ("%%%02x" % ord(c)) or
+ c for c in data)
+ data = cgi.escape(data)
+ return data
+
+ def _terminate_stream(self):
+ '''Write HTML to the log file to terminate the current stream's data.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.cur_evt += 1
+ if not self.last_stream:
+ return
+ self.f.write("</pre>\n")
+ self.f.write("<div class=\"stream-trailer\" id=\"" +
+ self.last_stream.name + "\">End stream: " +
+ self.last_stream.name + "</div>\n")
+ self.f.write("</div>\n")
+ self.last_stream = None
+
+ def _note(self, note_type, msg):
+ '''Write a note or one-off message to the log file.
+
+ Args:
+ note_type: The type of note. This must be a value supported by the
+ accompanying multiplexed_log.css.
+ msg: The note/message to log.
+
+ Returns:
+ Nothing.
+ '''
+
+ self._terminate_stream()
+ self.f.write("<div class=\"" + note_type + "\">\n<pre>")
+ self.f.write(self._escape(msg))
+ self.f.write("\n</pre></div>\n")
+
+ def start_section(self, marker):
+ '''Begin a new nested section in the log file.
+
+ Args:
+ marker: The name of the section that is starting.
+
+ Returns:
+ Nothing.
+ '''
+
+ self._terminate_stream()
+ self.blocks.append(marker)
+ blk_path = "/".join(self.blocks)
+ self.f.write("<div class=\"section\" id=\"" + blk_path + "\">\n")
+ self.f.write("<div class=\"section-header\" id=\"" + blk_path +
+ "\">Section: " + blk_path + "</div>\n")
+
+ def end_section(self, marker):
+ '''Terminate the current nested section in the log file.
+
+ This function validates proper nesting of start_section() and
+ end_section() calls. If a mismatch is found, an exception is raised.
+
+ Args:
+ marker: The name of the section that is ending.
+
+ Returns:
+ Nothing.
+ '''
+
+ if (not self.blocks) or (marker != self.blocks[-1]):
+ raise Exception("Block nesting mismatch: \"%s\" \"%s\"" %
+ (marker, "/".join(self.blocks)))
+ self._terminate_stream()
+ blk_path = "/".join(self.blocks)
+ self.f.write("<div class=\"section-trailer\" id=\"section-trailer-" +
+ blk_path + "\">End section: " + blk_path + "</div>\n")
+ self.f.write("</div>\n")
+ self.blocks.pop()
+
+ def section(self, marker):
+ '''Create a temporary section in the log file.
+
+ This function creates a context manager for Python's "with" statement,
+ which allows a certain portion of test code to be logged to a separate
+ section of the log file.
+
+ Usage:
+ with log.section("somename"):
+ some test code
+
+ Args:
+ marker: The name of the nested section.
+
+ Returns:
+ A context manager object.
+ '''
+
+ return SectionCtxMgr(self, marker)
+
+ def error(self, msg):
+ '''Write an error note to the log file.
+
+ Args:
+ msg: A message describing the error.
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("error", msg)
+
+ def warning(self, msg):
+        '''Write a warning note to the log file.
+
+ Args:
+ msg: A message describing the warning.
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("warning", msg)
+
+ def info(self, msg):
+ '''Write an informational note to the log file.
+
+ Args:
+ msg: An informational message.
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("info", msg)
+
+ def action(self, msg):
+ '''Write an action note to the log file.
+
+ Args:
+ msg: A message describing the action that is being logged.
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("action", msg)
+
+ def status_pass(self, msg):
+ '''Write a note to the log file describing test(s) which passed.
+
+ Args:
+ msg: A message describing passed test(s).
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("status-pass", msg)
+
+ def status_skipped(self, msg):
+ '''Write a note to the log file describing skipped test(s).
+
+ Args:
+            msg: A message describing skipped test(s).
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("status-skipped", msg)
+
+ def status_fail(self, msg):
+ '''Write a note to the log file describing failed test(s).
+
+ Args:
+            msg: A message describing failed test(s).
+
+ Returns:
+ Nothing.
+ '''
+
+ self._note("status-fail", msg)
+
+ def get_stream(self, name, chained_file=None):
+ '''Create an object to log a single stream's data into the log file.
+
+ This creates a "file-like" object that can be written to in order to
+ write a single stream's data to the log file. The implementation will
+ handle any required interleaving of data (from multiple streams) in
+ the log, in a way that makes it obvious which stream each bit of data
+ came from.
+
+ Args:
+ name: The name of the stream.
+ chained_file: The file-like object to which all stream data should
+                be logged in addition to this log. Can be None.
+
+ Returns:
+ A file-like object.
+ '''
+
+ return LogfileStream(self, name, chained_file)
+
+ def get_runner(self, name, chained_file=None):
+ '''Create an object that executes processes and logs their output.
+
+ Args:
+ name: The name of this sub-process.
+ chained_file: The file-like object to which all stream data should
+                be logged in addition to logfile. Can be None.
+
+ Returns:
+ A RunAndLog object.
+ '''
+
+ return RunAndLog(self, name, chained_file)
+
+ def write(self, stream, data, implicit=False):
+ '''Write stream data into the log file.
+
+ This function should only be used by instances of LogfileStream or
+ RunAndLog.
+
+ Args:
+ stream: The stream whose data is being logged.
+ data: The data to log.
+ implicit: Boolean indicating whether data actually appeared in the
+ stream, or was implicitly generated. A valid use-case is to
+ repeat a shell prompt at the start of each separate log
+ section, which makes the log sections more readable in
+ isolation.
+
+ Returns:
+ Nothing.
+ '''
+
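+        # When the active stream changes, terminate the previous stream's
+        # output block and emit a new header/body for this stream.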
+ if stream != self.last_stream:
+ self._terminate_stream()
+ self.f.write("<div class=\"stream\" id=\"%s\">\n" % stream.name)
+ self.f.write("<div class=\"stream-header\" id=\"" + stream.name +
+ "\">Stream: " + stream.name + "</div>\n")
+ self.f.write("<pre>")
+ if implicit:
+ self.f.write("<span class=\"implicit\">")
+ self.f.write(self._escape(data))
+ if implicit:
+ self.f.write("</span>")
+ self.last_stream = stream
+
+ def flush(self):
+ '''Flush the log stream, to ensure correct log interleaving.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.f.flush()
diff --git a/test/py/pytest.ini b/test/py/pytest.ini
new file mode 100644
index 0000000000..67e514f420
--- /dev/null
+++ b/test/py/pytest.ini
@@ -0,0 +1,11 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Static configuration data for pytest. pytest reads this at startup time.
+
+[pytest]
+markers =
+ boardspec: U-Boot: Describes the set of boards a test can/can't run on.
+ buildconfigspec: U-Boot: Describes Kconfig/config-header constraints.
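+
+# Illustrative use of these markers in a test module (hypothetical test name):
+#   @pytest.mark.boardspec('sandbox')
+#   @pytest.mark.buildconfigspec('cmd_memory')
+#   def test_something(u_boot_console):
+#       ...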
diff --git a/test/py/test.py b/test/py/test.py
new file mode 100755
index 0000000000..9c23898774
--- /dev/null
+++ b/test/py/test.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Wrapper script to invoke pytest with the directory name that contains the
+# U-Boot tests.
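+#
+# Any arguments are passed straight through to py.test, e.g. (illustrative):
+#   ./test/py/test.py -k version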
+
+import os
+import os.path
+import sys
+
+# Get rid of argv[0]
+sys.argv.pop(0)
+
+# argv; py.test test_directory_name user-supplied-arguments
+args = ["py.test", os.path.dirname(__file__) + "/tests"]
+args.extend(sys.argv)
+
+try:
+ os.execvp("py.test", args)
+except:
+ # Log full details of any exception for detailed analysis
+ import traceback
+ traceback.print_exc()
+ # Hint to the user that they likely simply haven't installed the required
+ # dependencies.
+ print >>sys.stderr, """
+exec(py.test) failed; perhaps you are missing some dependencies?
+See test/py/README.md for the list."""
diff --git a/test/py/tests/test_000_version.py b/test/py/tests/test_000_version.py
new file mode 100644
index 0000000000..d262f0534e
--- /dev/null
+++ b/test/py/tests/test_000_version.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# pytest runs tests in the order of their module path, which is related to the
+# filename containing the test. This file is named such that it is sorted
+# first, simply as a very basic sanity check of the functionality of the U-Boot
+# command prompt.
+
+def test_version(u_boot_console):
+ '''Test that the "version" command prints the U-Boot version.'''
+
+ # "version" prints the U-Boot sign-on message. This is usually considered
+ # an error, so that any unexpected reboot causes an error. Here, this
+ # error detection is disabled since the sign-on message is expected.
+ with u_boot_console.disable_check('main_signon'):
+ response = u_boot_console.run_command('version')
+ # Ensure "version" printed what we expected.
+ u_boot_console.validate_version_string_in_text(response)
diff --git a/test/py/tests/test_env.py b/test/py/tests/test_env.py
new file mode 100644
index 0000000000..a3e8dd3033
--- /dev/null
+++ b/test/py/tests/test_env.py
@@ -0,0 +1,221 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Test operation of shell commands relating to environment variables.
+
+import pytest
+
+# FIXME: This might be useful for other tests;
+# perhaps refactor it into ConsoleBase or some other state object?
+class StateTestEnv(object):
+ '''Container that represents the state of all U-Boot environment variables.
+    This enables quick determination of existent/non-existent variable
+ names.
+ '''
+
+ def __init__(self, u_boot_console):
+ '''Initialize a new StateTestEnv object.
+
+ Args:
+ u_boot_console: A U-Boot console.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.u_boot_console = u_boot_console
+ self.get_env()
+ self.set_var = self.get_non_existent_var()
+
+ def get_env(self):
+ '''Read all current environment variables from U-Boot.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ response = self.u_boot_console.run_command('printenv')
+ self.env = {}
+ for l in response.splitlines():
+            if '=' not in l:
+ continue
+ (var, value) = l.strip().split('=', 1)
+ self.env[var] = value
+
+ def get_existent_var(self):
+ '''Return the name of an environment variable that exists.
+
+ Args:
+ None.
+
+ Returns:
+ The name of an environment variable.
+ '''
+
+ for var in self.env:
+ return var
+
+ def get_non_existent_var(self):
+ '''Return the name of an environment variable that does not exist.
+
+ Args:
+ None.
+
+ Returns:
+ The name of an environment variable.
+ '''
+
+ n = 0
+ while True:
+ var = 'test_env_' + str(n)
+ if var not in self.env:
+ return var
+ n += 1
+
+@pytest.fixture(scope='module')
+def state_test_env(u_boot_console):
+ '''pytest fixture to provide a StateTestEnv object to tests.'''
+
+ return StateTestEnv(u_boot_console)
+
+def unset_var(state_test_env, var):
+ '''Unset an environment variable.
+
+ This both executes a U-Boot shell command and updates a StateTestEnv
+ object.
+
+ Args:
+        state_test_env: The StateTestEnv object to update.
+ var: The variable name to unset.
+
+ Returns:
+ Nothing.
+ '''
+
+ state_test_env.u_boot_console.run_command('setenv %s' % var)
+ if var in state_test_env.env:
+ del state_test_env.env[var]
+
+def set_var(state_test_env, var, value):
+ '''Set an environment variable.
+
+ This both executes a U-Boot shell command and updates a StateTestEnv
+ object.
+
+ Args:
+        state_test_env: The StateTestEnv object to update.
+ var: The variable name to set.
+ value: The value to set the variable to.
+
+ Returns:
+ Nothing.
+ '''
+
+ state_test_env.u_boot_console.run_command('setenv %s "%s"' % (var, value))
+ state_test_env.env[var] = value
+
+def validate_empty(state_test_env, var):
+ '''Validate that a variable is not set, using U-Boot shell commands.
+
+ Args:
+ var: The variable name to test.
+
+ Returns:
+ Nothing.
+ '''
+
+ response = state_test_env.u_boot_console.run_command('echo $%s' % var)
+ assert response == ''
+
+def validate_set(state_test_env, var, value):
+ '''Validate that a variable is set, using U-Boot shell commands.
+
+ Args:
+ var: The variable name to test.
+ value: The value the variable is expected to have.
+
+ Returns:
+ Nothing.
+ '''
+
+ # echo does not preserve leading, internal, or trailing whitespace in the
+ # value. printenv does, and hence allows more complete testing.
+ response = state_test_env.u_boot_console.run_command('printenv %s' % var)
+ assert response == ('%s=%s' % (var, value))
+
+def test_env_echo_exists(state_test_env):
+ '''Test echoing a variable that exists.'''
+
+ var = state_test_env.get_existent_var()
+ value = state_test_env.env[var]
+ validate_set(state_test_env, var, value)
+
+def test_env_echo_non_existent(state_test_env):
+ '''Test echoing a variable that doesn't exist.'''
+
+ var = state_test_env.set_var
+ validate_empty(state_test_env, var)
+
+def test_env_printenv_non_existent(state_test_env):
+    '''Test the printenv error message for non-existent variables.'''
+
+ var = state_test_env.set_var
+ c = state_test_env.u_boot_console
+ with c.disable_check('error_notification'):
+ response = c.run_command('printenv %s' % var)
+ assert(response == '## Error: "%s" not defined' % var)
+
+def test_env_unset_non_existent(state_test_env):
+ '''Test unsetting a nonexistent variable.'''
+
+ var = state_test_env.get_non_existent_var()
+ unset_var(state_test_env, var)
+ validate_empty(state_test_env, var)
+
+def test_env_set_non_existent(state_test_env):
+    '''Test setting a non-existent variable.'''
+
+ var = state_test_env.set_var
+ value = 'foo'
+ set_var(state_test_env, var, value)
+ validate_set(state_test_env, var, value)
+
+def test_env_set_existing(state_test_env):
+    '''Test setting an existing variable.'''
+
+ var = state_test_env.set_var
+ value = 'bar'
+ set_var(state_test_env, var, value)
+ validate_set(state_test_env, var, value)
+
+def test_env_unset_existing(state_test_env):
+    '''Test unsetting an existing variable.'''
+
+ var = state_test_env.set_var
+ unset_var(state_test_env, var)
+ validate_empty(state_test_env, var)
+
+def test_env_expansion_spaces(state_test_env):
+ '''Test expanding a variable that contains a space in its value.'''
+
+ var_space = None
+ var_test = None
+ try:
+ var_space = state_test_env.get_non_existent_var()
+ set_var(state_test_env, var_space, ' ')
+
+ var_test = state_test_env.get_non_existent_var()
+ value = ' 1${%(var_space)s}${%(var_space)s} 2 ' % locals()
+ set_var(state_test_env, var_test, value)
+ value = ' 1 2 '
+ validate_set(state_test_env, var_test, value)
+ finally:
+ if var_space:
+ unset_var(state_test_env, var_space)
+ if var_test:
+ unset_var(state_test_env, var_test)
diff --git a/test/py/tests/test_help.py b/test/py/tests/test_help.py
new file mode 100644
index 0000000000..894f3b5f17
--- /dev/null
+++ b/test/py/tests/test_help.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+def test_help(u_boot_console):
+ '''Test that the "help" command can be executed.'''
+
+ u_boot_console.run_command('help')
diff --git a/test/py/tests/test_hush_if_test.py b/test/py/tests/test_hush_if_test.py
new file mode 100644
index 0000000000..cf4c3aeeb7
--- /dev/null
+++ b/test/py/tests/test_hush_if_test.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Test operation of the "if" shell command.
+
+import os
+import os.path
+import pytest
+
+# The list of "if test" conditions to test.
+subtests = (
+ # Base if functionality.
+
+ ('true', True),
+ ('false', False),
+
+ # Basic operators.
+
+ ('test aaa = aaa', True),
+ ('test aaa = bbb', False),
+
+ ('test aaa != bbb', True),
+ ('test aaa != aaa', False),
+
+ ('test aaa < bbb', True),
+ ('test bbb < aaa', False),
+
+ ('test bbb > aaa', True),
+ ('test aaa > bbb', False),
+
+ ('test 123 -eq 123', True),
+ ('test 123 -eq 456', False),
+
+ ('test 123 -ne 456', True),
+ ('test 123 -ne 123', False),
+
+ ('test 123 -lt 456', True),
+ ('test 123 -lt 123', False),
+ ('test 456 -lt 123', False),
+
+ ('test 123 -le 456', True),
+ ('test 123 -le 123', True),
+ ('test 456 -le 123', False),
+
+ ('test 456 -gt 123', True),
+ ('test 123 -gt 123', False),
+ ('test 123 -gt 456', False),
+
+ ('test 456 -ge 123', True),
+ ('test 123 -ge 123', True),
+ ('test 123 -ge 456', False),
+
+ ('test -z ""', True),
+ ('test -z "aaa"', False),
+
+ ('test -n "aaa"', True),
+ ('test -n ""', False),
+
+ # Inversion of simple tests.
+
+ ('test ! aaa = aaa', False),
+ ('test ! aaa = bbb', True),
+ ('test ! ! aaa = aaa', True),
+ ('test ! ! aaa = bbb', False),
+
+ # Binary operators.
+
+ ('test aaa != aaa -o bbb != bbb', False),
+ ('test aaa != aaa -o bbb = bbb', True),
+ ('test aaa = aaa -o bbb != bbb', True),
+ ('test aaa = aaa -o bbb = bbb', True),
+
+ ('test aaa != aaa -a bbb != bbb', False),
+ ('test aaa != aaa -a bbb = bbb', False),
+ ('test aaa = aaa -a bbb != bbb', False),
+ ('test aaa = aaa -a bbb = bbb', True),
+
+ # Inversion within binary operators.
+
+ ('test ! aaa != aaa -o ! bbb != bbb', True),
+ ('test ! aaa != aaa -o ! bbb = bbb', True),
+ ('test ! aaa = aaa -o ! bbb != bbb', True),
+ ('test ! aaa = aaa -o ! bbb = bbb', False),
+
+ ('test ! ! aaa != aaa -o ! ! bbb != bbb', False),
+ ('test ! ! aaa != aaa -o ! ! bbb = bbb', True),
+ ('test ! ! aaa = aaa -o ! ! bbb != bbb', True),
+ ('test ! ! aaa = aaa -o ! ! bbb = bbb', True),
+
+ # -z operator.
+
+ ('test -z "$ut_var_nonexistent"', True),
+ ('test -z "$ut_var_exists"', False),
+)
+
+def exec_hush_if(u_boot_console, expr, result):
+ '''Execute a shell "if" command, and validate its result.'''
+
+ cmd = 'if ' + expr + '; then echo true; else echo false; fi'
+ response = u_boot_console.run_command(cmd)
+ assert response.strip() == str(result).lower()
+
+@pytest.mark.buildconfigspec('sys_hush_parser')
+def test_hush_if_test_setup(u_boot_console):
+ '''Set up environment variables used during the "if" tests.'''
+
+ u_boot_console.run_command('setenv ut_var_nonexistent')
+ u_boot_console.run_command('setenv ut_var_exists 1')
+
+@pytest.mark.buildconfigspec('sys_hush_parser')
+@pytest.mark.parametrize('expr,result', subtests)
+def test_hush_if_test(u_boot_console, expr, result):
+ '''Test a single "if test" condition.'''
+
+ exec_hush_if(u_boot_console, expr, result)
+
+@pytest.mark.buildconfigspec('sys_hush_parser')
+def test_hush_if_test_teardown(u_boot_console):
+ '''Clean up environment variables used during the "if" tests.'''
+
+ u_boot_console.run_command('setenv ut_var_exists')
+
+@pytest.mark.buildconfigspec('sys_hush_parser')
+# We might test this on real filesystems via UMS, DFU, 'save', etc.
+# Of those, only UMS currently allows file removal though.
+@pytest.mark.boardspec('sandbox')
+def test_hush_if_test_host_file_exists(u_boot_console):
+ '''Test the "if test -e" shell command.'''
+
+ test_file = u_boot_console.config.result_dir + \
+ '/creating_this_file_breaks_u_boot_tests'
+
+ try:
+ os.unlink(test_file)
+ except:
+ pass
+ assert not os.path.exists(test_file)
+
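+    # Illustrative note: on sandbox, "hostfs -" directs the command at the
+    # host's filesystem rather than an emulated block device.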
+ expr = 'test -e hostfs - ' + test_file
+ exec_hush_if(u_boot_console, expr, False)
+
+ try:
+        with open(test_file, 'wb'):
+ pass
+ assert os.path.exists(test_file)
+
+ expr = 'test -e hostfs - ' + test_file
+ exec_hush_if(u_boot_console, expr, True)
+ finally:
+ os.unlink(test_file)
+
+ expr = 'test -e hostfs - ' + test_file
+ exec_hush_if(u_boot_console, expr, False)
diff --git a/test/py/tests/test_md.py b/test/py/tests/test_md.py
new file mode 100644
index 0000000000..94603c7df6
--- /dev/null
+++ b/test/py/tests/test_md.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+import pytest
+
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_md(u_boot_console):
+ '''Test that md reads memory as expected, and that memory can be modified
+ using the mw command.'''
+
+ ram_base = u_boot_console.find_ram_base()
+ addr = '%08x' % ram_base
+ val = 'a5f09876'
+ expected_response = addr + ': ' + val
+ u_boot_console.run_command('mw ' + addr + ' 0 10')
+ response = u_boot_console.run_command('md ' + addr + ' 10')
+ assert(not (expected_response in response))
+ u_boot_console.run_command('mw ' + addr + ' ' + val)
+ response = u_boot_console.run_command('md ' + addr + ' 10')
+ assert(expected_response in response)
+
+@pytest.mark.buildconfigspec('cmd_memory')
+def test_md_repeat(u_boot_console):
+ '''Test command repeat (via executing an empty command) operates correctly
+ for "md"; the command must repeat and dump an incrementing address.'''
+
+ ram_base = u_boot_console.find_ram_base()
+ addr_base = '%08x' % ram_base
+ words = 0x10
+ addr_repeat = '%08x' % (ram_base + (words * 4))
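+    # After dumping "words" 32-bit words, the repeated "md" is expected to
+    # continue at ram_base + words * 4, i.e. addr_repeat.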
+ u_boot_console.run_command('md %s %x' % (addr_base, words))
+ response = u_boot_console.run_command('')
+ expected_response = addr_repeat + ': '
+ assert(expected_response in response)
diff --git a/test/py/tests/test_sandbox_exit.py b/test/py/tests/test_sandbox_exit.py
new file mode 100644
index 0000000000..2aa8eb4abc
--- /dev/null
+++ b/test/py/tests/test_sandbox_exit.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+import pytest
+import signal
+
+@pytest.mark.boardspec('sandbox')
+@pytest.mark.buildconfigspec('reset')
+def test_reset(u_boot_console):
+ '''Test that the "reset" command exits sandbox process.'''
+
+ u_boot_console.run_command('reset', wait_for_prompt=False)
+ assert(u_boot_console.validate_exited())
+ u_boot_console.ensure_spawned()
+
+@pytest.mark.boardspec('sandbox')
+def test_ctrl_c(u_boot_console):
+ '''Test that sending SIGINT to sandbox causes it to exit.'''
+
+ u_boot_console.kill(signal.SIGINT)
+ assert(u_boot_console.validate_exited())
+ u_boot_console.ensure_spawned()
diff --git a/test/py/tests/test_shell_basics.py b/test/py/tests/test_shell_basics.py
new file mode 100644
index 0000000000..719ce611d7
--- /dev/null
+++ b/test/py/tests/test_shell_basics.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Test basic shell functionality, such as commands separated by semicolons.
+
+def test_shell_execute(u_boot_console):
+ '''Test any shell command.'''
+
+ response = u_boot_console.run_command('echo hello')
+ assert response.strip() == 'hello'
+
+def test_shell_semicolon_two(u_boot_console):
+    '''Test two shell commands separated by a semicolon.'''
+
+ cmd = 'echo hello; echo world'
+ response = u_boot_console.run_command(cmd)
+ # This validation method ignores the exact whitespace between the strings
+ assert response.index('hello') < response.index('world')
+
+def test_shell_semicolon_three(u_boot_console):
+    '''Test three shell commands separated by a semicolon, with variable
+ expansion dependencies between them.'''
+
+ cmd = 'setenv list 1; setenv list ${list}2; setenv list ${list}3; ' + \
+ 'echo ${list}'
+ response = u_boot_console.run_command(cmd)
+ assert response.strip() == '123'
+ u_boot_console.run_command('setenv list')
+
+def test_shell_run(u_boot_console):
+ '''Test the "run" shell command.'''
+
+ u_boot_console.run_command('setenv foo \"setenv monty 1; setenv python 2\"')
+ u_boot_console.run_command('run foo')
+ response = u_boot_console.run_command('echo $monty')
+ assert response.strip() == '1'
+ response = u_boot_console.run_command('echo $python')
+ assert response.strip() == '2'
+ u_boot_console.run_command('setenv foo')
+ u_boot_console.run_command('setenv monty')
+ u_boot_console.run_command('setenv python')
diff --git a/test/py/tests/test_sleep.py b/test/py/tests/test_sleep.py
new file mode 100644
index 0000000000..64f1ddf9a0
--- /dev/null
+++ b/test/py/tests/test_sleep.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+import pytest
+import time
+
+def test_sleep(u_boot_console):
+ '''Test the sleep command, and validate that it sleeps for approximately
+ the correct amount of time.'''
+
+ # Do this before we time anything, to make sure U-Boot is already running.
+ # Otherwise, the system boot time is included in the time measurement.
+ u_boot_console.ensure_spawned()
+
+ # 3s isn't too long, but is enough to cross a few second boundaries.
+ sleep_time = 3
+ tstart = time.time()
+ u_boot_console.run_command('sleep %d' % sleep_time)
+ tend = time.time()
+ elapsed = tend - tstart
+ delta_to_expected = abs(elapsed - sleep_time)
+ # 0.25s margin is hopefully enough to account for any system overhead.
+ assert delta_to_expected < 0.25
diff --git a/test/py/tests/test_ums.py b/test/py/tests/test_ums.py
new file mode 100644
index 0000000000..a137221c7a
--- /dev/null
+++ b/test/py/tests/test_ums.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Test U-Boot's "ums" command. At present, this test only ensures that a UMS
+# device can be enumerated by the host/test machine. In the future, this test
+# should be enhanced to validate disk IO.
+
+import os
+import pytest
+import time
+
+'''
+Note: This test relies on:
+
+a) boardenv_* to contain configuration values to define which USB ports are
+available for testing. Without this, the test will be skipped automatically.
+For example:
+
+env__usb_dev_ports = (
+ {'tgt_usb_ctlr': '0', 'host_ums_dev_node': '/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0'},
+)
+
+env__block_devs = (
+ {'type': 'mmc', 'id': '0'}, # eMMC; always present
+ {'type': 'mmc', 'id': '1'}, # SD card; present since I plugged one in
+)
+
+b) udev rules to set permissions on devices nodes, so that sudo is not
+required. For example:
+
+ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666"
+
+(You may wish to change the group ID instead of setting the permissions wide
+open. All that matters is that the user ID running the test can access the
+device.)
+'''
+
+def open_ums_device(host_ums_dev_node):
+ '''Attempt to open a device node, returning either the opened file handle,
+ or None on any error.'''
+
+ try:
+ return open(host_ums_dev_node, 'rb')
+ except:
+ return None
+
+def wait_for_ums_device(host_ums_dev_node):
+ '''Continually attempt to open the device node exported by the "ums"
+ command, and either return the opened file handle, or raise an exception
+ after a timeout.'''
+
+ for i in xrange(100):
+ fh = open_ums_device(host_ums_dev_node)
+ if fh:
+ return fh
+ time.sleep(0.1)
+ raise Exception('UMS device did not appear')
+
+def wait_for_ums_device_gone(host_ums_dev_node):
+ '''Continually attempt to open the device node exported by the "ums"
+ command, and either return once the device has disappeared, or raise an
+ exception if it does not before a timeout occurs.'''
+
+ for i in xrange(100):
+ fh = open_ums_device(host_ums_dev_node)
+ if not fh:
+ return
+ fh.close()
+ time.sleep(0.1)
+ raise Exception('UMS device did not disappear')
+
+@pytest.mark.buildconfigspec('cmd_usb_mass_storage')
+def test_ums(u_boot_console, env__usb_dev_port, env__block_devs):
+ '''Test the "ums" command; the host system must be able to enumerate a UMS
+ device when "ums" is running, and this device must disappear when "ums" is
+ aborted.'''
+
+ tgt_usb_ctlr = env__usb_dev_port['tgt_usb_ctlr']
+ host_ums_dev_node = env__usb_dev_port['host_ums_dev_node']
+
+ # We're interested in testing USB device mode on each port, not the cross-
+ # product of that with each device. So, just pick the first entry in the
+ # device list here. We'll test each block device somewhere else.
+ tgt_dev_type = env__block_devs[0]['type']
+ tgt_dev_id = env__block_devs[0]['id']
+
+ cmd = 'ums %s %s %s' % (tgt_usb_ctlr, tgt_dev_type, tgt_dev_id)
+    u_boot_console.run_command(cmd, wait_for_prompt=False)
+ fh = wait_for_ums_device(host_ums_dev_node)
+ fh.read(4096)
+ fh.close()
+ u_boot_console.ctrlc()
+ wait_for_ums_device_gone(host_ums_dev_node)
diff --git a/test/py/tests/test_unknown_cmd.py b/test/py/tests/test_unknown_cmd.py
new file mode 100644
index 0000000000..2de93e0026
--- /dev/null
+++ b/test/py/tests/test_unknown_cmd.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+def test_unknown_command(u_boot_console):
+ '''Test that executing an unknown command causes U-Boot to print an
+ error.'''
+
+ # The "unknown command" error is actively expected here,
+ # so error detection for it is disabled.
+ with u_boot_console.disable_check('unknown_command'):
+ response = u_boot_console.run_command('non_existent_cmd')
+ assert('Unknown command \'non_existent_cmd\' - try \'help\'' in response)
diff --git a/test/py/u_boot_console_base.py b/test/py/u_boot_console_base.py
new file mode 100644
index 0000000000..520f9a9e9f
--- /dev/null
+++ b/test/py/u_boot_console_base.py
@@ -0,0 +1,360 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Common logic to interact with U-Boot via the console. This class provides
+# the interface that tests use to execute U-Boot shell commands and wait for
+# their results. Sub-classes exist to perform board-type-specific setup
+# operations, such as spawning a sub-process for Sandbox, or attaching to the
+# serial console of real hardware.
+
+import multiplexed_log
+import os
+import pytest
+import re
+import sys
+
+# Regexes for text we expect U-Boot to send to the console.
+pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}-[^\r\n]*)')
+pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}-[^\r\n]*)')
+pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ')
+pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'')
+pattern_error_notification = re.compile('## Error: ')
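+# Example console text matched by the sign-on patterns above (values are
+# illustrative only):
+#   U-Boot SPL 2016.01-rc3-00123-gabcdef0 (Jan 01 2016 - 00:00:00)
+#   U-Boot 2016.01-rc3-00123-gabcdef0 (Jan 01 2016 - 00:00:00 +0000)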
+
+class ConsoleDisableCheck(object):
+ '''Context manager (for Python's with statement) that temporarily disables
+ the specified console output error check. This is useful when deliberately
+ executing a command that is known to trigger one of the error checks, in
+ order to test that the error condition is actually raised. This class is
+    used internally by ConsoleBase.disable_check(); it is not intended for
+ direct usage.'''
+
+ def __init__(self, console, check_type):
+ self.console = console
+ self.check_type = check_type
+
+ def __enter__(self):
+ self.console.disable_check_count[self.check_type] += 1
+
+ def __exit__(self, extype, value, traceback):
+ self.console.disable_check_count[self.check_type] -= 1
+
+class ConsoleBase(object):
+ '''The interface through which test functions interact with the U-Boot
+ console. This primarily involves executing shell commands, capturing their
+ results, and checking for common error conditions. Some common utilities
+    are also provided.'''
+
+ def __init__(self, log, config, max_fifo_fill):
+ '''Initialize a U-Boot console connection.
+
+ Can only usefully be called by sub-classes.
+
+ Args:
+            log: A multiplexed_log.Logfile object, to which the U-Boot output
+ will be logged.
+ config: A configuration data structure, as built by conftest.py.
+ max_fifo_fill: The maximum number of characters to send to U-Boot
+ command-line before waiting for U-Boot to echo the characters
+ back. For UART-based HW without HW flow control, this value
+ should be set less than the UART RX FIFO size to avoid
+ overflow, assuming that U-Boot can't keep up with full-rate
+ traffic at the baud rate.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.log = log
+ self.config = config
+ self.max_fifo_fill = max_fifo_fill
+
+ self.logstream = self.log.get_stream('console', sys.stdout)
+
+ # Array slice removes leading/trailing quotes
+ self.prompt = self.config.buildconfig['config_sys_prompt'][1:-1]
+ self.prompt_escaped = re.escape(self.prompt)
+ self.p = None
+ self.disable_check_count = {
+ 'spl_signon': 0,
+ 'main_signon': 0,
+ 'unknown_command': 0,
+ 'error_notification': 0,
+ }
+
+ self.at_prompt = False
+ self.at_prompt_logevt = None
+ self.ram_base = None
+
+ def close(self):
+ '''Terminate the connection to the U-Boot console.
+
+ This function is only useful once all interaction with U-Boot is
+ complete. Once this function is called, data cannot be sent to or
+ received from U-Boot.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ if self.p:
+ self.p.close()
+ self.logstream.close()
+
+ def run_command(self, cmd, wait_for_echo=True, send_nl=True,
+ wait_for_prompt=True):
+ '''Execute a command via the U-Boot console.
+
+ The command is always sent to U-Boot.
+
+ U-Boot echoes any command back to its output, and this function
+ typically waits for that to occur. The wait can be disabled by setting
+ wait_for_echo=False, which is useful e.g. when sending CTRL-C to
+ interrupt a long-running command such as "ums".
+
+ Command execution is typically triggered by sending a newline
+ character. This can be disabled by setting send_nl=False, which is
+ also useful when sending CTRL-C.
+
+ This function typically waits for the command to finish executing, and
+ returns the console output that it generated. This can be disabled by
+ setting wait_for_prompt=False, which is useful when invoking a long-
+ running command such as "ums".
+
+ Args:
+ cmd: The command to send.
+            wait_for_echo: Boolean indicating whether to wait for U-Boot to
+ echo the command text back to its output.
+ send_nl: Boolean indicating whether to send a newline character
+ after the command string.
+ wait_for_prompt: Boolean indicating whether to wait for the
+ command prompt to be sent by U-Boot. This typically occurs
+ immediately after the command has been executed.
+
+ Returns:
+ If wait_for_prompt == False:
+ Nothing.
+ Else:
+ The output from U-Boot during command execution. In other
+                words, the text U-Boot emitted between echoing the command
+                string and emitting the subsequent command prompt.
+ '''
+
+ self.ensure_spawned()
+
+ if self.at_prompt and \
+ self.at_prompt_logevt != self.logstream.logfile.cur_evt:
+ self.logstream.write(self.prompt, implicit=True)
+
+ bad_patterns = []
+ bad_pattern_ids = []
+ if (self.disable_check_count['spl_signon'] == 0 and
+ self.u_boot_spl_signon):
+ bad_patterns.append(self.u_boot_spl_signon_escaped)
+ bad_pattern_ids.append('SPL signon')
+ if self.disable_check_count['main_signon'] == 0:
+ bad_patterns.append(self.u_boot_main_signon_escaped)
+ bad_pattern_ids.append('U-Boot main signon')
+ if self.disable_check_count['unknown_command'] == 0:
+ bad_patterns.append(pattern_unknown_command)
+ bad_pattern_ids.append('Unknown command')
+ if self.disable_check_count['error_notification'] == 0:
+ bad_patterns.append(pattern_error_notification)
+ bad_pattern_ids.append('Error notification')
+ try:
+ self.at_prompt = False
+ if send_nl:
+ cmd += '\n'
+ while cmd:
+ # Limit max outstanding data, so UART FIFOs don't overflow
+ chunk = cmd[:self.max_fifo_fill]
+ cmd = cmd[self.max_fifo_fill:]
+ self.p.send(chunk)
+ if not wait_for_echo:
+ continue
+ chunk = re.escape(chunk)
+ chunk = chunk.replace('\\\n', '[\r\n]')
+ m = self.p.expect([chunk] + bad_patterns)
+ if m != 0:
+ self.at_prompt = False
+ raise Exception('Bad pattern found on console: ' +
+ bad_pattern_ids[m - 1])
+ if not wait_for_prompt:
+ return
+ m = self.p.expect([self.prompt_escaped] + bad_patterns)
+ if m != 0:
+ self.at_prompt = False
+ raise Exception('Bad pattern found on console: ' +
+ bad_pattern_ids[m - 1])
+ self.at_prompt = True
+ self.at_prompt_logevt = self.logstream.logfile.cur_evt
+ # Only strip \r\n; space/TAB might be significant if testing
+ # indentation.
+ return self.p.before.strip('\r\n')
+ except Exception as ex:
+ self.log.error(str(ex))
+ self.cleanup_spawn()
+ raise
+
+ def ctrlc(self):
+ '''Send a CTRL-C character to U-Boot.
+
+ This is useful in order to stop execution of long-running synchronous
+ commands such as "ums".
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.run_command(chr(3), wait_for_echo=False, send_nl=False)
+
+ def ensure_spawned(self):
+ '''Ensure a connection to a correctly running U-Boot instance.
+
+ This may require spawning a new Sandbox process or resetting target
+ hardware, as defined by the implementation sub-class.
+
+ This is an internal function and should not be called directly.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ if self.p:
+ return
+ try:
+ self.at_prompt = False
+ self.log.action('Starting U-Boot')
+ self.p = self.get_spawn()
+ # Real targets can take a long time to scroll large amounts of
+ # text if LCD is enabled. This value may need tweaking in the
+ # future, possibly per-test to be optimal. This works for 'help'
+ # on board 'seaboard'.
+ self.p.timeout = 30000
+ self.p.logfile_read = self.logstream
+ if self.config.buildconfig.get('CONFIG_SPL', False) == 'y':
+ self.p.expect([pattern_u_boot_spl_signon])
+ self.u_boot_spl_signon = self.p.after
+ self.u_boot_spl_signon_escaped = re.escape(self.p.after)
+ else:
+ self.u_boot_spl_signon = None
+ self.p.expect([pattern_u_boot_main_signon])
+ self.u_boot_main_signon = self.p.after
+ self.u_boot_main_signon_escaped = re.escape(self.p.after)
+ build_idx = self.u_boot_main_signon.find(', Build:')
+ if build_idx == -1:
+ self.u_boot_version_string = self.u_boot_main_signon
+ else:
+ self.u_boot_version_string = self.u_boot_main_signon[:build_idx]
+ while True:
+ match = self.p.expect([self.prompt_escaped,
+ pattern_stop_autoboot_prompt])
+ if match == 1:
+ self.p.send(chr(3)) # CTRL-C
+ continue
+ break
+ self.at_prompt = True
+ self.at_prompt_logevt = self.logstream.logfile.cur_evt
+ except Exception as ex:
+ self.log.error(str(ex))
+ self.cleanup_spawn()
+ raise
+
+ def cleanup_spawn(self):
+ '''Shut down all interaction with the U-Boot instance.
+
+ This is used when an error is detected prior to re-establishing a
+ connection with a fresh U-Boot instance.
+
+ This is an internal function and should not be called directly.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ try:
+ if self.p:
+ self.p.close()
+ except:
+ pass
+ self.p = None
+
+ def validate_version_string_in_text(self, text):
+ '''Assert that a command's output includes the U-Boot signon message.
+
+ This is primarily useful for validating the "version" command without
+ duplicating the signon text regex in a test function.
+
+ Args:
+ text: The command output text to check.
+
+ Returns:
+ Nothing. An exception is raised if the validation fails.
+ '''
+
+ assert(self.u_boot_version_string in text)
+
+ def disable_check(self, check_type):
+ '''Temporarily disable an error check of U-Boot's output.
+
+ Create a new context manager (for use with the "with" statement) which
+ temporarily disables a particular console output error check.
+
+ Args:
+ check_type: The type of error-check to disable. Valid values may
+ be found in self.disable_check_count above.
+
+ Returns:
+ A context manager object.
+ '''
+
+ return ConsoleDisableCheck(self, check_type)
+
+ def find_ram_base(self):
+ '''Find the running U-Boot's RAM location.
+
+ Probe the running U-Boot to determine the address of the first bank
+ of RAM. This is useful for tests that test reading/writing RAM, or
+ load/save files that aren't associated with some standard address
+ typically represented in an environment variable such as
+ ${kernel_addr_r}. The value is cached so that it only needs to be
+ actively read once.
+
+ Args:
+ None.
+
+ Returns:
+ The address of U-Boot's first RAM bank, as an integer.
+ '''
+
+ if self.config.buildconfig.get('config_cmd_bdi', 'n') != 'y':
+ pytest.skip('bdinfo command not supported')
+ if self.ram_base == -1:
+ pytest.skip('Previously failed to find RAM bank start')
+ if self.ram_base is not None:
+ return self.ram_base
+
+ with self.log.section('find_ram_base'):
+ response = self.run_command('bdinfo')
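+            # bdinfo output typically includes a line such as (illustrative):
+            #   -> start    = 0x80000000
+            # from which the base of the first RAM bank is parsed below.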
+ for l in response.split('\n'):
+ if '-> start' in l:
+ self.ram_base = int(l.split('=')[1].strip(), 16)
+ break
+ if self.ram_base is None:
+ self.ram_base = -1
+ raise Exception('Failed to find RAM bank start in `bdinfo`')
+
+ return self.ram_base
diff --git a/test/py/u_boot_console_exec_attach.py b/test/py/u_boot_console_exec_attach.py
new file mode 100644
index 0000000000..0ca9e7c178
--- /dev/null
+++ b/test/py/u_boot_console_exec_attach.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Logic to interact with U-Boot running on real hardware, typically via a
+# physical serial port.
+
+import sys
+from u_boot_spawn import Spawn
+from u_boot_console_base import ConsoleBase
+
+class ConsoleExecAttach(ConsoleBase):
+ '''Represents a physical connection to a U-Boot console, typically via a
+ serial port. This implementation executes a sub-process to attach to the
+ console, expecting that the stdin/out of the sub-process will be forwarded
+    to/from the physical hardware. This approach isolates the test
+    infrastructure from the user-/installation-specific details of how to
+ communicate with, and the identity of, serial ports etc.'''
+
+ def __init__(self, log, config):
+ '''Initialize a U-Boot console connection.
+
+ Args:
+ log: A multiplexed_log.Logfile instance.
+ config: A "configuration" object as defined in conftest.py.
+
+ Returns:
+ Nothing.
+ '''
+
+ # The max_fifo_fill value might need tweaking per-board/-SoC?
+ # 1 would be safe anywhere, but is very slow (a pexpect issue?).
+ # 16 is a common FIFO size.
+ # HW flow control would mean this could be infinite.
+ super(ConsoleExecAttach, self).__init__(log, config, max_fifo_fill=16)
+
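+        # "u-boot-test-flash", like the other u-boot-test-* commands used
+        # below, is expected to be a user-supplied hook script found via
+        # $PATH; see test/py/README.md for details.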
+ self.log.action('Flashing U-Boot')
+ cmd = ['u-boot-test-flash', config.board_type, config.board_identity]
+ runner = self.log.get_runner(cmd[0], sys.stdout)
+ runner.run(cmd)
+ runner.close()
+
+ def get_spawn(self):
+ '''Connect to a fresh U-Boot instance.
+
+ The target board is reset, so that U-Boot begins running from scratch.
+
+ Args:
+ None.
+
+ Returns:
+ A u_boot_spawn.Spawn object that is attached to U-Boot.
+ '''
+
+ args = [self.config.board_type, self.config.board_identity]
+ s = Spawn(['u-boot-test-console'] + args)
+
+ self.log.action('Resetting board')
+ cmd = ['u-boot-test-reset'] + args
+ runner = self.log.get_runner(cmd[0], sys.stdout)
+ runner.run(cmd)
+ runner.close()
+
+ return s
diff --git a/test/py/u_boot_console_sandbox.py b/test/py/u_boot_console_sandbox.py
new file mode 100644
index 0000000000..88b137e8c3
--- /dev/null
+++ b/test/py/u_boot_console_sandbox.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2015 Stephen Warren
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Logic to interact with the sandbox port of U-Boot, running as a sub-process.
+
+import time
+from u_boot_spawn import Spawn
+from u_boot_console_base import ConsoleBase
+
+class ConsoleSandbox(ConsoleBase):
+ '''Represents a connection to a sandbox U-Boot console, executed as a sub-
+ process.'''
+
+ def __init__(self, log, config):
+ '''Initialize a U-Boot console connection.
+
+ Args:
+ log: A multiplexed_log.Logfile instance.
+ config: A "configuration" object as defined in conftest.py.
+
+ Returns:
+ Nothing.
+ '''
+
+ super(ConsoleSandbox, self).__init__(log, config, max_fifo_fill=1024)
+
+ def get_spawn(self):
+ '''Connect to a fresh U-Boot instance.
+
+ A new sandbox process is created, so that U-Boot begins running from
+ scratch.
+
+ Args:
+ None.
+
+ Returns:
+ A u_boot_spawn.Spawn object that is attached to U-Boot.
+ '''
+
+ return Spawn([self.config.build_dir + '/u-boot'])
+
+ def kill(self, sig):
+ '''Send a specific Unix signal to the sandbox process.
+
+ Args:
+ sig: The Unix signal to send to the process.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.ensure_spawned()
+ self.log.action('kill %d' % sig)
+ self.p.kill(sig)
+
+ def validate_exited(self):
+ '''Determine whether the sandbox process has exited.
+
+ If required, this function waits a reasonable time for the process to
+ exit.
+
+ Args:
+ None.
+
+ Returns:
+ Boolean indicating whether the process has exited.
+ '''
+
+ p = self.p
+ self.p = None
+ for i in xrange(100):
+ ret = not p.isalive()
+ if ret:
+ break
+ time.sleep(0.1)
+ p.close()
+ return ret
diff --git a/test/py/u_boot_spawn.py b/test/py/u_boot_spawn.py
new file mode 100644
index 0000000000..1baee63df2
--- /dev/null
+++ b/test/py/u_boot_spawn.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0
+
+# Logic to spawn a sub-process and interact with its stdio.
+
+import os
+import re
+import pty
+import signal
+import select
+import time
+
+class Timeout(Exception):
+ '''An exception sub-class that indicates that a timeout occurred.'''
+ pass
+
+class Spawn(object):
+ '''Represents the stdio of a freshly created sub-process. Commands may be
+ sent to the process, and responses waited for.
+ '''
+
+ def __init__(self, args):
+ '''Spawn (fork/exec) the sub-process.
+
+ Args:
+            args: array of process arguments. argv[0] is the command to execute.
+
+ Returns:
+ Nothing.
+ '''
+
+ self.waited = False
+ self.buf = ''
+ self.logfile_read = None
+ self.before = ''
+ self.after = ''
+ self.timeout = None
+
+ (self.pid, self.fd) = pty.fork()
+ if self.pid == 0:
+ try:
+ # For some reason, SIGHUP is set to SIG_IGN at this point when
+ # run under "go" (www.go.cd). Perhaps this happens under any
+ # background (non-interactive) system?
+ signal.signal(signal.SIGHUP, signal.SIG_DFL)
+ os.execvp(args[0], args)
+ except:
+                print 'CHILD EXCEPTION:'
+ import traceback
+ traceback.print_exc()
+ finally:
+ os._exit(255)
+
+ self.poll = select.poll()
+ self.poll.register(self.fd, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP | select.POLLNVAL)
+
+ def kill(self, sig):
+ '''Send unix signal "sig" to the child process.
+
+ Args:
+ sig: The signal number to send.
+
+ Returns:
+ Nothing.
+ '''
+
+ os.kill(self.pid, sig)
+
+ def isalive(self):
+ '''Determine whether the child process is still running.
+
+ Args:
+ None.
+
+ Returns:
+ Boolean indicating whether process is alive.
+ '''
+
+ if self.waited:
+ return False
+
+ w = os.waitpid(self.pid, os.WNOHANG)
+ if w[0] == 0:
+ return True
+
+ self.waited = True
+ return False
+
+ def send(self, data):
+ '''Send data to the sub-process's stdin.
+
+ Args:
+ data: The data to send to the process.
+
+ Returns:
+ Nothing.
+ '''
+
+ os.write(self.fd, data)
+
+ def expect(self, patterns):
+ '''Wait for the sub-process to emit specific data.
+
+ This function waits for the process to emit one pattern from the
+ supplied list of patterns, or for a timeout to occur.
+
+ Args:
+ patterns: A list of strings or regex objects that we expect to
+ see in the sub-process' stdout.
+
+ Returns:
+ The index within the patterns array of the pattern the process
+ emitted.
+
+ Notable exceptions:
+ Timeout, if the process did not emit any of the patterns within
+ the expected time.
+ '''
+
+ for pi in xrange(len(patterns)):
+ if type(patterns[pi]) == type(''):
+ patterns[pi] = re.compile(patterns[pi])
+
+ try:
+ while True:
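+                # Find the pattern that matches earliest in the data buffered
+                # so far; if none matches yet, wait for and read more output
+                # from the sub-process.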
+ earliest_m = None
+ earliest_pi = None
+ for pi in xrange(len(patterns)):
+ pattern = patterns[pi]
+ m = pattern.search(self.buf)
+ if not m:
+ continue
+ if earliest_m and m.start() > earliest_m.start():
+ continue
+ earliest_m = m
+ earliest_pi = pi
+ if earliest_m:
+ pos = earliest_m.start()
+ posafter = earliest_m.end() + 1
+ self.before = self.buf[:pos]
+ self.after = self.buf[pos:posafter]
+ self.buf = self.buf[posafter:]
+ return earliest_pi
+ events = self.poll.poll(self.timeout)
+ if not events:
+ raise Timeout()
+ c = os.read(self.fd, 1024)
+ if not c:
+ raise EOFError()
+ if self.logfile_read:
+ self.logfile_read.write(c)
+ self.buf += c
+ finally:
+ if self.logfile_read:
+ self.logfile_read.flush()
+
+ def close(self):
+ '''Close the stdio connection to the sub-process.
+
+ This also waits a reasonable time for the sub-process to stop running.
+
+ Args:
+ None.
+
+ Returns:
+ Nothing.
+ '''
+
+ os.close(self.fd)
+ for i in xrange(100):
+ if not self.isalive():
+ break
+ time.sleep(0.1)