Diffstat (limited to 'tools')
39 files changed, 1028 insertions, 1383 deletions
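The main thread of this commit is the removal of the fdt_select/fdt_fallback/fdt_normal split: tools/dtoc/fdt.py now talks to libfdt directly through the swig-built _libfdt.so that tools/Makefile produces via pylibfdt's setup.py, and binman/dtoc simply import fdt and call fdt.FdtScan(). The sketch below is an editor's illustration, not part of the diff; the sys.path entries mirror what tools/binman/binman.py sets up, it assumes an in-tree build (so the generated libfdt.py/_libfdt.so sit in tools/), and 'test.dtb' is a placeholder filename.

# Editor's sketch (assumed paths and filename; not from the commit):
# drive the new libfdt-backed fdt.py from the U-Boot source root.
import sys

# Mirror the path setup done in tools/binman/binman.py
sys.path.append('tools/patman')
sys.path.append('tools/dtoc')
sys.path.append('tools')          # the swig-built _libfdt.so lives here

import fdt

# FdtScan() creates an Fdt, runs the filename through
# fdt_util.EnsureCompiled() and scans the whole tree into Node/Prop objects
dtb = fdt.FdtScan('test.dtb')

node = dtb.GetNode('/microcode/update@0')
if node:
    data_prop = node.props.get('data')
    if data_prop:
        # Offset of the property (struct fdt_property) within the blob, as
        # used by Entry_u_boot_dtb_with_ucode to patch the microcode pointer
        print('data property at file offset %d' % data_prop.GetOffset())
        node.DeleteProp('data')   # also invalidates the node-offset cache
        dtb.Pack()                # reclaim the space freed by the delete
        dtb.Flush()               # write the modified blob back to disk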
diff --git a/tools/Makefile b/tools/Makefile index 2fc4a583d4..cb1683e153 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -60,9 +60,21 @@ hostprogs-$(CONFIG_FIT_SIGNATURE) += fit_info fit_check_sign FIT_SIG_OBJS-$(CONFIG_FIT_SIGNATURE) := common/image-sig.o # Flattened device tree objects -LIBFDT_OBJS := $(addprefix lib/libfdt/, \ - fdt.o fdt_ro.o fdt_rw.o fdt_strerror.o fdt_wip.o \ - fdt_region.o fdt_sw.o) +LIBFDT_CSRCS := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c \ + fdt_empty_tree.c fdt_addresses.c fdt_overlay.c \ + fdt_region.c + +# Unfortunately setup.py below cannot handle srctree being ".." which it often +# is. It fails with an error like: +# Fatal error: can't create build/temp.linux-x86_64-2.7/../lib/libfdt/fdt.o: +# No such file or directory +# To fix this, use an absolute path. +libfdt_tree := $(shell readlink -f $(srctree)/lib/libfdt) + +LIBFDT_SRCS := $(addprefix $(libfdt_tree)/, $(LIBFDT_CSRCS)) +LIBFDT_SWIG := $(addprefix $(libfdt_tree)/, pylibfdt/libfdt.i) +LIBFDT_OBJS := $(addprefix lib/libfdt/, $(patsubst %.c, %.o, $(LIBFDT_CSRCS))) + RSA_OBJS-$(CONFIG_FIT_SIGNATURE) := $(addprefix lib/rsa/, \ rsa-sign.o rsa-verify.o rsa-checksum.o \ rsa-mod-exp.o) @@ -112,22 +124,22 @@ mkimage-objs := $(dumpimage-mkimage-objs) mkimage.o fit_info-objs := $(dumpimage-mkimage-objs) fit_info.o fit_check_sign-objs := $(dumpimage-mkimage-objs) fit_check_sign.o -# Build a libfdt Python module if swig is available -# Use 'sudo apt-get install swig libpython-dev' to enable this -hostprogs-y += \ - $(if $(shell which swig 2> /dev/null),_libfdt.so) -_libfdt.so-sharedobjs += $(LIBFDT_OBJS) -libfdt: - -tools/_libfdt.so: $(patsubst %.o,%.c,$(LIBFDT_OBJS)) tools/libfdt_wrap.c - LDFLAGS="$(HOSTLDFLAGS)" CFLAGS= ${PYTHON} $(srctree)/lib/libfdt/setup.py \ - "$(_hostc_flags)" $^ - mv _libfdt.so $@ - -tools/libfdt_wrap.c: $(srctree)/lib/libfdt/libfdt.swig - swig -python -o $@ $< - -# TODO(sjg@chromium.org): Is this correct on Mac OS? +# Unfortunately setup.py (or actually the Python distutil implementation) +# puts files into the same directory as the .i file. We cannot touch the source +# directory, so we copy the .i file into the tools/ build subdirectory before +# calling setup. This directory is safe to write to. This ensures that we get +# all three files in $(obj)/tools: _libfdt.so, libfdt.py and libfdt_wrap.c +# The latter is a temporary file which we could actually remove. +tools/_libfdt.so: $(LIBFDT_SRCS) $(LIBFDT_SWIG) + cp $(LIBFDT_SWIG) tools/. 
+ unset CC; \ + unset CROSS_COMPILE; \ + LDFLAGS="$(HOSTLDFLAGS)" CFLAGS= VERSION="u-boot-$(UBOOTVERSION)" \ + CPPFLAGS="$(_hostc_flags)" OBJDIR=tools \ + SOURCES="$(LIBFDT_SRCS) tools/libfdt.i" \ + SWIG_OPTS="-I$(srctree)/lib/libfdt -I$(srctree)/lib" \ + $(libfdt_tree)/pylibfdt/setup.py --quiet build_ext \ + --build-lib tools ifneq ($(CONFIG_MX23)$(CONFIG_MX28),) # Add CONFIG_MXS into host CFLAGS, so we can check whether or not register @@ -216,6 +228,10 @@ clean-dirs := lib common always := $(hostprogs-y) +# Build a libfdt Python module if swig is available +# Use 'sudo apt-get install swig libpython-dev' to enable this +always += $(if $(shell which swig 2> /dev/null),_libfdt.so) + # Generated LCD/video logo LOGO_H = $(objtree)/include/bmp_logo.h LOGO_DATA_H = $(objtree)/include/bmp_logo_data.h diff --git a/tools/binman/binman.py b/tools/binman/binman.py index 857d698b4c..95d3a048d8 100755 --- a/tools/binman/binman.py +++ b/tools/binman/binman.py @@ -21,6 +21,9 @@ sys.path.append(os.path.join(our_path, '../patman')) sys.path.append(os.path.join(our_path, '../dtoc')) sys.path.append(os.path.join(our_path, '../')) +# Bring in the libfdt module +sys.path.append('tools') + # Also allow entry-type modules to be brought in from the etype directory. sys.path.append(os.path.join(our_path, 'etype')) diff --git a/tools/binman/control.py b/tools/binman/control.py index e90967807c..e9d48df030 100644 --- a/tools/binman/control.py +++ b/tools/binman/control.py @@ -12,7 +12,7 @@ import sys import tools import command -import fdt_select +import fdt import fdt_util from image import Image import tout @@ -40,15 +40,15 @@ def _ReadImageDesc(binman_node): images['image'] = Image('image', binman_node) return images -def _FindBinmanNode(fdt): +def _FindBinmanNode(dtb): """Find the 'binman' node in the device tree Args: - fdt: Fdt object to scan + dtb: Fdt object to scan Returns: Node object of /binman node, or None if not found """ - for node in fdt.GetRoot().subnodes: + for node in dtb.GetRoot().subnodes: if node.name == 'binman': return node return None @@ -92,8 +92,8 @@ def Binman(options, args): try: tools.SetInputDirs(options.indir) tools.PrepareOutputDir(options.outdir, options.preserve) - fdt = fdt_select.FdtScan(dtb_fname) - node = _FindBinmanNode(fdt) + dtb = fdt.FdtScan(dtb_fname) + node = _FindBinmanNode(dtb) if not node: raise ValueError("Device tree '%s' does not have a 'binman' " "node" % dtb_fname) diff --git a/tools/binman/etype/u_boot_dtb_with_ucode.py b/tools/binman/etype/u_boot_dtb_with_ucode.py index fc02c67c14..a384a759c4 100644 --- a/tools/binman/etype/u_boot_dtb_with_ucode.py +++ b/tools/binman/etype/u_boot_dtb_with_ucode.py @@ -6,7 +6,7 @@ # Entry-type module for U-Boot device tree with the microcode removed # -import fdt_select +import fdt from entry import Entry from blob import Entry_blob import tools @@ -44,9 +44,8 @@ class Entry_u_boot_dtb_with_ucode(Entry_blob): fd.write(self.data) # Remove the microcode - fdt = fdt_select.FdtScan(fname) - fdt.Scan() - ucode = fdt.GetNode('/microcode') + dtb = fdt.FdtScan(fname) + ucode = dtb.GetNode('/microcode') if not ucode: raise self.Raise("No /microcode node found in '%s'" % fname) @@ -57,20 +56,15 @@ class Entry_u_boot_dtb_with_ucode(Entry_blob): data_prop = node.props.get('data') if data_prop: self.ucode_data += ''.join(data_prop.bytes) - if not self.collate: - poffset = data_prop.GetOffset() - if poffset is None: - # We cannot obtain a property offset. Collate instead. 
- self.collate = True - else: - # Find the offset in the device tree of the ucode data - self.ucode_offset = poffset + 12 - self.ucode_size = len(data_prop.bytes) if self.collate: prop = node.DeleteProp('data') + else: + # Find the offset in the device tree of the ucode data + self.ucode_offset = data_prop.GetOffset() + 12 + self.ucode_size = len(data_prop.bytes) if self.collate: - fdt.Pack() - fdt.Flush() + dtb.Pack() + dtb.Flush() # Make this file the contents of this entry self._pathname = fname diff --git a/tools/binman/fdt_test.py b/tools/binman/fdt_test.py index 1d9494e52f..249a9ea388 100644 --- a/tools/binman/fdt_test.py +++ b/tools/binman/fdt_test.py @@ -11,7 +11,8 @@ import sys import tempfile import unittest -from fdt_select import FdtScan +import fdt +from fdt import FdtScan import fdt_util import tools @@ -28,21 +29,56 @@ class TestFdt(unittest.TestCase): def GetCompiled(self, fname): return fdt_util.EnsureCompiled(self.TestFile(fname)) - def _DeleteProp(self, fdt): - node = fdt.GetNode('/microcode/update@0') + def _DeleteProp(self, dt): + node = dt.GetNode('/microcode/update@0') node.DeleteProp('data') def testFdtNormal(self): fname = self.GetCompiled('34_x86_ucode.dts') - fdt = FdtScan(fname) - self._DeleteProp(fdt) + dt = FdtScan(fname) + self._DeleteProp(dt) - def testFdtFallback(self): - fname = self.GetCompiled('34_x86_ucode.dts') - fdt = FdtScan(fname, True) - fdt.GetProp('/microcode/update@0', 'data') - self.assertEqual('fred', - fdt.GetProp('/microcode/update@0', 'none', default='fred')) - self.assertEqual('12345678 12345679', - fdt.GetProp('/microcode/update@0', 'data', typespec='x')) - self._DeleteProp(fdt) + def testFdtNormalProp(self): + fname = self.GetCompiled('45_prop_test.dts') + dt = FdtScan(fname) + node = dt.GetNode('/binman/intel-me') + self.assertEquals('intel-me', node.name) + val = fdt_util.GetString(node, 'filename') + self.assertEquals(str, type(val)) + self.assertEquals('me.bin', val) + + prop = node.props['intval'] + self.assertEquals(fdt.TYPE_INT, prop.type) + self.assertEquals(3, fdt_util.GetInt(node, 'intval')) + + prop = node.props['intarray'] + self.assertEquals(fdt.TYPE_INT, prop.type) + self.assertEquals(list, type(prop.value)) + self.assertEquals(2, len(prop.value)) + self.assertEquals([5, 6], + [fdt_util.fdt32_to_cpu(val) for val in prop.value]) + + prop = node.props['byteval'] + self.assertEquals(fdt.TYPE_BYTE, prop.type) + self.assertEquals(chr(8), prop.value) + + prop = node.props['bytearray'] + self.assertEquals(fdt.TYPE_BYTE, prop.type) + self.assertEquals(list, type(prop.value)) + self.assertEquals(str, type(prop.value[0])) + self.assertEquals(3, len(prop.value)) + self.assertEquals([chr(1), '#', '4'], prop.value) + + prop = node.props['longbytearray'] + self.assertEquals(fdt.TYPE_INT, prop.type) + self.assertEquals(0x090a0b0c, fdt_util.GetInt(node, 'longbytearray')) + + prop = node.props['stringval'] + self.assertEquals(fdt.TYPE_STRING, prop.type) + self.assertEquals('message2', fdt_util.GetString(node, 'stringval')) + + prop = node.props['stringarray'] + self.assertEquals(fdt.TYPE_STRING, prop.type) + self.assertEquals(list, type(prop.value)) + self.assertEquals(3, len(prop.value)) + self.assertEquals(['another', 'multi-word', 'message'], prop.value) diff --git a/tools/binman/func_test.py b/tools/binman/func_test.py index 740fa9e4e2..8b4db41659 100644 --- a/tools/binman/func_test.py +++ b/tools/binman/func_test.py @@ -21,7 +21,7 @@ import cmdline import command import control import entry -import fdt_select +import fdt import 
fdt_util import tools import tout @@ -658,8 +658,8 @@ class TestFunctional(unittest.TestCase): fname = tools.GetOutputFilename('test.dtb') with open(fname, 'wb') as fd: fd.write(second) - fdt = fdt_select.FdtScan(fname) - ucode = fdt.GetNode('/microcode') + dtb = fdt.FdtScan(fname) + ucode = dtb.GetNode('/microcode') self.assertTrue(ucode) for node in ucode.subnodes: self.assertFalse(node.props.get('data')) @@ -683,7 +683,7 @@ class TestFunctional(unittest.TestCase): self.assertEqual('nodtb with microcode' + pos_and_size + ' somewhere in here', first) - def _RunPackUbootSingleMicrocode(self, collate): + def _RunPackUbootSingleMicrocode(self): """Test that x86 microcode can be handled correctly We expect to see the following in the image, in order: @@ -695,8 +695,6 @@ class TestFunctional(unittest.TestCase): # We need the libfdt library to run this test since only that allows # finding the offset of a property. This is required by # Entry_u_boot_dtb_with_ucode.ObtainContents(). - if not fdt_select.have_libfdt: - return data = self._DoReadFile('35_x86_single_ucode.dts', True) second = data[len(U_BOOT_NODTB_DATA):] @@ -705,34 +703,22 @@ class TestFunctional(unittest.TestCase): third = second[fdt_len:] second = second[:fdt_len] - if not collate: - ucode_data = struct.pack('>2L', 0x12345678, 0x12345679) - self.assertIn(ucode_data, second) - ucode_pos = second.find(ucode_data) + len(U_BOOT_NODTB_DATA) + ucode_data = struct.pack('>2L', 0x12345678, 0x12345679) + self.assertIn(ucode_data, second) + ucode_pos = second.find(ucode_data) + len(U_BOOT_NODTB_DATA) - # Check that the microcode pointer was inserted. It should match the - # expected position and size - pos_and_size = struct.pack('<2L', 0xfffffe00 + ucode_pos, - len(ucode_data)) - first = data[:len(U_BOOT_NODTB_DATA)] - self.assertEqual('nodtb with microcode' + pos_and_size + - ' somewhere in here', first) + # Check that the microcode pointer was inserted. It should match the + # expected position and size + pos_and_size = struct.pack('<2L', 0xfffffe00 + ucode_pos, + len(ucode_data)) + first = data[:len(U_BOOT_NODTB_DATA)] + self.assertEqual('nodtb with microcode' + pos_and_size + + ' somewhere in here', first) def testPackUbootSingleMicrocode(self): """Test that x86 microcode can be handled correctly with fdt_normal. """ - self._RunPackUbootSingleMicrocode(False) - - def testPackUbootSingleMicrocodeFallback(self): - """Test that x86 microcode can be handled correctly with fdt_fallback. - - This only supports collating the microcode. 
- """ - try: - old_val = fdt_select.UseFallback(True) - self._RunPackUbootSingleMicrocode(True) - finally: - fdt_select.UseFallback(old_val) + self._RunPackUbootSingleMicrocode() def testUBootImg(self): """Test that u-boot.img can be put in a file""" @@ -763,14 +749,12 @@ class TestFunctional(unittest.TestCase): def testMicrocodeWithoutPtrInElf(self): """Test that a U-Boot binary without the microcode symbol is detected""" # ELF file without a '_dt_ucode_base_size' symbol - if not fdt_select.have_libfdt: - return try: with open(self.TestFile('u_boot_no_ucode_ptr')) as fd: TestFunctional._MakeInputFile('u-boot', fd.read()) with self.assertRaises(ValueError) as e: - self._RunPackUbootSingleMicrocode(False) + self._RunPackUbootSingleMicrocode() self.assertIn("Node '/binman/u-boot-with-ucode-ptr': Cannot locate " "_dt_ucode_base_size symbol in u-boot", str(e.exception)) diff --git a/tools/binman/test/45_prop_test.dts b/tools/binman/test/45_prop_test.dts new file mode 100644 index 0000000000..d22e460d29 --- /dev/null +++ b/tools/binman/test/45_prop_test.dts @@ -0,0 +1,23 @@ +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + sort-by-pos; + end-at-4gb; + size = <16>; + intel-me { + filename = "me.bin"; + pos-unset; + intval = <3>; + intarray = <5 6>; + byteval = [08]; + bytearray = [01 23 34]; + longbytearray = [09 0a 0b 0c]; + stringval = "message2"; + stringarray = "another", "multi-word", "message"; + }; + }; +}; diff --git a/tools/buildman/builder.py b/tools/buildman/builder.py index b0ea57ebb4..acb0810457 100644 --- a/tools/buildman/builder.py +++ b/tools/buildman/builder.py @@ -847,7 +847,7 @@ class Builder: delta.reverse() args = [add, -remove, grow, -shrink, up, -down, up - down] - if max(args) == 0: + if max(args) == 0 and min(args) == 0: return args = [self.ColourNum(x) for x in args] indent = ' ' * 15 diff --git a/tools/buildman/builderthread.py b/tools/buildman/builderthread.py index acaf5007f5..9e8ca80c5b 100644 --- a/tools/buildman/builderthread.py +++ b/tools/buildman/builderthread.py @@ -280,13 +280,15 @@ class BuilderThread(threading.Thread): outfile = os.path.join(build_dir, 'log') with open(outfile, 'w') as fd: if result.stdout: - fd.write(result.stdout.encode('latin-1', 'ignore')) + # We don't want unicode characters in log files + fd.write(result.stdout.decode('UTF-8').encode('ASCII', 'replace')) errfile = self.builder.GetErrFile(result.commit_upto, result.brd.target) if result.stderr: with open(errfile, 'w') as fd: - fd.write(result.stderr.encode('latin-1', 'ignore')) + # We don't want unicode characters in log files + fd.write(result.stderr.decode('UTF-8').encode('ASCII', 'replace')) elif os.path.exists(errfile): os.remove(errfile) diff --git a/tools/buildman/func_test.py b/tools/buildman/func_test.py index d439e17db6..bc32f61733 100644 --- a/tools/buildman/func_test.py +++ b/tools/buildman/func_test.py @@ -39,7 +39,6 @@ boards = [ ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 1', 'board0', ''], ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 2', 'board1', ''], ['Active', 'powerpc', 'powerpc', '', 'Tester', 'PowerPC board 1', 'board2', ''], - ['Active', 'powerpc', 'mpc5xx', '', 'Tester', 'PowerPC board 2', 'board3', ''], ['Active', 'sandbox', 'sandbox', '', 'Tester', 'Sandbox board', 'board4', ''], ] diff --git a/tools/buildman/test.py b/tools/buildman/test.py index ed2a3a8929..53ebc3756c 100644 --- a/tools/buildman/test.py +++ b/tools/buildman/test.py @@ -89,7 +89,6 @@ boards = [ ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 1', 
'board0', ''], ['Active', 'arm', 'armv7', '', 'Tester', 'ARM Board 2', 'board1', ''], ['Active', 'powerpc', 'powerpc', '', 'Tester', 'PowerPC board 1', 'board2', ''], - ['Active', 'powerpc', 'mpc5xx', '', 'Tester', 'PowerPC board 2', 'board3', ''], ['Active', 'sandbox', 'sandbox', '', 'Tester', 'Sandbox board', 'board4', ''], ] diff --git a/tools/buildman/toolchain.py b/tools/buildman/toolchain.py index 5cf97ac814..2076323d5d 100644 --- a/tools/buildman/toolchain.py +++ b/tools/buildman/toolchain.py @@ -144,7 +144,9 @@ class Toolchain: """Returns an environment for using the toolchain. Thie takes the current environment and adds CROSS_COMPILE so that - the tool chain will operate correctly. + the tool chain will operate correctly. This also disables localized + output and possibly unicode encoded output of all build tools by + adding LC_ALL=C. Args: full_path: Return the full path in CROSS_COMPILE and don't set @@ -159,6 +161,8 @@ class Toolchain: env['CROSS_COMPILE'] = wrapper + self.cross env['PATH'] = self.path + ':' + env['PATH'] + env['LC_ALL'] = 'C' + return env diff --git a/tools/dtoc/dtoc.py b/tools/dtoc/dtoc.py index 2e0b9c04e2..08e35f148c 100755 --- a/tools/dtoc/dtoc.py +++ b/tools/dtoc/dtoc.py @@ -17,7 +17,6 @@ our_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(our_path, '../patman')) import fdt -import fdt_select import fdt_util # When we see these properties we ignore them - i.e. do not create a structure member @@ -170,7 +169,7 @@ class DtbPlatdata: Once this is done, self.fdt.GetRoot() can be called to obtain the device tree root node, and progress from there. """ - self.fdt = fdt_select.FdtScan(self._dtb_fname) + self.fdt = fdt.FdtScan(self._dtb_fname) def ScanNode(self, root): for node in root.subnodes: diff --git a/tools/dtoc/fdt.py b/tools/dtoc/fdt.py index 816fdbe525..63a32ea2d7 100644 --- a/tools/dtoc/fdt.py +++ b/tools/dtoc/fdt.py @@ -10,12 +10,15 @@ import struct import sys import fdt_util +import libfdt # This deals with a device tree, presenting it as an assortment of Node and # Prop objects, representing nodes and properties, respectively. This file -# contains the base classes and defines the high-level API. Most of the -# implementation is in the FdtFallback and FdtNormal subclasses. See -# fdt_select.py for how to create an Fdt object. +# contains the base classes and defines the high-level API. You can use +# FdtScan() as a convenience function to create and scan an Fdt. + +# This implementation uses a libfdt Python library to access the device tree, +# so it is fairly efficient. 
# A list of types we support (TYPE_BYTE, TYPE_INT, TYPE_STRING, TYPE_BOOL) = range(4) @@ -25,7 +28,7 @@ def CheckErr(errnum, msg): raise ValueError('Error %d: %s: %s' % (errnum, libfdt.fdt_strerror(errnum), msg)) -class PropBase: +class Prop: """A device tree property Properties: @@ -34,11 +37,17 @@ class PropBase: bytes type: Value type """ - def __init__(self, node, offset, name): + def __init__(self, node, offset, name, bytes): self._node = node self._offset = offset self.name = name self.value = None + self.bytes = str(bytes) + if not bytes: + self.type = TYPE_BOOL + self.value = True + return + self.type, self.value = self.BytesToValue(bytes) def GetPhandle(self): """Get a (single) phandle value from a property @@ -96,6 +105,7 @@ class PropBase: TYPE_INT: a byte-swapped integer stored as a 4-byte string TYPE_BYTE: a byte stored as a single-byte string """ + bytes = str(bytes) size = len(bytes) strings = bytes.split('\0') is_string = True @@ -147,15 +157,12 @@ class PropBase: def GetOffset(self): """Get the offset of a property - This can be implemented by subclasses. - Returns: - The offset of the property (struct fdt_property) within the - file, or None if not known. + The offset of the property (struct fdt_property) within the file """ - return None + return self._node._fdt.GetStructOffset(self._offset) -class NodeBase: +class Node: """A device tree node Properties: @@ -188,25 +195,65 @@ class NodeBase: return subnode return None + def Offset(self): + """Returns the offset of a node, after checking the cache + + This should be used instead of self._offset directly, to ensure that + the cache does not contain invalid offsets. + """ + self._fdt.CheckCache() + return self._offset + def Scan(self): - """Scan the subnodes of a node + """Scan a node's properties and subnodes - This should be implemented by subclasses + This fills in the props and subnodes properties, recursively + searching into subnodes so that the entire tree is built. """ - raise NotImplementedError() + self.props = self._fdt.GetProps(self) + + offset = libfdt.fdt_first_subnode(self._fdt.GetFdt(), self.Offset()) + while offset >= 0: + sep = '' if self.path[-1] == '/' else '/' + name = self._fdt._fdt_obj.get_name(offset) + path = self.path + sep + name + node = Node(self._fdt, offset, name, path) + self.subnodes.append(node) + + node.Scan() + offset = libfdt.fdt_next_subnode(self._fdt.GetFdt(), offset) + + def Refresh(self, my_offset): + """Fix up the _offset for each node, recursively + + Note: This does not take account of property offsets - these will not + be updated. + """ + if self._offset != my_offset: + #print '%s: %d -> %d\n' % (self.path, self._offset, my_offset) + self._offset = my_offset + offset = libfdt.fdt_first_subnode(self._fdt.GetFdt(), self._offset) + for subnode in self.subnodes: + subnode.Refresh(offset) + offset = libfdt.fdt_next_subnode(self._fdt.GetFdt(), offset) def DeleteProp(self, prop_name): """Delete a property of a node - This should be implemented by subclasses + The property is deleted and the offset cache is invalidated. Args: prop_name: Name of the property to delete + Raises: + ValueError if the property does not exist """ - raise NotImplementedError() + CheckErr(libfdt.fdt_delprop(self._fdt.GetFdt(), self.Offset(), prop_name), + "Node '%s': delete property: '%s'" % (self.path, prop_name)) + del self.props[prop_name] + self._fdt.Invalidate() class Fdt: - """Provides simple access to a flat device tree blob. + """Provides simple access to a flat device tree blob using libfdts. 
Properties: fname: Filename of fdt @@ -214,6 +261,13 @@ class Fdt: """ def __init__(self, fname): self._fname = fname + self._cached_offsets = False + if self._fname: + self._fname = fdt_util.EnsureCompiled(self._fname) + + with open(self._fname) as fd: + self._fdt = bytearray(fd.read()) + self._fdt_obj = libfdt.Fdt(self._fdt) def Scan(self, root='/'): """Scan a device tree, building up a tree of Node objects @@ -255,15 +309,100 @@ class Fdt: """Flush device tree changes back to the file If the device tree has changed in memory, write it back to the file. - Subclasses can implement this if needed. """ - pass + with open(self._fname, 'wb') as fd: + fd.write(self._fdt) def Pack(self): """Pack the device tree down to its minimum size When nodes and properties shrink or are deleted, wasted space can - build up in the device tree binary. Subclasses can implement this - to remove that spare space. + build up in the device tree binary. """ - pass + CheckErr(libfdt.fdt_pack(self._fdt), 'pack') + fdt_len = libfdt.fdt_totalsize(self._fdt) + del self._fdt[fdt_len:] + + def GetFdt(self): + """Get the contents of the FDT + + Returns: + The FDT contents as a string of bytes + """ + return self._fdt + + def CheckErr(errnum, msg): + if errnum: + raise ValueError('Error %d: %s: %s' % + (errnum, libfdt.fdt_strerror(errnum), msg)) + + + def GetProps(self, node): + """Get all properties from a node. + + Args: + node: Full path to node name to look in. + + Returns: + A dictionary containing all the properties, indexed by node name. + The entries are Prop objects. + + Raises: + ValueError: if the node does not exist. + """ + props_dict = {} + poffset = libfdt.fdt_first_property_offset(self._fdt, node._offset) + while poffset >= 0: + p = self._fdt_obj.get_property_by_offset(poffset) + prop = Prop(node, poffset, p.name, p.value) + props_dict[prop.name] = prop + + poffset = libfdt.fdt_next_property_offset(self._fdt, poffset) + return props_dict + + def Invalidate(self): + """Mark our offset cache as invalid""" + self._cached_offsets = False + + def CheckCache(self): + """Refresh the offset cache if needed""" + if self._cached_offsets: + return + self.Refresh() + self._cached_offsets = True + + def Refresh(self): + """Refresh the offset cache""" + self._root.Refresh(0) + + def GetStructOffset(self, offset): + """Get the file offset of a given struct offset + + Args: + offset: Offset within the 'struct' region of the device tree + Returns: + Position of @offset within the device tree binary + """ + return libfdt.fdt_off_dt_struct(self._fdt) + offset + + @classmethod + def Node(self, fdt, offset, name, path): + """Create a new node + + This is used by Fdt.Scan() to create a new node using the correct + class. 
+ + Args: + fdt: Fdt object + offset: Offset of node + name: Node name + path: Full path to node + """ + node = Node(fdt, offset, name, path) + return node + +def FdtScan(fname): + """Returns a new Fdt object from the implementation we are using""" + dtb = Fdt(fname) + dtb.Scan() + return dtb diff --git a/tools/dtoc/fdt_fallback.py b/tools/dtoc/fdt_fallback.py deleted file mode 100644 index 23e26796c8..0000000000 --- a/tools/dtoc/fdt_fallback.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/python -# -# Copyright (C) 2016 Google, Inc -# Written by Simon Glass <sjg@chromium.org> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -import command -import fdt -from fdt import Fdt, NodeBase, PropBase -import fdt_util -import sys - -# This deals with a device tree, presenting it as a list of Node and Prop -# objects, representing nodes and properties, respectively. -# -# This implementation uses the fdtget tool to access the device tree, so it -# is not very efficient for larger trees. The tool is called once for each -# node and property in the tree. - -class Prop(PropBase): - """A device tree property - - Properties: - name: Property name (as per the device tree) - value: Property value as a string of bytes, or a list of strings of - bytes - type: Value type - """ - def __init__(self, node, name, byte_list_str): - PropBase.__init__(self, node, 0, name) - if not byte_list_str.strip(): - self.type = fdt.TYPE_BOOL - return - self.bytes = [chr(int(byte, 16)) - for byte in byte_list_str.strip().split(' ')] - self.type, self.value = self.BytesToValue(''.join(self.bytes)) - - -class Node(NodeBase): - """A device tree node - - Properties: - name: Device tree node tname - path: Full path to node, along with the node name itself - _fdt: Device tree object - subnodes: A list of subnodes for this node, each a Node object - props: A dict of properties for this node, each a Prop object. - Keyed by property name - """ - def __init__(self, fdt, offset, name, path): - NodeBase.__init__(self, fdt, offset, name, path) - - def Scan(self): - """Scan a node's properties and subnodes - - This fills in the props and subnodes properties, recursively - searching into subnodes so that the entire tree is built. - """ - for name, byte_list_str in self._fdt.GetProps(self.path).items(): - prop = Prop(self, name, byte_list_str) - self.props[name] = prop - - for name in self._fdt.GetSubNodes(self.path): - sep = '' if self.path[-1] == '/' else '/' - path = self.path + sep + name - node = Node(self._fdt, 0, name, path) - self.subnodes.append(node) - - node.Scan() - - def DeleteProp(self, prop_name): - """Delete a property of a node - - The property is deleted using fdtput. - - Args: - prop_name: Name of the property to delete - Raises: - CommandError if the property does not exist - """ - args = [self._fdt._fname, '-d', self.path, prop_name] - command.Output('fdtput', *args) - del self.props[prop_name] - -class FdtFallback(Fdt): - """Provides simple access to a flat device tree blob using fdtget/fdtput - - Properties: - See superclass - """ - - def __init__(self, fname): - Fdt.__init__(self, fname) - if self._fname: - self._fname = fdt_util.EnsureCompiled(self._fname) - - def GetSubNodes(self, node): - """Returns a list of sub-nodes of a given node - - Args: - node: Node name to return children from - - Returns: - List of children in the node (each a string node name) - - Raises: - CmdError: if the node does not exist. 
- """ - out = command.Output('fdtget', self._fname, '-l', node) - return out.strip().splitlines() - - def GetProps(self, node): - """Get all properties from a node - - Args: - node: full path to node name to look in - - Returns: - A dictionary containing all the properties, indexed by node name. - The entries are simply strings - no decoding of lists or numbers - is done. - - Raises: - CmdError: if the node does not exist. - """ - out = command.Output('fdtget', self._fname, node, '-p') - props = out.strip().splitlines() - props_dict = {} - for prop in props: - name = prop - props_dict[prop] = self.GetProp(node, name) - return props_dict - - def GetProp(self, node, prop, default=None, typespec=None): - """Get a property from a device tree. - - This looks up the given node and property, and returns the value as a - string, - - If the node or property does not exist, this will return the default - value. - - Args: - node: Full path to node to look up. - prop: Property name to look up. - default: Default value to return if nothing is present in the fdt, - or None to raise in this case. This will be converted to a - string. - typespec: Type character to use (None for default, 's' for string) - - Returns: - string containing the property value. - - Raises: - CmdError: if the property does not exist and no default is provided. - """ - args = [self._fname, node, prop, '-t', 'bx'] - if default is not None: - args += ['-d', str(default)] - if typespec is not None: - args += ['-t', typespec] - out = command.Output('fdtget', *args) - return out.strip() - - @classmethod - def Node(self, fdt, offset, name, path): - """Create a new node - - This is used by Fdt.Scan() to create a new node using the correct - class. - - Args: - fdt: Fdt object - offset: Offset of node - name: Node name - path: Full path to node - """ - node = Node(fdt, offset, name, path) - return node diff --git a/tools/dtoc/fdt_normal.py b/tools/dtoc/fdt_normal.py deleted file mode 100644 index cce5c06d8c..0000000000 --- a/tools/dtoc/fdt_normal.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/python -# -# Copyright (C) 2016 Google, Inc -# Written by Simon Glass <sjg@chromium.org> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -import struct -import sys - -import fdt -from fdt import Fdt, NodeBase, PropBase -import fdt_util -import libfdt - -# This deals with a device tree, presenting it as a list of Node and Prop -# objects, representing nodes and properties, respectively. -# -# This implementation uses a libfdt Python library to access the device tree, -# so it is fairly efficient. 
- -def CheckErr(errnum, msg): - if errnum: - raise ValueError('Error %d: %s: %s' % - (errnum, libfdt.fdt_strerror(errnum), msg)) - -class Prop(PropBase): - """A device tree property - - Properties: - name: Property name (as per the device tree) - value: Property value as a string of bytes, or a list of strings of - bytes - type: Value type - """ - def __init__(self, node, offset, name, bytes): - PropBase.__init__(self, node, offset, name) - self.bytes = bytes - if not bytes: - self.type = fdt.TYPE_BOOL - self.value = True - return - self.type, self.value = self.BytesToValue(bytes) - - def GetOffset(self): - """Get the offset of a property - - Returns: - The offset of the property (struct fdt_property) within the file - """ - return self._node._fdt.GetStructOffset(self._offset) - -class Node(NodeBase): - """A device tree node - - Properties: - offset: Integer offset in the device tree - name: Device tree node tname - path: Full path to node, along with the node name itself - _fdt: Device tree object - subnodes: A list of subnodes for this node, each a Node object - props: A dict of properties for this node, each a Prop object. - Keyed by property name - """ - def __init__(self, fdt, offset, name, path): - NodeBase.__init__(self, fdt, offset, name, path) - - def Offset(self): - """Returns the offset of a node, after checking the cache - - This should be used instead of self._offset directly, to ensure that - the cache does not contain invalid offsets. - """ - self._fdt.CheckCache() - return self._offset - - def Scan(self): - """Scan a node's properties and subnodes - - This fills in the props and subnodes properties, recursively - searching into subnodes so that the entire tree is built. - """ - self.props = self._fdt.GetProps(self) - - offset = libfdt.fdt_first_subnode(self._fdt.GetFdt(), self.Offset()) - while offset >= 0: - sep = '' if self.path[-1] == '/' else '/' - name = libfdt.Name(self._fdt.GetFdt(), offset) - path = self.path + sep + name - node = Node(self._fdt, offset, name, path) - self.subnodes.append(node) - - node.Scan() - offset = libfdt.fdt_next_subnode(self._fdt.GetFdt(), offset) - - def Refresh(self, my_offset): - """Fix up the _offset for each node, recursively - - Note: This does not take account of property offsets - these will not - be updated. - """ - if self._offset != my_offset: - #print '%s: %d -> %d\n' % (self.path, self._offset, my_offset) - self._offset = my_offset - offset = libfdt.fdt_first_subnode(self._fdt.GetFdt(), self._offset) - for subnode in self.subnodes: - subnode.Refresh(offset) - offset = libfdt.fdt_next_subnode(self._fdt.GetFdt(), offset) - - def DeleteProp(self, prop_name): - """Delete a property of a node - - The property is deleted and the offset cache is invalidated. - - Args: - prop_name: Name of the property to delete - Raises: - ValueError if the property does not exist - """ - CheckErr(libfdt.fdt_delprop(self._fdt.GetFdt(), self.Offset(), prop_name), - "Node '%s': delete property: '%s'" % (self.path, prop_name)) - del self.props[prop_name] - self._fdt.Invalidate() - -class FdtNormal(Fdt): - """Provides simple access to a flat device tree blob using libfdt. 
- - Properties: - _fdt: Device tree contents (bytearray) - _cached_offsets: True if all the nodes have a valid _offset property, - False if something has changed to invalidate the offsets - """ - def __init__(self, fname): - Fdt.__init__(self, fname) - self._cached_offsets = False - if self._fname: - self._fname = fdt_util.EnsureCompiled(self._fname) - - with open(self._fname) as fd: - self._fdt = bytearray(fd.read()) - - def GetFdt(self): - """Get the contents of the FDT - - Returns: - The FDT contents as a string of bytes - """ - return self._fdt - - def Flush(self): - """Flush device tree changes back to the file""" - with open(self._fname, 'wb') as fd: - fd.write(self._fdt) - - def Pack(self): - """Pack the device tree down to its minimum size""" - CheckErr(libfdt.fdt_pack(self._fdt), 'pack') - fdt_len = libfdt.fdt_totalsize(self._fdt) - del self._fdt[fdt_len:] - - def GetProps(self, node): - """Get all properties from a node. - - Args: - node: Full path to node name to look in. - - Returns: - A dictionary containing all the properties, indexed by node name. - The entries are Prop objects. - - Raises: - ValueError: if the node does not exist. - """ - props_dict = {} - poffset = libfdt.fdt_first_property_offset(self._fdt, node._offset) - while poffset >= 0: - dprop, plen = libfdt.fdt_get_property_by_offset(self._fdt, poffset) - prop = Prop(node, poffset, libfdt.String(self._fdt, dprop.nameoff), - libfdt.Data(dprop)) - props_dict[prop.name] = prop - - poffset = libfdt.fdt_next_property_offset(self._fdt, poffset) - return props_dict - - def Invalidate(self): - """Mark our offset cache as invalid""" - self._cached_offsets = False - - def CheckCache(self): - """Refresh the offset cache if needed""" - if self._cached_offsets: - return - self.Refresh() - self._cached_offsets = True - - def Refresh(self): - """Refresh the offset cache""" - self._root.Refresh(0) - - def GetStructOffset(self, offset): - """Get the file offset of a given struct offset - - Args: - offset: Offset within the 'struct' region of the device tree - Returns: - Position of @offset within the device tree binary - """ - return libfdt.fdt_off_dt_struct(self._fdt) + offset - - @classmethod - def Node(self, fdt, offset, name, path): - """Create a new node - - This is used by Fdt.Scan() to create a new node using the correct - class. - - Args: - fdt: Fdt object - offset: Offset of node - name: Node name - path: Full path to node - """ - node = Node(fdt, offset, name, path) - return node diff --git a/tools/dtoc/fdt_select.py b/tools/dtoc/fdt_select.py deleted file mode 100644 index ea78c527fc..0000000000 --- a/tools/dtoc/fdt_select.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/python -# -# Copyright (C) 2016 Google, Inc -# Written by Simon Glass <sjg@chromium.org> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -import fdt_fallback - -# Bring in either the normal fdt library (which relies on libfdt) or the -# fallback one (which uses fdtget and is slower). Both provide the same -# interface for this file to use. 
-try: - import fdt_normal - have_libfdt = True -except ImportError: - have_libfdt = False - -force_fallback = False - -def FdtScan(fname, _force_fallback=False): - """Returns a new Fdt object from the implementation we are using""" - if have_libfdt and not force_fallback and not _force_fallback: - dtb = fdt_normal.FdtNormal(fname) - else: - dtb = fdt_fallback.FdtFallback(fname) - dtb.Scan() - return dtb - -def UseFallback(fallback): - global force_fallback - - old_val = force_fallback - force_fallback = fallback - return old_val diff --git a/tools/fdtgrep.c b/tools/fdtgrep.c index e373c43e36..f51f5f15f5 100644 --- a/tools/fdtgrep.c +++ b/tools/fdtgrep.c @@ -522,18 +522,21 @@ static int check_type_include(void *priv, int type, const char *data, int size) * return 1 at the first match. For exclusive conditions, we must * check that there are no matches. */ - for (val = disp->value_head; val; val = val->next) { - if (!(type & val->type)) - continue; - match = fdt_stringlist_contains(data, size, val->string); - debug(" - val->type=%x, str='%s', match=%d\n", - val->type, val->string, match); - if (match && val->include) { - debug(" - match inc %s\n", val->string); - return 1; + if (data) { + for (val = disp->value_head; val; val = val->next) { + if (!(type & val->type)) + continue; + match = fdt_stringlist_contains(data, size, + val->string); + debug(" - val->type=%x, str='%s', match=%d\n", + val->type, val->string, match); + if (match && val->include) { + debug(" - match inc %s\n", val->string); + return 1; + } + if (match) + none_match &= ~val->type; } - if (match) - none_match &= ~val->type; } /* diff --git a/tools/img2srec.c b/tools/img2srec.c index ec76964023..75efd76e0e 100644 --- a/tools/img2srec.c +++ b/tools/img2srec.c @@ -112,7 +112,7 @@ static char* ExtractDecimal (uint32_t* value, char* getPtr) static void ExtractNumber (uint32_t* value, char* getPtr) { - bool neg = false;; + bool neg = false; while (*getPtr == ' ') getPtr++; if (*getPtr == '-') { diff --git a/tools/kwbimage.c b/tools/kwbimage.c index 8c0e730e7b..5830549d26 100644 --- a/tools/kwbimage.c +++ b/tools/kwbimage.c @@ -24,7 +24,7 @@ #include <openssl/err.h> #include <openssl/evp.h> -#if OPENSSL_VERSION_NUMBER < 0x10100000L +#if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) static void RSA_get0_key(const RSA *r, const BIGNUM **n, const BIGNUM **e, const BIGNUM **d) { @@ -1476,47 +1476,6 @@ static int image_get_version(void) return e->version; } -static int image_version_file(const char *input) -{ - FILE *fcfg; - int version; - int ret; - - fcfg = fopen(input, "r"); - if (!fcfg) { - fprintf(stderr, "Could not open input file %s\n", input); - return -1; - } - - image_cfg = malloc(IMAGE_CFG_ELEMENT_MAX * - sizeof(struct image_cfg_element)); - if (!image_cfg) { - fprintf(stderr, "Cannot allocate memory\n"); - fclose(fcfg); - return -1; - } - - memset(image_cfg, 0, - IMAGE_CFG_ELEMENT_MAX * sizeof(struct image_cfg_element)); - rewind(fcfg); - - ret = image_create_config_parse(fcfg); - fclose(fcfg); - if (ret) { - free(image_cfg); - return -1; - } - - version = image_get_version(); - /* Fallback to version 0 is no version is provided in the cfg file */ - if (version == -1) - version = 0; - - free(image_cfg); - - return version; -} - static void kwbimage_set_header(void *ptr, struct stat *sbuf, int ifd, struct image_tool_params *params) { @@ -1657,18 +1616,62 @@ static int kwbimage_verify_header(unsigned char *ptr, int image_size, static int kwbimage_generate(struct image_tool_params *params, struct 
image_type_params *tparams) { + FILE *fcfg; int alloc_len; + int version; void *hdr; - int version = 0; + int ret; - version = image_version_file(params->imagename); - if (version == 0) { + fcfg = fopen(params->imagename, "r"); + if (!fcfg) { + fprintf(stderr, "Could not open input file %s\n", + params->imagename); + exit(EXIT_FAILURE); + } + + image_cfg = malloc(IMAGE_CFG_ELEMENT_MAX * + sizeof(struct image_cfg_element)); + if (!image_cfg) { + fprintf(stderr, "Cannot allocate memory\n"); + fclose(fcfg); + exit(EXIT_FAILURE); + } + + memset(image_cfg, 0, + IMAGE_CFG_ELEMENT_MAX * sizeof(struct image_cfg_element)); + rewind(fcfg); + + ret = image_create_config_parse(fcfg); + fclose(fcfg); + if (ret) { + free(image_cfg); + exit(EXIT_FAILURE); + } + + version = image_get_version(); + switch (version) { + /* + * Fallback to version 0 if no version is provided in the + * cfg file + */ + case -1: + case 0: alloc_len = sizeof(struct main_hdr_v0) + sizeof(struct ext_hdr_v0); - } else { + break; + + case 1: alloc_len = image_headersz_v1(NULL); + break; + + default: + fprintf(stderr, "Unsupported version %d\n", version); + free(image_cfg); + exit(EXIT_FAILURE); } + free(image_cfg); + hdr = malloc(alloc_len); if (!hdr) { fprintf(stderr, "%s: malloc return failure: %s\n", diff --git a/tools/patman/cros_subprocess.py b/tools/patman/cros_subprocess.py index 7c76014340..ebd4300dfd 100644 --- a/tools/patman/cros_subprocess.py +++ b/tools/patman/cros_subprocess.py @@ -190,8 +190,6 @@ class Popen(subprocess.Popen): # We will get an error on read if the pty is closed try: data = os.read(self.stdout.fileno(), 1024) - if isinstance(data, bytes): - data = data.decode('utf-8') except OSError: pass if data == "": @@ -207,8 +205,6 @@ class Popen(subprocess.Popen): # We will get an error on read if the pty is closed try: data = os.read(self.stderr.fileno(), 1024) - if isinstance(data, bytes): - data = data.decode('utf-8') except OSError: pass if data == "": diff --git a/tools/patman/func_test.py b/tools/patman/func_test.py new file mode 100644 index 0000000000..2c0da84b30 --- /dev/null +++ b/tools/patman/func_test.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2017 Google, Inc +# +# SPDX-License-Identifier: GPL-2.0+ +# + +import contextlib +import os +import re +import shutil +import sys +import tempfile +import unittest + +import gitutil +import patchstream +import settings + + +@contextlib.contextmanager +def capture(): + import sys + from cStringIO import StringIO + oldout,olderr = sys.stdout, sys.stderr + try: + out=[StringIO(), StringIO()] + sys.stdout,sys.stderr = out + yield out + finally: + sys.stdout,sys.stderr = oldout, olderr + out[0] = out[0].getvalue() + out[1] = out[1].getvalue() + + +class TestFunctional(unittest.TestCase): + def setUp(self): + self.tmpdir = tempfile.mkdtemp(prefix='patman.') + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + @staticmethod + def GetPath(fname): + return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), + 'test', fname) + + @classmethod + def GetText(self, fname): + return open(self.GetPath(fname)).read() + + @classmethod + def GetPatchName(self, subject): + fname = re.sub('[ :]', '-', subject) + return fname.replace('--', '-') + + def CreatePatchesForTest(self, series): + cover_fname = None + fname_list = [] + for i, commit in enumerate(series.commits): + clean_subject = self.GetPatchName(commit.subject) + src_fname = '%04d-%s.patch' % (i + 1, clean_subject[:52]) + fname = os.path.join(self.tmpdir, src_fname) + 
shutil.copy(self.GetPath(src_fname), fname) + fname_list.append(fname) + if series.get('cover'): + src_fname = '0000-cover-letter.patch' + cover_fname = os.path.join(self.tmpdir, src_fname) + fname = os.path.join(self.tmpdir, src_fname) + shutil.copy(self.GetPath(src_fname), fname) + + return cover_fname, fname_list + + def testBasic(self): + """Tests the basic flow of patman + + This creates a series from some hard-coded patches build from a simple + tree with the following metadata in the top commit: + + Series-to: u-boot + Series-prefix: RFC + Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de> + Cover-letter-cc: Lord Mëlchett <clergy@palace.gov> + Series-version: 2 + Series-changes: 4 + - Some changes + + Cover-letter: + test: A test patch series + This is a test of how the cover + leter + works + END + + and this in the first commit: + + Series-notes: + some notes + about some things + from the first commit + END + + Commit-notes: + Some notes about + the first commit + END + + with the following commands: + + git log -n2 --reverse >/path/to/tools/patman/test/test01.txt + git format-patch --subject-prefix RFC --cover-letter HEAD~2 + mv 00* /path/to/tools/patman/test + + It checks these aspects: + - git log can be processed by patchstream + - emailing patches uses the correct command + - CC file has information on each commit + - cover letter has the expected text and subject + - each patch has the correct subject + - dry-run information prints out correctly + - unicode is handled correctly + - Series-to, Series-cc, Series-prefix, Cover-letter + - Cover-letter-cc, Series-version, Series-changes, Series-notes + - Commit-notes + """ + process_tags = True + ignore_bad_tags = True + stefan = u'Stefan Brüns <stefan.bruens@rwth-aachen.de>' + rick = 'Richard III <richard@palace.gov>' + mel = u'Lord Mëlchett <clergy@palace.gov>' + ed = u'Lond Edmund Blackaddër <weasel@blackadder.org' + fred = 'Fred Bloggs <f.bloggs@napier.net>' + add_maintainers = [stefan, rick] + dry_run = True + in_reply_to = mel + count = 2 + settings.alias = { + 'fdt': ['simon'], + 'u-boot': ['u-boot@lists.denx.de'], + 'simon': [ed], + 'fred': [fred], + } + + text = self.GetText('test01.txt') + series = patchstream.GetMetaDataForTest(text) + cover_fname, args = self.CreatePatchesForTest(series) + with capture() as out: + patchstream.FixPatches(series, args) + if cover_fname and series.get('cover'): + patchstream.InsertCoverLetter(cover_fname, series, count) + series.DoChecks() + cc_file = series.MakeCcFile(process_tags, cover_fname, + not ignore_bad_tags, add_maintainers) + cmd = gitutil.EmailPatches(series, cover_fname, args, + dry_run, not ignore_bad_tags, cc_file, + in_reply_to=in_reply_to, thread=None) + series.ShowActions(args, cmd, process_tags) + cc_lines = open(cc_file).read().splitlines() + os.remove(cc_file) + + lines = out[0].splitlines() + #print '\n'.join(lines) + self.assertEqual('Cleaned %s patches' % len(series.commits), lines[0]) + self.assertEqual('Change log missing for v2', lines[1]) + self.assertEqual('Change log missing for v3', lines[2]) + self.assertEqual('Change log for unknown version v4', lines[3]) + self.assertEqual("Alias 'pci' not found", lines[4]) + self.assertIn('Dry run', lines[5]) + self.assertIn('Send a total of %d patches' % count, lines[7]) + line = 8 + for i, commit in enumerate(series.commits): + self.assertEqual(' %s' % args[i], lines[line + 0]) + line += 1 + while 'Cc:' in lines[line]: + line += 1 + self.assertEqual('To: u-boot@lists.denx.de', lines[line]) + self.assertEqual('Cc: 
%s' % stefan.encode('utf-8'), lines[line + 1]) + self.assertEqual('Version: 3', lines[line + 2]) + self.assertEqual('Prefix:\t RFC', lines[line + 3]) + self.assertEqual('Cover: 4 lines', lines[line + 4]) + line += 5 + self.assertEqual(' Cc: %s' % mel.encode('utf-8'), lines[line + 0]) + self.assertEqual(' Cc: %s' % rick, lines[line + 1]) + self.assertEqual(' Cc: %s' % fred, lines[line + 2]) + self.assertEqual(' Cc: %s' % ed.encode('utf-8'), lines[line + 3]) + expected = ('Git command: git send-email --annotate ' + '--in-reply-to="%s" --to "u-boot@lists.denx.de" ' + '--cc "%s" --cc-cmd "%s --cc-cmd %s" %s %s' + % (in_reply_to, stefan, sys.argv[0], cc_file, cover_fname, + ' '.join(args))).encode('utf-8') + line += 4 + self.assertEqual(expected, lines[line]) + + self.assertEqual(('%s %s, %s' % (args[0], rick, stefan)) + .encode('utf-8'), cc_lines[0]) + self.assertEqual(('%s %s, %s, %s, %s' % (args[1], fred, rick, stefan, + ed)).encode('utf-8'), cc_lines[1]) + + expected = ''' +This is a test of how the cover +leter +works + +some notes +about some things +from the first commit + +Changes in v4: +- Some changes + +Simon Glass (2): + pci: Correct cast for sandbox + fdt: Correct cast for sandbox in fdtdec_setup_memory_size() + + cmd/pci.c | 3 ++- + fs/fat/fat.c | 1 + + lib/efi_loader/efi_memory.c | 1 + + lib/fdtdec.c | 3 ++- + 4 files changed, 6 insertions(+), 2 deletions(-) + +--\x20 +2.7.4 + +''' + lines = open(cover_fname).read().splitlines() + #print '\n'.join(lines) + self.assertEqual( + 'Subject: [RFC PATCH v3 0/2] test: A test patch series', + lines[3]) + self.assertEqual(expected.splitlines(), lines[7:]) + + for i, fname in enumerate(args): + lines = open(fname).read().splitlines() + #print '\n'.join(lines) + subject = [line for line in lines if line.startswith('Subject')] + self.assertEqual('Subject: [RFC %d/%d]' % (i + 1, count), + subject[0][:18]) + if i == 0: + # Check that we got our commit notes + self.assertEqual('---', lines[17]) + self.assertEqual('Some notes about', lines[18]) + self.assertEqual('the first commit', lines[19]) diff --git a/tools/patman/gitutil.py b/tools/patman/gitutil.py index 0d23079a3a..08be9377ce 100644 --- a/tools/patman/gitutil.py +++ b/tools/patman/gitutil.py @@ -407,6 +407,8 @@ def EmailPatches(series, cover_fname, args, dry_run, raise_on_error, cc_fname, cc = [] cmd = ['git', 'send-email', '--annotate'] if in_reply_to: + if type(in_reply_to) != str: + in_reply_to = in_reply_to.encode('utf-8') cmd.append('--in-reply-to="%s"' % in_reply_to) if thread: cmd.append('--thread') @@ -417,10 +419,10 @@ def EmailPatches(series, cover_fname, args, dry_run, raise_on_error, cc_fname, if cover_fname: cmd.append(cover_fname) cmd += args - str = ' '.join(cmd) + cmdstr = ' '.join(cmd) if not dry_run: - os.system(str) - return str + os.system(cmdstr) + return cmdstr def LookupEmail(lookup_name, alias=None, raise_on_error=True, level=0): diff --git a/tools/patman/patchstream.py b/tools/patman/patchstream.py index cd4667f61c..1b9136aa5c 100644 --- a/tools/patman/patchstream.py +++ b/tools/patman/patchstream.py @@ -308,15 +308,6 @@ class PatchStream: # Well that means this is an ordinary line else: - pos = 1 - # Look for ugly ASCII characters - for ch in line: - # TODO: Would be nicer to report source filename and line - if ord(ch) > 0x80: - self.warn.append("Line %d/%d ('%s') has funny ascii char" % - (self.linenum, pos, line)) - pos += 1 - # Look for space before tab m = re_space_before_tab.match(line) if m: @@ -433,6 +424,19 @@ def GetMetaData(start, count): """ return 
GetMetaDataForList('HEAD~%d' % start, None, count) +def GetMetaDataForTest(text): + """Process metadata from a file containing a git log. Used for tests + + Args: + text: + """ + series = Series() + ps = PatchStream(series, is_log=True) + for line in text.splitlines(): + ps.ProcessLine(line) + ps.Finalize() + return series + def FixPatch(backup_dir, fname, series, commit): """Fix up a patch file, by adding/removing as required. @@ -486,7 +490,6 @@ def FixPatches(series, fnames): print count += 1 print('Cleaned %d patches' % count) - return series def InsertCoverLetter(fname, series, count): """Inserts a cover letter with the required info into patch 0 diff --git a/tools/patman/patman.py b/tools/patman/patman.py index fdbee67f55..4b3bc78745 100755 --- a/tools/patman/patman.py +++ b/tools/patman/patman.py @@ -82,11 +82,13 @@ if __name__ != "__main__": # Run our meagre tests elif options.test: import doctest + import func_test sys.argv = [sys.argv[0]] - suite = unittest.TestLoader().loadTestsFromTestCase(test.TestPatch) result = unittest.TestResult() - suite.run(result) + for module in (test.TestPatch, func_test.TestFunctional): + suite = unittest.TestLoader().loadTestsFromTestCase(module) + suite.run(result) for module in ['gitutil', 'settings']: suite = doctest.DocTestSuite(module) @@ -141,8 +143,8 @@ else: series) # Fix up the patch files to our liking, and insert the cover letter - series = patchstream.FixPatches(series, args) - if series and cover_fname and series.get('cover'): + patchstream.FixPatches(series, args) + if cover_fname and series.get('cover'): patchstream.InsertCoverLetter(cover_fname, series, options.count) # Do a few checks on the series diff --git a/tools/patman/series.py b/tools/patman/series.py index c1b86521aa..d3947a7c2a 100644 --- a/tools/patman/series.py +++ b/tools/patman/series.py @@ -212,7 +212,9 @@ class Series(dict): cover_fname: If non-None the name of the cover letter. raise_on_error: True to raise an error when an alias fails to match, False to just print a message. 
- add_maintainers: Call the get_maintainers to CC maintainers + add_maintainers: Either: + True/False to call the get_maintainers to CC maintainers + List of maintainers to include (for testing) Return: Filename of temp file created """ @@ -221,21 +223,27 @@ class Series(dict): fd = open(fname, 'w') all_ccs = [] for commit in self.commits: - list = [] + cc = [] if process_tags: - list += gitutil.BuildEmailList(commit.tags, + cc += gitutil.BuildEmailList(commit.tags, raise_on_error=raise_on_error) - list += gitutil.BuildEmailList(commit.cc_list, + cc += gitutil.BuildEmailList(commit.cc_list, raise_on_error=raise_on_error) - if add_maintainers: - list += get_maintainer.GetMaintainer(commit.patch) - all_ccs += list - print(commit.patch, ', '.join(set(list)), file=fd) - self._generated_cc[commit.patch] = list + if type(add_maintainers) == type(cc): + cc += add_maintainers + elif add_maintainers: + cc += get_maintainer.GetMaintainer(commit.patch) + cc = [m.encode('utf-8') if type(m) != str else m for m in cc] + all_ccs += cc + print(commit.patch, ', '.join(set(cc)), file=fd) + self._generated_cc[commit.patch] = cc if cover_fname: cover_cc = gitutil.BuildEmailList(self.get('cover_cc', '')) - cc_list = ', '.join([x.decode('utf-8') for x in set(cover_cc + all_ccs)]) + cover_cc = [m.encode('utf-8') if type(m) != str else m + for m in cover_cc] + cc_list = ', '.join([x.decode('utf-8') + for x in set(cover_cc + all_ccs)]) print(cover_fname, cc_list.encode('utf-8'), file=fd) fd.close() diff --git a/tools/patman/test.py b/tools/patman/test.py index 8c39f66e73..20dc9c1e0d 100644 --- a/tools/patman/test.py +++ b/tools/patman/test.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2011 The Chromium OS Authors. # @@ -31,6 +32,10 @@ Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support This adds functions to enable/disable clocks and reset to on-chip peripherals. +cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type + ‘long long unsigned int’, but argument 3 has type + ‘u64 {aka long unsigned int}’ [-Wformat=] + BUG=chromium-os:13875 TEST=build U-Boot for Seaboard, boot @@ -53,6 +58,10 @@ Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support This adds functions to enable/disable clocks and reset to on-chip peripherals. 
+cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type + ‘long long unsigned int’, but argument 3 has type + ‘u64 {aka long unsigned int}’ [-Wformat=] + Signed-off-by: Simon Glass <sjg@chromium.org> --- diff --git a/tools/patman/test/0000-cover-letter.patch b/tools/patman/test/0000-cover-letter.patch new file mode 100644 index 0000000000..29062015bc --- /dev/null +++ b/tools/patman/test/0000-cover-letter.patch @@ -0,0 +1,23 @@ +From 5ab48490f03051875ab13d288a4bf32b507d76fd Mon Sep 17 00:00:00 2001 +From: Simon Glass <sjg@chromium.org> +Date: Sat, 27 May 2017 20:52:11 -0600 +Subject: [RFC 0/2] *** SUBJECT HERE *** +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +*** BLURB HERE *** + +Simon Glass (2): + pci: Correct cast for sandbox + fdt: Correct cast for sandbox in fdtdec_setup_memory_size() + + cmd/pci.c | 3 ++- + fs/fat/fat.c | 1 + + lib/efi_loader/efi_memory.c | 1 + + lib/fdtdec.c | 3 ++- + 4 files changed, 6 insertions(+), 2 deletions(-) + +-- +2.7.4 + diff --git a/tools/patman/test/0001-pci-Correct-cast-for-sandbox.patch b/tools/patman/test/0001-pci-Correct-cast-for-sandbox.patch new file mode 100644 index 0000000000..7191176f75 --- /dev/null +++ b/tools/patman/test/0001-pci-Correct-cast-for-sandbox.patch @@ -0,0 +1,48 @@ +From b9da5f937bd5ea4931ea17459bf79b2905d9594d Mon Sep 17 00:00:00 2001 +From: Simon Glass <sjg@chromium.org> +Date: Sat, 15 Apr 2017 15:39:08 -0600 +Subject: [RFC 1/2] pci: Correct cast for sandbox +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This gives a warning with some native compilers: + +cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type + ‘long long unsigned int’, but argument 3 has type + ‘u64 {aka long unsigned int}’ [-Wformat=] + +Fix it with a cast. + +Signed-off-by: Simon Glass <sjg@chromium.org> +Series-notes: +some notes +about some things +from the first commit +END + +Commit-notes: +Some notes about +the first commit +END +--- + cmd/pci.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/cmd/pci.c b/cmd/pci.c +index 41b4fff..fe27b4f 100644 +--- a/cmd/pci.c ++++ b/cmd/pci.c +@@ -150,7 +150,8 @@ int pci_bar_show(struct udevice *dev) + if ((!is_64 && size_low) || (is_64 && size)) { + size = ~size + 1; + printf(" %d %#016llx %#016llx %d %s %s\n", +- bar_id, base, size, is_64 ? 64 : 32, ++ bar_id, (unsigned long long)base, ++ (unsigned long long)size, is_64 ? 64 : 32, + is_io ? "I/O" : "MEM", + prefetchable ? "Prefetchable" : ""); + } +-- +2.7.4 + diff --git a/tools/patman/test/0002-fdt-Correct-cast-for-sandbox-in-fdtdec_setup_memory_.patch b/tools/patman/test/0002-fdt-Correct-cast-for-sandbox-in-fdtdec_setup_memory_.patch new file mode 100644 index 0000000000..e3284973a0 --- /dev/null +++ b/tools/patman/test/0002-fdt-Correct-cast-for-sandbox-in-fdtdec_setup_memory_.patch @@ -0,0 +1,73 @@ +From 5ab48490f03051875ab13d288a4bf32b507d76fd Mon Sep 17 00:00:00 2001 +From: Simon Glass <sjg@chromium.org> +Date: Sat, 15 Apr 2017 15:39:08 -0600 +Subject: [RFC 2/2] fdt: Correct cast for sandbox in fdtdec_setup_memory_size() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +This gives a warning with some native compilers: + +lib/fdtdec.c:1203:8: warning: format ‘%llx’ expects argument of type + ‘long long unsigned int’, but argument 3 has type + ‘long unsigned int’ [-Wformat=] + +Fix it with a cast. 
+ +Signed-off-by: Simon Glass <sjg@chromium.org> +Series-to: u-boot +Series-prefix: RFC +Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de> +Cover-letter-cc: Lord Mëlchett <clergy@palace.gov> +Series-version: 3 +Patch-cc: fred +Series-changes: 4 +- Some changes + +Cover-letter: +test: A test patch series +This is a test of how the cover +leter +works +END +--- + fs/fat/fat.c | 1 + + lib/efi_loader/efi_memory.c | 1 + + lib/fdtdec.c | 3 ++- + 3 files changed, 4 insertions(+), 1 deletion(-) + +diff --git a/fs/fat/fat.c b/fs/fat/fat.c +index a71bad1..ba169dc 100644 +--- a/fs/fat/fat.c ++++ b/fs/fat/fat.c +@@ -1,3 +1,4 @@ ++ + /* + * fat.c + * +diff --git a/lib/efi_loader/efi_memory.c b/lib/efi_loader/efi_memory.c +index db2ae19..05f75d1 100644 +--- a/lib/efi_loader/efi_memory.c ++++ b/lib/efi_loader/efi_memory.c +@@ -1,3 +1,4 @@ ++ + /* + * EFI application memory management + * +diff --git a/lib/fdtdec.c b/lib/fdtdec.c +index c072e54..942244f 100644 +--- a/lib/fdtdec.c ++++ b/lib/fdtdec.c +@@ -1200,7 +1200,8 @@ int fdtdec_setup_memory_size(void) + } + + gd->ram_size = (phys_size_t)(res.end - res.start + 1); +- debug("%s: Initial DRAM size %llx\n", __func__, (u64)gd->ram_size); ++ debug("%s: Initial DRAM size %llx\n", __func__, ++ (unsigned long long)gd->ram_size); + + return 0; + } +-- +2.7.4 + diff --git a/tools/patman/test/test01.txt b/tools/patman/test/test01.txt new file mode 100644 index 0000000000..8ad9587aef --- /dev/null +++ b/tools/patman/test/test01.txt @@ -0,0 +1,56 @@ +commit b9da5f937bd5ea4931ea17459bf79b2905d9594d +Author: Simon Glass <sjg@chromium.org> +Date: Sat Apr 15 15:39:08 2017 -0600 + + pci: Correct cast for sandbox + + This gives a warning with some native compilers: + + cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type + ‘long long unsigned int’, but argument 3 has type + ‘u64 {aka long unsigned int}’ [-Wformat=] + + Fix it with a cast. + + Signed-off-by: Simon Glass <sjg@chromium.org> + Series-notes: + some notes + about some things + from the first commit + END + + Commit-notes: + Some notes about + the first commit + END + +commit 5ab48490f03051875ab13d288a4bf32b507d76fd +Author: Simon Glass <sjg@chromium.org> +Date: Sat Apr 15 15:39:08 2017 -0600 + + fdt: Correct cast for sandbox in fdtdec_setup_memory_size() + + This gives a warning with some native compilers: + + lib/fdtdec.c:1203:8: warning: format ‘%llx’ expects argument of type + ‘long long unsigned int’, but argument 3 has type + ‘long unsigned int’ [-Wformat=] + + Fix it with a cast. 
+ + Signed-off-by: Simon Glass <sjg@chromium.org> + Series-to: u-boot + Series-prefix: RFC + Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de> + Cover-letter-cc: Lord Mëlchett <clergy@palace.gov> + Series-version: 3 + Patch-cc: fred + Series-changes: 4 + - Some changes + + Cover-letter: + test: A test patch series + This is a test of how the cover + leter + works + END diff --git a/tools/rkcommon.c b/tools/rkcommon.c index 8283a740c1..1056ffa2be 100644 --- a/tools/rkcommon.c +++ b/tools/rkcommon.c @@ -2,6 +2,8 @@ * (C) Copyright 2015 Google, Inc * Written by Simon Glass <sjg@chromium.org> * + * (C) 2017 Theobroma Systems Design und Consulting GmbH + * * SPDX-License-Identifier: GPL-2.0+ * * Helper functions for Rockchip images @@ -74,7 +76,8 @@ static struct spl_info spl_infos[] = { { "rk3188", "RK31", 0x8000 - 0x800, true, false }, { "rk3288", "RK32", 0x8000, false, false }, { "rk3328", "RK32", 0x8000 - 0x1000, false, false }, - { "rk3399", "RK33", 0x20000, false, true }, + { "rk3399", "RK33", 0x30000 - 0x2000, false, true }, + { "rv1108", "RK11", 0x1800, false, false}, }; static unsigned char rc4_key[16] = { @@ -182,11 +185,14 @@ static void rkcommon_set_header0(void *buf, uint file_size, */ hdr->init_size = ROUND(hdr->init_size, 4); /* - * The images we create do not contain the stage following the SPL as - * part of the SPL image, so the init_boot_size (which might have been - * read by Rockchip's miniloder) should be the same as the init_size. + * init_boot_size needs to be set, as it is read by the BootROM + * to determine the size of the next-stage bootloader (e.g. U-Boot + * proper), when used with the back-to-bootrom functionality. + * + * see https://lists.denx.de/pipermail/u-boot/2017-May/293267.html + * for a more detailed explanation by Andy Yan */ - hdr->init_boot_size = hdr->init_size; + hdr->init_boot_size = hdr->init_size + RK_MAX_BOOT_SIZE / RK_BLK_SIZE; rc4_encode(buf, RK_BLK_SIZE, rc4_key); } @@ -201,7 +207,7 @@ int rkcommon_set_header(void *buf, uint file_size, rkcommon_set_header0(buf, file_size, params); - /* Set up the SPL name */ + /* Set up the SPL name (i.e. copy spl_hdr over) */ memcpy(&hdr->magic, rkcommon_get_spl_hdr(params), RK_SPL_HDR_SIZE); if (rkcommon_need_rc4_spl(params)) @@ -211,6 +217,116 @@ int rkcommon_set_header(void *buf, uint file_size, return 0; } +static inline unsigned rkcommon_offset_to_spi(unsigned offset) +{ + /* + * While SD/MMC images use a flat addressing, SPI images are padded + * to use the first 2K of every 4K sector only. + */ + return ((offset & ~0x7ff) << 1) + (offset & 0x7ff); +} + +static int rkcommon_parse_header(const void *buf, struct header0_info *header0, + struct spl_info **spl_info) +{ + unsigned hdr1_offset; + struct header1_info *hdr1_sdmmc, *hdr1_spi; + int i; + + if (spl_info) + *spl_info = NULL; + + /* + * The first header (hdr0) is always RC4 encoded, so try to decrypt + * with the well-known key. + */ + memcpy((void *)header0, buf, sizeof(struct header0_info)); + rc4_encode((void *)header0, sizeof(struct header0_info), rc4_key); + + if (header0->signature != RK_SIGNATURE) + return -EPROTO; + + /* We don't support RC4 encoded image payloads here, yet... 
*/ + if (header0->disable_rc4 == 0) + return -ENOSYS; + + hdr1_offset = header0->init_offset * RK_BLK_SIZE; + hdr1_sdmmc = (struct header1_info *)(buf + hdr1_offset); + hdr1_spi = (struct header1_info *)(buf + + rkcommon_offset_to_spi(hdr1_offset)); + + for (i = 0; i < ARRAY_SIZE(spl_infos); i++) { + if (!memcmp(&hdr1_sdmmc->magic, spl_infos[i].spl_hdr, 4)) { + if (spl_info) + *spl_info = &spl_infos[i]; + return IH_TYPE_RKSD; + } else if (!memcmp(&hdr1_spi->magic, spl_infos[i].spl_hdr, 4)) { + if (spl_info) + *spl_info = &spl_infos[i]; + return IH_TYPE_RKSPI; + } + } + + return -1; +} + +int rkcommon_verify_header(unsigned char *buf, int size, + struct image_tool_params *params) +{ + struct header0_info header0; + struct spl_info *img_spl_info, *spl_info; + int ret; + + ret = rkcommon_parse_header(buf, &header0, &img_spl_info); + + /* If this is the (unimplemented) RC4 case, then rewrite the result */ + if (ret == -ENOSYS) + return 0; + + if (ret < 0) + return ret; + + /* + * If no 'imagename' is specified via the commandline (e.g. if this is + * 'dumpimage -l' w/o any further constraints), we accept any spl_info. + */ + if (params->imagename == NULL) + return 0; + + /* Match the 'imagename' against the 'spl_hdr' found */ + spl_info = rkcommon_get_spl_info(params->imagename); + if (spl_info && img_spl_info) + return strcmp(spl_info->spl_hdr, img_spl_info->spl_hdr); + + return -ENOENT; +} + +void rkcommon_print_header(const void *buf) +{ + struct header0_info header0; + struct spl_info *spl_info; + uint8_t image_type; + int ret; + + ret = rkcommon_parse_header(buf, &header0, &spl_info); + + /* If this is the (unimplemented) RC4 case, then fail silently */ + if (ret == -ENOSYS) + return; + + if (ret < 0) { + fprintf(stderr, "Error: image verification failed\n"); + return; + } + + image_type = ret; + + printf("Image Type: Rockchip %s (%s) boot image\n", + spl_info->spl_hdr, + (image_type == IH_TYPE_RKSD) ? "SD/MMC" : "SPI"); + printf("Data Size: %d bytes\n", header0.init_size * RK_BLK_SIZE); +} + void rkcommon_rc4_encode_spl(void *buf, unsigned int offset, unsigned int size) { unsigned int remaining = size; @@ -259,8 +375,9 @@ int rkcommon_vrec_header(struct image_tool_params *params, /* Allocate, clear and install the header */ tparams->hdr = malloc(tparams->header_size); + if (!tparams->hdr) + return -ENOMEM; memset(tparams->hdr, 0, tparams->header_size); - tparams->header_size = tparams->header_size; /* * If someone passed in 0 for the alignment, we'd better handle diff --git a/tools/rkcommon.h b/tools/rkcommon.h index a21321fe83..8790f1ceab 100644 --- a/tools/rkcommon.h +++ b/tools/rkcommon.h @@ -10,6 +10,7 @@ enum { RK_BLK_SIZE = 512, + RK_INIT_SIZE_ALIGN = 2048, RK_INIT_OFFSET = 4, RK_MAX_BOOT_SIZE = 512 << 10, RK_SPL_HDR_START = RK_INIT_OFFSET * RK_BLK_SIZE, @@ -56,6 +57,25 @@ int rkcommon_set_header(void *buf, uint file_size, struct image_tool_params *params); /** + * rkcommon_verify_header() - verify the header for a Rockchip boot image + * + * @buf: Pointer to the image file + * @file_size: Size of entire bootable image file (incl. all padding) + * @return 0 if OK + */ +int rkcommon_verify_header(unsigned char *buf, int size, + struct image_tool_params *params); + +/** + * rkcommon_print_header() - print the header for a Rockchip boot image + * + * This prints the header, spl_name and whether this is a SD/MMC or SPI image. 
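The image-type detection added above hinges on rkcommon_offset_to_spi(): SPI images only populate the first 2 KiB of every 4 KiB sector, so the flat SD/MMC offset of header1 has to be re-mapped before its magic is probed at the second location. A small Python sketch of that mapping with a worked value (constants taken from rkcommon.h in this diff; the helper name below is just for illustration):

    # Worked example of the SPI offset re-mapping used by the parser above
    RK_BLK_SIZE = 512
    RK_INIT_OFFSET = 4                    # header1 normally sits at block 4

    def offset_to_spi(offset):
        # Keep the low 2 KiB intact, double the 2 KiB-sector index
        return ((offset & ~0x7ff) << 1) + (offset & 0x7ff)

    hdr1_offset = RK_INIT_OFFSET * RK_BLK_SIZE             # 0x800 in a flat image
    print(hex(hdr1_offset), '->', hex(offset_to_spi(hdr1_offset)))  # 0x800 -> 0x1000

Probing the spl_hdr magic at both the flat and the re-mapped offset is what lets the shared rkcommon_verify_header()/rkcommon_print_header() code tell IH_TYPE_RKSD from IH_TYPE_RKSPI, and it is also how 'dumpimage -l' can report an image as SD/MMC or SPI.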
+ * + * @buf: Pointer to the image (can be a read-only file-mapping) + */ +void rkcommon_print_header(const void *buf); + +/** * rkcommon_need_rc4_spl() - check if rc4 encoded spl is required * * Some socs cannot disable the rc4-encryption of the spl binary. diff --git a/tools/rksd.c b/tools/rksd.c index 8627b6d31b..c56153d2ca 100644 --- a/tools/rksd.c +++ b/tools/rksd.c @@ -13,29 +13,17 @@ #include "mkimage.h" #include "rkcommon.h" -static int rksd_verify_header(unsigned char *buf, int size, - struct image_tool_params *params) -{ - return 0; -} - -static void rksd_print_header(const void *buf) -{ -} - static void rksd_set_header(void *buf, struct stat *sbuf, int ifd, - struct image_tool_params *params) + struct image_tool_params *params) { unsigned int size; int ret; - printf("params->file_size %d\n", params->file_size); - printf("params->orig_file_size %d\n", params->orig_file_size); - /* * We need to calculate this using 'RK_SPL_HDR_START' and not using * 'tparams->header_size', as the additional byte inserted when - * 'is_boot0' is true counts towards the payload. + * 'is_boot0' is true counts towards the payload (and not towards the + * header). */ size = params->file_size - RK_SPL_HDR_START; ret = rkcommon_set_header(buf, size, params); @@ -46,11 +34,6 @@ static void rksd_set_header(void *buf, struct stat *sbuf, int ifd, } } -static int rksd_extract_subimage(void *buf, struct image_tool_params *params) -{ - return 0; -} - static int rksd_check_image_type(uint8_t type) { if (type == IH_TYPE_RKSD) @@ -63,10 +46,10 @@ static int rksd_vrec_header(struct image_tool_params *params, struct image_type_params *tparams) { /* - * Pad to the RK_BLK_SIZE (512 bytes) to be consistent with init_size - * being encoded in RK_BLK_SIZE units in header0 (see rkcommon.c). 
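Two header0 size fields interact in the hunks around this point: the SPL payload is now padded to a 2 KiB boundary (RK_INIT_SIZE_ALIGN), since the ROM requires init_size to be 2 KiB-aligned, and init_boot_size additionally reserves RK_MAX_BOOT_SIZE for the next-stage loader so the BootROM knows how much further to read in the back-to-bootrom case. A simplified model of the arithmetic, in RK_BLK_SIZE units; align() and the payload size are illustrative, the real values come from the padded file size in rkcommon_set_header0()/rkcommon_vrec_header():

    # Simplified model (illustration only) of the header0 size fields
    RK_BLK_SIZE = 512
    RK_INIT_SIZE_ALIGN = 2048
    RK_MAX_BOOT_SIZE = 512 << 10          # space reserved for the next stage

    def align(value, alignment):
        return (value + alignment - 1) & ~(alignment - 1)

    spl_bytes = 38 * 1024                 # hypothetical SPL payload size
    init_size = align(spl_bytes, RK_INIT_SIZE_ALIGN) // RK_BLK_SIZE
    init_boot_size = init_size + RK_MAX_BOOT_SIZE // RK_BLK_SIZE
    print(init_size, init_boot_size)      # 76 and 1100 blocks respectively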
+ * Pad to a 2KB alignment, as required for init_size by the ROM + * (see https://lists.denx.de/pipermail/u-boot/2017-May/293268.html) */ - return rkcommon_vrec_header(params, tparams, RK_BLK_SIZE); + return rkcommon_vrec_header(params, tparams, RK_INIT_SIZE_ALIGN); } /* @@ -78,10 +61,10 @@ U_BOOT_IMAGE_TYPE( 0, NULL, rkcommon_check_params, - rksd_verify_header, - rksd_print_header, + rkcommon_verify_header, + rkcommon_print_header, rksd_set_header, - rksd_extract_subimage, + NULL, rksd_check_image_type, NULL, rksd_vrec_header diff --git a/tools/rkspi.c b/tools/rkspi.c index 87bd1a9e6e..4332ce1765 100644 --- a/tools/rkspi.c +++ b/tools/rkspi.c @@ -17,16 +17,6 @@ enum { RKSPI_SECT_LEN = RK_BLK_SIZE * 4, }; -static int rkspi_verify_header(unsigned char *buf, int size, - struct image_tool_params *params) -{ - return 0; -} - -static void rkspi_print_header(const void *buf) -{ -} - static void rkspi_set_header(void *buf, struct stat *sbuf, int ifd, struct image_tool_params *params) { @@ -58,11 +48,6 @@ static void rkspi_set_header(void *buf, struct stat *sbuf, int ifd, } } -static int rkspi_extract_subimage(void *buf, struct image_tool_params *params) -{ - return 0; -} - static int rkspi_check_image_type(uint8_t type) { if (type == IH_TYPE_RKSPI) @@ -78,7 +63,7 @@ static int rkspi_check_image_type(uint8_t type) static int rkspi_vrec_header(struct image_tool_params *params, struct image_type_params *tparams) { - int padding = rkcommon_vrec_header(params, tparams, 2048); + int padding = rkcommon_vrec_header(params, tparams, RK_INIT_SIZE_ALIGN); /* * The file size has not been adjusted at this point (our caller will * eventually add the header/padding to the file_size), so we need to @@ -112,10 +97,10 @@ U_BOOT_IMAGE_TYPE( 0, NULL, rkcommon_check_params, - rkspi_verify_header, - rkspi_print_header, + rkcommon_verify_header, + rkcommon_print_header, rkspi_set_header, - rkspi_extract_subimage, + NULL, rkspi_check_image_type, NULL, rkspi_vrec_header diff --git a/tools/tbot/README b/tools/tbot/README deleted file mode 100644 index 49b9e95f8f..0000000000 --- a/tools/tbot/README +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) 2016 DENX Software Engineering GmbH -# Heiko Schocher <hs@denx.de> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -What is tbot ? -============== - -tbot is a tool for executing testcases on boards. -Source code found on [1] -Based on DUTS [2] -written in python - -Basic Ideas of tbot -=================== -(see also the figure: -https://github.com/hsdenx/tbot/blob/master/doc/tbot_structure.png ) - -- Virtual laboratory (VL) - VL is the basic environment that groups: - - [a number of] boards - target devices on which tbot executes testcases. - - one Lab PC - -- Test case (TC): - A piece of python code, which uses the tbot class from [1]. - Tbot provides functions for sending shell commands and parsing the - shell commands output. - Tbot waits endless for a shell commands end (detected through reading - the consoles prompt). - A TC can also call other TC-es. - - remark: - Tbot not really waits endless, for a shell commands end, instead - tbot starts a watchdog in the background, and if it triggers, tbot - ends the TC as failed. In the tbot beginning there was a lot of - timeouts / retry cases, but it turned out, that waiting endless - is robust and easy ... 
- -- Host PC (where tbot runs, currently only linux host tested) - must not a powerful machine (For example [3], I use a - raspberry pi for running tbot and buildbot) - -- Lab PC: - - Host PC connects through ssh to the Lab PC - -> so it is possible to test boards, which - are not at the same place as the Host PC. - (Lab PC and Host PC can be the same of course) - -> maybe we can setup a Testsystem, which does nightly - U-Boot/Linux builds and test from current mainline U-Boot - on boards wherever they are accessible. - - - necessary tasks a Lab PC must deliver: - - connect to boards console through a shell command. - - power on/off boards through a shell command - - detect the current power state of a board through - a shell command - - - optional tasks: - - tftp server (for example loading images) - - nfs server (used as rootfs for linux kernels) - - Internet access for example for downloading - U-Boot source with git. - - toolchains installed for compiling source code - - -> a linux machine is preffered. - - - currently only Lab PC with an installed linux supported/tested. - -- Boards(s): - the boards on which shell commands are executed. - -- Board state: - equals to the software, the board is currently running. - - Currently tbot supports 2 board states: - - "u-boot", if the board is running U-Boot - - "linux", if the board is running a linux kernel - - It should be easy to add other board states to tbot, see - https://github.com/hsdenx/tbot/tree/master/src/lab_api/state_[u-boot/linux].py - - A board state is detected through analysing the boards - shell prompt. In linux, tbot sets a special tbot prompt, - in U-Boot the prompt is static, and configurable in tbot through - a board config file. - - A TC can say in which board state it want to send shell commands. - Tbot tries to detect the current board state, if board is not in - the requested board state, tbot tries to switch into the correct - state. If this fails, the TC fails. - - It is possible to switch in a single TC between board states. - -- Events - tbot creates while executing testcases so called events. - After tbot ended with the testcase it can call event_backends, - which convert the events to different formats. more info: - - https://github.com/hsdenx/tbot/blob/master/doc/README.event - - demo for a event backend: - http://xeidos.ddns.net/tests/test_db_auslesen.php - -- tbot cmdline parameters: - -$ python2.7 src/common/tbot.py --help -Usage: tbot.py [options] - -Options: - -h, --help show this help message and exit - -c CFGFILE, --cfgfile=CFGFILE - the tbot common configfilename - -l LOGFILE, --logfile=LOGFILE - the tbot logfilename, if default, tbot creates a - defaultnamelogfile - -t TC, --testcase=TC the testcase which should be run - -v, --verbose be verbose, print all read/write to stdout - -w WORKDIR, --workdir=WORKDIR - set workdir, default os.getcwd() -$ - -tbot needs the following files for proper execution: - - - tbot board configuration file (option -c): - A board configuration file contains settings tbot needs to - connect to the Lab PC and board specific variable settings - for testcases. - - - name of the logfile tbot creates (option -l) - defaultname: 'log/' + now.strftime("%Y-%m-%d-%H-%M") + '.log' - - - tbots working directory (option -w) - - - the testcasename tbot executes (option -t) - -You are interested and want to use tbot? 
-If so, please read on the file: -tools/tbot/README.install - -If not read [3] ;-) - -Heiko Schocher <hs@denx.de> -v1 2016.01.22 - --------------- -[1] https://github.com/hsdenx/tbot -[2] http://www.denx.de/wiki/DUTS/DUTSDocs -[3] automated Testsetup with buildbot and tbot doing cyclic tests - (buildbot used for starting tbot TC and web presentation of the - results, all testing done through tbot): - http://xeidos.ddns.net/buildbot/tgrid - Host PC in Letkes/hungary - VL in munich/germany - - Fancy things are done here, for example: - - http://xeidos.ddns.net/buildbot/builders/smartweb_dfu/builds/43/steps/shell/logs/tbotlog - (I try to cleanup the logfile soon, so it is not so filled with crap ;-) - A first step see here: - http://xeidos.ddns.net/buildbot/builders/smartweb_dfu/builds/45/steps/shell/logs/tbotlog - (same TC now with the new loglevel = 'CON' ... not yet perfect) - Executed steps: - - clone u-boot.git - - set toolchain - - get a list of patchwork patches from my U-Boots ToDo list - - download all of them, and check them with checkpatch - and apply them to u-boot.git - - compile U-Boot for the smartweb board - - install the resulting images on the smartweb board - - boot U-boot - - test DFU - - more TC should be added here for testing U-Boot - - - automatic "git bisect" - https://github.com/hsdenx/tbot/blob/master/src/tc/tc_board_git_bisect.py - http://xeidos.ddns.net/buildbot/builders/tqm5200s/builds/3/steps/shell/logs/tbotlog - - If a current U-Boot image not works on the tqm5200 board - this TC can be started. It starts a "git bisect" session, - and compiles for each step U-Boot, install it on the tqm5200 - board, and tests if U-Boot works ! - - At the end, it detects the commit, which breaks the board - - This TC is not dependend on U-Boot nor on a special board. It - needs only 3 variables: - tb.board_git_bisect_get_source_tc: TC which gets the source tree, in which - "git bisect" should be executed - tb.board_git_bisect_call_tc: TC which gets called every "git bisect" step, - which executes commands for detecting if current source code is OK or not. - This could be a TC which compiles U-Boot, install it on the board and - executes TC on the new booted U-Boot image. ! Board maybe gets borken, - as not all U-Boot images work, so you must have a TC which install U-Boot - image for example through a debugger. - tb.board_git_bisect_good_commit: last nown good commit id diff --git a/tools/tbot/README-ToDo b/tools/tbot/README-ToDo deleted file mode 100644 index daf1af1323..0000000000 --- a/tools/tbot/README-ToDo +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2016 DENX Software Engineering GmbH -# Heiko Schocher <hs@denx.de> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -ToDo list for tbot -================== - -please look also into the tbot ToDo list. -https://github.com/hsdenx/tbot/blob/master/ToDo - -- cleanup tbot code: - - remove all retry / timeout pieces of code - - clean up tbot function names, as I am not good in - giving function a understandable name ;-) - - as I am not a python programmer, cleanup whole tbot code - -- introduce a "layering" like yocto do, so U-Boot TC can integrated - into U-Boot source code. 
- - Proposal: - introduce subdirs in "src/tc" - - lab: all lab specific stuff - lab/common: common lab stuff (for example ssh handling) - lab/ssh_std: ssh_std specific stuff - - u-boot: all u-boot tests - u-boot/common: common u-boot tc - u-boot/duts: DUTS tc - u-boot-dxr2: all u-boot dxr2 board specific tc - - board: board tc - board/common: common board tc - board/dxr2: all tc for dxr2 board - - linux: all linux tc - linux/common: common linux tc - linux/dxr2 - - - move U-Boot special TC to U-Boot source - -> need a mechanism in tbot, how it gets automatically for example - U-Boot TC from U-Boot source... - -> add a consistency checker - -- simplify tbot log output (seperate a lot of output which is currently - in INFO logging level, to another logging level) - started (new loglevel "CON", whih prints read/write from console only), see: - https://github.com/hsdenx/tbot/commit/b4ab2567ad8c19ad53f785203159d3c8465a21c6 - - make the timestamp configurable - -- Open more than 2 filehandles ? - Do we need for more complex TC more than 2 filehandles? - -- Find a way to document all TC and document all variables they use in an - automated way. - -- write a lot of more TC - -- get U-Boot configuration settings from current U-Boot code and use - them in U-Boot TC-es diff --git a/tools/tbot/README.create_a_new_testcase b/tools/tbot/README.create_a_new_testcase deleted file mode 100644 index fbf8ae8329..0000000000 --- a/tools/tbot/README.create_a_new_testcase +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2016 DENX Software Engineering GmbH -# Heiko Schocher <hs@denx.de> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -write a new testcase -===================== - -A TC is written in python, so you can use python as usual. For accessing -the boards console, use functions from the tbotlib, therefore - -First import the tbotlib with the line: - - from tbotlib import tbot - -If your TC uses variables, please add a line which adds them to -the log file (for debugging purposes): - - logging.info("args: %s ...", tb.varname, ...) - -Say tbot, for which board state your TC is valid with: - - tb.set_board_state("u-boot") - -Then you are ready ... and you can use the tbotlib funtions -for writting/reading to the boards console. - -Big fat warning: - -A TC must worry about to end only if a board has finished the shell -command! - -Not following this rule, will end in unpredictable behaviour. - -(hopefully) useful tbotlib functions -==================================== -- set the board state, you want to test - tb.set_board_state(state) - states are: "u-boot" or "linux" - If tbot could not set the board state, tbot ends with failure. - -- write a command to the boards console: - tb.eof_write_con(command): - write the command to the boards console. If this - fails, tbot ends with failure - -- write a command to boards console and wait for prompt: - tb.eof_write_cmd(fd, command): - fd: filedescriptor which is used, use tb.channel_con for boards console - command: command which is written to fd - - Wait endless for board prompt - -- write a list of commands to boards console: - tb.eof_write_cmd_list(fd, cmdlist): - fd: filedescriptor which is used, use tb.channel_con for boards console - cmdlist: python list of commandstrings which is written to fd - -- wait for boards prompt: - tb.eof_read_end_state_con(retry): - retry: deprecated, not used anymore, cleanup needed here... 
- tbot waits endless for the boards prompt - -- write a command, wait for prompt and check, if a string is read - tb.write_cmd_check(fd, cmd, string): - fd: filedescriptor which is used, use tb.channel_con for boards console - cmd: command, which is send to fd - string: string which should be read from fd - - return value: - True, if string is read and tbot got back boards prompt - False, else - - tb.eof_write_cmd_check(fd, cmd, string): - same as tb.write_cmd_check(fd, cmd, string) except, that tbot - ends immediately with Failure, if string is not read. - -- read until prompt and search strings: - tb.readline_and_search_strings(fd, strings): - fd: filedescriptor which is used, use tb.channel_con for boards console - strings: python list of strings, which can be read - If one of this strings is read, this function return the index, which - string is read. This function shoud be called in a while loop, - until this function returns 'prompt' - -- read a line from filedescriptor: - not recommended to use, as the TC must check, if tprompt is read for every - readen line. Also TC must ensure, that it ends only, if prompt is read. - tb.read_line(fd, retry) - fd: filedescriptor which is used, use tb.channel_con for boards console - retry: retry of trying to reead a line - - return values: - True, if a line is read. Readen line in tb.buf[fd] - False, if something read, but not a complete line - None, if nothing is read - - check if string contains prompt with: - tb.is_end_fd(fd, string) - fd: filedescriptor which is used, use tb.channel_con for boards console - string: buffer, in which a prompt gets searched. - -- calling other TC: - eof_call_tc(name): - call another TC from "src/tc" - if the called TC fails with failure, tbot ends with failure - - call_tc(name): - call another TC from "src/tc" - if the TC which call_tc calls fails, call_tc() returns False, else True - -There are more functions, but for writting TC this should be enough. But -its software, so new useful functions can always pop up. - -Heiko Schocher <hs@denx.de> -v1 2016.01.23 diff --git a/tools/tbot/README.install b/tools/tbot/README.install deleted file mode 100644 index a68e70524d..0000000000 --- a/tools/tbot/README.install +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright (c) 2016 DENX Software Engineering GmbH -# Heiko Schocher <hs@denx.de> -# -# SPDX-License-Identifier: GPL-2.0+ -# - -install tbot on your PC (linux only tested): -============================================ - -- get the source code: - -$ git clone https://github.com/hsdenx/tbot.git -[...] -$ - - cd into the tbot directory. - -- you need the for running tbot the python paramiko module, see: - http://www.paramiko.org/installing.html - - paramiko is used for handling ssh sessions, and open filedescriptors - on a ssh connection. Tbot open a ssh connection to a "lab PC" and - opens on that connection 2 filehandles, one for control functions - and one for the connection to the boards console. May it is worth - to think about to open more filehandles and use them in tbot, but - thats a point in the Todo list ... - - See [1] for more infos about tbot principles. - -- prepare a directory for storing the logfiles - and pass it with the commandline option "-l" - to tbot. 
Default is the directory "log" in the tbot - root (don;t forget to create it, if you want to use it) - -- If your VL is not yet in tbot source, integrate it - (This task has only to be done once for your VL): - - A VL has, as described in [2] "necessary tasks for a Lab PC" explained, - 3 tasks: - - a) power on/off the board - b) get power state of the board - c) connect to the boards console - - As tbot sends only shell commands (also to the Lab PC) - this tasks must be executable through shell commands on your - Lab PC: - - Task a) power on/off board: - default TC for this task is: - https://github.com/hsdenx/tbot/blob/master/src/tc/tc_lab_denx_power.py - - - now copy this file to for example - cp src/tc/tc_lab_denx_power.py src/tc/tc_lab_denx_power_XXX.py - (replace XXX to a proper value) - and adapt the "remote_power" command from the denx lab to your needs. - - As this TC powers on the board for all your boards in your VL, - you can differ between the boards through the tbot class - variable "tb.boardlabpowername" (which is in the default case the - same as "tb.boardname"), but you may need to name the power target - with an other name than boardname, so you can configure this case. - The power state "tb.power_state" which the TC has to set - is "on" for power on, or "off" for power off. - - If switching on the power is successful, call "tb.end_tc(True)" - else "tb.end_tc(False)" - - - set in your board config file: - self.tc_lab_denx_power_tc = 'tc_lab_denx_power_XXX.py' - - Task b) power on/off board: - default TC for this task is: - https://github.com/hsdenx/tbot/blob/master/src/tc/tc_lab_denx_get_power_state.py - - - now copy this file to for example - (replace XXX to a proper value) - cp src/tc/tc_lab_denx_get_power_state.py src/tc/tc_lab_denx_get_power_state_XXX.py - and adapt the commands to your needs. - - If the power of the board is on, call "tb.end_tc(True)" - else "tb.end_tc(False)" - - - set in your board config file: - self.tc_lab_denx_get_power_state_tc = 'tc_lab_denx_get_power_state_XXX.py' - - Task c) connect to the boards console: - default TC for this task is: - https://github.com/hsdenx/tbot/blob/master/src/tc/tc_lab_denx_connect_to_board.py - - - now copy this file to for example - (replace XXX to a proper value) - cp src/tc/tc_lab_denx_connect_to_board.py src/tc/tc_lab_denx_connect_to_board_XXX.py - and adapt the commands to your needs. - - If connect fails end this TC with "tb.end_tc(False)" - else call "tb.end_tc(True)" - - If you want to use kermit for connecting to the boards console, you - can use: - - https://github.com/hsdenx/tbot/blob/master/src/tc/tc_workfd_connect_with_kermit.py - - Example for such a board in the VL from denx: - self.tc_lab_denx_connect_to_board_tc = 'tc_workfd_connect_with_kermit.py' - https://github.com/hsdenx/tbot/blob/master/tbot_dxr2.cfg#L24 - - Hopefully this works for you too. - - - set in your board config file: - self.tc_lab_denx_connect_to_board_tc = 'tc_lab_denx_connect_to_board_XXX.py' - - remarks while writting this: - - Currently there is only the denx VL. Original idea was to include - other VL through a seperate class/file in - https://github.com/hsdenx/tbot/tree/master/src/lab_api - but it turned out, that if we say "ssh" is the standard way to connect - to a VL, we can integrate the VL specific tasks through testcases, see - above, so we should do: - - rename the "denx" API to a more general name. - This is a point on my ToDo list ... 
done, renamed to 'ssh_std' - - - the VL specific configuration may moved from the board config files - and should be collected in VL specific config files, which boards - config file simple include. - -- prepare password.py file: - This file contains all passwords tbot needs (for example for - linux login on the boards) - tbot searches this file in the tbot root directory. - It is a simple python file, for example: - - # passwords for the lab - if (board == 'lab'): - if (user == 'hs'): - password = 'passwordforuserhs' - if (user == 'root'): - password = 'passwordforrootuser' - # passwords for the boards - elif (board == 'mcx'): - if (user == 'root'): - password = 'passwordformcxrootfs' - else: - if (user == 'root'): - password = '' - -- prepare board config file - Each board which is found in the VL needs a tbot configuration file - pass the config file name with the option '-c' to tbot, tbot searches - in the root dir for them. - - board Example (dxr2 board): - https://github.com/hsdenx/tbot/blob/master/tbot_dxr2.cfg - - Necessary variables: - - line 3: boardname, here it is the "etamin" board - no default value, must be set. - line 4: boardlabname: name used for connecting to the board - may differ from tb.boardname, default tb.boardname - line 5: boardlabpowername: name used for power on/off - may differ from tb.boardname, default tb.boardname - line 6: tftpboardname: name used for tftp subdir (from where - U-Boot loads images for example). - may differ from tb.boardname, default tb.boardname - line 7: labprompt: linux prompt tbot sets - no defaultvalue, must be set (maybe we should introduce - "ttbott" as default ... - line 8: debug: If True, adds debug output on the tbot shell - line 9: debugstatus: enable status debug output on the shell - line 10: ip: Where tbot finds the Lab PC - line 11: user: As which user does tbot logs into the Lab PC - line 12: accept_all: passed to paramiko, accept all connections - line 13: keepalivetimout: passed to paramiko, timeout for sending - keepalive message. - line 14: channel_timeout: passed to paramiko - line 15: loglevel: tbots loglevel for adding entries into the logfile. - line 17: wdt_timeout: timeout in seconds for tbots watchdog. - Watchdog gets triggered if prompt get read. - line 24: tc_lab_denx_connect_to_board_tc: Which TC is used for - connecting to the boards console the TC, here: - https://github.com/hsdenx/tbot/blob/master/src/tc/tc_workfd_connect_with_kermit.py - line 27: uboot_prompt: boards U-Boot prompt - line 28: linux_prompt: boards linux prompt - - Now comes a list of variables TC needs, this vary from which TC - you start on the board. - -Thats it ... you now can call tbot and hopefully, it works ;-) -Find an example log [3] for calling simple U-Boot TC for setting -an U-Boot Environmentvariable. 
- -If you have problems in setting tbot up, please contact me -(and may give me ssh access to your Lab PC ;-) - -If you have running your first TC [3], you may want to write now your own -TC (and hopefully share them), so continue with: -u-boot:tools/tbot/README.create_a_new_testcase - -Heiko Schocher <hs@denx.de> -v2 2016.04.26 - --------------- - -[1] tbot Dokumentation: - [2] u-boot:/tools/tbot/README - https://github.com/hsdenx/tbot/blob/master/README.md - tbot-devel@googlegroups.com - -[3] Example for a first U-Boot TC which should always work: - (with commandline option "-v" for verbose output): -hs@localhost:tbot [event-devel] $ python2.7 src/common/tbot.py -c tbot_dxr2.cfg -t tc_ub_setenv.py -v -l log/tbot.log -**** option cfg: tbot_dxr2.cfg log: log/tbot.log tc: tc_ub_setenv.py v 1 -('CUR WORK PATH: ', '/home/hs/data/Entwicklung/tbot') -('CFGFILE ', 'tbot_dxr2.cfg') -('LOGFILE ', '/home/hs/data/Entwicklung/tbot/log/tbot.log') -tb_ctrl: Last login: Mon Apr 25 14:52:42 2016 from 87.97.29.27 -************************************************************* -BDI2000 Assignment: (last updated: 2015-11-20 12:30 MET) -bdi1 => techem bdi2 => cetec_mx25 bdi3 => lpc3250 -bdi4 => - bdi5 => --Rev.B!-- bdi6 => tqm5200s -bdi7 => [stefano] bdi8 => smartweb bdi9 => sigmatek-nand -bdi10 => pcm052 bdi11 => socrates bdi12 => aristainetos -bdi13 => imx53 bdi14 => ib8315 bdi15 => cairo -bdi16 => g2c1 bdi17 => lwe090 bdi18 => symphony -bdi19 => dxr2 bdi20 => ima3-mx6 bdi21 => sama5d3 -bdi98 => - bdi99 => - bdi0 => - -Please power off unused systems when you leave! Thanks, wd. -************************************************************* -tb_ctrl: pollux:~ hs $ -tb_ctrl: export PS1=ttbott -ttbott -tb_ctrl: stty cols 200 -ttbott -tb_ctrl: export TERM=vt200 -ttbott -tb_ctrl: echo $COLUMNS -200 -ttbott -tb_con: Last login: Tue Apr 26 06:28:59 2016 from 87.97.29.27 -************************************************************* -BDI2000 Assignment: (last updated: 2015-11-20 12:30 MET) -bdi1 => techem bdi2 => cetec_mx25 bdi3 => lpc3250 -bdi4 => - bdi5 => --Rev.B!-- bdi6 => tqm5200s -bdi7 => [stefano] bdi8 => smartweb bdi9 => sigmatek-nand -bdi10 => pcm052 bdi11 => socrates bdi12 => aristainetos -bdi13 => imx53 bdi14 => ib8315 bdi15 => cairo -bdi16 => g2c1 bdi17 => lwe090 bdi18 => symphony -bdi19 => dxr2 bdi20 => ima3-mx6 bdi21 => sama5d3 -bdi98 => - bdi99 => - bdi0 => - -Please power off unused systems when you leave! Thanks, wd. -************************************************************* -tb_con: pollux:~ hs $ -tb_con: export PS1=ttbot -tb_con: t -ttbott -tb_con: stty cols 200 -ttbott -tb_con: export TERM=vt200 -ttbott -tb_con: echo $COLUMNS -200 -ttbott -tb_con: ssh hs@lena -tb_con: hs@lena's password: -tb_con: -tb_con: Last login: Mon Apr 25 07:03:29 2016 from 192.168.1.1 -tb_con: [hs@lena ~]$ -tb_con: export PS1=ttbott -ttbott -tb_con: stty cols 200 -ttbott -tb_con: export TERM=vt200 -ttbott -tb_con: echo $COLUMNS -200 -ttbott -tb_con: kermit -C-Kermit 8.0.211, 10 Apr 2004, for Linux - Copyright (C) 1985, 2004, - Trustees of Columbia University in the City of New York. -Type ? or HELP for help. 
-(/home/hs/) C-Kermit> -tb_con: set line /dev/ttyUSB0 -(/home/hs/) C-Kermit> -tb_con: set speed 115200 -/dev/ttyUSB0, 115200 bps -(/home/hs/) C-Kermit> -tb_con: set flow-control none -(/home/hs/) C-Kermit> -tb_con: set carrier-watch off -(/home/hs/) C-Kermit> -tb_con: connect -Connecting to /dev/ttyUSB0, speed 115200 - Escape character: Ctrl-\ (ASCII 28, FS): enabled -Type the escape character followed by C to get back, -or followed by ? to see other options. ----------------------------------------------------- -tb_con: <INTERRUPT> -U-Boot# -tb_con: U-Boot# -U-Boot# -tb_con: setenv Heiko Schocher -U-Boot# -tb_con: printenv Heiko -Heiko=Schocher -U-Boot# -[('tc_workfd_ssh.py', 1, 0), ('tc_workfd_connect_with_kermit.py', 1, 0), ('tc_ub_setenv.py', 1, 0)] -End of TBOT: success -hs@localhost:tbot [event-devel] $ |