author    Michael Tretter <m.tretter@pengutronix.de>    2018-12-03 16:37:53 +0100
committer Michal Simek <michal.simek@xilinx.com>        2019-01-24 10:03:42 +0100
commit    16c78cba92f0cb24d56eaa87356beaca4a2d7f56
tree      5ac4e266203f224a2bcdc28c33eefb43ba25f77f
parent    aff66f22d6eeb27c6329c0a3c1ebc52914c8affa
tools: zynqmpimage: round up partition size
The FSBL copies "Total Partition Word Length" * 4 bytes from the boot.bin,
which implies that the partition size is 4-byte aligned. When writing the
partition, mkimage calculates the "Total Partition Word Length" by dividing
the size by 4, which implicitly cuts off unaligned bytes at the end of the
added binary. Instead of rounding down, the size must be rounded up to the
next multiple of 4 bytes and the binary padded accordingly.

Signed-off-by: Michael Tretter <m.tretter@pengutronix.de>
Reviewed-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
-rw-r--r--  tools/zynqmpbif.c | 15
 1 file changed, 12 insertions(+), 3 deletions(-)
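Illustration (not part of the commit): the standalone sketch below shows why
dividing an unaligned length by 4 silently drops the trailing bytes, and why
rounding the length up first preserves them. The ROUND macro here is a local
stand-in assumed to round up to the next multiple of its second argument,
matching how the patch uses it in tools/zynqmpbif.c.

/*
 * Standalone sketch, not mkimage code. Assumption: ROUND(x, y) rounds
 * x up to the next multiple of y.
 */
#include <stdio.h>
#include <stddef.h>

#define ROUND(x, y)     ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        size_t len = 10;                        /* unaligned binary size */
        size_t words_old = len / 4;             /* 2 words: only 8 bytes would be copied */
        size_t len_padded = ROUND(len, 4);      /* 12 */
        size_t words_new = len_padded / 4;      /* 3 words: all 10 payload bytes plus padding */

        printf("round down: %zu words -> %zu bytes copied\n", words_old, words_old * 4);
        printf("round up:   %zu words -> %zu bytes copied\n", words_new, words_new * 4);
        return 0;
}

For a 10-byte binary the old calculation yields 2 words, so the FSBL would
copy only 8 bytes; the rounded-up size yields 3 words and the full payload
survives, with the extra bytes zero-padded.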
diff --git a/tools/zynqmpbif.c b/tools/zynqmpbif.c
index 6c8f66055d..a33c15e1f0 100644
--- a/tools/zynqmpbif.c
+++ b/tools/zynqmpbif.c
@@ -319,16 +319,25 @@ static int bif_add_pmufw(struct bif_entry *bf, const char *data, size_t len)
 static int bif_add_part(struct bif_entry *bf, const char *data, size_t len)
 {
        size_t parthdr_offset = 0;
+       size_t len_padded = ROUND(len, 4);
+
        struct partition_header parthdr = {
-               .len_enc = cpu_to_le32(len / 4),
-               .len_unenc = cpu_to_le32(len / 4),
-               .len = cpu_to_le32(len / 4),
+               .len_enc = cpu_to_le32(len_padded / 4),
+               .len_unenc = cpu_to_le32(len_padded / 4),
+               .len = cpu_to_le32(len_padded / 4),
                .entry_point = cpu_to_le64(bf->entry),
                .load_address = cpu_to_le64(bf->load),
        };
        int r;
        uint32_t csum;
 
+       if (len < len_padded) {
+               char *newdata = malloc(len_padded);
+               memcpy(newdata, data, len);
+               memset(newdata + len, 0, len_padded - len);
+               data = newdata;
+       }
+
        if (bf->flags & (1ULL << BIF_FLAG_PMUFW_IMAGE))
                return bif_add_pmufw(bf, data, len);