Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/cfi_flash.c             |    2
-rw-r--r--  drivers/mtd/nand/Kconfig            |    8
-rw-r--r--  drivers/mtd/nand/Makefile           |    1
-rw-r--r--  drivers/mtd/nand/fsl_ifc_spl.c      |   24
-rw-r--r--  drivers/mtd/nand/mxs_nand.c         |    2
-rw-r--r--  drivers/mtd/nand/nand_base.c        |   72
-rw-r--r--  drivers/mtd/nand/nand_ids.c         |    4
-rw-r--r--  drivers/mtd/nand/nand_spl_simple.c  |   62
-rw-r--r--  drivers/mtd/nand/omap_gpmc.c        |    3
-rw-r--r--  drivers/mtd/nand/sunxi_nand.c       | 1845
-rw-r--r--  drivers/mtd/nand/tegra_nand.c       |    2
-rw-r--r--  drivers/mtd/onenand/onenand_base.c  |   31
-rw-r--r--  drivers/mtd/onenand/onenand_spl.c   |   48
-rw-r--r--  drivers/mtd/onenand/onenand_uboot.c |   30
-rw-r--r--  drivers/mtd/spi/Kconfig             |   12
-rw-r--r--  drivers/mtd/spi/Makefile            |    1
-rw-r--r--  drivers/mtd/spi/sunxi_spi_spl.c     |  283
-rw-r--r--  drivers/mtd/ubispl/Makefile         |    1
-rw-r--r--  drivers/mtd/ubispl/ubi-wrapper.h    |  106
-rw-r--r--  drivers/mtd/ubispl/ubispl.c         |  926
-rw-r--r--  drivers/mtd/ubispl/ubispl.h         |  136
21 files changed, 3565 insertions(+), 34 deletions(-)
diff --git a/drivers/mtd/cfi_flash.c b/drivers/mtd/cfi_flash.c index 8ccaff0e63..33c4a9342f 100644 --- a/drivers/mtd/cfi_flash.c +++ b/drivers/mtd/cfi_flash.c @@ -608,7 +608,7 @@ static int flash_full_status_check (flash_info_t * info, flash_sect_t sector, case CFI_CMDSET_INTEL_EXTENDED: case CFI_CMDSET_INTEL_STANDARD: if ((retcode == ERR_OK) - && !flash_isequal (info, sector, 0, FLASH_STATUS_DONE)) { + && !flash_isset(info, sector, 0, FLASH_STATUS_DONE)) { retcode = ERR_INVAL; printf ("Flash %s error at address %lx\n", prompt, info->start[sector]); diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 8c46a2ff8e..5ce7d6d06c 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -64,12 +64,14 @@ config NAND_PXA3XX PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). config NAND_SUNXI - bool "Support for NAND on Allwinner SoCs in SPL" + bool "Support for NAND on Allwinner SoCs" depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I select SYS_NAND_SELF_INIT ---help--- - Enable support for NAND. This option allows SPL to read from - sunxi NAND using DMA transfers. + Enable support for NAND. This option enables the standard and + SPL drivers. + The SPL driver only supports reading from the NAND using DMA + transfers. config NAND_ARASAN bool "Configure Arasan Nand" diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 837d397bda..1df9273cdd 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -66,6 +66,7 @@ obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o obj-$(CONFIG_NAND_PLAT) += nand_plat.o +obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o else # minimal SPL drivers diff --git a/drivers/mtd/nand/fsl_ifc_spl.c b/drivers/mtd/nand/fsl_ifc_spl.c index cbeb74a5bb..4e49a4e154 100644 --- a/drivers/mtd/nand/fsl_ifc_spl.c +++ b/drivers/mtd/nand/fsl_ifc_spl.c @@ -11,6 +11,9 @@ #include <asm/io.h> #include <fsl_ifc.h> #include <linux/mtd/nand.h> +#ifdef CONFIG_CHAIN_OF_TRUST +#include <fsl_validate.h> +#endif static inline int is_blank(uchar *addr, int page_size) { @@ -268,6 +271,27 @@ void nand_boot(void) */ flush_cache(CONFIG_SYS_NAND_U_BOOT_DST, CONFIG_SYS_NAND_U_BOOT_SIZE); #endif + +#ifdef CONFIG_CHAIN_OF_TRUST + /* + * U-Boot header is appended at end of U-boot image, so + * calculate U-boot header address using U-boot header size. + */ +#define CONFIG_U_BOOT_HDR_ADDR \ + ((CONFIG_SYS_NAND_U_BOOT_START + \ + CONFIG_SYS_NAND_U_BOOT_SIZE) - \ + CONFIG_U_BOOT_HDR_SIZE) + spl_validate_uboot(CONFIG_U_BOOT_HDR_ADDR, + CONFIG_SYS_NAND_U_BOOT_START); + /* + * In case of failure in validation, spl_validate_uboot would + * not return back in case of Production environment with ITS=1. + * Thus U-Boot will not start. + * In Development environment (ITS=0 and SB_EN=1), the function + * may return back in case of non-fatal failures. + */ +#endif + uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START; uboot(); } diff --git a/drivers/mtd/nand/mxs_nand.c b/drivers/mtd/nand/mxs_nand.c index c90a3a7bd2..94fc5c18a0 100644 --- a/drivers/mtd/nand/mxs_nand.c +++ b/drivers/mtd/nand/mxs_nand.c @@ -976,7 +976,7 @@ static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs) * counted, so we know the physical geometry. This enables us to make some * important configuration decisions. * - * The return value of this function propogates directly back to this driver's + * The return value of this function propagates directly back to this driver's * call to nand_scan(). 
Anything other than zero will cause this driver to * tear everything down and declare failure. */ diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 689716753a..d1287bc3be 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -29,6 +29,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <common.h> +#if CONFIG_IS_ENABLED(OF_CONTROL) +#include <fdtdec.h> +#endif #include <malloc.h> #include <watchdog.h> #include <linux/err.h> @@ -2411,7 +2414,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, int cached = writelen > bytes && page != blockmask; uint8_t *wbuf = buf; int use_bufpoi; - int part_pagewr = (column || writelen < (mtd->writesize - 1)); + int part_pagewr = (column || writelen < mtd->writesize); if (part_pagewr) use_bufpoi = 1; @@ -3763,6 +3766,66 @@ ident_done: return type; } +#if CONFIG_IS_ENABLED(OF_CONTROL) +DECLARE_GLOBAL_DATA_PTR; + +static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip, int node) +{ + int ret, ecc_mode = -1, ecc_strength, ecc_step; + const void *blob = gd->fdt_blob; + const char *str; + + ret = fdtdec_get_int(blob, node, "nand-bus-width", -1); + if (ret == 16) + chip->options |= NAND_BUSWIDTH_16; + + if (fdtdec_get_bool(blob, node, "nand-on-flash-bbt")) + chip->bbt_options |= NAND_BBT_USE_FLASH; + + str = fdt_getprop(blob, node, "nand-ecc-mode", NULL); + if (str) { + if (!strcmp(str, "none")) + ecc_mode = NAND_ECC_NONE; + else if (!strcmp(str, "soft")) + ecc_mode = NAND_ECC_SOFT; + else if (!strcmp(str, "hw")) + ecc_mode = NAND_ECC_HW; + else if (!strcmp(str, "hw_syndrome")) + ecc_mode = NAND_ECC_HW_SYNDROME; + else if (!strcmp(str, "hw_oob_first")) + ecc_mode = NAND_ECC_HW_OOB_FIRST; + else if (!strcmp(str, "soft_bch")) + ecc_mode = NAND_ECC_SOFT_BCH; + } + + + ecc_strength = fdtdec_get_int(blob, node, "nand-ecc-strength", -1); + ecc_step = fdtdec_get_int(blob, node, "nand-ecc-step-size", -1); + + if ((ecc_step >= 0 && !(ecc_strength >= 0)) || + (!(ecc_step >= 0) && ecc_strength >= 0)) { + pr_err("must set both strength and step size in DT\n"); + return -EINVAL; + } + + if (ecc_mode >= 0) + chip->ecc.mode = ecc_mode; + + if (ecc_strength >= 0) + chip->ecc.strength = ecc_strength; + + if (ecc_step > 0) + chip->ecc.size = ecc_step; + + return 0; +} +#else +static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip, int node) +{ + return 0; +} +#endif /* CONFIG_IS_ENABLED(OF_CONTROL) */ + /** * nand_scan_ident - [NAND Interface] Scan for the NAND device * @mtd: MTD device structure @@ -3779,6 +3842,13 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, int i, nand_maf_id, nand_dev_id; struct nand_chip *chip = mtd_to_nand(mtd); struct nand_flash_dev *type; + int ret; + + if (chip->flash_node) { + ret = nand_dt_init(mtd, chip, chip->flash_node); + if (ret) + return ret; + } /* Set the default functions */ nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16); diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 561d2cd63b..ce0a14e28a 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c @@ -62,6 +62,10 @@ struct nand_flash_dev nand_flash_ids[] = { { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} }, SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640, NAND_ECC_INFO(40, SZ_1K), 4 }, + {"H27QCG8T2E5R‐BCF 64G 3.3V 8-bit", + { .id = {0xad, 0xde, 0x14, 0xa7, 0x42, 0x4a} }, + SZ_16K, SZ_8K, SZ_4M, NAND_NEED_SCRAMBLING, 6, 1664, + NAND_ECC_INFO(56, SZ_1K), 1 }, LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), 
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), diff --git a/drivers/mtd/nand/nand_spl_simple.c b/drivers/mtd/nand/nand_spl_simple.c index 60a7607073..55f48d3a14 100644 --- a/drivers/mtd/nand/nand_spl_simple.c +++ b/drivers/mtd/nand/nand_spl_simple.c @@ -209,6 +209,68 @@ static int nand_read_page(int block, int page, void *dst) } #endif +#ifdef CONFIG_SPL_UBI +/* + * Temporary storage for non NAND page aligned and non NAND page sized + * reads. Note: This does not support runtime detected FLASH yet, but + * that should be reasonably easy to fix by making the buffer large + * enough :) + */ +static u8 scratch_buf[CONFIG_SYS_NAND_PAGE_SIZE]; + +/** + * nand_spl_read_block - Read data from physical eraseblock into a buffer + * @block: Number of the physical eraseblock + * @offset: Data offset from the start of @peb + * @len: Data size to read + * @dst: Address of the destination buffer + * + * This could be further optimized if we'd have a subpage read + * function in the simple code. On NAND which allows subpage reads + * this would spare quite some time to readout e.g. the VID header of + * UBI. + * + * Notes: + * @offset + @len are not allowed to be larger than a physical + * erase block. No sanity check done for simplicity reasons. + * + * To support runtime detected flash this needs to be extended by + * information about the actual flash geometry, but thats beyond the + * scope of this effort and for most applications where fast boot is + * required it is not an issue anyway. + */ +int nand_spl_read_block(int block, int offset, int len, void *dst) +{ + int page, read; + + /* Calculate the page number */ + page = offset / CONFIG_SYS_NAND_PAGE_SIZE; + + /* Offset to the start of a flash page */ + offset = offset % CONFIG_SYS_NAND_PAGE_SIZE; + + while (len) { + /* + * Non page aligned reads go to the scratch buffer. + * Page aligned reads go directly to the destination. + */ + if (offset || len < CONFIG_SYS_NAND_PAGE_SIZE) { + nand_read_page(block, page, scratch_buf); + read = min(len, CONFIG_SYS_NAND_PAGE_SIZE - offset); + memcpy(dst, scratch_buf + offset, read); + offset = 0; + } else { + nand_read_page(block, page, dst); + read = CONFIG_SYS_NAND_PAGE_SIZE; + } + page++; + len -= read; + dst += read; + } + return 0; +} +#endif + int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst) { unsigned int block, lastblock; diff --git a/drivers/mtd/nand/omap_gpmc.c b/drivers/mtd/nand/omap_gpmc.c index 67f293dcd0..6e201d68e5 100644 --- a/drivers/mtd/nand/omap_gpmc.c +++ b/drivers/mtd/nand/omap_gpmc.c @@ -264,7 +264,8 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat, { struct nand_chip *chip = mtd_to_nand(mtd); struct omap_nand_info *info = nand_get_controller_data(chip); - uint32_t *ptr, val = 0; + const uint32_t *ptr; + uint32_t val = 0; int8_t i = 0, j; switch (info->ecc_scheme) { diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c new file mode 100644 index 0000000000..c4e2cd7f55 --- /dev/null +++ b/drivers/mtd/nand/sunxi_nand.c @@ -0,0 +1,1845 @@ +/* + * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com> + * Copyright (C) 2015 Roy Spliet <r.spliet@ultimaker.com> + * + * Derived from: + * https://github.com/yuq/sunxi-nfc-mtd + * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com> + * + * https://github.com/hno/Allwinner-Info + * Copyright (C) 2013 Henrik Nordström <Henrik Nordström> + * + * Copyright (C) 2013 Dmitriy B. 
<rzk333@gmail.com> + * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#include <common.h> +#include <fdtdec.h> +#include <memalign.h> +#include <nand.h> + +#include <linux/kernel.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> +#include <linux/io.h> + +#include <asm/gpio.h> +#include <asm/arch/clock.h> + +DECLARE_GLOBAL_DATA_PTR; + +#define NFC_REG_CTL 0x0000 +#define NFC_REG_ST 0x0004 +#define NFC_REG_INT 0x0008 +#define NFC_REG_TIMING_CTL 0x000C +#define NFC_REG_TIMING_CFG 0x0010 +#define NFC_REG_ADDR_LOW 0x0014 +#define NFC_REG_ADDR_HIGH 0x0018 +#define NFC_REG_SECTOR_NUM 0x001C +#define NFC_REG_CNT 0x0020 +#define NFC_REG_CMD 0x0024 +#define NFC_REG_RCMD_SET 0x0028 +#define NFC_REG_WCMD_SET 0x002C +#define NFC_REG_IO_DATA 0x0030 +#define NFC_REG_ECC_CTL 0x0034 +#define NFC_REG_ECC_ST 0x0038 +#define NFC_REG_DEBUG 0x003C +#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3) +#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) +#define NFC_REG_SPARE_AREA 0x00A0 +#define NFC_REG_PAT_ID 0x00A4 +#define NFC_RAM0_BASE 0x0400 +#define NFC_RAM1_BASE 0x0800 + +/* define bit use in NFC_CTL */ +#define NFC_EN BIT(0) +#define NFC_RESET BIT(1) +#define NFC_BUS_WIDTH_MSK BIT(2) +#define NFC_BUS_WIDTH_8 (0 << 2) +#define NFC_BUS_WIDTH_16 (1 << 2) +#define NFC_RB_SEL_MSK BIT(3) +#define NFC_RB_SEL(x) ((x) << 3) +#define NFC_CE_SEL_MSK (0x7 << 24) +#define NFC_CE_SEL(x) ((x) << 24) +#define NFC_CE_CTL BIT(6) +#define NFC_PAGE_SHIFT_MSK (0xf << 8) +#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 
0 : (x) - 10) << 8) +#define NFC_SAM BIT(12) +#define NFC_RAM_METHOD BIT(14) +#define NFC_DEBUG_CTL BIT(31) + +/* define bit use in NFC_ST */ +#define NFC_RB_B2R BIT(0) +#define NFC_CMD_INT_FLAG BIT(1) +#define NFC_DMA_INT_FLAG BIT(2) +#define NFC_CMD_FIFO_STATUS BIT(3) +#define NFC_STA BIT(4) +#define NFC_NATCH_INT_FLAG BIT(5) +#define NFC_RB_STATE(x) BIT(x + 8) + +/* define bit use in NFC_INT */ +#define NFC_B2R_INT_ENABLE BIT(0) +#define NFC_CMD_INT_ENABLE BIT(1) +#define NFC_DMA_INT_ENABLE BIT(2) +#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \ + NFC_CMD_INT_ENABLE | \ + NFC_DMA_INT_ENABLE) + +/* define bit use in NFC_TIMING_CTL */ +#define NFC_TIMING_CTL_EDO BIT(8) + +/* define NFC_TIMING_CFG register layout */ +#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \ + (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \ + (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \ + (((tCAD) & 0x7) << 8)) + +/* define bit use in NFC_CMD */ +#define NFC_CMD_LOW_BYTE_MSK 0xff +#define NFC_CMD_HIGH_BYTE_MSK (0xff << 8) +#define NFC_CMD(x) (x) +#define NFC_ADR_NUM_MSK (0x7 << 16) +#define NFC_ADR_NUM(x) (((x) - 1) << 16) +#define NFC_SEND_ADR BIT(19) +#define NFC_ACCESS_DIR BIT(20) +#define NFC_DATA_TRANS BIT(21) +#define NFC_SEND_CMD1 BIT(22) +#define NFC_WAIT_FLAG BIT(23) +#define NFC_SEND_CMD2 BIT(24) +#define NFC_SEQ BIT(25) +#define NFC_DATA_SWAP_METHOD BIT(26) +#define NFC_ROW_AUTO_INC BIT(27) +#define NFC_SEND_CMD3 BIT(28) +#define NFC_SEND_CMD4 BIT(29) +#define NFC_CMD_TYPE_MSK (0x3 << 30) +#define NFC_NORMAL_OP (0 << 30) +#define NFC_ECC_OP (1 << 30) +#define NFC_PAGE_OP (2 << 30) + +/* define bit use in NFC_RCMD_SET */ +#define NFC_READ_CMD_MSK 0xff +#define NFC_RND_READ_CMD0_MSK (0xff << 8) +#define NFC_RND_READ_CMD1_MSK (0xff << 16) + +/* define bit use in NFC_WCMD_SET */ +#define NFC_PROGRAM_CMD_MSK 0xff +#define NFC_RND_WRITE_CMD_MSK (0xff << 8) +#define NFC_READ_CMD0_MSK (0xff << 16) +#define NFC_READ_CMD1_MSK (0xff << 24) + +/* define bit use in NFC_ECC_CTL */ +#define NFC_ECC_EN BIT(0) +#define NFC_ECC_PIPELINE BIT(3) +#define NFC_ECC_EXCEPTION BIT(4) +#define NFC_ECC_BLOCK_SIZE_MSK BIT(5) +#define NFC_ECC_BLOCK_512 (1 << 5) +#define NFC_RANDOM_EN BIT(9) +#define NFC_RANDOM_DIRECTION BIT(10) +#define NFC_ECC_MODE_MSK (0xf << 12) +#define NFC_ECC_MODE(x) ((x) << 12) +#define NFC_RANDOM_SEED_MSK (0x7fff << 16) +#define NFC_RANDOM_SEED(x) ((x) << 16) + +/* define bit use in NFC_ECC_ST */ +#define NFC_ECC_ERR(x) BIT(x) +#define NFC_ECC_PAT_FOUND(x) BIT(x + 16) +#define NFC_ECC_ERR_CNT(b, x) (((x) >> ((b) * 8)) & 0xff) + +#define NFC_DEFAULT_TIMEOUT_MS 1000 + +#define NFC_SRAM_SIZE 1024 + +#define NFC_MAX_CS 7 + +/* + * Ready/Busy detection type: describes the Ready/Busy detection modes + * + * @RB_NONE: no external detection available, rely on STATUS command + * and software timeouts + * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy + * pin of the NAND flash chip must be connected to one of the + * native NAND R/B pins (those which can be muxed to the NAND + * Controller) + * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy + * pin of the NAND flash chip must be connected to a GPIO capable + * pin. + */ +enum sunxi_nand_rb_type { + RB_NONE, + RB_NATIVE, + RB_GPIO, +}; + +/* + * Ready/Busy structure: stores information related to Ready/Busy detection + * + * @type: the Ready/Busy detection mode + * @info: information related to the R/B detection mode. Either a gpio + * id or a native R/B id (those supported by the NAND controller). 
+ */ +struct sunxi_nand_rb { + enum sunxi_nand_rb_type type; + union { + struct gpio_desc gpio; + int nativeid; + } info; +}; + +/* + * Chip Select structure: stores information related to NAND Chip Select + * + * @cs: the NAND CS id used to communicate with a NAND Chip + * @rb: the Ready/Busy description + */ +struct sunxi_nand_chip_sel { + u8 cs; + struct sunxi_nand_rb rb; +}; + +/* + * sunxi HW ECC infos: stores information related to HW ECC support + * + * @mode: the sunxi ECC mode field deduced from ECC requirements + * @layout: the OOB layout depending on the ECC requirements and the + * selected ECC mode + */ +struct sunxi_nand_hw_ecc { + int mode; + struct nand_ecclayout layout; +}; + +/* + * NAND chip structure: stores NAND chip device related information + * + * @node: used to store NAND chips into a list + * @nand: base NAND chip structure + * @mtd: base MTD structure + * @clk_rate: clk_rate required for this NAND chip + * @timing_cfg TIMING_CFG register value for this NAND chip + * @selected: current active CS + * @nsels: number of CS lines required by the NAND chip + * @sels: array of CS lines descriptions + */ +struct sunxi_nand_chip { + struct list_head node; + struct nand_chip nand; + unsigned long clk_rate; + u32 timing_cfg; + u32 timing_ctl; + int selected; + int addr_cycles; + u32 addr[2]; + int cmd_cycles; + u8 cmd[2]; + int nsels; + struct sunxi_nand_chip_sel sels[0]; +}; + +static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand) +{ + return container_of(nand, struct sunxi_nand_chip, nand); +} + +/* + * NAND Controller structure: stores sunxi NAND controller information + * + * @controller: base controller structure + * @dev: parent device (used to print error messages) + * @regs: NAND controller registers + * @ahb_clk: NAND Controller AHB clock + * @mod_clk: NAND Controller mod clock + * @assigned_cs: bitmask describing already assigned CS lines + * @clk_rate: NAND controller current clock rate + * @chips: a list containing all the NAND chips attached to + * this NAND controller + * @complete: a completion object used to wait for NAND + * controller events + */ +struct sunxi_nfc { + struct nand_hw_control controller; + struct device *dev; + void __iomem *regs; + struct clk *ahb_clk; + struct clk *mod_clk; + unsigned long assigned_cs; + unsigned long clk_rate; + struct list_head chips; +}; + +static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) +{ + return container_of(ctrl, struct sunxi_nfc, controller); +} + +static void sunxi_nfc_set_clk_rate(unsigned long hz) +{ + struct sunxi_ccm_reg *const ccm = + (struct sunxi_ccm_reg *)SUNXI_CCM_BASE; + int div_m, div_n; + + div_m = (clock_get_pll6() + hz - 1) / hz; + for (div_n = 0; div_n < 3 && div_m > 16; div_n++) { + if (div_m % 2) + div_m++; + div_m >>= 1; + } + if (div_m > 16) + div_m = 16; + + /* config mod clock */ + writel(CCM_NAND_CTRL_ENABLE | CCM_NAND_CTRL_PLL6 | + CCM_NAND_CTRL_N(div_n) | CCM_NAND_CTRL_M(div_m), + &ccm->nand0_clk_cfg); + + /* gate on nand clock */ + setbits_le32(&ccm->ahb_gate0, (1 << AHB_GATE_OFFSET_NAND0)); +#ifdef CONFIG_MACH_SUN9I + setbits_le32(&ccm->ahb_gate1, (1 << AHB_GATE_OFFSET_DMA)); +#else + setbits_le32(&ccm->ahb_gate0, (1 << AHB_GATE_OFFSET_DMA)); +#endif +} + +static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags, + unsigned int timeout_ms) +{ + unsigned int timeout_ticks; + u32 time_start, status; + int ret = -ETIMEDOUT; + + if (!timeout_ms) + timeout_ms = NFC_DEFAULT_TIMEOUT_MS; + + timeout_ticks = (timeout_ms * CONFIG_SYS_HZ) / 
1000; + + time_start = get_timer(0); + + do { + status = readl(nfc->regs + NFC_REG_ST); + if ((status & flags) == flags) { + ret = 0; + break; + } + + udelay(1); + } while (get_timer(time_start) < timeout_ticks); + + writel(status & flags, nfc->regs + NFC_REG_ST); + + return ret; +} + +static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc) +{ + unsigned long timeout = (CONFIG_SYS_HZ * + NFC_DEFAULT_TIMEOUT_MS) / 1000; + u32 time_start; + + time_start = get_timer(0); + do { + if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS)) + return 0; + } while (get_timer(time_start) < timeout); + + dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n"); + return -ETIMEDOUT; +} + +static int sunxi_nfc_rst(struct sunxi_nfc *nfc) +{ + unsigned long timeout = (CONFIG_SYS_HZ * + NFC_DEFAULT_TIMEOUT_MS) / 1000; + u32 time_start; + + writel(0, nfc->regs + NFC_REG_ECC_CTL); + writel(NFC_RESET, nfc->regs + NFC_REG_CTL); + + time_start = get_timer(0); + do { + if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET)) + return 0; + } while (get_timer(time_start) < timeout); + + dev_err(nfc->dev, "wait for NAND controller reset timedout\n"); + return -ETIMEDOUT; +} + +static int sunxi_nfc_dev_ready(struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); + struct sunxi_nand_rb *rb; + unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20); + int ret; + + if (sunxi_nand->selected < 0) + return 0; + + rb = &sunxi_nand->sels[sunxi_nand->selected].rb; + + switch (rb->type) { + case RB_NATIVE: + ret = !!(readl(nfc->regs + NFC_REG_ST) & + NFC_RB_STATE(rb->info.nativeid)); + if (ret) + break; + + sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo); + ret = !!(readl(nfc->regs + NFC_REG_ST) & + NFC_RB_STATE(rb->info.nativeid)); + break; + case RB_GPIO: + ret = dm_gpio_get_value(&rb->info.gpio); + break; + case RB_NONE: + default: + ret = 0; + dev_err(nfc->dev, "cannot check R/B NAND status!\n"); + break; + } + + return ret; +} + +static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); + struct sunxi_nand_chip_sel *sel; + u32 ctl; + + if (chip > 0 && chip >= sunxi_nand->nsels) + return; + + if (chip == sunxi_nand->selected) + return; + + ctl = readl(nfc->regs + NFC_REG_CTL) & + ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN); + + if (chip >= 0) { + sel = &sunxi_nand->sels[chip]; + + ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | + NFC_PAGE_SHIFT(nand->page_shift - 10); + if (sel->rb.type == RB_NONE) { + nand->dev_ready = NULL; + } else { + nand->dev_ready = sunxi_nfc_dev_ready; + if (sel->rb.type == RB_NATIVE) + ctl |= NFC_RB_SEL(sel->rb.info.nativeid); + } + + writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA); + + if (nfc->clk_rate != sunxi_nand->clk_rate) { + sunxi_nfc_set_clk_rate(sunxi_nand->clk_rate); + nfc->clk_rate = sunxi_nand->clk_rate; + } + } + + writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL); + writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG); + writel(ctl, nfc->regs + NFC_REG_CTL); + + sunxi_nand->selected = chip; +} + +static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = 
to_sunxi_nfc(sunxi_nand->nand.controller); + int ret; + int cnt; + int offs = 0; + u32 tmp; + + while (len > offs) { + cnt = min(len - offs, NFC_SRAM_SIZE); + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + break; + + writel(cnt, nfc->regs + NFC_REG_CNT); + tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD; + writel(tmp, nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + if (ret) + break; + + if (buf) + memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE, + cnt); + offs += cnt; + } +} + +static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, + int len) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); + int ret; + int cnt; + int offs = 0; + u32 tmp; + + while (len > offs) { + cnt = min(len - offs, NFC_SRAM_SIZE); + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + break; + + writel(cnt, nfc->regs + NFC_REG_CNT); + memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt); + tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | + NFC_ACCESS_DIR; + writel(tmp, nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + if (ret) + break; + + offs += cnt; + } +} + +static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd) +{ + uint8_t ret; + + sunxi_nfc_read_buf(mtd, &ret, 1); + + return ret; +} + +static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, + unsigned int ctrl) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); + int ret; + u32 tmp; + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return; + + if (ctrl & NAND_CTRL_CHANGE) { + tmp = readl(nfc->regs + NFC_REG_CTL); + if (ctrl & NAND_NCE) + tmp |= NFC_CE_CTL; + else + tmp &= ~NFC_CE_CTL; + writel(tmp, nfc->regs + NFC_REG_CTL); + } + + if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) && + !(ctrl & (NAND_CLE | NAND_ALE))) { + u32 cmd = 0; + + if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles) + return; + + if (sunxi_nand->cmd_cycles--) + cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0]; + + if (sunxi_nand->cmd_cycles--) { + cmd |= NFC_SEND_CMD2; + writel(sunxi_nand->cmd[1], + nfc->regs + NFC_REG_RCMD_SET); + } + + sunxi_nand->cmd_cycles = 0; + + if (sunxi_nand->addr_cycles) { + cmd |= NFC_SEND_ADR | + NFC_ADR_NUM(sunxi_nand->addr_cycles); + writel(sunxi_nand->addr[0], + nfc->regs + NFC_REG_ADDR_LOW); + } + + if (sunxi_nand->addr_cycles > 4) + writel(sunxi_nand->addr[1], + nfc->regs + NFC_REG_ADDR_HIGH); + + writel(cmd, nfc->regs + NFC_REG_CMD); + sunxi_nand->addr[0] = 0; + sunxi_nand->addr[1] = 0; + sunxi_nand->addr_cycles = 0; + sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + } + + if (ctrl & NAND_CLE) { + sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat; + } else if (ctrl & NAND_ALE) { + sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |= + dat << ((sunxi_nand->addr_cycles % 4) * 8); + sunxi_nand->addr_cycles++; + } +} + +/* These seed values have been extracted from Allwinner's BSP */ +static const u16 sunxi_nfc_randomizer_page_seeds[] = { + 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72, + 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436, + 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d, + 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130, + 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56, + 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 
0x0c62, 0x05eb, 0x4c55, + 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb, + 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17, + 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62, + 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064, + 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126, + 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e, + 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3, + 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b, + 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d, + 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db, +}; + +/* + * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds + * have been generated using + * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what + * the randomizer engine does internally before de/scrambling OOB data. + * + * Those tables are statically defined to avoid calculating randomizer state + * at runtime. + */ +static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = { + 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64, + 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409, + 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617, + 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d, + 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91, + 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d, + 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab, + 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8, + 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8, + 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b, + 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5, + 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a, + 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891, + 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36, + 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd, + 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0, +}; + +static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = { + 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6, + 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982, + 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9, + 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07, + 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e, + 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2, + 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c, + 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f, + 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc, + 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e, + 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8, + 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68, + 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d, + 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179, + 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601, + 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd, +}; + +static u16 sunxi_nfc_randomizer_step(u16 state, int count) +{ + state &= 0x7fff; + + /* + * This loop is just a simple implementation of a Fibonacci LFSR using + * the x16 + x15 + 1 polynomial. 
+ */ + while (count--) + state = ((state >> 1) | + (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff; + + return state; +} + +static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc) +{ + const u16 *seeds = sunxi_nfc_randomizer_page_seeds; + int mod = mtd->erasesize / mtd->writesize; + + if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds)) + mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds); + + if (ecc) { + if (mtd->ecc_step_size == 512) + seeds = sunxi_nfc_randomizer_ecc512_seeds; + else + seeds = sunxi_nfc_randomizer_ecc1024_seeds; + } + + return seeds[page % mod]; +} + +static void sunxi_nfc_randomizer_config(struct mtd_info *mtd, + int page, bool ecc) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); + u16 state; + + if (!(nand->options & NAND_NEED_SCRAMBLING)) + return; + + ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); + state = sunxi_nfc_randomizer_state(mtd, page, ecc); + ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK; + writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL); +} + +static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + + if (!(nand->options & NAND_NEED_SCRAMBLING)) + return; + + writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN, + nfc->regs + NFC_REG_ECC_CTL); +} + +static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + + if (!(nand->options & NAND_NEED_SCRAMBLING)) + return; + + writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN, + nfc->regs + NFC_REG_ECC_CTL); +} + +static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm) +{ + u16 state = sunxi_nfc_randomizer_state(mtd, page, true); + + bbm[0] ^= state; + bbm[1] ^= sunxi_nfc_randomizer_step(state, 8); +} + +static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd, + const uint8_t *buf, int len, + bool ecc, int page) +{ + sunxi_nfc_randomizer_config(mtd, page, ecc); + sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_write_buf(mtd, buf, len); + sunxi_nfc_randomizer_disable(mtd); +} + +static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf, + int len, bool ecc, int page) +{ + sunxi_nfc_randomizer_config(mtd, page, ecc); + sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_read_buf(mtd, buf, len); + sunxi_nfc_randomizer_disable(mtd); +} + +static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct sunxi_nand_hw_ecc *data = nand->ecc.priv; + u32 ecc_ctl; + + ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); + ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE | + NFC_ECC_BLOCK_SIZE_MSK); + ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION; + + if (nand->ecc.size == 512) + ecc_ctl |= NFC_ECC_BLOCK_512; + + writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); +} + +static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + + writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN, + nfc->regs + NFC_REG_ECC_CTL); +} + +static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf) +{ + buf[0] = user_data; + buf[1] = user_data >> 8; + buf[2] = 
user_data >> 16; + buf[3] = user_data >> 24; +} + +static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, + u8 *data, int data_off, + u8 *oob, int oob_off, + int *cur_off, + unsigned int *max_bitflips, + bool bbm, int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct nand_ecc_ctrl *ecc = &nand->ecc; + int raw_mode = 0; + u32 status; + int ret; + + if (*cur_off != data_off) + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); + + sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page); + + if (data_off + ecc->size != oob_off) + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return ret; + + sunxi_nfc_randomizer_enable(mtd); + writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, + nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + sunxi_nfc_randomizer_disable(mtd); + if (ret) + return ret; + + *cur_off = oob_off + ecc->bytes + 4; + + status = readl(nfc->regs + NFC_REG_ECC_ST); + if (status & NFC_ECC_PAT_FOUND(0)) { + u8 pattern = 0xff; + + if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) + pattern = 0x0; + + memset(data, pattern, ecc->size); + memset(oob, pattern, ecc->bytes + 4); + + return 1; + } + + ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0))); + + memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size); + + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); + sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page); + + if (status & NFC_ECC_ERR(0)) { + /* + * Re-read the data with the randomizer disabled to identify + * bitflips in erased pages. + */ + if (nand->options & NAND_NEED_SCRAMBLING) { + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1); + nand->read_buf(mtd, data, ecc->size); + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1); + nand->read_buf(mtd, oob, ecc->bytes + 4); + } + + ret = nand_check_erased_ecc_chunk(data, ecc->size, + oob, ecc->bytes + 4, + NULL, 0, ecc->strength); + if (ret >= 0) + raw_mode = 1; + } else { + /* + * The engine protects 4 bytes of OOB data per chunk. + * Retrieve the corrected OOB bytes. + */ + sunxi_nfc_user_data_to_buf(readl(nfc->regs + + NFC_REG_USER_DATA(0)), + oob); + + /* De-randomize the Bad Block Marker. 
*/ + if (bbm && nand->options & NAND_NEED_SCRAMBLING) + sunxi_nfc_randomize_bbm(mtd, page, oob); + } + + if (ret < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += ret; + *max_bitflips = max_t(unsigned int, *max_bitflips, ret); + } + + return raw_mode; +} + +static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, + u8 *oob, int *cur_off, + bool randomize, int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &nand->ecc; + int offset = ((ecc->bytes + 4) * ecc->steps); + int len = mtd->oobsize - offset; + + if (len <= 0) + return; + + if (*cur_off != offset) + nand->cmdfunc(mtd, NAND_CMD_RNDOUT, + offset + mtd->writesize, -1); + + if (!randomize) + sunxi_nfc_read_buf(mtd, oob + offset, len); + else + sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len, + false, page); + + *cur_off = mtd->oobsize + mtd->writesize; +} + +static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) +{ + return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); +} + +static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, + const u8 *data, int data_off, + const u8 *oob, int oob_off, + int *cur_off, bool bbm, + int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct nand_ecc_ctrl *ecc = &nand->ecc; + int ret; + + if (data_off != *cur_off) + nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1); + + sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); + + /* Fill OOB data in */ + if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) { + u8 user_data[4]; + + memcpy(user_data, oob, 4); + sunxi_nfc_randomize_bbm(mtd, page, user_data); + writel(sunxi_nfc_buf_to_user_data(user_data), + nfc->regs + NFC_REG_USER_DATA(0)); + } else { + writel(sunxi_nfc_buf_to_user_data(oob), + nfc->regs + NFC_REG_USER_DATA(0)); + } + + if (data_off + ecc->size != oob_off) + nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1); + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return ret; + + sunxi_nfc_randomizer_enable(mtd); + writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | + NFC_ACCESS_DIR | NFC_ECC_OP, + nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0); + sunxi_nfc_randomizer_disable(mtd); + if (ret) + return ret; + + *cur_off = oob_off + ecc->bytes + 4; + + return 0; +} + +static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, + u8 *oob, int *cur_off, + int page) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &nand->ecc; + int offset = ((ecc->bytes + 4) * ecc->steps); + int len = mtd->oobsize - offset; + + if (len <= 0) + return; + + if (*cur_off != offset) + nand->cmdfunc(mtd, NAND_CMD_RNDIN, + offset + mtd->writesize, -1); + + sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); + + *cur_off = mtd->oobsize + mtd->writesize; +} + +static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, + int oob_required, int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + unsigned int max_bitflips = 0; + int ret, i, cur_off = 0; + bool raw_mode = false; + + sunxi_nfc_hw_ecc_enable(mtd); + + for (i = 0; i < ecc->steps; i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + u8 *data = buf + data_off; + u8 *oob = chip->oob_poi + oob_off; + + ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, + oob_off + mtd->writesize, + &cur_off, &max_bitflips, + !i, page); + if (ret < 0) + return ret; + else if (ret) + raw_mode = true; + } + 
+ if (oob_required) + sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, + !raw_mode, page); + + sunxi_nfc_hw_ecc_disable(mtd); + + return max_bitflips; +} + +static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, + struct nand_chip *chip, + uint32_t data_offs, uint32_t readlen, + uint8_t *bufpoi, int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret, i, cur_off = 0; + unsigned int max_bitflips = 0; + + sunxi_nfc_hw_ecc_enable(mtd); + + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + for (i = data_offs / ecc->size; + i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + u8 *data = bufpoi + data_off; + u8 *oob = chip->oob_poi + oob_off; + + ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, + oob, oob_off + mtd->writesize, + &cur_off, &max_bitflips, !i, page); + if (ret < 0) + return ret; + } + + sunxi_nfc_hw_ecc_disable(mtd); + + return max_bitflips; +} + +static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, + struct nand_chip *chip, + const uint8_t *buf, int oob_required, + int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret, i, cur_off = 0; + + sunxi_nfc_hw_ecc_enable(mtd); + + for (i = 0; i < ecc->steps; i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + const u8 *data = buf + data_off; + const u8 *oob = chip->oob_poi + oob_off; + + ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + oob_off + mtd->writesize, + &cur_off, !i, page); + if (ret) + return ret; + } + + if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + &cur_off, page); + + sunxi_nfc_hw_ecc_disable(mtd); + + return 0; +} + +static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd, + struct nand_chip *chip, + u32 data_offs, u32 data_len, + const u8 *buf, int oob_required, + int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret, i, cur_off = 0; + + sunxi_nfc_hw_ecc_enable(mtd); + + for (i = data_offs / ecc->size; + i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { + int data_off = i * ecc->size; + int oob_off = i * (ecc->bytes + 4); + const u8 *data = buf + data_off; + const u8 *oob = chip->oob_poi + oob_off; + + ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + oob_off + mtd->writesize, + &cur_off, !i, page); + if (ret) + return ret; + } + + sunxi_nfc_hw_ecc_disable(mtd); + + return 0; +} + +static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, + struct nand_chip *chip, + uint8_t *buf, int oob_required, + int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + unsigned int max_bitflips = 0; + int ret, i, cur_off = 0; + bool raw_mode = false; + + sunxi_nfc_hw_ecc_enable(mtd); + + for (i = 0; i < ecc->steps; i++) { + int data_off = i * (ecc->size + ecc->bytes + 4); + int oob_off = data_off + ecc->size; + u8 *data = buf + (i * ecc->size); + u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4)); + + ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, + oob_off, &cur_off, + &max_bitflips, !i, page); + if (ret < 0) + return ret; + else if (ret) + raw_mode = true; + } + + if (oob_required) + sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, + !raw_mode, page); + + sunxi_nfc_hw_ecc_disable(mtd); + + return max_bitflips; +} + +static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, + struct nand_chip *chip, + const uint8_t *buf, + int oob_required, int page) +{ + struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret, i, cur_off 
= 0; + + sunxi_nfc_hw_ecc_enable(mtd); + + for (i = 0; i < ecc->steps; i++) { + int data_off = i * (ecc->size + ecc->bytes + 4); + int oob_off = data_off + ecc->size; + const u8 *data = buf + (i * ecc->size); + const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4)); + + ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, + oob, oob_off, &cur_off, + false, page); + if (ret) + return ret; + } + + if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + &cur_off, page); + + sunxi_nfc_hw_ecc_disable(mtd); + + return 0; +} + +static const s32 tWB_lut[] = {6, 12, 16, 20}; +static const s32 tRHW_lut[] = {4, 8, 12, 20}; + +static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, + u32 clk_period) +{ + u32 clk_cycles = DIV_ROUND_UP(duration, clk_period); + int i; + + for (i = 0; i < lut_size; i++) { + if (clk_cycles <= lut[i]) + return i; + } + + /* Doesn't fit */ + return -EINVAL; +} + +#define sunxi_nand_lookup_timing(l, p, c) \ + _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) + +static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip, + const struct nand_sdr_timings *timings) +{ + u32 min_clk_period = 0; + s32 tWB, tADL, tWHR, tRHW, tCAD; + + /* T1 <=> tCLS */ + if (timings->tCLS_min > min_clk_period) + min_clk_period = timings->tCLS_min; + + /* T2 <=> tCLH */ + if (timings->tCLH_min > min_clk_period) + min_clk_period = timings->tCLH_min; + + /* T3 <=> tCS */ + if (timings->tCS_min > min_clk_period) + min_clk_period = timings->tCS_min; + + /* T4 <=> tCH */ + if (timings->tCH_min > min_clk_period) + min_clk_period = timings->tCH_min; + + /* T5 <=> tWP */ + if (timings->tWP_min > min_clk_period) + min_clk_period = timings->tWP_min; + + /* T6 <=> tWH */ + if (timings->tWH_min > min_clk_period) + min_clk_period = timings->tWH_min; + + /* T7 <=> tALS */ + if (timings->tALS_min > min_clk_period) + min_clk_period = timings->tALS_min; + + /* T8 <=> tDS */ + if (timings->tDS_min > min_clk_period) + min_clk_period = timings->tDS_min; + + /* T9 <=> tDH */ + if (timings->tDH_min > min_clk_period) + min_clk_period = timings->tDH_min; + + /* T10 <=> tRR */ + if (timings->tRR_min > (min_clk_period * 3)) + min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3); + + /* T11 <=> tALH */ + if (timings->tALH_min > min_clk_period) + min_clk_period = timings->tALH_min; + + /* T12 <=> tRP */ + if (timings->tRP_min > min_clk_period) + min_clk_period = timings->tRP_min; + + /* T13 <=> tREH */ + if (timings->tREH_min > min_clk_period) + min_clk_period = timings->tREH_min; + + /* T14 <=> tRC */ + if (timings->tRC_min > (min_clk_period * 2)) + min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2); + + /* T15 <=> tWC */ + if (timings->tWC_min > (min_clk_period * 2)) + min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2); + + /* T16 - T19 + tCAD */ + tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max, + min_clk_period); + if (tWB < 0) { + dev_err(nfc->dev, "unsupported tWB\n"); + return tWB; + } + + tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3; + if (tADL > 3) { + dev_err(nfc->dev, "unsupported tADL\n"); + return -EINVAL; + } + + tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3; + if (tWHR > 3) { + dev_err(nfc->dev, "unsupported tWHR\n"); + return -EINVAL; + } + + tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min, + min_clk_period); + if (tRHW < 0) { + dev_err(nfc->dev, "unsupported tRHW\n"); + return tRHW; + } + + /* + * TODO: according to ONFI specs this value only applies for DDR NAND, + * 
but Allwinner seems to set this to 0x7. Mimic them for now. + */ + tCAD = 0x7; + + /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */ + chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); + + /* + * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data + * output cycle timings shall be used if the host drives tRC less than + * 30 ns. + */ + chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0; + + /* Convert min_clk_period from picoseconds to nanoseconds */ + min_clk_period = DIV_ROUND_UP(min_clk_period, 1000); + + /* + * Convert min_clk_period into a clk frequency, then get the + * appropriate rate for the NAND controller IP given this formula + * (specified in the datasheet): + * nand clk_rate = min_clk_rate + */ + chip->clk_rate = 1000000000L / min_clk_period; + + return 0; +} + +static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(&chip->nand); + const struct nand_sdr_timings *timings; + int ret; + int mode; + + mode = onfi_get_async_timing_mode(&chip->nand); + if (mode == ONFI_TIMING_MODE_UNKNOWN) { + mode = chip->nand.onfi_timing_mode_default; + } else { + uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {}; + int i; + + mode = fls(mode) - 1; + if (mode < 0) + mode = 0; + + feature[0] = mode; + for (i = 0; i < chip->nsels; i++) { + chip->nand.select_chip(mtd, i); + ret = chip->nand.onfi_set_features(mtd, + &chip->nand, + ONFI_FEATURE_ADDR_TIMING_MODE, + feature); + chip->nand.select_chip(mtd, -1); + if (ret) + return ret; + } + } + + timings = onfi_async_timing_mode_to_sdr_timings(mode); + if (IS_ERR(timings)) + return PTR_ERR(timings); + + return sunxi_nand_chip_set_timings(chip, timings); +} + +static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 }; + struct sunxi_nand_hw_ecc *data; + struct nand_ecclayout *layout; + int nsectors; + int ret; + int i; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (ecc->size != 512 && ecc->size != 1024) + return -EINVAL; + + /* Prefer 1k ECC chunk over 512 ones */ + if (ecc->size == 512 && mtd->writesize > 512) { + ecc->size = 1024; + ecc->strength *= 2; + } + + /* Add ECC info retrieval from DT */ + for (i = 0; i < ARRAY_SIZE(strengths); i++) { + if (ecc->strength <= strengths[i]) + break; + } + + if (i >= ARRAY_SIZE(strengths)) { + dev_err(nfc->dev, "unsupported strength\n"); + ret = -ENOTSUPP; + goto err; + } + + data->mode = i; + + /* HW ECC always request ECC bytes for 1024 bytes blocks */ + ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8); + + /* HW ECC always work with even numbers of ECC bytes */ + ecc->bytes = ALIGN(ecc->bytes, 2); + + layout = &data->layout; + nsectors = mtd->writesize / ecc->size; + + if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) { + ret = -EINVAL; + goto err; + } + + layout->eccbytes = (ecc->bytes * nsectors); + + ecc->layout = layout; + ecc->priv = data; + + return 0; + +err: + kfree(data); + + return ret; +} + +#ifndef __UBOOT__ +static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc) +{ + kfree(ecc->priv); +} +#endif /* __UBOOT__ */ + +static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + struct nand_ecclayout *layout; + int nsectors; + int i, j; + int ret; + + ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc); + if (ret) + return ret; + + ecc->read_page = sunxi_nfc_hw_ecc_read_page; + 
ecc->write_page = sunxi_nfc_hw_ecc_write_page; + ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; + ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage; + layout = ecc->layout; + nsectors = mtd->writesize / ecc->size; + + for (i = 0; i < nsectors; i++) { + if (i) { + layout->oobfree[i].offset = + layout->oobfree[i - 1].offset + + layout->oobfree[i - 1].length + + ecc->bytes; + layout->oobfree[i].length = 4; + } else { + /* + * The first 2 bytes are used for BB markers, hence we + * only have 2 bytes available in the first user data + * section. + */ + layout->oobfree[i].length = 2; + layout->oobfree[i].offset = 2; + } + + for (j = 0; j < ecc->bytes; j++) + layout->eccpos[(ecc->bytes * i) + j] = + layout->oobfree[i].offset + + layout->oobfree[i].length + j; + } + + if (mtd->oobsize > (ecc->bytes + 4) * nsectors) { + layout->oobfree[nsectors].offset = + layout->oobfree[nsectors - 1].offset + + layout->oobfree[nsectors - 1].length + + ecc->bytes; + layout->oobfree[nsectors].length = mtd->oobsize - + ((ecc->bytes + 4) * nsectors); + } + + return 0; +} + +static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) +{ + struct nand_ecclayout *layout; + int nsectors; + int i; + int ret; + + ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc); + if (ret) + return ret; + + ecc->prepad = 4; + ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page; + ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page; + + layout = ecc->layout; + nsectors = mtd->writesize / ecc->size; + + for (i = 0; i < (ecc->bytes * nsectors); i++) + layout->eccpos[i] = i; + + layout->oobfree[0].length = mtd->oobsize - i; + layout->oobfree[0].offset = i; + + return 0; +} + +#ifndef __UBOOT__ +static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc) +{ + switch (ecc->mode) { + case NAND_ECC_HW: + case NAND_ECC_HW_SYNDROME: + sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc); + break; + case NAND_ECC_NONE: + kfree(ecc->layout); + default: + break; + } +} +#endif /* __UBOOT__ */ + +static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + int ret; + + if (!ecc->size) { + ecc->size = nand->ecc_step_ds; + ecc->strength = nand->ecc_strength_ds; + } + + if (!ecc->size || !ecc->strength) + return -EINVAL; + + switch (ecc->mode) { + case NAND_ECC_SOFT_BCH: + break; + case NAND_ECC_HW: + ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc); + if (ret) + return ret; + break; + case NAND_ECC_HW_SYNDROME: + ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc); + if (ret) + return ret; + break; + case NAND_ECC_NONE: + ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL); + if (!ecc->layout) + return -ENOMEM; + ecc->layout->oobfree[0].length = mtd->oobsize; + case NAND_ECC_SOFT: + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sunxi_nand_chip_init(int node, struct sunxi_nfc *nfc, int devnum) +{ + const struct nand_sdr_timings *timings; + const void *blob = gd->fdt_blob; + struct sunxi_nand_chip *chip; + struct mtd_info *mtd; + struct nand_chip *nand; + int nsels; + int ret; + int i; + u32 cs[8], rb[8]; + + if (!fdt_getprop(blob, node, "reg", &nsels)) + return -EINVAL; + + nsels /= sizeof(u32); + if (!nsels || nsels > 8) { + dev_err(dev, "invalid reg property size\n"); + return -EINVAL; + } + + chip = kzalloc(sizeof(*chip) + + (nsels * sizeof(struct sunxi_nand_chip_sel)), + GFP_KERNEL); + if (!chip) { + dev_err(dev, "could not allocate chip\n"); + return -ENOMEM; + } + + chip->nsels = nsels; + chip->selected = -1; + + 
for (i = 0; i < nsels; i++) { + cs[i] = -1; + rb[i] = -1; + } + + ret = fdtdec_get_int_array(gd->fdt_blob, node, "reg", cs, nsels); + if (ret) { + dev_err(dev, "could not retrieve reg property: %d\n", ret); + return ret; + } + + ret = fdtdec_get_int_array(gd->fdt_blob, node, "allwinner,rb", rb, + nsels); + if (ret) { + dev_err(dev, "could not retrieve reg property: %d\n", ret); + return ret; + } + + for (i = 0; i < nsels; i++) { + int tmp = cs[i]; + + if (tmp > NFC_MAX_CS) { + dev_err(dev, + "invalid reg value: %u (max CS = 7)\n", + tmp); + return -EINVAL; + } + + if (test_and_set_bit(tmp, &nfc->assigned_cs)) { + dev_err(dev, "CS %d already assigned\n", tmp); + return -EINVAL; + } + + chip->sels[i].cs = tmp; + + tmp = rb[i]; + if (tmp >= 0 && tmp < 2) { + chip->sels[i].rb.type = RB_NATIVE; + chip->sels[i].rb.info.nativeid = tmp; + } else { + ret = gpio_request_by_name_nodev(blob, node, + "rb-gpios", i, + &chip->sels[i].rb.info.gpio, + GPIOD_IS_IN); + if (ret) + chip->sels[i].rb.type = RB_GPIO; + else + chip->sels[i].rb.type = RB_NONE; + } + } + + timings = onfi_async_timing_mode_to_sdr_timings(0); + if (IS_ERR(timings)) { + ret = PTR_ERR(timings); + dev_err(dev, + "could not retrieve timings for ONFI mode 0: %d\n", + ret); + return ret; + } + + ret = sunxi_nand_chip_set_timings(chip, timings); + if (ret) { + dev_err(dev, "could not configure chip timings: %d\n", ret); + return ret; + } + + nand = &chip->nand; + /* Default tR value specified in the ONFI spec (chapter 4.15.1) */ + nand->chip_delay = 200; + nand->controller = &nfc->controller; + /* + * Set the ECC mode to the default value in case nothing is specified + * in the DT. + */ + nand->ecc.mode = NAND_ECC_HW; + nand->flash_node = node; + nand->select_chip = sunxi_nfc_select_chip; + nand->cmd_ctrl = sunxi_nfc_cmd_ctrl; + nand->read_buf = sunxi_nfc_read_buf; + nand->write_buf = sunxi_nfc_write_buf; + nand->read_byte = sunxi_nfc_read_byte; + + mtd = nand_to_mtd(nand); + ret = nand_scan_ident(mtd, nsels, NULL); + if (ret) + return ret; + + if (nand->bbt_options & NAND_BBT_USE_FLASH) + nand->bbt_options |= NAND_BBT_NO_OOB; + + if (nand->options & NAND_NEED_SCRAMBLING) + nand->options |= NAND_NO_SUBPAGE_WRITE; + + nand->options |= NAND_SUBPAGE_READ; + + ret = sunxi_nand_chip_init_timings(chip); + if (ret) { + dev_err(dev, "could not configure chip timings: %d\n", ret); + return ret; + } + + ret = sunxi_nand_ecc_init(mtd, &nand->ecc); + if (ret) { + dev_err(dev, "ECC init failed: %d\n", ret); + return ret; + } + + ret = nand_scan_tail(mtd); + if (ret) { + dev_err(dev, "nand_scan_tail failed: %d\n", ret); + return ret; + } + + ret = nand_register(devnum, mtd); + if (ret) { + dev_err(dev, "failed to register mtd device: %d\n", ret); + return ret; + } + + list_add_tail(&chip->node, &nfc->chips); + + return 0; +} + +static int sunxi_nand_chips_init(int node, struct sunxi_nfc *nfc) +{ + const void *blob = gd->fdt_blob; + int nand_node; + int ret, i = 0; + + for (nand_node = fdt_first_subnode(blob, node); nand_node >= 0; + nand_node = fdt_next_subnode(blob, nand_node)) + i++; + + if (i > 8) { + dev_err(dev, "too many NAND chips: %d (max = 8)\n", i); + return -EINVAL; + } + + i = 0; + for (nand_node = fdt_first_subnode(blob, node); nand_node >= 0; + nand_node = fdt_next_subnode(blob, nand_node)) { + ret = sunxi_nand_chip_init(nand_node, nfc, i++); + if (ret) + return ret; + } + + return 0; +} + +#ifndef __UBOOT__ +static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) +{ + struct sunxi_nand_chip *chip; + + while (!list_empty(&nfc->chips)) { 
+ chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip, + node); + nand_release(&chip->mtd); + sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); + kfree(chip); + } +} +#endif /* __UBOOT__ */ + +void sunxi_nand_init(void) +{ + const void *blob = gd->fdt_blob; + struct sunxi_nfc *nfc; + fdt_addr_t regs; + int node; + int ret; + + nfc = kzalloc(sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return; + + spin_lock_init(&nfc->controller.lock); + init_waitqueue_head(&nfc->controller.wq); + INIT_LIST_HEAD(&nfc->chips); + + node = fdtdec_next_compatible(blob, 0, COMPAT_SUNXI_NAND); + if (node < 0) { + pr_err("unable to find nfc node in device tree\n"); + goto err; + } + + if (!fdtdec_get_is_enabled(blob, node)) { + pr_err("nfc disabled in device tree\n"); + goto err; + } + + regs = fdtdec_get_addr(blob, node, "reg"); + if (regs == FDT_ADDR_T_NONE) { + pr_err("unable to find nfc address in device tree\n"); + goto err; + } + + nfc->regs = (void *)regs; + + ret = sunxi_nfc_rst(nfc); + if (ret) + goto err; + + ret = sunxi_nand_chips_init(node, nfc); + if (ret) { + dev_err(dev, "failed to init nand chips\n"); + goto err; + } + + return; + +err: + kfree(nfc); +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Boris BREZILLON"); +MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver"); diff --git a/drivers/mtd/nand/tegra_nand.c b/drivers/mtd/nand/tegra_nand.c index 2032f65812..38bd7a5578 100644 --- a/drivers/mtd/nand/tegra_nand.c +++ b/drivers/mtd/nand/tegra_nand.c @@ -884,7 +884,7 @@ static void setup_timing(unsigned timing[FDT_NAND_TIMING_COUNT], * Decode NAND parameters from the device tree * * @param blob Device tree blob - * @param node Node containing "nand-flash" compatble node + * @param node Node containing "nand-flash" compatible node * @return 0 if ok, -ve on error (FDT_ERR_...) */ static int fdt_decode_nand(const void *blob, int node, struct fdt_nand *config) diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 03deabce10..0e35dc5b88 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -20,6 +20,7 @@ */ #include <common.h> +#include <watchdog.h> #include <linux/compat.h> #include <linux/mtd/mtd.h> #include "linux/mtd/flashchip.h" @@ -467,15 +468,18 @@ static int onenand_read_ecc(struct onenand_chip *this) static int onenand_wait(struct mtd_info *mtd, int state) { struct onenand_chip *this = mtd->priv; - unsigned int flags = ONENAND_INT_MASTER; unsigned int interrupt = 0; unsigned int ctrl; - while (1) { + /* Wait at most 20ms ... */ + u32 timeo = (CONFIG_SYS_HZ * 20) / 1000; + u32 time_start = get_timer(0); + do { + WATCHDOG_RESET(); + if (get_timer(time_start) > timeo) + return -EIO; interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); - if (interrupt & flags) - break; - } + } while ((interrupt & ONENAND_INT_MASTER) == 0); ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); @@ -1154,15 +1158,18 @@ int onenand_read_oob(struct mtd_info *mtd, loff_t from, static int onenand_bbt_wait(struct mtd_info *mtd, int state) { struct onenand_chip *this = mtd->priv; - unsigned int flags = ONENAND_INT_MASTER; unsigned int interrupt; unsigned int ctrl; - while (1) { + /* Wait at most 20ms ... 
*/ + u32 timeo = (CONFIG_SYS_HZ * 20) / 1000; + u32 time_start = get_timer(0); + do { + WATCHDOG_RESET(); + if (get_timer(time_start) > timeo) + return ONENAND_BBT_READ_FATAL_ERROR; interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); - if (interrupt & flags) - break; - } + } while ((interrupt & ONENAND_INT_MASTER) == 0); /* To get correct interrupt status in timeout case */ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); @@ -2536,7 +2543,8 @@ static int onenand_chip_probe(struct mtd_info *mtd) this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM); /* Wait reset */ - this->wait(mtd, FL_RESETING); + if (this->wait(mtd, FL_RESETING)) + return -ENXIO; /* Restore system configuration 1 */ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); @@ -2649,6 +2657,7 @@ int onenand_probe(struct mtd_info *mtd) mtd->_sync = onenand_sync; mtd->_block_isbad = onenand_block_isbad; mtd->_block_markbad = onenand_block_markbad; + mtd->writebufsize = mtd->writesize; return 0; } diff --git a/drivers/mtd/onenand/onenand_spl.c b/drivers/mtd/onenand/onenand_spl.c index fe6b7d923c..1925f41d8a 100644 --- a/drivers/mtd/onenand/onenand_spl.c +++ b/drivers/mtd/onenand/onenand_spl.c @@ -93,6 +93,54 @@ static int onenand_spl_read_page(uint32_t block, uint32_t page, uint32_t *buf, return 0; } +#ifdef CONFIG_SPL_UBI +/* Temporary storage for non page aligned and non page sized reads. */ +static u8 scratch_buf[PAGE_4K]; + +/** + * onenand_spl_read_block - Read data from physical eraseblock into a buffer + * @block: Number of the physical eraseblock + * @offset: Data offset from the start of @peb + * @len: Data size to read + * @dst: Address of the destination buffer + * + * Notes: + * @offset + @len are not allowed to be larger than a physical + * erase block. No sanity check done for simplicity reasons. + */ +int onenand_spl_read_block(int block, int offset, int len, void *dst) +{ + int page, read, psize; + + psize = onenand_spl_get_geometry(); + /* Calculate the page number */ + page = offset / psize; + /* Offset to the start of a flash page */ + offset = offset % psize; + + while (len) { + /* + * Non page aligned reads go to the scratch buffer. + * Page aligned reads go directly to the destination. 
+ */ + if (offset || len < psize) { + onenand_spl_read_page(block, page, + (uint32_t *)scratch_buf, psize); + read = min(len, psize - offset); + memcpy(dst, scratch_buf + offset, read); + offset = 0; + } else { + onenand_spl_read_page(block, page, dst, psize); + read = psize; + } + page++; + len -= read; + dst += read; + } + return 0; +} +#endif + void onenand_spl_load_image(uint32_t offs, uint32_t size, void *dst) { uint32_t *addr = (uint32_t *)dst; diff --git a/drivers/mtd/onenand/onenand_uboot.c b/drivers/mtd/onenand/onenand_uboot.c index ae60c3bb71..c15ec9df07 100644 --- a/drivers/mtd/onenand/onenand_uboot.c +++ b/drivers/mtd/onenand/onenand_uboot.c @@ -24,33 +24,33 @@ static __attribute__((unused)) char dev_name[] = "onenand0"; void onenand_init(void) { + int err = 0; memset(&onenand_mtd, 0, sizeof(struct mtd_info)); memset(&onenand_chip, 0, sizeof(struct onenand_chip)); onenand_mtd.priv = &onenand_chip; #ifdef CONFIG_USE_ONENAND_BOARD_INIT - /* - * It's used for some board init required - */ - onenand_board_init(&onenand_mtd); + /* It's used for some board init required */ + err = onenand_board_init(&onenand_mtd); #else onenand_chip.base = (void *) CONFIG_SYS_ONENAND_BASE; #endif - onenand_scan(&onenand_mtd, 1); + if (!err && !(onenand_scan(&onenand_mtd, 1))) { - if (onenand_chip.device_id & DEVICE_IS_FLEXONENAND) - puts("Flex-"); - puts("OneNAND: "); - print_size(onenand_chip.chipsize, "\n"); + if (onenand_chip.device_id & DEVICE_IS_FLEXONENAND) + puts("Flex-"); + puts("OneNAND: "); #ifdef CONFIG_MTD_DEVICE - /* - * Add MTD device so that we can reference it later - * via the mtdcore infrastructure (e.g. ubi). - */ - onenand_mtd.name = dev_name; - add_mtd_device(&onenand_mtd); + /* + * Add MTD device so that we can reference it later + * via the mtdcore infrastructure (e.g. ubi). + */ + onenand_mtd.name = dev_name; + add_mtd_device(&onenand_mtd); #endif + } + print_size(onenand_chip.chipsize, "\n"); } diff --git a/drivers/mtd/spi/Kconfig b/drivers/mtd/spi/Kconfig index 3f7433cbc2..1f23c8e34e 100644 --- a/drivers/mtd/spi/Kconfig +++ b/drivers/mtd/spi/Kconfig @@ -128,4 +128,16 @@ config SPI_FLASH_MTD If unsure, say N +if SPL + +config SPL_SPI_SUNXI + bool "Support for SPI Flash on Allwinner SoCs in SPL" + depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUN8I_H3 || MACH_SUN50I + ---help--- + Enable support for SPI Flash. This option allows SPL to read from + sunxi SPI Flash. It uses the same method as the boot ROM, so does + not need any extra configuration. 
+
+endif
+
 endmenu # menu "SPI Flash Support"
diff --git a/drivers/mtd/spi/Makefile b/drivers/mtd/spi/Makefile
index c665836f95..6f47a66f40 100644
--- a/drivers/mtd/spi/Makefile
+++ b/drivers/mtd/spi/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_DM_SPI_FLASH) += sf-uclass.o
 ifdef CONFIG_SPL_BUILD
 obj-$(CONFIG_SPL_SPI_LOAD) += spi_spl_load.o
 obj-$(CONFIG_SPL_SPI_BOOT) += fsl_espi_spl.o
+obj-$(CONFIG_SPL_SPI_SUNXI) += sunxi_spi_spl.o
 endif
 obj-$(CONFIG_SPI_FLASH) += sf_probe.o spi_flash.o sf_params.o sf.o
diff --git a/drivers/mtd/spi/sunxi_spi_spl.c b/drivers/mtd/spi/sunxi_spi_spl.c
new file mode 100644
index 0000000000..e3ded5b4e8
--- /dev/null
+++ b/drivers/mtd/spi/sunxi_spi_spl.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2016 Siarhei Siamashka <siarhei.siamashka@gmail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <spl.h>
+#include <asm/gpio.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_SPL_OS_BOOT
+#error CONFIG_SPL_OS_BOOT is not supported yet
+#endif
+
+/*
+ * This is a very simple U-Boot image loading implementation, trying to
+ * replicate what the boot ROM is doing when loading the SPL. Because we
+ * know the exact pins where the SPI Flash is connected and also know
+ * that the Read Data Bytes (03h) command is supported, the hardware
+ * configuration is very simple and we don't need the extra flexibility
+ * of the SPI framework. Moreover, we rely on the default settings of
+ * the SPI controller hardware registers and only adjust what needs to
+ * be changed. This is good for the code size and this implementation
+ * adds less than 400 bytes to the SPL.
+ *
+ * There are two variants of the SPI controller in Allwinner SoCs:
+ * A10/A13/A20 (sun4i variant) and everything else (sun6i variant).
+ * Both of them are supported.
+ *
+ * The pin muxing part is SoC specific and only A10/A13/A20/H3/A64 are
+ * supported at the moment.
+ */ + +/*****************************************************************************/ +/* SUN4I variant of the SPI controller */ +/*****************************************************************************/ + +#define SUN4I_SPI0_CCTL (0x01C05000 + 0x1C) +#define SUN4I_SPI0_CTL (0x01C05000 + 0x08) +#define SUN4I_SPI0_RX (0x01C05000 + 0x00) +#define SUN4I_SPI0_TX (0x01C05000 + 0x04) +#define SUN4I_SPI0_FIFO_STA (0x01C05000 + 0x28) +#define SUN4I_SPI0_BC (0x01C05000 + 0x20) +#define SUN4I_SPI0_TC (0x01C05000 + 0x24) + +#define SUN4I_CTL_ENABLE BIT(0) +#define SUN4I_CTL_MASTER BIT(1) +#define SUN4I_CTL_TF_RST BIT(8) +#define SUN4I_CTL_RF_RST BIT(9) +#define SUN4I_CTL_XCH BIT(10) + +/*****************************************************************************/ +/* SUN6I variant of the SPI controller */ +/*****************************************************************************/ + +#define SUN6I_SPI0_CCTL (0x01C68000 + 0x24) +#define SUN6I_SPI0_GCR (0x01C68000 + 0x04) +#define SUN6I_SPI0_TCR (0x01C68000 + 0x08) +#define SUN6I_SPI0_FIFO_STA (0x01C68000 + 0x1C) +#define SUN6I_SPI0_MBC (0x01C68000 + 0x30) +#define SUN6I_SPI0_MTC (0x01C68000 + 0x34) +#define SUN6I_SPI0_BCC (0x01C68000 + 0x38) +#define SUN6I_SPI0_TXD (0x01C68000 + 0x200) +#define SUN6I_SPI0_RXD (0x01C68000 + 0x300) + +#define SUN6I_CTL_ENABLE BIT(0) +#define SUN6I_CTL_MASTER BIT(1) +#define SUN6I_CTL_SRST BIT(31) +#define SUN6I_TCR_XCH BIT(31) + +/*****************************************************************************/ + +#define CCM_AHB_GATING0 (0x01C20000 + 0x60) +#define CCM_SPI0_CLK (0x01C20000 + 0xA0) +#define SUN6I_BUS_SOFT_RST_REG0 (0x01C20000 + 0x2C0) + +#define AHB_RESET_SPI0_SHIFT 20 +#define AHB_GATE_OFFSET_SPI0 20 + +#define SPI0_CLK_DIV_BY_2 0x1000 +#define SPI0_CLK_DIV_BY_4 0x1001 + +/*****************************************************************************/ + +/* + * Allwinner A10/A20 SoCs were using pins PC0,PC1,PC2,PC23 for booting + * from SPI Flash, everything else is using pins PC0,PC1,PC2,PC3. + */ +static void spi0_pinmux_setup(unsigned int pin_function) +{ + unsigned int pin; + + for (pin = SUNXI_GPC(0); pin <= SUNXI_GPC(2); pin++) + sunxi_gpio_set_cfgpin(pin, pin_function); + + if (IS_ENABLED(CONFIG_MACH_SUN4I) || IS_ENABLED(CONFIG_MACH_SUN7I)) + sunxi_gpio_set_cfgpin(SUNXI_GPC(23), pin_function); + else + sunxi_gpio_set_cfgpin(SUNXI_GPC(3), pin_function); +} + +/* + * Setup 6 MHz from OSC24M (because the BROM is doing the same). + */ +static void spi0_enable_clock(void) +{ + /* Deassert SPI0 reset on SUN6I */ + if (IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I)) + setbits_le32(SUN6I_BUS_SOFT_RST_REG0, + (1 << AHB_RESET_SPI0_SHIFT)); + + /* Open the SPI0 gate */ + setbits_le32(CCM_AHB_GATING0, (1 << AHB_GATE_OFFSET_SPI0)); + + /* Divide by 4 */ + writel(SPI0_CLK_DIV_BY_4, IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I) ? 
+ SUN6I_SPI0_CCTL : SUN4I_SPI0_CCTL);
+ /* 24MHz from OSC24M */
+ writel((1 << 31), CCM_SPI0_CLK);
+
+ if (IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I)) {
+ /* Enable SPI in the master mode and do a soft reset */
+ setbits_le32(SUN6I_SPI0_GCR, SUN6I_CTL_MASTER |
+ SUN6I_CTL_ENABLE |
+ SUN6I_CTL_SRST);
+ /* Wait for completion */
+ while (readl(SUN6I_SPI0_GCR) & SUN6I_CTL_SRST)
+ ;
+ } else {
+ /* Enable SPI in the master mode and reset FIFO */
+ setbits_le32(SUN4I_SPI0_CTL, SUN4I_CTL_MASTER |
+ SUN4I_CTL_ENABLE |
+ SUN4I_CTL_TF_RST |
+ SUN4I_CTL_RF_RST);
+ }
+}
+
+static void spi0_disable_clock(void)
+{
+ /* Disable the SPI0 controller */
+ if (IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I))
+ clrbits_le32(SUN6I_SPI0_GCR, SUN6I_CTL_MASTER |
+ SUN6I_CTL_ENABLE);
+ else
+ clrbits_le32(SUN4I_SPI0_CTL, SUN4I_CTL_MASTER |
+ SUN4I_CTL_ENABLE);
+
+ /* Disable the SPI0 clock */
+ writel(0, CCM_SPI0_CLK);
+
+ /* Close the SPI0 gate */
+ clrbits_le32(CCM_AHB_GATING0, (1 << AHB_GATE_OFFSET_SPI0));
+
+ /* Assert SPI0 reset on SUN6I */
+ if (IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I))
+ clrbits_le32(SUN6I_BUS_SOFT_RST_REG0,
+ (1 << AHB_RESET_SPI0_SHIFT));
+}
+
+static void spi0_init(void)
+{
+ unsigned int pin_function = SUNXI_GPC_SPI0;
+ if (IS_ENABLED(CONFIG_MACH_SUN50I))
+ pin_function = SUN50I_GPC_SPI0;
+
+ spi0_pinmux_setup(pin_function);
+ spi0_enable_clock();
+}
+
+static void spi0_deinit(void)
+{
+ /* New SoCs can disable pins, older could only set them as input */
+ unsigned int pin_function = SUNXI_GPIO_INPUT;
+ if (IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I))
+ pin_function = SUNXI_GPIO_DISABLE;
+
+ spi0_disable_clock();
+ spi0_pinmux_setup(pin_function);
+}
+
+/*****************************************************************************/
+
+#define SPI_READ_MAX_SIZE 60 /* FIFO size, minus 4 bytes of the header */
+
+static void sunxi_spi0_read_data(u8 *buf, u32 addr, u32 bufsize,
+ u32 spi_ctl_reg,
+ u32 spi_ctl_xch_bitmask,
+ u32 spi_fifo_reg,
+ u32 spi_tx_reg,
+ u32 spi_rx_reg,
+ u32 spi_bc_reg,
+ u32 spi_tc_reg,
+ u32 spi_bcc_reg)
+{
+ writel(4 + bufsize, spi_bc_reg); /* Burst counter (total bytes) */
+ writel(4, spi_tc_reg); /* Transfer counter (bytes to send) */
+ if (spi_bcc_reg)
+ writel(4, spi_bcc_reg); /* SUN6I also needs this */
+
+ /* Send the Read Data Bytes (03h) command header */
+ writeb(0x03, spi_tx_reg);
+ writeb((u8)(addr >> 16), spi_tx_reg);
+ writeb((u8)(addr >> 8), spi_tx_reg);
+ writeb((u8)(addr), spi_tx_reg);
+
+ /* Start the data transfer */
+ setbits_le32(spi_ctl_reg, spi_ctl_xch_bitmask);
+
+ /* Wait until everything is received in the RX FIFO */
+ while ((readl(spi_fifo_reg) & 0x7F) < 4 + bufsize)
+ ;
+
+ /* Skip 4 bytes */
+ readl(spi_rx_reg);
+
+ /* Read the data */
+ while (bufsize-- > 0)
+ *buf++ = readb(spi_rx_reg);
+
+ /* tSHSL time is up to 100 ns in various SPI flash datasheets */
+ udelay(1);
+}
+
+static void spi0_read_data(void *buf, u32 addr, u32 len)
+{
+ u8 *buf8 = buf;
+ u32 chunk_len;
+
+ while (len > 0) {
+ chunk_len = len;
+ if (chunk_len > SPI_READ_MAX_SIZE)
+ chunk_len = SPI_READ_MAX_SIZE;
+
+ if (IS_ENABLED(CONFIG_SUNXI_GEN_SUN6I)) {
+ sunxi_spi0_read_data(buf8, addr, chunk_len,
+ SUN6I_SPI0_TCR,
+ SUN6I_TCR_XCH,
+ SUN6I_SPI0_FIFO_STA,
+ SUN6I_SPI0_TXD,
+ SUN6I_SPI0_RXD,
+ SUN6I_SPI0_MBC,
+ SUN6I_SPI0_MTC,
+ SUN6I_SPI0_BCC);
+ } else {
+ sunxi_spi0_read_data(buf8, addr, chunk_len,
+ SUN4I_SPI0_CTL,
+ SUN4I_CTL_XCH,
+ SUN4I_SPI0_FIFO_STA,
+ SUN4I_SPI0_TX,
+ SUN4I_SPI0_RX,
+ SUN4I_SPI0_BC,
+ SUN4I_SPI0_TC,
+ 0);
+ }
+
+ len -= chunk_len;
+ buf8 += chunk_len;
+ addr += chunk_len;
+ }
+}
+
+/*****************************************************************************/
+
+int spl_spi_load_image(void)
+{
+ int err;
+ struct image_header *header;
+ header = (struct image_header *)(CONFIG_SYS_TEXT_BASE);
+
+ spi0_init();
+
+ spi0_read_data((void *)header, CONFIG_SYS_SPI_U_BOOT_OFFS, 0x40);
+ err = spl_parse_image_header(header);
+ if (err)
+ return err;
+
+ spi0_read_data((void *)spl_image.load_addr, CONFIG_SYS_SPI_U_BOOT_OFFS,
+ spl_image.size);
+
+ spi0_deinit();
+ return 0;
+}
diff --git a/drivers/mtd/ubispl/Makefile b/drivers/mtd/ubispl/Makefile
new file mode 100644
index 0000000000..740dbed12a
--- /dev/null
+++ b/drivers/mtd/ubispl/Makefile
@@ -0,0 +1 @@
+obj-y += ubispl.o ../ubi/crc32.o
diff --git a/drivers/mtd/ubispl/ubi-wrapper.h b/drivers/mtd/ubispl/ubi-wrapper.h
new file mode 100644
index 0000000000..72b9dcb76d
--- /dev/null
+++ b/drivers/mtd/ubispl/ubi-wrapper.h
@@ -0,0 +1,106 @@
+/*
+ * The parts taken from the kernel implementation are:
+ *
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * UBISPL specific defines:
+ *
+ * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
+ *
+ * SPDX-License-Identifier: GPL 2.0+ BSD-3-Clause
+ */
+
+/*
+ * Contains various defines copy&pasted from ubi.h and ubi-user.h to make
+ * the upstream fastmap code happy.
+ */
+#ifndef __UBOOT_UBI_WRAPPER_H
+#define __UBOOT_UBI_WRAPPER_H
+
+/*
+ * Error codes returned by the I/O sub-system.
+ *
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ * integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ * data integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * UBI_FASTMAP_ANCHOR: u-boot SPL add-on to tell the caller that the fastmap
+ * anchor block has been found
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which can
+ * be or'ed with other error code. But this is a big change because there are
+ * many callers, so it is not worth the risk of introducing a bug
+ */
+enum {
+ UBI_IO_FF = 1,
+ UBI_IO_FF_BITFLIPS,
+ UBI_IO_BAD_HDR,
+ UBI_IO_BAD_HDR_EBADMSG,
+ UBI_IO_BITFLIPS,
+ UBI_FASTMAP_ANCHOR,
+};
+
+/*
+ * UBI volume type constants.
+ *
+ * @UBI_DYNAMIC_VOLUME: dynamic volume
+ * @UBI_STATIC_VOLUME: static volume
+ */
+enum {
+ UBI_DYNAMIC_VOLUME = 3,
+ UBI_STATIC_VOLUME = 4,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
+/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap + * @to_be_tortured: if non-zero tortured this PEB + * @used_blocks: number of used PEBs + * @max_pool_size: maximal size of the user pool + * @max_wl_pool_size: maximal size of the pool used by the WL sub-system + */ +struct ubi_fastmap_layout { + struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS]; + int to_be_tortured[UBI_FM_MAX_BLOCKS]; + int used_blocks; + int max_pool_size; + int max_wl_pool_size; +}; + +/** + * struct ubi_fm_pool - in-memory fastmap pool + * @pebs: PEBs in this pool + * @used: number of used PEBs + * @size: total number of PEBs in this pool + * @max_size: maximal size of the pool + * + * A pool gets filled with up to max_size. + * If all PEBs within the pool are used a new fastmap will be written + * to the flash and the pool gets refilled with empty PEBs. + * + */ +struct ubi_fm_pool { + int pebs[UBI_FM_MAX_POOL_SIZE]; + int used; + int size; + int max_size; +}; + +#endif diff --git a/drivers/mtd/ubispl/ubispl.c b/drivers/mtd/ubispl/ubispl.c new file mode 100644 index 0000000000..a81a8e715d --- /dev/null +++ b/drivers/mtd/ubispl/ubispl.c @@ -0,0 +1,926 @@ +/* + * Copyright (c) Thomas Gleixner <tglx@linutronix.de> + * + * The parts taken from the kernel implementation are: + * + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier: GPL 2.0+ BSD-3-Clause + */ + +#include <common.h> +#include <errno.h> +#include <ubispl.h> + +#include <linux/crc32.h> + +#include "ubispl.h" + +/** + * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device. + * @ubi: UBI device description object + */ +static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi) +{ + size_t size; + + size = sizeof(struct ubi_fm_sb) + + sizeof(struct ubi_fm_hdr) + + sizeof(struct ubi_fm_scan_pool) + + sizeof(struct ubi_fm_scan_pool) + + (ubi->peb_count * sizeof(struct ubi_fm_ec)) + + (sizeof(struct ubi_fm_eba) + + (ubi->peb_count * sizeof(__be32))) + + sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; + return roundup(size, ubi->leb_size); +} + +static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum, + unsigned long from, unsigned long len) +{ + return ubi->read(pnum + ubi->peb_offset, from, len, buf); +} + +static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb) +{ + return peb >= ubi->peb_count || peb < 0; +} + +static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum, + struct ubi_vid_hdr *vh, int unused) +{ + u32 magic; + int res; + + /* No point in rescanning a corrupt block */ + if (test_bit(pnum, ubi->corrupt)) + return UBI_IO_BAD_HDR; + /* + * If the block has been scanned already, no need to rescan + */ + if (test_and_set_bit(pnum, ubi->scanned)) + return 0; + + res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh)); + + /* + * Bad block, unrecoverable ECC error, skip the block + */ + if (res) { + ubi_dbg("Skipping bad or unreadable block %d", pnum); + vh->magic = 0; + generic_set_bit(pnum, ubi->corrupt); + return res; + } + + /* Magic number available ? */ + magic = be32_to_cpu(vh->magic); + if (magic != UBI_VID_HDR_MAGIC) { + generic_set_bit(pnum, ubi->corrupt); + if (magic == 0xffffffff) + return UBI_IO_FF; + ubi_msg("Bad magic in block 0%d %08x", pnum, magic); + return UBI_IO_BAD_HDR; + } + + /* Header CRC correct ? 
*/ + if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) != + be32_to_cpu(vh->hdr_crc)) { + ubi_msg("Bad CRC in block 0%d", pnum); + generic_set_bit(pnum, ubi->corrupt); + return UBI_IO_BAD_HDR; + } + + ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum)); + + return 0; +} + +static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi, + struct ubi_vid_hdr *vh, + u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum) +{ + int res; + + if (ubi_io_is_bad(ubi, fm_pnum)) + return -EINVAL; + + res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0); + if (!res) { + /* Check volume id, volume type and lnum */ + if (be32_to_cpu(vh->vol_id) == fm_vol_id && + vh->vol_type == UBI_VID_STATIC && + be32_to_cpu(vh->lnum) == fm_lnum) + return 0; + ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u", + fm_pnum, fm_vol_id, vh->vol_type, + be32_to_cpu(vh->vol_id), + fm_lnum, be32_to_cpu(vh->lnum)); + } + return res; +} + +/* Insert the logic block into the volume info */ +static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi, + struct ubi_vid_hdr *vh, u32 vol_id, + u32 pnum, u32 lnum) +{ + struct ubi_vol_info *vi = ubi->volinfo + vol_id; + u32 *ltp; + + /* + * If the volume is larger than expected, yell and give up :( + */ + if (lnum >= UBI_MAX_VOL_LEBS) { + ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS); + return -EINVAL; + } + + ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d", + pnum, vol_id, lnum, !!test_bit(lnum, vi->found), + !!test_bit(pnum, ubi->scanned)); + + /* Points to the translation entry */ + ltp = vi->lebs_to_pebs + lnum; + + /* If the block is already assigned, check sqnum */ + if (__test_and_set_bit(lnum, vi->found)) { + u32 cur_pnum = *ltp; + struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum; + + /* + * If the current block hase not yet been scanned, we + * need to do that. The other block might be stale or + * the current block corrupted and the FM not yet + * updated. + */ + if (!test_bit(cur_pnum, ubi->scanned)) { + /* + * If the scan fails, we use the valid block + */ + if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id, + lnum)) { + *ltp = pnum; + return 0; + } + } + + /* + * Should not happen .... + */ + if (test_bit(cur_pnum, ubi->corrupt)) { + *ltp = pnum; + return 0; + } + + ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu", + vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum, + be64_to_cpu(vh->sqnum)); + + /* + * Compare sqnum and take the newer one + */ + if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum)) + *ltp = pnum; + } else { + *ltp = pnum; + if (lnum > vi->last_block) + vi->last_block = lnum; + } + + return 0; +} + +static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh, + u32 pnum) +{ + u32 vol_id, lnum; + int res; + + if (ubi_io_is_bad(ubi, pnum)) + return -EINVAL; + + res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); + if (res) + return res; + + /* Get volume id */ + vol_id = be32_to_cpu(vh->vol_id); + + /* If this is the fastmap anchor, return right away */ + if (vol_id == UBI_FM_SB_VOLUME_ID) + return ubi->fm_enabled ? 
UBI_FASTMAP_ANCHOR : 0; + + /* We only care about static volumes with an id < UBI_SPL_VOL_IDS */ + if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC) + return 0; + + /* We are only interested in the volumes to load */ + if (!test_bit(vol_id, ubi->toload)) + return 0; + + lnum = be32_to_cpu(vh->lnum); + return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum); +} + +static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum, + u32 vol_id, u32 vol_type, u32 used) +{ + struct ubi_vid_hdr *vh; + + if (ubi_io_is_bad(ubi, pnum)) + return -EINVAL; + + ubi->fastmap_pebs++; + + if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME) + return 0; + + /* We are only interested in the volumes to load */ + if (!test_bit(vol_id, ubi->toload)) + return 0; + + vh = ubi->blockinfo + pnum; + + return ubi_scan_vid_hdr(ubi, vh, pnum); +} + +static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size) +{ + struct ubi_vid_hdr *vh; + u32 pnum; + int i; + + ubi_dbg("Scanning pool size: %d", pool_size); + + for (i = 0; i < pool_size; i++) { + pnum = be32_to_cpu(pebs[i]); + + if (ubi_io_is_bad(ubi, pnum)) { + ubi_err("FM: Bad PEB in fastmap pool! %u", pnum); + return UBI_BAD_FASTMAP; + } + + vh = ubi->blockinfo + pnum; + /* + * We allow the scan to fail here. The loader will notice + * and look for a replacement. + */ + ubi_scan_vid_hdr(ubi, vh, pnum); + } + return 0; +} + +/* + * Fastmap code is stolen from Linux kernel and this stub structure is used + * to make it happy. + */ +struct ubi_attach_info { + int i; +}; + +static int ubi_attach_fastmap(struct ubi_scan_info *ubi, + struct ubi_attach_info *ai, + struct ubi_fastmap_layout *fm) +{ + struct ubi_fm_hdr *fmhdr; + struct ubi_fm_scan_pool *fmpl1, *fmpl2; + struct ubi_fm_ec *fmec; + struct ubi_fm_volhdr *fmvhdr; + struct ubi_fm_eba *fm_eba; + int ret, i, j, pool_size, wl_pool_size; + size_t fm_pos = 0, fm_size = ubi->fm_size; + void *fm_raw = ubi->fm_buf; + + memset(ubi->fm_used, 0, sizeof(ubi->fm_used)); + + fm_pos += sizeof(struct ubi_fm_sb); + if (fm_pos >= fm_size) + goto fail_bad; + + fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmhdr); + if (fm_pos >= fm_size) + goto fail_bad; + + if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { + ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x", + be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); + goto fail_bad; + } + + fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmpl1); + if (fm_pos >= fm_size) + goto fail_bad; + if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { + ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", + be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); + goto fail_bad; + } + + fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmpl2); + if (fm_pos >= fm_size) + goto fail_bad; + if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { + ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", + be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); + goto fail_bad; + } + + pool_size = be16_to_cpu(fmpl1->size); + wl_pool_size = be16_to_cpu(fmpl2->size); + fm->max_pool_size = be16_to_cpu(fmpl1->max_size); + fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); + + if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { + ubi_err("bad pool size: %i", pool_size); + goto fail_bad; + } + + if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { + ubi_err("bad WL pool size: %i", wl_pool_size); + goto fail_bad; + } + + if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || + 
fm->max_pool_size < 0) { + ubi_err("bad maximal pool size: %i", fm->max_pool_size); + goto fail_bad; + } + + if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || + fm->max_wl_pool_size < 0) { + ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); + goto fail_bad; + } + + /* read EC values from free list */ + for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) { + fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmec); + if (fm_pos >= fm_size) + goto fail_bad; + } + + /* read EC values from used list */ + for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) { + fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmec); + if (fm_pos >= fm_size) + goto fail_bad; + + generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used); + } + + /* read EC values from scrub list */ + for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) { + fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmec); + if (fm_pos >= fm_size) + goto fail_bad; + } + + /* read EC values from erase list */ + for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) { + fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmec); + if (fm_pos >= fm_size) + goto fail_bad; + } + + /* Iterate over all volumes and read their EBA table */ + for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) { + u32 vol_id, vol_type, used, reserved; + + fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); + fm_pos += sizeof(*fmvhdr); + if (fm_pos >= fm_size) + goto fail_bad; + + if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { + ubi_err("bad fastmap vol header magic: 0x%x, " \ + "expected: 0x%x", + be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); + goto fail_bad; + } + + vol_id = be32_to_cpu(fmvhdr->vol_id); + vol_type = fmvhdr->vol_type; + used = be32_to_cpu(fmvhdr->used_ebs); + + fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos); + fm_pos += sizeof(*fm_eba); + fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs)); + if (fm_pos >= fm_size) + goto fail_bad; + + if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { + ubi_err("bad fastmap EBA header magic: 0x%x, " \ + "expected: 0x%x", + be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); + goto fail_bad; + } + + reserved = be32_to_cpu(fm_eba->reserved_pebs); + ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved); + for (j = 0; j < reserved; j++) { + int pnum = be32_to_cpu(fm_eba->pnum[j]); + + if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0) + continue; + + if (!__test_and_clear_bit(pnum, ubi->fm_used)) + continue; + + /* + * We only handle static volumes so used_ebs + * needs to be handed in. And we do not assign + * the reserved blocks + */ + if (j >= used) + continue; + + ret = assign_aeb_to_av(ubi, pnum, j, vol_id, + vol_type, used); + if (!ret) + continue; + + /* + * Nasty: The fastmap claims that the volume + * has one block more than it, but that block + * is always empty and the other blocks have + * the correct number of total LEBs in the + * headers. Deal with it. + */ + if (ret != UBI_IO_FF && j != used - 1) + goto fail_bad; + ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d", + vol_id, j, used); + } + } + + ret = scan_pool(ubi, fmpl1->pebs, pool_size); + if (ret) + goto fail; + + ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size); + if (ret) + goto fail; + +#ifdef CHECKME + /* + * If fastmap is leaking PEBs (must not happen), raise a + * fat warning and fall back to scanning mode. + * We do this here because in ubi_wl_init() it's too late + * and we cannot fall back to scanning. 
+ */ + if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count - + ai->bad_peb_count - fm->used_blocks)) + goto fail_bad; +#endif + + return 0; + +fail_bad: + ret = UBI_BAD_FASTMAP; +fail: + return ret; +} + +static int ubi_scan_fastmap(struct ubi_scan_info *ubi, + struct ubi_attach_info *ai, + int fm_anchor) +{ + struct ubi_fm_sb *fmsb, *fmsb2; + struct ubi_vid_hdr *vh; + struct ubi_fastmap_layout *fm; + int i, used_blocks, pnum, ret = 0; + size_t fm_size; + __be32 crc, tmp_crc; + unsigned long long sqnum = 0; + + fmsb = &ubi->fm_sb; + fm = &ubi->fm_layout; + + ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); + if (ret && ret != UBI_IO_BITFLIPS) + goto free_fm_sb; + else if (ret == UBI_IO_BITFLIPS) + fm->to_be_tortured[0] = 1; + + if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { + ubi_err("bad super block magic: 0x%x, expected: 0x%x", + be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); + ret = UBI_BAD_FASTMAP; + goto free_fm_sb; + } + + if (fmsb->version != UBI_FM_FMT_VERSION) { + ubi_err("bad fastmap version: %i, expected: %i", + fmsb->version, UBI_FM_FMT_VERSION); + ret = UBI_BAD_FASTMAP; + goto free_fm_sb; + } + + used_blocks = be32_to_cpu(fmsb->used_blocks); + if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { + ubi_err("number of fastmap blocks is invalid: %i", used_blocks); + ret = UBI_BAD_FASTMAP; + goto free_fm_sb; + } + + fm_size = ubi->leb_size * used_blocks; + if (fm_size != ubi->fm_size) { + ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, + ubi->fm_size); + ret = UBI_BAD_FASTMAP; + goto free_fm_sb; + } + + vh = &ubi->fm_vh; + + for (i = 0; i < used_blocks; i++) { + pnum = be32_to_cpu(fmsb->block_loc[i]); + + if (ubi_io_is_bad(ubi, pnum)) { + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } + +#ifdef LATER + int image_seq; + ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); + if (ret && ret != UBI_IO_BITFLIPS) { + ubi_err("unable to read fastmap block# %i EC (PEB: %i)", + i, pnum); + if (ret > 0) + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } else if (ret == UBI_IO_BITFLIPS) + fm->to_be_tortured[i] = 1; + + image_seq = be32_to_cpu(ech->image_seq); + if (!ubi->image_seq) + ubi->image_seq = image_seq; + /* + * Older UBI implementations have image_seq set to zero, so + * we shouldn't fail if image_seq == 0. + */ + if (image_seq && (image_seq != ubi->image_seq)) { + ubi_err("wrong image seq:%d instead of %d", + be32_to_cpu(ech->image_seq), ubi->image_seq); + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } +#endif + ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); + if (ret && ret != UBI_IO_BITFLIPS) { + ubi_err("unable to read fastmap block# %i (PEB: %i)", + i, pnum); + goto free_hdr; + } + + /* + * Mainline code rescans the anchor header. We've done + * that already so we merily copy it over. 
+ */ + if (pnum == fm_anchor) + memcpy(vh, ubi->blockinfo + pnum, sizeof(*fm)); + + if (i == 0) { + if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { + ubi_err("bad fastmap anchor vol_id: 0x%x," \ + " expected: 0x%x", + be32_to_cpu(vh->vol_id), + UBI_FM_SB_VOLUME_ID); + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } + } else { + if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { + ubi_err("bad fastmap data vol_id: 0x%x," \ + " expected: 0x%x", + be32_to_cpu(vh->vol_id), + UBI_FM_DATA_VOLUME_ID); + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } + } + + if (sqnum < be64_to_cpu(vh->sqnum)) + sqnum = be64_to_cpu(vh->sqnum); + + ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, + ubi->leb_start, ubi->leb_size); + if (ret && ret != UBI_IO_BITFLIPS) { + ubi_err("unable to read fastmap block# %i (PEB: %i, " \ + "err: %i)", i, pnum, ret); + goto free_hdr; + } + } + + fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf); + tmp_crc = be32_to_cpu(fmsb2->data_crc); + fmsb2->data_crc = 0; + crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); + if (crc != tmp_crc) { + ubi_err("fastmap data CRC is invalid"); + ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } + + fmsb2->sqnum = sqnum; + + fm->used_blocks = used_blocks; + + ret = ubi_attach_fastmap(ubi, ai, fm); + if (ret) { + if (ret > 0) + ret = UBI_BAD_FASTMAP; + goto free_hdr; + } + + ubi->fm = fm; + ubi->fm_pool.max_size = ubi->fm->max_pool_size; + ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; + ubi_msg("attached by fastmap %uMB %u blocks", + ubi->fsize_mb, ubi->peb_count); + ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size); + ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); + +out: + if (ret) + ubi_err("Attach by fastmap failed, doing a full scan!"); + return ret; + +free_hdr: +free_fm_sb: + goto out; +} + +/* + * Scan the flash and attempt to attach via fastmap + */ +static void ipl_scan(struct ubi_scan_info *ubi) +{ + unsigned int pnum; + int res; + + /* + * Scan first for the fastmap super block + */ + for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { + res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum); + /* + * We ignore errors here as we are meriliy scanning + * the headers. + */ + if (res != UBI_FASTMAP_ANCHOR) + continue; + + /* + * If fastmap is disabled, continue scanning. This + * might happen because the previous attempt failed or + * the caller disabled it right away. + */ + if (!ubi->fm_enabled) + continue; + + /* + * Try to attach the fastmap, if that fails continue + * scanning. + */ + if (!ubi_scan_fastmap(ubi, NULL, pnum)) + return; + /* + * Fastmap failed. Clear everything we have and start + * over. We are paranoid and do not trust anything. + */ + memset(ubi->volinfo, 0, sizeof(ubi->volinfo)); + pnum = 0; + break; + } + + /* + * Continue scanning, ignore errors, we might find what we are + * looking for, + */ + for (; pnum < ubi->peb_count; pnum++) + ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum); +} + +/* + * Load a logical block of a volume into memory + */ +static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr, + struct ubi_vol_info *vi, u32 vol_id, u32 lnum, + u32 last) +{ + struct ubi_vid_hdr *vh, *vrepl; + u32 pnum, crc, dlen; + +retry: + /* + * If this is a fastmap run, we try to rescan full, otherwise + * we simply give up. 
+ */ + if (!test_bit(lnum, vi->found)) { + ubi_warn("LEB %d of %d is missing", lnum, last); + return -EINVAL; + } + + pnum = vi->lebs_to_pebs[lnum]; + + ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum); + + if (ubi_io_is_bad(ubi, pnum)) { + ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum); + return -EINVAL; + } + + if (test_bit(pnum, ubi->corrupt)) + goto find_other; + + /* + * Lets try to read that block + */ + vh = ubi->blockinfo + pnum; + + if (!test_bit(pnum, ubi->scanned)) { + ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id, + lnum, pnum); + if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum)) + goto find_other; + } + + /* + * Check, if the total number of blocks is correct + */ + if (be32_to_cpu(vh->used_ebs) != last) { + ubi_dbg("Block count missmatch."); + ubi_dbg("vh->used_ebs: %d nrblocks: %d", + be32_to_cpu(vh->used_ebs), last); + generic_set_bit(pnum, ubi->corrupt); + goto find_other; + } + + /* + * Get the data length of this block. + */ + dlen = be32_to_cpu(vh->data_size); + + /* + * Read the data into RAM. We ignore the return value + * here as the only thing which might go wrong are + * bitflips. Try nevertheless. + */ + ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen); + + /* Calculate CRC over the data */ + crc = crc32(UBI_CRC32_INIT, laddr, dlen); + + if (crc != be32_to_cpu(vh->data_crc)) { + ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id, + lnum, pnum); + generic_set_bit(pnum, ubi->corrupt); + goto find_other; + } + + /* We are good. Return the data length we read */ + return dlen; + +find_other: + ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum); + generic_clear_bit(lnum, vi->found); + vrepl = NULL; + + for (pnum = 0; pnum < ubi->peb_count; pnum++) { + struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum; + u32 t_vol_id = be32_to_cpu(tmp->vol_id); + u32 t_lnum = be32_to_cpu(tmp->lnum); + + if (test_bit(pnum, ubi->corrupt)) + continue; + + if (t_vol_id != vol_id || t_lnum != lnum) + continue; + + if (!test_bit(pnum, ubi->scanned)) { + ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", + vol_id, lnum, pnum); + if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum)) + continue; + } + + /* + * We found one. If its the first, assign it otherwise + * compare the sqnum + */ + generic_set_bit(lnum, vi->found); + + if (!vrepl) { + vrepl = tmp; + continue; + } + + if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum)) + vrepl = tmp; + } + + if (vrepl) { + /* Update the vi table */ + pnum = vrepl - ubi->blockinfo; + vi->lebs_to_pebs[lnum] = pnum; + ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum); + vh = vrepl; + } + goto retry; +} + +/* + * Load a volume into RAM + */ +static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr) +{ + struct ubi_vol_info *vi; + u32 lnum, last, len; + + if (vol_id >= UBI_SPL_VOL_IDS) + return -EINVAL; + + len = 0; + vi = ubi->volinfo + vol_id; + last = vi->last_block + 1; + + /* Read the blocks to RAM, check CRC */ + for (lnum = 0 ; lnum < last; lnum++) { + int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last); + + if (res < 0) { + ubi_warn("Failed to load volume %u", vol_id); + return res; + } + /* res is the data length of the read block */ + laddr += res; + len += res; + } + return len; +} + +int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols, + int nrvols) +{ + struct ubi_scan_info *ubi = info->ubi; + int res, i, fastmap = info->fastmap; + u32 fsize; + +retry: + /* + * We do a partial initializiation of @ubi. 
Cleaning fm_buf is + * not necessary. + */ + memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf)); + + ubi->read = info->read; + + /* Precalculate the offsets */ + ubi->vid_offset = info->vid_offset; + ubi->leb_start = info->leb_start; + ubi->leb_size = info->peb_size - ubi->leb_start; + ubi->peb_count = info->peb_count; + ubi->peb_offset = info->peb_offset; + + fsize = info->peb_size * info->peb_count; + ubi->fsize_mb = fsize >> 20; + + /* Fastmap init */ + ubi->fm_size = ubi_calc_fm_size(ubi); + ubi->fm_enabled = fastmap; + + for (i = 0; i < nrvols; i++) { + struct ubispl_load *lv = lvols + i; + + generic_set_bit(lv->vol_id, ubi->toload); + } + + ipl_scan(ubi); + + for (i = 0; i < nrvols; i++) { + struct ubispl_load *lv = lvols + i; + + ubi_msg("Loading VolId #%d", lv->vol_id); + res = ipl_load(ubi, lv->vol_id, lv->load_addr); + if (res < 0) { + if (fastmap) { + fastmap = 0; + goto retry; + } + ubi_warn("Failed"); + return res; + } + } + return 0; +} diff --git a/drivers/mtd/ubispl/ubispl.h b/drivers/mtd/ubispl/ubispl.h new file mode 100644 index 0000000000..9227881f63 --- /dev/null +++ b/drivers/mtd/ubispl/ubispl.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) Thomas Gleixner <tglx@linutronix.de> + * + * SPDX-License-Identifier: GPL 2.0+ BSD-3-Clause + */ + +#ifndef _UBOOT_MTD_UBISPL_H +#define _UBOOT_MTD_UBISPL_H + +#include "../ubi/ubi-media.h" +#include "ubi-wrapper.h" + +/* + * The maximum number of volume ids we scan. So you can load volume id + * 0 to (CONFIG_SPL_UBI_VOL_ID_MAX - 1) + */ +#define UBI_SPL_VOL_IDS CONFIG_SPL_UBI_VOL_IDS +/* + * The size of the read buffer for the fastmap blocks. In theory up to + * UBI_FM_MAX_BLOCKS * CONFIG_SPL_MAX_PEB_SIZE. In practice today + * one or two blocks. + */ +#define UBI_FM_BUF_SIZE (UBI_FM_MAX_BLOCKS*CONFIG_SPL_UBI_MAX_PEB_SIZE) +/* + * The size of the bitmaps for the attach/ scan + */ +#define UBI_FM_BM_SIZE ((CONFIG_SPL_UBI_MAX_PEBS / BITS_PER_LONG) + 1) +/* + * The maximum number of logical erase blocks per loadable volume + */ +#define UBI_MAX_VOL_LEBS CONFIG_SPL_UBI_MAX_VOL_LEBS +/* + * The bitmap size for the above to denote the found blocks inside the volume + */ +#define UBI_VOL_BM_SIZE ((UBI_MAX_VOL_LEBS / BITS_PER_LONG) + 1) + +/** + * struct ubi_vol_info - UBISPL internal volume represenation + * @last_block: The last block (highest LEB) found for this volume + * @found: Bitmap to mark found LEBS + * @lebs_to_pebs: LEB to PEB translation table + */ +struct ubi_vol_info { + u32 last_block; + unsigned long found[UBI_VOL_BM_SIZE]; + u32 lebs_to_pebs[UBI_MAX_VOL_LEBS]; +}; + +/** + * struct ubi_scan_info - UBISPL internal data for FM attach and full scan + * + * @read: Read function to access the flash provided by the caller + * @peb_count: Number of physical erase blocks in the UBI FLASH area + * aka MTD partition. + * @peb_offset: Offset of PEB0 in the UBI FLASH area (aka MTD partition) + * to the real start of the FLASH in erase blocks. 
+ * @fsize_mb: Size of the scanned FLASH area in MB (stats only)
+ * @vid_offset: Offset from the start of a PEB to the VID header
+ * @leb_start: Offset from the start of a PEB to the data area
+ * @leb_size: Size of the data area
+ *
+ * @fastmap_pebs: Counter of PEBs "attached" by fastmap
+ * @fastmap_anchor: The anchor PEB of the fastmap
+ * @fm_sb: The fastmap super block data
+ * @fm_vh: The fastmap VID header
+ * @fm: Pointer to the fastmap layout
+ * @fm_layout: The fastmap layout itself
+ * @fm_pool: The pool of PEBs to scan at fastmap attach time
+ * @fm_wl_pool: The pool of PEBs scheduled for wear leveling
+ *
+ * @fm_enabled: Indicator whether fastmap attachment is enabled.
+ * @fm_used: Bitmap to indicate the PEBs covered by fastmap
+ * @scanned: Bitmap to indicate the PEBs of which the VID header
+ * has been physically scanned.
+ * @corrupt: Bitmap to indicate corrupt blocks
+ * @toload: Bitmap to indicate the volumes which should be loaded
+ *
+ * @blockinfo: The VID headers of the scanned blocks
+ * @volinfo: The volume information of the interesting (toload)
+ * volumes
+ *
+ * @fm_buf: The large fastmap attach buffer
+ */
+struct ubi_scan_info {
+ ubispl_read_flash read;
+ unsigned int fsize_mb;
+ unsigned int peb_count;
+ unsigned int peb_offset;
+
+ unsigned long vid_offset;
+ unsigned long leb_start;
+ unsigned long leb_size;
+
+ /* Fastmap: The upstream required fields */
+ int fastmap_pebs;
+ int fastmap_anchor;
+ size_t fm_size;
+ struct ubi_fm_sb fm_sb;
+ struct ubi_vid_hdr fm_vh;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fastmap_layout fm_layout;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+
+ /* Fastmap: UBISPL specific data */
+ int fm_enabled;
+ unsigned long fm_used[UBI_FM_BM_SIZE];
+ unsigned long scanned[UBI_FM_BM_SIZE];
+ unsigned long corrupt[UBI_FM_BM_SIZE];
+ unsigned long toload[UBI_FM_BM_SIZE];
+
+ /* Data for storing the VID and volume information */
+ struct ubi_vol_info volinfo[UBI_SPL_VOL_IDS];
+ struct ubi_vid_hdr blockinfo[CONFIG_SPL_UBI_MAX_PEBS];
+
+ /* The large buffer for the fastmap */
+ uint8_t fm_buf[UBI_FM_BUF_SIZE];
+};
+
+#ifdef CFG_DEBUG
+#define ubi_dbg(fmt, ...) printf("UBI: debug:" fmt "\n", ##__VA_ARGS__)
+#else
+#define ubi_dbg(fmt, ...)
+#endif
+
+#ifdef CONFIG_UBI_SILENCE_MSG
+#define ubi_msg(fmt, ...)
+#else
+#define ubi_msg(fmt, ...) printf("UBI: " fmt "\n", ##__VA_ARGS__)
+#endif
+/* UBI warning messages */
+#define ubi_warn(fmt, ...) printf("UBI warning: " fmt "\n", ##__VA_ARGS__)
+/* UBI error messages */
+#define ubi_err(fmt, ...) printf("UBI error: " fmt "\n", ##__VA_ARGS__)
+
+#endif
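
Usage note (not part of the commit above): the UBISPL loader introduced here is a library with no caller inside drivers/mtd; a board's SPL code is expected to fill a struct ubispl_info and call ubispl_load_volumes(). The sketch below is illustrative only. The field names follow how ubispl_load_volumes() consumes them in ubispl.c, and the read callback reuses onenand_spl_read_block() added by this same patch; CONFIG_SPL_UBI_INFO_ADDR, the geometry numbers, the volume ids and the exact ubispl_read_flash prototype (declared in include/ubispl.h, outside this diffstat) are assumptions for illustration.

/* Hypothetical SPL glue code, for illustration only */
#include <common.h>
#include <ubispl.h>

/* Assumed ubispl_read_flash shape: read 'len' bytes at 'offset' in PEB 'pnum' */
static int spl_ubi_read(int pnum, unsigned long offset, unsigned long len,
			void *dst)
{
	/* onenand_spl_read_block() is added by this patch (onenand_spl.c) */
	return onenand_spl_read_block(pnum, offset, len, dst);
}

int spl_ubi_load(void *kernel_dst, void *dtb_dst)
{
	struct ubispl_info info;
	struct ubispl_load volumes[2];

	/* Scratch RAM large enough for struct ubi_scan_info (placeholder address) */
	info.ubi = (struct ubi_scan_info *)CONFIG_SPL_UBI_INFO_ADDR;
	info.fastmap = 1;		/* try fastmap, fall back to a full scan */
	info.read = spl_ubi_read;	/* raw flash read callback */
	info.peb_size = CONFIG_SPL_UBI_MAX_PEB_SIZE;
	info.peb_count = CONFIG_SPL_UBI_MAX_PEBS;
	info.peb_offset = 0;		/* UBI starts at block 0 of the partition */
	info.vid_offset = 512;		/* device specific VID header offset */
	info.leb_start = 2048;		/* device specific data offset inside a PEB */

	volumes[0].vol_id = 0;		/* e.g. the kernel volume */
	volumes[0].load_addr = kernel_dst;
	volumes[1].vol_id = 1;		/* e.g. the device tree volume */
	volumes[1].load_addr = dtb_dst;

	return ubispl_load_volumes(&info, volumes, 2);
}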