Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clk/mvebu/armada-37xx-periph.c | 130
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 447
-rw-r--r--  drivers/phy/marvell/comphy_a3700.h | 2
-rw-r--r--  drivers/phy/marvell/comphy_core.c | 12
-rw-r--r--  drivers/phy/marvell/comphy_core.h (renamed from drivers/phy/marvell/comphy.h) | 16
-rw-r--r--  drivers/phy/marvell/comphy_cp110.c | 21
-rw-r--r--  drivers/phy/marvell/comphy_hpipe.h | 6
-rw-r--r--  drivers/phy/marvell/comphy_mux.c | 2
8 files changed, 477 insertions, 159 deletions
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 902a6cc9ef..b1a35968e1 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -224,11 +224,21 @@ static const struct clk_periph clks_sb[] = {
 	{ },
 };
 
-static inline int get_mux(struct a37xx_periphclk *priv, int shift)
+static int get_mux(struct a37xx_periphclk *priv, int shift)
 {
 	return (readl(priv->reg + TBG_SEL) >> shift) & 3;
 }
 
+static void set_mux(struct a37xx_periphclk *priv, int shift, int val)
+{
+	u32 reg;
+
+	reg = readl(priv->reg + TBG_SEL);
+	reg &= ~(3 << shift);
+	reg |= (val & 3) << shift;
+	writel(reg, priv->reg + TBG_SEL);
+}
+
 static ulong periph_clk_get_rate(struct a37xx_periphclk *priv, int id);
 
 static ulong get_parent_rate(struct a37xx_periphclk *priv, int id)
@@ -277,6 +287,17 @@ static ulong get_div(struct a37xx_periphclk *priv,
 	return 0;
 }
 
+static void set_div_val(struct a37xx_periphclk *priv,
+			const struct clk_periph *clk, int idx, int val)
+{
+	u32 reg;
+
+	reg = readl(priv->reg + clk->div_reg_off[idx]);
+	reg &= ~(clk->div_mask[idx] << clk->div_shift[idx]);
+	reg |= (val & clk->div_mask[idx]) << clk->div_shift[idx];
+	writel(reg, priv->reg + clk->div_reg_off[idx]);
+}
+
 static ulong periph_clk_get_rate(struct a37xx_periphclk *priv, int id)
 {
 	const struct clk_periph *clk = &priv->clks[id];
@@ -337,6 +358,111 @@ static int armada_37xx_periph_clk_disable(struct clk *clk)
 	return periph_clk_enable(clk, 0);
 }
 
+#define diff(a, b) abs((long)(a) - (long)(b))
+
+static ulong find_best_div(const struct clk_div_table *t0,
+			   const struct clk_div_table *t1, ulong parent_rate,
+			   ulong req_rate, int *v0, int *v1)
+{
+	const struct clk_div_table *i, *j;
+	ulong rate, best_rate = 0;
+
+	for (i = t0; i && i->div; ++i) {
+		for (j = t1; j && j->div; ++j) {
+			rate = DIV_ROUND_UP(parent_rate, i->div * j->div);
+
+			if (!best_rate ||
+			    diff(rate, req_rate) < diff(best_rate, req_rate)) {
+				best_rate = rate;
+				*v0 = i->val;
+				*v1 = j->val;
+			}
+		}
+	}
+
+	return best_rate;
+}
+
+static ulong armada_37xx_periph_clk_set_rate(struct clk *clk, ulong req_rate)
+{
+	struct a37xx_periphclk *priv = dev_get_priv(clk->dev);
+	const struct clk_periph *periph_clk = &priv->clks[clk->id];
+	ulong rate, old_rate, parent_rate;
+	int div_val0 = 0, div_val1 = 0;
+	const struct clk_div_table *t1;
+	static const struct clk_div_table empty_table[2] = {
+		{ 1, 0 },
+		{ 0, 0 }
+	};
+
+	if (clk->id > priv->count)
+		return -EINVAL;
+
+	old_rate = periph_clk_get_rate(priv, clk->id);
+	if (old_rate == -EINVAL)
+		return -EINVAL;
+
+	if (old_rate == req_rate)
+		return old_rate;
+
+	if (!periph_clk->can_gate || !periph_clk->dividers)
+		return -ENOTSUPP;
+
+	parent_rate = get_parent_rate(priv, clk->id);
+	if (parent_rate == -EINVAL)
+		return -EINVAL;
+
+	t1 = empty_table;
+	if (periph_clk->dividers > 1)
+		t1 = periph_clk->div_table[1];
+
+	rate = find_best_div(periph_clk->div_table[0], t1, parent_rate,
+			     req_rate, &div_val0, &div_val1);
+
+	periph_clk_enable(clk, 0);
+
+	set_div_val(priv, periph_clk, 0, div_val0);
+	if (periph_clk->dividers > 1)
+		set_div_val(priv, periph_clk, 1, div_val1);
+
+	periph_clk_enable(clk, 1);
+
+	return rate;
+}
+
+static int armada_37xx_periph_clk_set_parent(struct clk *clk,
+					     struct clk *parent)
+{
+	struct a37xx_periphclk *priv = dev_get_priv(clk->dev);
+	const struct clk_periph *periph_clk = &priv->clks[clk->id];
+	struct clk check_parent;
+	int ret;
+
+	/* We also check if parent is our TBG clock */
+	if (clk->id > priv->count || parent->id >= MAX_TBG_PARENTS)
+		return -EINVAL;
+
+	if (!periph_clk->can_mux || !periph_clk->can_gate)
+		return -ENOTSUPP;
+
+	ret = clk_get_by_index(clk->dev, 0, &check_parent);
+	if (ret < 0)
+		return ret;
+
+	if (parent->dev != check_parent.dev)
+		ret = -EINVAL;
+
+	clk_free(&check_parent);
+	if (ret < 0)
+		return ret;
+
+	periph_clk_enable(clk, 0);
+	set_mux(priv, periph_clk->mux_shift, parent->id);
+	periph_clk_enable(clk, 1);
+
+	return 0;
+}
+
 #if defined(CONFIG_CMD_CLK) && defined(CONFIG_CLK_ARMADA_3720)
 static int armada_37xx_periph_clk_dump(struct udevice *dev)
 {
@@ -473,6 +599,8 @@ static int armada_37xx_periph_clk_probe(struct udevice *dev)
 
 static const struct clk_ops armada_37xx_periph_clk_ops = {
 	.get_rate = armada_37xx_periph_clk_get_rate,
+	.set_rate = armada_37xx_periph_clk_set_rate,
+	.set_parent = armada_37xx_periph_clk_set_parent,
 	.enable = armada_37xx_periph_clk_enable,
 	.disable = armada_37xx_periph_clk_disable,
 };
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 9d02fd8eb1..2a02a9d58e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -24,14 +24,16 @@ DECLARE_GLOBAL_DATA_PTR;
 #define TIMEOUT_DRAIN_FIFO	5	/* in ms */
 #define CHIP_DELAY_TIMEOUT	200
 #define NAND_STOP_DELAY		40
-#define PAGE_CHUNK_SIZE		(2048)
 
 /*
  * Define a buffer size for the initial command that detects the flash device:
- * STATUS, READID and PARAM. The largest of these is the PARAM command,
- * needing 256 bytes.
+ * STATUS, READID and PARAM.
+ * ONFI param page is 256 bytes, and there are three redundant copies
+ * to be read. JEDEC param page is 512 bytes, and there are also three
+ * redundant copies to be read.
+ * Hence this buffer should be at least 512 x 3. Let's pick 2048.
  */
-#define INIT_BUFFER_SIZE	256
+#define INIT_BUFFER_SIZE	2048
 
 /* registers and bit definitions */
 #define NDCR		(0x00) /* Control register */
@@ -58,7 +60,7 @@ DECLARE_GLOBAL_DATA_PTR;
 #define NDCR_ND_MODE		(0x3 << 21)
 #define NDCR_NAND_MODE		(0x0)
 #define NDCR_CLR_PG_CNT		(0x1 << 20)
-#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
+#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
 #define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
 #define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
 
@@ -109,6 +111,13 @@ DECLARE_GLOBAL_DATA_PTR;
 #define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
 #define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
 
+/*
+ * This should be large enough to read 'ONFI' and 'JEDEC'.
+ * Let's use 7 bytes, which is the maximum ID count supported
+ * by the controller (see NDCR_RD_ID_CNT_MASK).
+ */
+#define READ_ID_BYTES		7
+
 /* macros for registers read/write */
 #define nand_writel(info, off, val)	\
 	writel((val), (info)->mmio_base + (off))
@@ -146,7 +155,6 @@ enum pxa3xx_nand_variant {
 
 struct pxa3xx_nand_host {
 	struct nand_chip	chip;
-	struct mtd_info		*mtd;
 	void			*info_data;
 
 	/* page size of attached chip */
@@ -156,8 +164,6 @@ struct pxa3xx_nand_host {
 	/* calculated from pxa3xx_nand_flash data */
 	unsigned int		col_addr_cycles;
 	unsigned int		row_addr_cycles;
-	size_t			read_id_bytes;
-
 };
 
 struct pxa3xx_nand_info {
@@ -193,15 +199,44 @@ struct pxa3xx_nand_info {
 	int			use_spare;	/* use spare ? */
 	int			need_wait;
 
-	unsigned int		data_size;	/* data to be read from FIFO */
-	unsigned int		chunk_size;	/* split commands chunk size */
-	unsigned int		oob_size;
+	/* Amount of real data per full chunk */
+	unsigned int		chunk_size;
+
+	/* Amount of spare data per full chunk */
 	unsigned int		spare_size;
+
+	/* Number of full chunks (i.e chunk_size + spare_size) */
+	unsigned int		nfullchunks;
+
+	/*
+	 * Total number of chunks. If equal to nfullchunks, then there
+	 * are only full chunks. Otherwise, there is one last chunk of
+	 * size (last_chunk_size + last_spare_size)
+	 */
+	unsigned int		ntotalchunks;
+
+	/* Amount of real data in the last chunk */
+	unsigned int		last_chunk_size;
+
+	/* Amount of spare data in the last chunk */
+	unsigned int		last_spare_size;
+
 	unsigned int		ecc_size;
 	unsigned int		ecc_err_cnt;
 	unsigned int		max_bitflips;
 	int			retcode;
 
+	/*
+	 * Variables only valid during command
+	 * execution. step_chunk_size and step_spare_size is the
+	 * amount of real data and spare data in the current
+	 * chunk. cur_chunk is the current chunk being
+	 * read/programmed.
+	 */
+	unsigned int		step_chunk_size;
+	unsigned int		step_spare_size;
+	unsigned int		cur_chunk;
+
 	/* cached register value */
 	uint32_t		reg_ndcr;
 	uint32_t		ndtr0cs0;
@@ -215,13 +250,33 @@ struct pxa3xx_nand_info {
 };
 
 static struct pxa3xx_nand_timing timing[] = {
+	/*
+	 * tCH	Enable signal hold time
+	 * tCS	Enable signal setup time
+	 * tWH	ND_nWE high duration
+	 * tWP	ND_nWE pulse time
+	 * tRH	ND_nRE high duration
+	 * tRP	ND_nRE pulse width
+	 * tR	ND_nWE high to ND_nRE low for read
+	 * tWHR	ND_nWE high to ND_nRE low for status read
+	 * tAR	ND_ALE low to ND_nRE low delay
+	 */
+	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
	{ 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
	{ 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
+	{ 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
 };
 
 static struct pxa3xx_nand_flash builtin_flash_types[] = {
+	/*
+	 * chip_id
+	 * flash_width	Width of Flash memory (DWIDTH_M)
+	 * dfc_width	Width of flash controller(DWIDTH_C)
+	 * *timing
+	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
+	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec, 8, 8, &timing[1] },
	{ 0xd7ec, 8, 8, &timing[1] },
@@ -230,6 +285,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0xdc2c, 8, 8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
+	{ 0xda98, 8, 8, &timing[4] },
 };
 
 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
@@ -267,6 +323,20 @@ static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
 	.oobfree = { {2, 30} }
 };
 
+static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
+	.eccbytes = 64,
+	.eccpos = {
+		64, 65, 66, 67, 68, 69, 70, 71,
+		72, 73, 74, 75, 76, 77, 78, 79,
+		80, 81, 82, 83, 84, 85, 86, 87,
+		88, 89, 90, 91, 92, 93, 94, 95,
+		96, 97, 98, 99, 100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127},
+	.oobfree = { {1, 4}, {6, 26} }
+};
+
 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
 	.eccbytes = 64,
 	.eccpos = {
@@ -282,6 +352,33 @@ static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
 	.oobfree = { {6, 26}, { 64, 32} }
 };
 
+static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
+	.eccbytes = 128,
+	.eccpos = {
+		32, 33, 34, 35, 36, 37, 38, 39,
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63,
+
+		96, 97, 98, 99, 100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127,
+
+		160, 161, 162, 163, 164, 165, 166, 167,
+		168, 169, 170, 171, 172, 173, 174, 175,
+		176, 177, 178, 179, 180, 181, 182, 183,
+		184, 185, 186, 187, 188, 189, 190, 191,
+
+		224, 225, 226, 227, 228, 229, 230, 231,
+		232, 233, 234, 235, 236, 237, 238, 239,
+		240, 241, 242, 243, 244, 245, 246, 247,
+		248, 249, 250, 251, 252, 253, 254, 255},
+
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
+};
+
 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
 	.eccbytes = 128,
 	.eccpos = {
@@ -292,6 +389,13 @@ static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
 	.oobfree = { }
 };
 
+static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
+	.eccbytes = 256,
+	.eccpos = {},
+	/* HW ECC handles all ECC data and all spare area is free for OOB */
+	.oobfree = {{0, 160} }
+};
+
 #define NDTR0_tCH(c)	(min((c), 7) << 19)
 #define NDTR0_tCS(c)	(min((c), 7) << 16)
 #define NDTR0_tWH(c)	(min((c), 7) << 11)
@@ -347,9 +451,9 @@ static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
 	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
 	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
 	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
-	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
+	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
 	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
-	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
+	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
 	u32 tR = chip->chip_delay * 1000;
 	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
 	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
@@ -381,16 +485,17 @@ static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
 	struct nand_chip *chip = &host->chip;
 	struct pxa3xx_nand_info *info = host->info_data;
 	const struct pxa3xx_nand_flash *f = NULL;
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
 	int mode, id, ntypes, i;
 
 	mode = onfi_get_async_timing_mode(chip);
 	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
 		ntypes = ARRAY_SIZE(builtin_flash_types);
 
-		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
+		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
 
-		id = chip->read_byte(host->mtd);
-		id |= chip->read_byte(host->mtd) << 0x8;
+		id = chip->read_byte(mtd);
+		id |= chip->read_byte(mtd) << 0x8;
 
 		for (i = 0; i < ntypes; i++) {
 			f = &builtin_flash_types[i];
@@ -427,25 +532,6 @@ static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
 	return 0;
 }
 
-/*
- * Set the data and OOB size, depending on the selected
- * spare and ECC configuration.
- * Only applicable to READ0, READOOB and PAGEPROG commands.
- */
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
-				struct mtd_info *mtd)
-{
-	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
-
-	info->data_size = mtd->writesize;
-	if (!oob_enable)
-		return;
-
-	info->oob_size = info->spare_size;
-	if (!info->use_ecc)
-		info->oob_size += info->ecc_size;
-}
-
 /**
  * NOTE: it is a must to set ND_RUN first, then write
  * command buffer, otherwise, it does not work.
@@ -478,8 +564,8 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
 	ndcr |= NDCR_ND_RUN;
 
 	/* clear status bits and run */
-	nand_writel(info, NDCR, 0);
 	nand_writel(info, NDSR, NDSR_MASK);
+	nand_writel(info, NDCR, 0);
 	nand_writel(info, NDCR, ndcr);
 }
 
@@ -526,39 +612,38 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
 
 static void handle_data_pio(struct pxa3xx_nand_info *info)
 {
-	unsigned int do_bytes = min(info->data_size, info->chunk_size);
-
 	switch (info->state) {
 	case STATE_PIO_WRITING:
-		writesl(info->mmio_base + NDDB,
-			info->data_buff + info->data_buff_pos,
-			DIV_ROUND_UP(do_bytes, 4));
+		if (info->step_chunk_size)
+			writesl(info->mmio_base + NDDB,
+				info->data_buff + info->data_buff_pos,
+				DIV_ROUND_UP(info->step_chunk_size, 4));
 
-		if (info->oob_size > 0)
+		if (info->step_spare_size)
 			writesl(info->mmio_base + NDDB,
 				info->oob_buff + info->oob_buff_pos,
-				DIV_ROUND_UP(info->oob_size, 4));
+				DIV_ROUND_UP(info->step_spare_size, 4));
 		break;
 	case STATE_PIO_READING:
-		drain_fifo(info,
-			   info->data_buff + info->data_buff_pos,
-			   DIV_ROUND_UP(do_bytes, 4));
+		if (info->step_chunk_size)
+			drain_fifo(info,
+				   info->data_buff + info->data_buff_pos,
+				   DIV_ROUND_UP(info->step_chunk_size, 4));
 
-		if (info->oob_size > 0)
+		if (info->step_spare_size)
 			drain_fifo(info,
 				   info->oob_buff + info->oob_buff_pos,
-				   DIV_ROUND_UP(info->oob_size, 4));
+				   DIV_ROUND_UP(info->step_spare_size, 4));
 		break;
 	default:
 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
-				info->state);
+			info->state);
 		BUG();
 	}
 
 	/* Update buffer pointers for multi-page read/write */
-	info->data_buff_pos += do_bytes;
-	info->oob_buff_pos += info->oob_size;
-	info->data_size -= do_bytes;
+	info->data_buff_pos += info->step_chunk_size;
+	info->oob_buff_pos += info->step_spare_size;
 }
 
 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
@@ -583,6 +668,9 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
 		cmd_done = NDSR_CS1_CMDD;
 	}
 
+	/* TODO - find out why we need the delay during write operation. */
+	ndelay(1);
+
 	status = nand_readl(info, NDSR);
 
 	if (status & NDSR_UNCORERR)
@@ -620,8 +708,14 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
 		is_ready = 1;
 	}
 
+	/*
+	 * Clear all status bit before issuing the next command, which
+	 * can and will alter the status bits and will deserve a new
+	 * interrupt on its own. This lets the controller exit the IRQ
+	 */
+	nand_writel(info, NDSR, status);
+
 	if (status & NDSR_WRCMDREQ) {
-		nand_writel(info, NDSR, NDSR_WRCMDREQ);
 		status &= ~NDSR_WRCMDREQ;
 		info->state = STATE_CMD_HANDLE;
 
@@ -642,8 +736,6 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
 			nand_writel(info, NDCB0, info->ndcb3);
 	}
 
-	/* clear NDSR to let the controller exit the IRQ */
-	nand_writel(info, NDSR, status);
 	if (is_completed)
 		info->cmd_complete = 1;
 	if (is_ready)
@@ -664,7 +756,7 @@ static void set_command_address(struct pxa3xx_nand_info *info,
 		unsigned int page_size, uint16_t column, int page_addr)
 {
 	/* small page addr setting */
-	if (page_size < PAGE_CHUNK_SIZE) {
+	if (page_size < info->chunk_size) {
 		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
 				| (column & 0xFF);
 
@@ -683,14 +775,16 @@ static void set_command_address(struct pxa3xx_nand_info *info,
 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
 {
 	struct pxa3xx_nand_host *host = info->host[info->cs];
-	struct mtd_info *mtd = host->mtd;
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
 
 	/* reset data and oob column point to handle data */
 	info->buf_start		= 0;
 	info->buf_count		= 0;
-	info->oob_size		= 0;
 	info->data_buff_pos	= 0;
 	info->oob_buff_pos	= 0;
+	info->step_chunk_size   = 0;
+	info->step_spare_size   = 0;
+	info->cur_chunk         = 0;
 	info->use_ecc		= 0;
 	info->use_spare		= 1;
 	info->retcode		= ERR_NONE;
@@ -700,10 +794,9 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
 
 	switch (command) {
 	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
 	case NAND_CMD_PAGEPROG:
 		info->use_ecc = 1;
-	case NAND_CMD_READOOB:
-		pxa3xx_set_datasize(info, mtd);
 		break;
 	case NAND_CMD_PARAM:
 		info->use_spare = 0;
@@ -734,7 +827,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 	struct mtd_info *mtd;
 
 	host = info->host[info->cs];
-	mtd = host->mtd;
+	mtd = nand_to_mtd(&host->chip);
 	addr_cycle = 0;
 	exec_cmd = 1;
 
@@ -760,19 +853,27 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 		if (command == NAND_CMD_READOOB)
 			info->buf_start += mtd->writesize;
 
+		if (info->cur_chunk < info->nfullchunks) {
+			info->step_chunk_size = info->chunk_size;
+			info->step_spare_size = info->spare_size;
+		} else {
+			info->step_chunk_size = info->last_chunk_size;
+			info->step_spare_size = info->last_spare_size;
+		}
+
 		/*
 		 * Multiple page read needs an 'extended command type' field,
 		 * which is either naked-read or last-read according to the
 		 * state.
 		 */
-		if (mtd->writesize == PAGE_CHUNK_SIZE) {
+		if (mtd->writesize == info->chunk_size) {
 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
-		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+		} else if (mtd->writesize > info->chunk_size) {
 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
 					| NDCB0_LEN_OVRD
 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
-			info->ndcb3 = info->chunk_size +
-				      info->oob_size;
+			info->ndcb3 = info->step_chunk_size +
+				info->step_spare_size;
 		}
 
 		set_command_address(info, mtd->writesize, column, page_addr);
@@ -787,13 +888,11 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 		 * Multiple page programming needs to execute the initial
 		 * SEQIN command that sets the page address.
 		 */
-		if (mtd->writesize > PAGE_CHUNK_SIZE) {
+		if (mtd->writesize > info->chunk_size) {
 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
 				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
 				| addr_cycle
 				| command;
-			/* No data transfer in this case */
-			info->data_size = 0;
 			exec_cmd = 1;
 		}
 		break;
@@ -805,8 +904,16 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 			break;
 		}
 
+		if (info->cur_chunk < info->nfullchunks) {
+			info->step_chunk_size = info->chunk_size;
+			info->step_spare_size = info->spare_size;
+		} else {
+			info->step_chunk_size = info->last_chunk_size;
+			info->step_spare_size = info->last_spare_size;
+		}
+
 		/* Second command setting for large pages */
-		if (mtd->writesize > PAGE_CHUNK_SIZE) {
+		if (mtd->writesize > info->chunk_size) {
 			/*
 			 * Multiple page write uses the 'extended command'
 			 * field. This can be used to issue a command dispatch
@@ -815,14 +922,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
 					| NDCB0_LEN_OVRD
 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
-			info->ndcb3 = info->chunk_size +
-				      info->oob_size;
+			info->ndcb3 = info->step_chunk_size +
+				info->step_spare_size;
 
 			/*
 			 * This is the command dispatch that completes a chunked
 			 * page program operation.
 			 */
-			if (info->data_size == 0) {
+			if (info->cur_chunk == info->ntotalchunks) {
 				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
 					| command;
@@ -842,24 +949,24 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 		break;
 
 	case NAND_CMD_PARAM:
-		info->buf_count = 256;
+		info->buf_count = INIT_BUFFER_SIZE;
 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
 				| NDCB0_ADDR_CYC(1)
 				| NDCB0_LEN_OVRD
 				| command;
 		info->ndcb1 = (column & 0xFF);
-		info->ndcb3 = 256;
-		info->data_size = 256;
+		info->ndcb3 = INIT_BUFFER_SIZE;
+		info->step_chunk_size = INIT_BUFFER_SIZE;
 		break;
 
 	case NAND_CMD_READID:
-		info->buf_count = host->read_id_bytes;
+		info->buf_count = READ_ID_BYTES;
 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
 				| NDCB0_ADDR_CYC(1)
 				| command;
 		info->ndcb1 = (column & 0xFF);
 
-		info->data_size = 8;
+		info->step_chunk_size = 8;
 		break;
 	case NAND_CMD_STATUS:
 		info->buf_count = 1;
@@ -867,7 +974,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 				| NDCB0_ADDR_CYC(1)
 				| command;
 
-		info->data_size = 8;
+		info->step_chunk_size = 8;
 		break;
 
 	case NAND_CMD_ERASE1:
@@ -1051,22 +1158,31 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 			}
 		}
 
+		/* Only a few commands need several steps */
+		if (command != NAND_CMD_PAGEPROG &&
+		    command != NAND_CMD_READ0 &&
+		    command != NAND_CMD_READOOB)
+			break;
+
+		info->cur_chunk++;
+
 		/* Check if the sequence is complete */
-		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+		if (info->cur_chunk == info->ntotalchunks &&
+		    command != NAND_CMD_PAGEPROG)
 			break;
 
 		/*
 		 * After a splitted program command sequence has issued
 		 * the command dispatch, the command sequence is complete.
 		 */
-		if (info->data_size == 0 &&
+		if (info->cur_chunk == (info->ntotalchunks + 1) &&
 		    command == NAND_CMD_PAGEPROG &&
 		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
 			break;
 
 		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
 			/* Last read: issue a 'last naked read' */
-			if (info->data_size == info->chunk_size)
+			if (info->cur_chunk == info->ntotalchunks - 1)
 				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
 			else
 				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1076,7 +1192,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 		 * the command dispatch must be issued to complete.
 		 */
 		} else if (command == NAND_CMD_PAGEPROG &&
-			   info->data_size == 0) {
+			   info->cur_chunk == info->ntotalchunks) {
 				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
 		}
 	} while (1);
@@ -1218,42 +1334,42 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
 	return NAND_STATUS_READY;
 }
 
-static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
+static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
+{
+	struct pxa3xx_nand_platform_data *pdata = info->pdata;
+
+	/* Configure default flash values */
+	info->reg_ndcr = 0x0; /* enable all interrupts */
+	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
+	info->reg_ndcr |= NDCR_SPARE_EN;
+
+	return 0;
+}
+
+static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
 {
 	struct pxa3xx_nand_host *host = info->host[info->cs];
-	struct mtd_info *mtd = host->mtd;
+	struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
 	struct nand_chip *chip = mtd_to_nand(mtd);
 
 	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
 	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
 	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
-
-	return 0;
 }
 
-static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
+static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
 {
-	/*
-	 * We set 0 by hard coding here, for we don't support keep_config
-	 * when there is more than one chip attached to the controller
-	 */
-	struct pxa3xx_nand_host *host = info->host[0];
+	struct pxa3xx_nand_platform_data *pdata = info->pdata;
 	uint32_t ndcr = nand_readl(info, NDCR);
 
-	if (ndcr & NDCR_PAGE_SZ) {
-		/* Controller's FIFO size */
-		info->chunk_size = 2048;
-		host->read_id_bytes = 4;
-	} else {
-		info->chunk_size = 512;
-		host->read_id_bytes = 2;
-	}
-
 	/* Set an initial chunk size */
-	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
+	info->reg_ndcr = ndcr &
+		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
+	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
 	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
 	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-	return 0;
 }
 
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
@@ -1273,13 +1389,13 @@ static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
 	const struct nand_sdr_timings *timings;
 	int ret;
 
-	mtd = info->host[info->cs]->mtd;
+	mtd = nand_to_mtd(&info->host[info->cs]->chip);
 	chip = mtd_to_nand(mtd);
 
 	/* configure default flash values */
 	info->reg_ndcr = 0x0; /* enable all interrupts */
 	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
-	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
+	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
 	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
 
 	/* use the common timing to make a try */
@@ -1302,6 +1418,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 			int strength, int ecc_stepsize, int page_size)
 {
 	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
 		info->chunk_size = 2048;
 		info->spare_size = 40;
 		info->ecc_size = 24;
@@ -1310,6 +1428,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 		ecc->strength = 1;
 
 	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
 		info->chunk_size = 512;
 		info->spare_size = 8;
 		info->ecc_size = 8;
@@ -1323,6 +1443,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 	 */
 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
 		info->ecc_bch = 1;
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
 		info->chunk_size = 2048;
 		info->spare_size = 32;
 		info->ecc_size = 32;
@@ -1333,6 +1455,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 
 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
 		info->ecc_bch = 1;
+		info->nfullchunks = 2;
+		info->ntotalchunks = 2;
 		info->chunk_size = 2048;
 		info->spare_size = 32;
 		info->ecc_size = 32;
@@ -1341,19 +1465,64 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 		ecc->layout = &ecc_layout_4KB_bch4bit;
 		ecc->strength = 16;
 
+	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
+		info->ecc_bch = 1;
+		info->nfullchunks = 4;
+		info->ntotalchunks = 4;
+		info->chunk_size = 2048;
+		info->spare_size = 32;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		ecc->layout = &ecc_layout_8KB_bch4bit;
+		ecc->strength = 16;
+
 	/*
 	 * Required ECC: 8-bit correction per 512 bytes
 	 * Select: 16-bit correction per 1024 bytes
 	 */
+	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
+		info->ecc_bch = 1;
+		info->nfullchunks = 1;
+		info->ntotalchunks = 2;
+		info->chunk_size = 1024;
+		info->spare_size = 0;
+		info->last_chunk_size = 1024;
+		info->last_spare_size = 64;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		ecc->layout = &ecc_layout_2KB_bch8bit;
+		ecc->strength = 16;
+
 	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
 		info->ecc_bch = 1;
+		info->nfullchunks = 4;
+		info->ntotalchunks = 5;
 		info->chunk_size = 1024;
 		info->spare_size = 0;
+		info->last_chunk_size = 0;
+		info->last_spare_size = 64;
 		info->ecc_size = 32;
 		ecc->mode = NAND_ECC_HW;
 		ecc->size = info->chunk_size;
 		ecc->layout = &ecc_layout_4KB_bch8bit;
 		ecc->strength = 16;
+
+	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
+		info->ecc_bch = 1;
+		info->nfullchunks = 8;
+		info->ntotalchunks = 9;
+		info->chunk_size = 1024;
+		info->spare_size = 0;
+		info->last_chunk_size = 0;
+		info->last_spare_size = 160;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		ecc->layout = &ecc_layout_8KB_bch8bit;
+		ecc->strength = 16;
+
 	} else {
 		dev_err(&info->pdev->dev,
 			"ECC strength %d at page size %d is not supported\n",
@@ -1373,21 +1542,21 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
 	int ret;
 	uint16_t ecc_strength, ecc_step;
 
-	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
-		goto KEEP_CONFIG;
-
-	/* Set a default chunk size */
-	info->chunk_size = 512;
-
-	ret = pxa3xx_nand_sensing(host);
-	if (ret) {
-		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
-			 info->cs);
-
-		return ret;
+	if (pdata->keep_config) {
+		pxa3xx_nand_detect_config(info);
+	} else {
+		ret = pxa3xx_nand_config_ident(info);
+		if (ret)
+			return ret;
+		ret = pxa3xx_nand_sensing(host);
+		if (ret) {
+			dev_info(&info->pdev->dev,
+				 "There is no chip on cs %d!\n",
+				 info->cs);
+			return ret;
+		}
 	}
 
-KEEP_CONFIG:
 	/* Device detection must be done with ECC disabled */
 	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
 		nand_writel(info, NDECCCTRL, 0x0);
@@ -1404,10 +1573,6 @@ KEEP_CONFIG:
 		}
 	}
 
-	ret = pxa3xx_nand_config_flash(info);
-	if (ret)
-		return ret;
-
 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
 	/*
 	 * We'll use a bad block table stored in-flash and don't
@@ -1418,21 +1583,6 @@ KEEP_CONFIG:
 	chip->bbt_md = &bbt_mirror_descr;
 #endif
 
-	/*
-	 * If the page size is bigger than the FIFO size, let's check
-	 * we are given the right variant and then switch to the extended
-	 * (aka splitted) command handling,
-	 */
-	if (mtd->writesize > PAGE_CHUNK_SIZE) {
-		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
-			chip->cmdfunc = nand_cmdfunc_extended;
-		} else {
-			dev_err(&info->pdev->dev,
-				"unsupported page size on this variant\n");
-			return -ENODEV;
-		}
-	}
-
 	if (pdata->ecc_strength && pdata->ecc_step_size) {
 		ecc_strength = pdata->ecc_strength;
 		ecc_step = pdata->ecc_step_size;
@@ -1452,6 +1602,21 @@ KEEP_CONFIG:
 	if (ret)
 		return ret;
 
+	/*
+	 * If the page size is bigger than the FIFO size, let's check
+	 * we are given the right variant and then switch to the extended
+	 * (aka split) command handling,
+	 */
+	if (mtd->writesize > info->chunk_size) {
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+			chip->cmdfunc = nand_cmdfunc_extended;
+		} else {
+			dev_err(&info->pdev->dev,
+				"unsupported page size on this variant\n");
+			return -ENODEV;
+		}
+	}
+
 	/* calculate addressing information */
 	if (mtd->writesize >= 2048)
 		host->col_addr_cycles = 2;
@@ -1472,6 +1637,10 @@ KEEP_CONFIG:
 		host->row_addr_cycles = 3;
 	else
 		host->row_addr_cycles = 2;
+
+	if (!pdata->keep_config)
+		pxa3xx_nand_config_tail(info);
+
 	return nand_scan_tail(mtd);
 }
 
@@ -1494,10 +1663,8 @@ static int alloc_nand_resource(struct pxa3xx_nand_info *info)
 		mtd = nand_to_mtd(chip);
 		host = (struct pxa3xx_nand_host *)chip;
 		info->host[cs] = host;
-		host->mtd = mtd;
 		host->cs = cs;
 		host->info_data = info;
-		host->read_id_bytes = 4;
 		mtd->owner = THIS_MODULE;
 
 		nand_set_controller_data(chip, host);
@@ -1612,7 +1779,7 @@ static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
 
 	probe_success = 0;
 	for (cs = 0; cs < pdata->num_cs; cs++) {
-		struct mtd_info *mtd = info->host[cs]->mtd;
+		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
 
 		/*
 		 * The mtd name matches the one used in 'mtdparts' kernel
diff --git a/drivers/phy/marvell/comphy_a3700.h b/drivers/phy/marvell/comphy_a3700.h
index a14767d809..b0941ffb37 100644
--- a/drivers/phy/marvell/comphy_a3700.h
+++ b/drivers/phy/marvell/comphy_a3700.h
@@ -6,7 +6,7 @@
 #ifndef _COMPHY_A3700_H_
 #define _COMPHY_A3700_H_
 
-#include "comphy.h"
+#include "comphy_core.h"
 #include "comphy_hpipe.h"
 
 #define MVEBU_REG(offs)		\
diff --git a/drivers/phy/marvell/comphy_core.c b/drivers/phy/marvell/comphy_core.c
index c6e2cc8897..9c24692629 100644
--- a/drivers/phy/marvell/comphy_core.c
+++ b/drivers/phy/marvell/comphy_core.c
@@ -11,7 +11,7 @@
 #include <linux/errno.h>
 #include <asm/io.h>
 
-#include "comphy.h"
+#include "comphy_core.h"
 
 #define COMPHY_MAX_CHIP 4
 
@@ -66,6 +66,11 @@ void comphy_print(struct chip_serdes_phy_config *chip_cfg,
 	}
 }
 
+__weak int comphy_update_map(struct comphy_map *serdes_map, int count)
+{
+	return 0;
+}
+
 static int comphy_probe(struct udevice *dev)
 {
 	const void *blob = gd->fdt_blob;
@@ -76,6 +81,7 @@ static int comphy_probe(struct udevice *dev)
 	int lane;
 	int last_idx = 0;
 	static int current_idx;
+	int res;
 
 	/* Save base addresses for later use */
 	chip_cfg->comphy_base_addr = (void *)devfdt_get_addr_index(dev, 0);
@@ -143,6 +149,10 @@ static int comphy_probe(struct udevice *dev)
 		lane++;
 	}
 
+	res = comphy_update_map(comphy_map_data, chip_cfg->comphy_lanes_count);
+	if (res < 0)
+		return res;
+
 	/* Save CP index for MultiCP devices (A8K) */
 	chip_cfg->cp_index = current_idx++;
 	/* PHY power UP sequence */
diff --git a/drivers/phy/marvell/comphy.h b/drivers/phy/marvell/comphy_core.h
index b588ae41f0..12ab921d24 100644
--- a/drivers/phy/marvell/comphy.h
+++ b/drivers/phy/marvell/comphy_core.h
@@ -3,11 +3,11 @@
  * Copyright (C) 2015-2016 Marvell International Ltd.
  */
 
-#ifndef _COMPHY_H_
-#define _COMPHY_H_
+#ifndef _COMPHY_CORE_H_
+#define _COMPHY_CORE_H_
 
-#include <dt-bindings/comphy/comphy_data.h>
 #include <fdtdec.h>
+#include <mvebu/comphy.h>
 
 #if defined(DEBUG)
 #define debug_enter()	printf("----> Enter %s\n", __func__);
@@ -80,14 +80,6 @@ struct comphy_mux_data {
 	struct comphy_mux_options mux_values[MAX_LANE_OPTIONS];
 };
 
-struct comphy_map {
-	u32 type;
-	u32 speed;
-	u32 invert;
-	bool clk_src;
-	bool end_point;
-};
-
 struct chip_serdes_phy_config {
 	struct comphy_mux_data *mux_data;
 	int (*ptr_comphy_chip_init)(struct chip_serdes_phy_config *,
@@ -183,5 +175,5 @@ void comphy_pcie_config_detect(u32 comphy_max_count,
 			       struct comphy_map *serdes_map);
 void comphy_pcie_unit_general_config(u32 pex_index);
 
-#endif /* _COMPHY_H_ */
+#endif /* _COMPHY_CORE_H_ */
diff --git a/drivers/phy/marvell/comphy_cp110.c b/drivers/phy/marvell/comphy_cp110.c
index b0d5d5ca26..9998c07a47 100644
--- a/drivers/phy/marvell/comphy_cp110.c
+++ b/drivers/phy/marvell/comphy_cp110.c
@@ -9,7 +9,7 @@
 #include <asm/arch/cpu.h>
 #include <asm/arch/soc.h>
 
-#include "comphy.h"
+#include "comphy_core.h"
 #include "comphy_hpipe.h"
 #include "sata.h"
 #include "utmi_phy.h"
@@ -641,7 +641,8 @@ static int comphy_usb3_power_up(u32 lane, void __iomem *hpipe_base,
 }
 
 static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base,
-				void __iomem *comphy_base, int cp_index)
+				void __iomem *comphy_base, int cp_index,
+				u32 invert)
 {
 	u32 mask, data, i, ret = 1;
 	void __iomem *hpipe_addr = HPIPE_ADDR(hpipe_base, lane);
@@ -927,6 +928,19 @@ static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base,
 	reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
 		0x0 << HPIPE_PWR_CTR_RST_DFE_OFFSET,
 		HPIPE_PWR_CTR_RST_DFE_MASK);
+
+	/* Set RX / TX swaps */
+	data = mask = 0;
+	if (invert & PHY_POLARITY_TXD_INVERT) {
+		data |= (1 << HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET);
+		mask |= HPIPE_SYNC_PATTERN_TXD_SWAP_MASK;
+	}
+	if (invert & PHY_POLARITY_RXD_INVERT) {
+		data |= (1 << HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET);
+		mask |= HPIPE_SYNC_PATTERN_RXD_SWAP_MASK;
+	}
+	reg_set(hpipe_addr + HPIPE_SYNC_PATTERN_REG, data, mask);
+
 	/* SW reset for interupt logic */
 	reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
 		0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET,
@@ -2006,7 +2020,8 @@ int comphy_cp110_init(struct chip_serdes_phy_config *ptr_chip_cfg,
 		case PHY_TYPE_SATA3:
 			ret = comphy_sata_power_up(
 				lane, hpipe_base_addr, comphy_base_addr,
-				ptr_chip_cfg->cp_index);
+				ptr_chip_cfg->cp_index,
+				serdes_map[lane].invert);
 			break;
 		case PHY_TYPE_USB3_HOST0:
 		case PHY_TYPE_USB3_HOST1:
diff --git a/drivers/phy/marvell/comphy_hpipe.h b/drivers/phy/marvell/comphy_hpipe.h
index d99da7b9ff..a692035c94 100644
--- a/drivers/phy/marvell/comphy_hpipe.h
+++ b/drivers/phy/marvell/comphy_hpipe.h
@@ -221,6 +221,12 @@
 	(0x7 << HPIPE_LOOPBACK_SEL_OFFSET)
 
 #define HPIPE_SYNC_PATTERN_REG			0x090
+#define HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET	10
+#define HPIPE_SYNC_PATTERN_TXD_SWAP_MASK	\
+	(0x1 << HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET)
+#define HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET	11
+#define HPIPE_SYNC_PATTERN_RXD_SWAP_MASK	\
+	(0x1 << HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET)
 
 #define HPIPE_INTERFACE_REG			0x94
 #define HPIPE_INTERFACE_GEN_MAX_OFFSET		10
diff --git a/drivers/phy/marvell/comphy_mux.c b/drivers/phy/marvell/comphy_mux.c
index 1f757d8e04..c67ba99762 100644
--- a/drivers/phy/marvell/comphy_mux.c
+++ b/drivers/phy/marvell/comphy_mux.c
@@ -6,7 +6,7 @@
 #include <common.h>
 #include <asm/io.h>
 
-#include "comphy.h"
+#include "comphy_core.h"
 #include "comphy_hpipe.h"
 
 /*
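For reference, a minimal, hypothetical sketch (not part of this commit) of how board code could use the new __weak comphy_update_map() hook that comphy_core.c gains above; the choice of lane type and polarity flag below is illustrative only, taken from constants that already appear in this diff:

#include <common.h>
#include <mvebu/comphy.h>

/*
 * Hypothetical board-level override of the weak comphy_update_map() hook:
 * request RX polarity inversion on every SATA3 lane before the PHY
 * power-up sequence runs. Return a negative value to abort comphy_probe().
 */
int comphy_update_map(struct comphy_map *serdes_map, int count)
{
	int lane;

	for (lane = 0; lane < count; lane++) {
		if (serdes_map[lane].type == PHY_TYPE_SATA3)
			serdes_map[lane].invert |= PHY_POLARITY_RXD_INVERT;
	}

	return 0;
}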