/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
13 #include <asm/errno.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/nand.h>
18 #include <linux/types.h>
20 #include "pxa3xx_nand.h"
22 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
23 #define CHIP_DELAY_TIMEOUT 200
24 #define NAND_STOP_DELAY 40
25 #define PAGE_CHUNK_SIZE (2048)
28 * Define a buffer size for the initial command that detects the flash device:
29 * STATUS, READID and PARAM. The largest of these is the PARAM command,
32 #define INIT_BUFFER_SIZE 256
34 /* registers and bit definitions */
35 #define NDCR (0x00) /* Control register */
36 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
37 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
38 #define NDSR (0x14) /* Status Register */
39 #define NDPCR (0x18) /* Page Count Register */
40 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
41 #define NDBDR1 (0x20) /* Bad Block Register 1 */
42 #define NDECCCTRL (0x28) /* ECC control */
43 #define NDDB (0x40) /* Data Buffer */
44 #define NDCB0 (0x48) /* Command Buffer0 */
45 #define NDCB1 (0x4C) /* Command Buffer1 */
46 #define NDCB2 (0x50) /* Command Buffer2 */
48 #define NDCR_SPARE_EN (0x1 << 31)
49 #define NDCR_ECC_EN (0x1 << 30)
50 #define NDCR_DMA_EN (0x1 << 29)
51 #define NDCR_ND_RUN (0x1 << 28)
52 #define NDCR_DWIDTH_C (0x1 << 27)
53 #define NDCR_DWIDTH_M (0x1 << 26)
54 #define NDCR_PAGE_SZ (0x1 << 24)
55 #define NDCR_NCSX (0x1 << 23)
56 #define NDCR_ND_MODE (0x3 << 21)
57 #define NDCR_NAND_MODE (0x0)
58 #define NDCR_CLR_PG_CNT (0x1 << 20)
59 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
60 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
61 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
63 #define NDCR_RA_START (0x1 << 15)
64 #define NDCR_PG_PER_BLK (0x1 << 14)
65 #define NDCR_ND_ARB_EN (0x1 << 12)
66 #define NDCR_INT_MASK (0xFFF)
68 #define NDSR_MASK (0xfff)
69 #define NDSR_ERR_CNT_OFF (16)
70 #define NDSR_ERR_CNT_MASK (0x1f)
71 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
72 #define NDSR_RDY (0x1 << 12)
73 #define NDSR_FLASH_RDY (0x1 << 11)
74 #define NDSR_CS0_PAGED (0x1 << 10)
75 #define NDSR_CS1_PAGED (0x1 << 9)
76 #define NDSR_CS0_CMDD (0x1 << 8)
77 #define NDSR_CS1_CMDD (0x1 << 7)
78 #define NDSR_CS0_BBD (0x1 << 6)
79 #define NDSR_CS1_BBD (0x1 << 5)
80 #define NDSR_UNCORERR (0x1 << 4)
81 #define NDSR_CORERR (0x1 << 3)
82 #define NDSR_WRDREQ (0x1 << 2)
83 #define NDSR_RDDREQ (0x1 << 1)
84 #define NDSR_WRCMDREQ (0x1)
86 #define NDCB0_LEN_OVRD (0x1 << 28)
87 #define NDCB0_ST_ROW_EN (0x1 << 26)
88 #define NDCB0_AUTO_RS (0x1 << 25)
89 #define NDCB0_CSEL (0x1 << 24)
90 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
91 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
92 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
93 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
94 #define NDCB0_NC (0x1 << 20)
95 #define NDCB0_DBC (0x1 << 19)
96 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
97 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
98 #define NDCB0_CMD2_MASK (0xff << 8)
99 #define NDCB0_CMD1_MASK (0xff)
100 #define NDCB0_ADDR_CYC_SHIFT (16)
102 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
103 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
104 #define EXT_CMD_TYPE_READ 4 /* Read */
105 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
106 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
107 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
108 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
110 /* macros for registers read/write */
111 #define nand_writel(info, off, val) \
112 writel((val), (info)->mmio_base + (off))
114 #define nand_readl(info, off) \
115 readl((info)->mmio_base + (off))
117 /* error code and state */
140 enum pxa3xx_nand_variant {
141 PXA3XX_NAND_VARIANT_PXA,
142 PXA3XX_NAND_VARIANT_ARMADA370,
145 struct pxa3xx_nand_host {
146 struct nand_chip chip;
147 struct mtd_info *mtd;
150 /* page size of attached chip */
154 /* calculated from pxa3xx_nand_flash data */
155 unsigned int col_addr_cycles;
156 unsigned int row_addr_cycles;
157 size_t read_id_bytes;
161 struct pxa3xx_nand_info {
162 struct nand_hw_control controller;
163 struct pxa3xx_nand_platform_data *pdata;
166 void __iomem *mmio_base;
167 unsigned long mmio_phys;
168 int cmd_complete, dev_ready;
170 unsigned int buf_start;
171 unsigned int buf_count;
172 unsigned int buf_size;
173 unsigned int data_buff_pos;
174 unsigned int oob_buff_pos;
176 unsigned char *data_buff;
177 unsigned char *oob_buff;
179 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
183 * This driver supports NFCv1 (as found in PXA SoC)
184 * and NFCv2 (as found in Armada 370/XP SoC).
186 enum pxa3xx_nand_variant variant;
189 int use_ecc; /* use HW ECC ? */
190 int ecc_bch; /* using BCH ECC? */
191 int use_spare; /* use spare ? */
194 unsigned int data_size; /* data to be read from FIFO */
195 unsigned int chunk_size; /* split commands chunk size */
196 unsigned int oob_size;
197 unsigned int spare_size;
198 unsigned int ecc_size;
199 unsigned int ecc_err_cnt;
200 unsigned int max_bitflips;
203 /* cached register value */
208 /* generated NDCBx register values */
215 static struct pxa3xx_nand_timing timing[] = {
216 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
217 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
218 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
219 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
222 static struct pxa3xx_nand_flash builtin_flash_types[] = {
223 { 0x46ec, 16, 16, &timing[1] },
224 { 0xdaec, 8, 8, &timing[1] },
225 { 0xd7ec, 8, 8, &timing[1] },
226 { 0xa12c, 8, 8, &timing[2] },
227 { 0xb12c, 16, 16, &timing[2] },
228 { 0xdc2c, 8, 8, &timing[2] },
229 { 0xcc2c, 16, 16, &timing[2] },
230 { 0xba20, 16, 16, &timing[3] },
233 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
234 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
236 static struct nand_bbt_descr bbt_main_descr = {
237 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
238 | NAND_BBT_2BIT | NAND_BBT_VERSION,
242 .maxblocks = 8, /* Last 8 blocks in each chip */
243 .pattern = bbt_pattern
246 static struct nand_bbt_descr bbt_mirror_descr = {
247 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
248 | NAND_BBT_2BIT | NAND_BBT_VERSION,
252 .maxblocks = 8, /* Last 8 blocks in each chip */
253 .pattern = bbt_mirror_pattern
256 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
259 32, 33, 34, 35, 36, 37, 38, 39,
260 40, 41, 42, 43, 44, 45, 46, 47,
261 48, 49, 50, 51, 52, 53, 54, 55,
262 56, 57, 58, 59, 60, 61, 62, 63},
263 .oobfree = { {2, 30} }
266 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
269 32, 33, 34, 35, 36, 37, 38, 39,
270 40, 41, 42, 43, 44, 45, 46, 47,
271 48, 49, 50, 51, 52, 53, 54, 55,
272 56, 57, 58, 59, 60, 61, 62, 63,
273 96, 97, 98, 99, 100, 101, 102, 103,
274 104, 105, 106, 107, 108, 109, 110, 111,
275 112, 113, 114, 115, 116, 117, 118, 119,
276 120, 121, 122, 123, 124, 125, 126, 127},
277 /* Bootrom looks in bytes 0 & 5 for bad blocks */
278 .oobfree = { {6, 26}, { 64, 32} }
281 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
284 32, 33, 34, 35, 36, 37, 38, 39,
285 40, 41, 42, 43, 44, 45, 46, 47,
286 48, 49, 50, 51, 52, 53, 54, 55,
287 56, 57, 58, 59, 60, 61, 62, 63},
291 #define NDTR0_tCH(c) (min((c), 7) << 19)
292 #define NDTR0_tCS(c) (min((c), 7) << 16)
293 #define NDTR0_tWH(c) (min((c), 7) << 11)
294 #define NDTR0_tWP(c) (min((c), 7) << 8)
295 #define NDTR0_tRH(c) (min((c), 7) << 3)
296 #define NDTR0_tRP(c) (min((c), 7) << 0)
298 #define NDTR1_tR(c) (min((c), 65535) << 16)
299 #define NDTR1_tWHR(c) (min((c), 15) << 4)
300 #define NDTR1_tAR(c) (min((c), 15) << 0)
302 /* convert nano-seconds to nand flash controller clock cycles */
303 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
305 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
307 /* We only support the Armada 370/XP/38x for now */
308 return PXA3XX_NAND_VARIANT_ARMADA370;
311 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
312 const struct pxa3xx_nand_timing *t)
314 struct pxa3xx_nand_info *info = host->info_data;
315 unsigned long nand_clk = mvebu_get_nand_clock();
316 uint32_t ndtr0, ndtr1;
318 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
319 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
320 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
321 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
322 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
323 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
325 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
326 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
327 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
329 info->ndtr0cs0 = ndtr0;
330 info->ndtr1cs0 = ndtr1;
331 nand_writel(info, NDTR0CS0, ndtr0);
332 nand_writel(info, NDTR1CS0, ndtr1);
335 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
336 const struct nand_sdr_timings *t)
338 struct pxa3xx_nand_info *info = host->info_data;
339 struct nand_chip *chip = &host->chip;
340 unsigned long nand_clk = mvebu_get_nand_clock();
341 uint32_t ndtr0, ndtr1;
343 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
344 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
345 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
346 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
347 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
348 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
349 u32 tR = chip->chip_delay * 1000;
350 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
351 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
353 /* fallback to a default value if tR = 0 */
357 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
358 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
359 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
360 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
361 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
362 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
364 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
365 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
366 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
368 info->ndtr0cs0 = ndtr0;
369 info->ndtr1cs0 = ndtr1;
370 nand_writel(info, NDTR0CS0, ndtr0);
371 nand_writel(info, NDTR1CS0, ndtr1);
374 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
376 const struct nand_sdr_timings *timings;
377 struct nand_chip *chip = &host->chip;
378 struct pxa3xx_nand_info *info = host->info_data;
379 const struct pxa3xx_nand_flash *f = NULL;
380 int mode, id, ntypes, i;
382 mode = onfi_get_async_timing_mode(chip);
383 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
384 ntypes = ARRAY_SIZE(builtin_flash_types);
386 chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
388 id = chip->read_byte(host->mtd);
389 id |= chip->read_byte(host->mtd) << 0x8;
391 for (i = 0; i < ntypes; i++) {
392 f = &builtin_flash_types[i];
394 if (f->chip_id == id)
399 dev_err(&info->pdev->dev, "Error: timings not found\n");
403 pxa3xx_nand_set_timing(host, f->timing);
405 if (f->flash_width == 16) {
406 info->reg_ndcr |= NDCR_DWIDTH_M;
407 chip->options |= NAND_BUSWIDTH_16;
410 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
412 mode = fls(mode) - 1;
416 timings = onfi_async_timing_mode_to_sdr_timings(mode);
418 return PTR_ERR(timings);
420 pxa3xx_nand_set_sdr_timing(host, timings);
427 * Set the data and OOB size, depending on the selected
428 * spare and ECC configuration.
429 * Only applicable to READ0, READOOB and PAGEPROG commands.
431 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
432 struct mtd_info *mtd)
434 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
436 info->data_size = mtd->writesize;
440 info->oob_size = info->spare_size;
442 info->oob_size += info->ecc_size;
446 * NOTE: it is a must to set ND_RUN first, then write
447 * command buffer, otherwise, it does not work.
448 * We enable all the interrupt at the same time, and
449 * let pxa3xx_nand_irq to handle all logic.
451 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
455 ndcr = info->reg_ndcr;
460 nand_writel(info, NDECCCTRL, 0x1);
462 ndcr &= ~NDCR_ECC_EN;
464 nand_writel(info, NDECCCTRL, 0x0);
467 ndcr &= ~NDCR_DMA_EN;
470 ndcr |= NDCR_SPARE_EN;
472 ndcr &= ~NDCR_SPARE_EN;
476 /* clear status bits and run */
477 nand_writel(info, NDCR, 0);
478 nand_writel(info, NDSR, NDSR_MASK);
479 nand_writel(info, NDCR, ndcr);
482 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
486 ndcr = nand_readl(info, NDCR);
487 nand_writel(info, NDCR, ndcr | int_mask);
490 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
496 * According to the datasheet, when reading from NDDB
497 * with BCH enabled, after each 32 bytes reads, we
498 * have to make sure that the NDSR.RDDREQ bit is set.
500 * Drain the FIFO 8 32 bits reads at a time, and skip
501 * the polling on the last read.
504 readsl(info->mmio_base + NDDB, data, 8);
507 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
508 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
509 dev_err(&info->pdev->dev,
510 "Timeout on RDDREQ while draining the FIFO\n");
520 readsl(info->mmio_base + NDDB, data, len);
523 static void handle_data_pio(struct pxa3xx_nand_info *info)
525 unsigned int do_bytes = min(info->data_size, info->chunk_size);
527 switch (info->state) {
528 case STATE_PIO_WRITING:
529 writesl(info->mmio_base + NDDB,
530 info->data_buff + info->data_buff_pos,
531 DIV_ROUND_UP(do_bytes, 4));
533 if (info->oob_size > 0)
534 writesl(info->mmio_base + NDDB,
535 info->oob_buff + info->oob_buff_pos,
536 DIV_ROUND_UP(info->oob_size, 4));
538 case STATE_PIO_READING:
540 info->data_buff + info->data_buff_pos,
541 DIV_ROUND_UP(do_bytes, 4));
543 if (info->oob_size > 0)
545 info->oob_buff + info->oob_buff_pos,
546 DIV_ROUND_UP(info->oob_size, 4));
549 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
554 /* Update buffer pointers for multi-page read/write */
555 info->data_buff_pos += do_bytes;
556 info->oob_buff_pos += info->oob_size;
557 info->data_size -= do_bytes;
560 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
562 handle_data_pio(info);
564 info->state = STATE_CMD_DONE;
565 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
568 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
570 unsigned int status, is_completed = 0, is_ready = 0;
571 unsigned int ready, cmd_done;
572 irqreturn_t ret = IRQ_HANDLED;
575 ready = NDSR_FLASH_RDY;
576 cmd_done = NDSR_CS0_CMDD;
579 cmd_done = NDSR_CS1_CMDD;
582 status = nand_readl(info, NDSR);
584 if (status & NDSR_UNCORERR)
585 info->retcode = ERR_UNCORERR;
586 if (status & NDSR_CORERR) {
587 info->retcode = ERR_CORERR;
588 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
590 info->ecc_err_cnt = NDSR_ERR_CNT(status);
592 info->ecc_err_cnt = 1;
595 * Each chunk composing a page is corrected independently,
596 * and we need to store maximum number of corrected bitflips
597 * to return it to the MTD layer in ecc.read_page().
599 info->max_bitflips = max_t(unsigned int,
603 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
604 info->state = (status & NDSR_RDDREQ) ?
605 STATE_PIO_READING : STATE_PIO_WRITING;
606 /* Call the IRQ thread in U-Boot directly */
607 pxa3xx_nand_irq_thread(info);
610 if (status & cmd_done) {
611 info->state = STATE_CMD_DONE;
614 if (status & ready) {
615 info->state = STATE_READY;
619 if (status & NDSR_WRCMDREQ) {
620 nand_writel(info, NDSR, NDSR_WRCMDREQ);
621 status &= ~NDSR_WRCMDREQ;
622 info->state = STATE_CMD_HANDLE;
625 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
626 * must be loaded by writing directly either 12 or 16
627 * bytes directly to NDCB0, four bytes at a time.
629 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
630 * but each NDCBx register can be read.
632 nand_writel(info, NDCB0, info->ndcb0);
633 nand_writel(info, NDCB0, info->ndcb1);
634 nand_writel(info, NDCB0, info->ndcb2);
636 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
637 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
638 nand_writel(info, NDCB0, info->ndcb3);
641 /* clear NDSR to let the controller exit the IRQ */
642 nand_writel(info, NDSR, status);
644 info->cmd_complete = 1;
/*
 * Return 1 if the first @len bytes of @buf are all 0xff (an erased
 * page), 0 otherwise. A zero length is trivially blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
659 static void set_command_address(struct pxa3xx_nand_info *info,
660 unsigned int page_size, uint16_t column, int page_addr)
662 /* small page addr setting */
663 if (page_size < PAGE_CHUNK_SIZE) {
664 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
669 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
672 if (page_addr & 0xFF0000)
673 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
679 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
681 struct pxa3xx_nand_host *host = info->host[info->cs];
682 struct mtd_info *mtd = host->mtd;
684 /* reset data and oob column point to handle data */
688 info->data_buff_pos = 0;
689 info->oob_buff_pos = 0;
692 info->retcode = ERR_NONE;
693 info->ecc_err_cnt = 0;
699 case NAND_CMD_PAGEPROG:
701 case NAND_CMD_READOOB:
702 pxa3xx_set_datasize(info, mtd);
714 * If we are about to issue a read command, or about to set
715 * the write address, then clean the data buffer.
717 if (command == NAND_CMD_READ0 ||
718 command == NAND_CMD_READOOB ||
719 command == NAND_CMD_SEQIN) {
720 info->buf_count = mtd->writesize + mtd->oobsize;
721 memset(info->data_buff, 0xFF, info->buf_count);
725 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
726 int ext_cmd_type, uint16_t column, int page_addr)
728 int addr_cycle, exec_cmd;
729 struct pxa3xx_nand_host *host;
730 struct mtd_info *mtd;
732 host = info->host[info->cs];
738 info->ndcb0 = NDCB0_CSEL;
742 if (command == NAND_CMD_SEQIN)
745 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
746 + host->col_addr_cycles);
749 case NAND_CMD_READOOB:
751 info->buf_start = column;
752 info->ndcb0 |= NDCB0_CMD_TYPE(0)
756 if (command == NAND_CMD_READOOB)
757 info->buf_start += mtd->writesize;
760 * Multiple page read needs an 'extended command type' field,
761 * which is either naked-read or last-read according to the
764 if (mtd->writesize == PAGE_CHUNK_SIZE) {
765 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
766 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
767 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
769 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
770 info->ndcb3 = info->chunk_size +
774 set_command_address(info, mtd->writesize, column, page_addr);
779 info->buf_start = column;
780 set_command_address(info, mtd->writesize, 0, page_addr);
783 * Multiple page programming needs to execute the initial
784 * SEQIN command that sets the page address.
786 if (mtd->writesize > PAGE_CHUNK_SIZE) {
787 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
788 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
791 /* No data transfer in this case */
797 case NAND_CMD_PAGEPROG:
798 if (is_buf_blank(info->data_buff,
799 (mtd->writesize + mtd->oobsize))) {
804 /* Second command setting for large pages */
805 if (mtd->writesize > PAGE_CHUNK_SIZE) {
807 * Multiple page write uses the 'extended command'
808 * field. This can be used to issue a command dispatch
809 * or a naked-write depending on the current stage.
811 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
813 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
814 info->ndcb3 = info->chunk_size +
818 * This is the command dispatch that completes a chunked
819 * page program operation.
821 if (info->data_size == 0) {
822 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
823 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
830 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
834 | (NAND_CMD_PAGEPROG << 8)
841 info->buf_count = 256;
842 info->ndcb0 |= NDCB0_CMD_TYPE(0)
846 info->ndcb1 = (column & 0xFF);
848 info->data_size = 256;
851 case NAND_CMD_READID:
852 info->buf_count = host->read_id_bytes;
853 info->ndcb0 |= NDCB0_CMD_TYPE(3)
856 info->ndcb1 = (column & 0xFF);
860 case NAND_CMD_STATUS:
862 info->ndcb0 |= NDCB0_CMD_TYPE(4)
869 case NAND_CMD_ERASE1:
870 info->ndcb0 |= NDCB0_CMD_TYPE(2)
874 | (NAND_CMD_ERASE2 << 8)
876 info->ndcb1 = page_addr;
881 info->ndcb0 |= NDCB0_CMD_TYPE(5)
886 case NAND_CMD_ERASE2:
892 dev_err(&info->pdev->dev, "non-supported command %x\n",
900 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
901 int column, int page_addr)
903 struct pxa3xx_nand_host *host = mtd->priv;
904 struct pxa3xx_nand_info *info = host->info_data;
908 * if this is a x16 device ,then convert the input
909 * "byte" address into a "word" address appropriate
910 * for indexing a word-oriented device
912 if (info->reg_ndcr & NDCR_DWIDTH_M)
916 * There may be different NAND chip hooked to
917 * different chip select, so check whether
918 * chip select has been changed, if yes, reset the timing
920 if (info->cs != host->cs) {
922 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
923 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
926 prepare_start_command(info, command);
928 info->state = STATE_PREPARED;
929 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
934 info->cmd_complete = 0;
937 pxa3xx_nand_start(info);
943 status = nand_readl(info, NDSR);
945 pxa3xx_nand_irq(info);
947 if (info->cmd_complete)
950 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
951 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
956 info->state = STATE_IDLE;
959 static void nand_cmdfunc_extended(struct mtd_info *mtd,
960 const unsigned command,
961 int column, int page_addr)
963 struct pxa3xx_nand_host *host = mtd->priv;
964 struct pxa3xx_nand_info *info = host->info_data;
965 int exec_cmd, ext_cmd_type;
968 * if this is a x16 device then convert the input
969 * "byte" address into a "word" address appropriate
970 * for indexing a word-oriented device
972 if (info->reg_ndcr & NDCR_DWIDTH_M)
976 * There may be different NAND chip hooked to
977 * different chip select, so check whether
978 * chip select has been changed, if yes, reset the timing
980 if (info->cs != host->cs) {
982 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
983 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
986 /* Select the extended command for the first command */
989 case NAND_CMD_READOOB:
990 ext_cmd_type = EXT_CMD_TYPE_MONO;
993 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
995 case NAND_CMD_PAGEPROG:
996 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1003 prepare_start_command(info, command);
1006 * Prepare the "is ready" completion before starting a command
1007 * transaction sequence. If the command is not executed the
1008 * completion will be completed, see below.
1010 * We can do that inside the loop because the command variable
1011 * is invariant and thus so is the exec_cmd.
1013 info->need_wait = 1;
1014 info->dev_ready = 0;
1019 info->state = STATE_PREPARED;
1020 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1023 info->need_wait = 0;
1024 info->dev_ready = 1;
1028 info->cmd_complete = 0;
1029 pxa3xx_nand_start(info);
1035 status = nand_readl(info, NDSR);
1037 pxa3xx_nand_irq(info);
1039 if (info->cmd_complete)
1042 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1043 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1048 /* Check if the sequence is complete */
1049 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1053 * After a splitted program command sequence has issued
1054 * the command dispatch, the command sequence is complete.
1056 if (info->data_size == 0 &&
1057 command == NAND_CMD_PAGEPROG &&
1058 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1061 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1062 /* Last read: issue a 'last naked read' */
1063 if (info->data_size == info->chunk_size)
1064 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1066 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1069 * If a splitted program command has no more data to transfer,
1070 * the command dispatch must be issued to complete.
1072 } else if (command == NAND_CMD_PAGEPROG &&
1073 info->data_size == 0) {
1074 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1078 info->state = STATE_IDLE;
1081 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1082 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1084 chip->write_buf(mtd, buf, mtd->writesize);
1085 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1090 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1091 struct nand_chip *chip, uint8_t *buf, int oob_required,
1094 struct pxa3xx_nand_host *host = mtd->priv;
1095 struct pxa3xx_nand_info *info = host->info_data;
1097 chip->read_buf(mtd, buf, mtd->writesize);
1098 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1100 if (info->retcode == ERR_CORERR && info->use_ecc) {
1101 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1103 } else if (info->retcode == ERR_UNCORERR) {
1105 * for blank page (all 0xff), HW will calculate its ECC as
1106 * 0, which is different from the ECC information within
1107 * OOB, ignore such uncorrectable errors
1109 if (is_buf_blank(buf, mtd->writesize))
1110 info->retcode = ERR_NONE;
1112 mtd->ecc_stats.failed++;
1115 return info->max_bitflips;
1118 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1120 struct pxa3xx_nand_host *host = mtd->priv;
1121 struct pxa3xx_nand_info *info = host->info_data;
1124 if (info->buf_start < info->buf_count)
1125 /* Has just send a new command? */
1126 retval = info->data_buff[info->buf_start++];
1131 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1133 struct pxa3xx_nand_host *host = mtd->priv;
1134 struct pxa3xx_nand_info *info = host->info_data;
1135 u16 retval = 0xFFFF;
1137 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1138 retval = *((u16 *)(info->data_buff+info->buf_start));
1139 info->buf_start += 2;
1144 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1146 struct pxa3xx_nand_host *host = mtd->priv;
1147 struct pxa3xx_nand_info *info = host->info_data;
1148 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1150 memcpy(buf, info->data_buff + info->buf_start, real_len);
1151 info->buf_start += real_len;
1154 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1155 const uint8_t *buf, int len)
1157 struct pxa3xx_nand_host *host = mtd->priv;
1158 struct pxa3xx_nand_info *info = host->info_data;
1159 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1161 memcpy(info->data_buff + info->buf_start, buf, real_len);
1162 info->buf_start += real_len;
/*
 * nand_chip->select_chip: intentionally empty — chip selection is
 * handled per-command via NDCB0_CSEL in prepare_set_command().
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1170 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1172 struct pxa3xx_nand_host *host = mtd->priv;
1173 struct pxa3xx_nand_info *info = host->info_data;
1175 if (info->need_wait) {
1178 info->need_wait = 0;
1184 status = nand_readl(info, NDSR);
1186 pxa3xx_nand_irq(info);
1188 if (info->dev_ready)
1191 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1192 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1193 return NAND_STATUS_FAIL;
1198 /* pxa3xx_nand_send_command has waited for command complete */
1199 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1200 if (info->retcode == ERR_NONE)
1203 return NAND_STATUS_FAIL;
1206 return NAND_STATUS_READY;
1209 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1211 struct pxa3xx_nand_host *host = info->host[info->cs];
1212 struct mtd_info *mtd = host->mtd;
1213 struct nand_chip *chip = mtd->priv;
1215 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1216 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1217 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1222 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1225 * We set 0 by hard coding here, for we don't support keep_config
1226 * when there is more than one chip attached to the controller
1228 struct pxa3xx_nand_host *host = info->host[0];
1229 uint32_t ndcr = nand_readl(info, NDCR);
1231 if (ndcr & NDCR_PAGE_SZ) {
1232 /* Controller's FIFO size */
1233 info->chunk_size = 2048;
1234 host->read_id_bytes = 4;
1236 info->chunk_size = 512;
1237 host->read_id_bytes = 2;
1240 /* Set an initial chunk size */
1241 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1242 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1243 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1247 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1249 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1250 if (info->data_buff == NULL)
1255 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1257 struct pxa3xx_nand_info *info = host->info_data;
1258 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1259 struct mtd_info *mtd;
1260 struct nand_chip *chip;
1261 const struct nand_sdr_timings *timings;
1264 mtd = info->host[info->cs]->mtd;
1267 /* configure default flash values */
1268 info->reg_ndcr = 0x0; /* enable all interrupts */
1269 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1270 info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1271 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1273 /* use the common timing to make a try */
1274 timings = onfi_async_timing_mode_to_sdr_timings(0);
1275 if (IS_ERR(timings))
1276 return PTR_ERR(timings);
1278 pxa3xx_nand_set_sdr_timing(host, timings);
1280 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1281 ret = chip->waitfunc(mtd, chip);
1282 if (ret & NAND_STATUS_FAIL)
1288 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1289 struct nand_ecc_ctrl *ecc,
1290 int strength, int ecc_stepsize, int page_size)
1292 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1293 info->chunk_size = 2048;
1294 info->spare_size = 40;
1295 info->ecc_size = 24;
1296 ecc->mode = NAND_ECC_HW;
1300 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1301 info->chunk_size = 512;
1302 info->spare_size = 8;
1304 ecc->mode = NAND_ECC_HW;
1309 * Required ECC: 4-bit correction per 512 bytes
1310 * Select: 16-bit correction per 2048 bytes
1312 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1314 info->chunk_size = 2048;
1315 info->spare_size = 32;
1316 info->ecc_size = 32;
1317 ecc->mode = NAND_ECC_HW;
1318 ecc->size = info->chunk_size;
1319 ecc->layout = &ecc_layout_2KB_bch4bit;
1322 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1324 info->chunk_size = 2048;
1325 info->spare_size = 32;
1326 info->ecc_size = 32;
1327 ecc->mode = NAND_ECC_HW;
1328 ecc->size = info->chunk_size;
1329 ecc->layout = &ecc_layout_4KB_bch4bit;
1333 * Required ECC: 8-bit correction per 512 bytes
1334 * Select: 16-bit correction per 1024 bytes
1336 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1338 info->chunk_size = 1024;
1339 info->spare_size = 0;
1340 info->ecc_size = 32;
1341 ecc->mode = NAND_ECC_HW;
1342 ecc->size = info->chunk_size;
1343 ecc->layout = &ecc_layout_4KB_bch8bit;
1346 dev_err(&info->pdev->dev,
1347 "ECC strength %d at page size %d is not supported\n",
1348 strength, page_size);
/*
 * pxa3xx_nand_scan - detect the NAND chip behind @mtd and configure
 * the controller for it.
 *
 * Sequence as visible here: optionally reuse the existing controller
 * configuration (pdata->keep_config), sense the chip, run
 * nand_scan_ident() with ECC disabled, program timings and flash
 * configuration, pick the command handler and ECC scheme, size the
 * address cycles and data/OOB buffer, and finish with nand_scan_tail().
 *
 * Returns 0 on success or a negative error code (error paths are
 * partially elided in this view).
 */
1355 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1357 	struct pxa3xx_nand_host *host = mtd->priv;
1358 	struct pxa3xx_nand_info *info = host->info_data;
1359 	struct pxa3xx_nand_platform_data *pdata = info->pdata;
/* mtd->priv doubles as the nand_chip (host is laid out over it). */
1360 	struct nand_chip *chip = mtd->priv;
1362 	uint16_t ecc_strength, ecc_step;
/*
 * If the platform asked to keep the bootloader's controller setup and
 * we can successfully read it back, skip reprogramming below.
 */
1364 	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1367 	/* Set a default chunk size */
1368 	info->chunk_size = 512;
1370 	ret = pxa3xx_nand_sensing(host);
1372 		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1379 	/* Device detection must be done with ECC disabled */
1380 	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1381 		nand_writel(info, NDECCCTRL, 0x0);
/* Identify the chip (single CS); failure path elided here. */
1383 	if (nand_scan_ident(mtd, 1, NULL))
1386 	if (!pdata->keep_config) {
1387 		ret = pxa3xx_nand_init_timings(host);
1389 			dev_err(&info->pdev->dev,
1390 				"Failed to set timings: %d\n", ret);
1395 	ret = pxa3xx_nand_config_flash(info);
1399 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1401 	 * We'll use a bad block table stored in-flash and don't
1402 	 * allow writing the bad block marker to the flash.
1404 	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1405 	chip->bbt_td = &bbt_main_descr;
1406 	chip->bbt_md = &bbt_mirror_descr;
1410 	 * If the page size is bigger than the FIFO size, let's check
1411 	 * we are given the right variant and then switch to the extended
1412 	 * (aka split) command handling,
1414 	if (mtd->writesize > PAGE_CHUNK_SIZE) {
/* Only the Armada370 variant supports the extended command path. */
1415 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1416 			chip->cmdfunc = nand_cmdfunc_extended;
1418 			dev_err(&info->pdev->dev,
1419 				"unsupported page size on this variant\n");
/*
 * ECC requirements: platform data wins when both strength and step
 * size are given; otherwise fall back to the chip's datasheet values
 * filled in by nand_scan_ident().
 */
1424 	if (pdata->ecc_strength && pdata->ecc_step_size) {
1425 		ecc_strength = pdata->ecc_strength;
1426 		ecc_step = pdata->ecc_step_size;
1428 		ecc_strength = chip->ecc_strength_ds;
1429 		ecc_step = chip->ecc_step_ds;
1432 	/* Set default ECC strength requirements on non-ONFI devices */
1433 	if (ecc_strength < 1 && ecc_step < 1) {
1438 	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1439 			   ecc_step, mtd->writesize);
1443 	/* calculate addressing information */
/* 2 column-address cycles for large-page (>= 2KiB) devices, else 1. */
1444 	if (mtd->writesize >= 2048)
1445 		host->col_addr_cycles = 2;
1447 		host->col_addr_cycles = 1;
1449 	/* release the initial buffer */
/* Frees the small INIT_BUFFER_SIZE buffer used for detection only. */
1450 	kfree(info->data_buff);
1452 	/* allocate the real data + oob buffer */
1453 	info->buf_size = mtd->writesize + mtd->oobsize;
1454 	ret = pxa3xx_nand_init_buff(info);
1457 	info->oob_buff = info->data_buff + mtd->writesize;
/* 3 row-address cycles when the device has more than 64K pages. */
1459 	if ((mtd->size >> chip->page_shift) > 65536)
1460 		host->row_addr_cycles = 3;
1462 		host->row_addr_cycles = 2;
1463 	return nand_scan_tail(mtd);
/*
 * alloc_nand_resource - set up per-chip-select host structures and the
 * initial detection buffer.
 *
 * The per-CS pxa3xx_nand_host structures are carved out of the memory
 * allocated directly after *info (see the &info[1] arithmetic below);
 * each host's leading member is the nand_chip, so the same pointer is
 * cast both ways. Also wires up the nand_chip callbacks, records the
 * MMIO base, allocates the INIT_BUFFER_SIZE detection buffer and masks
 * all controller interrupts.
 *
 * Returns 0 on success or a negative error code (cleanup path is
 * partially elided in this view).
 */
1466 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1468 	struct pxa3xx_nand_platform_data *pdata;
1469 	struct pxa3xx_nand_host *host;
1470 	struct nand_chip *chip = NULL;
1471 	struct mtd_info *mtd;
1474 	pdata = info->pdata;
/* At least one chip select must be configured. */
1475 	if (pdata->num_cs <= 0)
1478 	info->variant = pxa3xx_nand_get_variant();
1479 	for (cs = 0; cs < pdata->num_cs; cs++) {
1480 		mtd = &nand_info[cs];
/*
 * host[cs] lives in the trailing storage allocated after *info;
 * nand_chip is the first member of pxa3xx_nand_host, hence the
 * double cast below.
 */
1481 		chip = (struct nand_chip *)
1482 			((u8 *)&info[1] + sizeof(*host) * cs);
1483 		host = (struct pxa3xx_nand_host *)chip;
1484 		info->host[cs] = host;
1487 		host->info_data = info;
/* READID is issued for 4 ID bytes during detection. */
1488 		host->read_id_bytes = 4;
1490 		mtd->owner = THIS_MODULE;
/* Hook up the driver's HW-ECC page ops and low-level accessors. */
1492 		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
1493 		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
1494 		chip->controller        = &info->controller;
1495 		chip->waitfunc		= pxa3xx_nand_waitfunc;
1496 		chip->select_chip	= pxa3xx_nand_select_chip;
1497 		chip->read_word		= pxa3xx_nand_read_word;
1498 		chip->read_byte		= pxa3xx_nand_read_byte;
1499 		chip->read_buf		= pxa3xx_nand_read_buf;
1500 		chip->write_buf		= pxa3xx_nand_write_buf;
1501 		chip->options		|= NAND_NO_SUBPAGE_WRITE;
1502 		chip->cmdfunc		= nand_cmdfunc;
/* Fixed controller base address on this platform. */
1505 	info->mmio_base = (void __iomem *)MVEBU_NAND_BASE;
1507 	/* Allocate a buffer to allow flash detection */
1508 	info->buf_size = INIT_BUFFER_SIZE;
1509 	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1510 	if (info->data_buff == NULL) {
1512 		goto fail_disable_clk;
1515 	/* initialize all interrupts to be disabled */
1516 	disable_int(info, NDSR_MASK);
/* Error path: release the detection buffer. */
1520 	kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt - allocate and fill default platform data.
 *
 * Despite the "_dt" name, this build allocates a zeroed
 * pxa3xx_nand_platform_data, enables the bus arbiter and attaches it
 * to @info (allocation-failure handling is elided in this view).
 * Returns 0 on success or a negative error code.
 */
1525 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1527 	struct pxa3xx_nand_platform_data *pdata;
1529 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1533 	pdata->enable_arbiter = 1;
1536 	info->pdata = pdata;
/*
 * pxa3xx_nand_probe - top-level probe: build platform data, allocate
 * controller resources, then scan every configured chip select.
 *
 * Returns 0 on success or a negative error code (tail of the loop and
 * the final return are elided in this view).
 */
1541 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1543 	struct pxa3xx_nand_platform_data *pdata;
1544 	int ret, cs, probe_success;
1546 	ret = pxa3xx_nand_probe_dt(info);
1550 	pdata = info->pdata;
1552 	ret = alloc_nand_resource(info);
1554 		dev_err(&pdev->dev, "alloc nand resource failed\n");
1559 	for (cs = 0; cs < pdata->num_cs; cs++) {
1560 		struct mtd_info *mtd = info->host[cs]->mtd;
1563 		 * The mtd name matches the one used in 'mtdparts' kernel
1564 		 * parameter. This name cannot be changed or otherwise
1565 		 * user's mtd partitions configuration would get broken.
/* ABI: keep this exact string — userspace mtdparts depends on it. */
1567 		mtd->name = "pxa3xx_nand-0";
1569 		ret = pxa3xx_nand_scan(mtd);
1571 			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1587 * Main initialization routine
1589 void board_nand_init(void)
1591 struct pxa3xx_nand_info *info;
1592 struct pxa3xx_nand_host *host;
1595 info = kzalloc(sizeof(*info) +
1596 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1601 ret = pxa3xx_nand_probe(info);