2 * Freescale GPMI NFC NAND Flash Driver
4 * Copyright (C) 2010 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
22 #include <linux/err.h>
23 #include <linux/mtd/compat.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/mtd/nand.h>
26 #include <linux/mtd/nand_ecc.h>
28 #ifdef CONFIG_MTD_PARTITIONS
29 #include <linux/mtd/partitions.h>
32 #include <asm/sizes.h>
34 #include <asm/errno.h>
35 #include <asm/dma-mapping.h>
36 #include <asm/arch/mx28.h>
38 #ifdef CONFIG_JFFS2_NAND
39 #include <jffs2/jffs2.h>
42 #include <asm/arch/regs-clkctrl.h>
43 #include <asm/arch/mxs_gpmi-regs.h>
44 #include <asm/arch/mxs_gpmi-bch-regs.h>
48 #define dbg_lvl(l) (debug > (l))
49 #define DBG(l, fmt...) do { if (dbg_lvl(l)) printk(KERN_DEBUG fmt); } while (0)
52 #define DBG(l, fmt...) do { } while (0)
55 #define dma_timeout 1000
58 #define MAX_CHIP_COUNT CONFIG_SYS_MAX_NAND_DEVICE
59 #define COMMAND_BUFFER_SIZE 10
60 #define MAX_PIO_WORDS 16
62 /* dmaengine interface */
70 /* MXS APBH DMA controller interface */
71 #define HW_APBHX_CTRL0 0x000
72 #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
73 #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
74 #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
75 #define BP_APBH_CTRL0_RESET_CHANNEL 16
76 #define HW_APBHX_CTRL1 0x010
77 #define HW_APBHX_CTRL2 0x020
78 #define HW_APBHX_CHANNEL_CTRL 0x030
79 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
80 #define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
81 #define HW_APBX_VERSION 0x800
82 #define BP_APBHX_VERSION_MAJOR 24
83 #define HW_APBHX_CHn_NXTCMDAR(n) (0x110 + (n) * 0x70)
84 #define HW_APBHX_CHn_SEMA(n) (0x140 + (n) * 0x70)
87 * ccw bits definitions
92 * NAND_LOCK: 4 (1) - not implemented
93 * NAND_WAIT4READY: 5 (1) - not implemented
96 * HALT_ON_TERMINATE: 8 (1)
97 * TERMINATE_FLUSH: 9 (1)
98 * RESERVED: 10..11 (2)
101 #define BP_CCW_COMMAND 0
102 #define BM_CCW_COMMAND (3 << 0)
103 #define CCW_CHAIN (1 << 2)
104 #define CCW_IRQ (1 << 3)
105 #define CCW_DEC_SEM (1 << 6)
106 #define CCW_WAIT4END (1 << 7)
107 #define CCW_HALT_ON_TERM (1 << 8)
108 #define CCW_TERM_FLUSH (1 << 9)
109 #define BP_CCW_PIO_NUM 12
110 #define BM_CCW_PIO_NUM (0xf << 12)
112 #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
114 #define MXS_DMA_CMD_NO_XFER 0
115 #define MXS_DMA_CMD_WRITE 1
116 #define MXS_DMA_CMD_READ 2
117 #define MXS_DMA_CMD_DMA_SENSE 3
123 #define MAX_XFER_BYTES 0xff00
125 #define MXS_PIO_WORDS 16
126 u32 pio_words[MXS_PIO_WORDS];
129 #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
131 struct mxs_dma_chan {
132 struct mxs_dma_engine *mxs_dma;
134 struct mxs_dma_ccw *ccw;
136 unsigned long ccw_phys;
137 enum dma_status status;
138 #define MXS_DMA_SG_LOOP (1 << 0)
141 #define MXS_DMA_CHANNELS 16
142 #define MXS_DMA_CHANNELS_MASK 0xffff
144 struct mxs_dma_engine {
147 struct mxs_dma_chan mxs_chans[MAX_CHIP_COUNT];
151 struct mxs_dma_engine mxs_dma;
152 void __iomem *gpmi_regs;
153 void __iomem *bch_regs;
154 void __iomem *dma_regs;
156 unsigned int chip_count;
158 struct mtd_partition *parts;
159 unsigned int nr_parts;
160 struct mtd_info *mtd;
161 struct nand_chip *chip;
162 struct nand_ecclayout ecc_layout;
171 u32 pio_data[MAX_PIO_WORDS];
172 u8 cmd_buf[COMMAND_BUFFER_SIZE];
174 unsigned block0_ecc_strength:5,
175 blockn_ecc_strength:5,
177 block_mark_bit_offset:3,
178 block_mark_byte_offset:14;
181 static uint8_t scan_ff_pattern[] = { 0xff };
182 static struct nand_bbt_descr gpmi_bbt_descr = {
186 .pattern = scan_ff_pattern
189 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
190 static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
192 static struct nand_bbt_descr mxs_gpmi_bbt_main_descr = {
193 .options = NAND_BBT_LASTBLOCK | NAND_BBT_2BIT |
194 NAND_BBT_VERSION | NAND_BBT_PERCHIP |
200 .pattern = bbt_pattern,
203 static struct nand_bbt_descr mxs_gpmi_bbt_mirror_descr = {
204 .options = NAND_BBT_LASTBLOCK | NAND_BBT_2BIT |
205 NAND_BBT_VERSION | NAND_BBT_PERCHIP |
211 .pattern = mirror_pattern,
214 /* MXS DMA implementation */
216 static inline u32 __mxs_dma_readl(struct mxs_dma_engine *mxs_dma,
218 const char *name, const char *fn, int ln)
221 void __iomem *addr = mxs_dma->base + reg;
224 DBG(3, "%s@%d: Read %08x from %s[%08x]\n", fn, ln, val, name,
225 APBHDMA_BASE_ADDR + reg);
228 #define mxs_dma_readl(t, r) __mxs_dma_readl(t, r, #r, __func__, __LINE__)
230 static inline void __mxs_dma_writel(u32 val,
231 struct mxs_dma_engine *mxs_dma, unsigned int reg,
232 const char *name, const char *fn, int ln)
234 void __iomem *addr = mxs_dma->base + reg;
236 DBG(3, "%s@%d: Writing %08x to %s[%08x]\n", fn, ln, val, name,
237 APBHDMA_BASE_ADDR + reg);
240 #define mxs_dma_writel(v, t, r) __mxs_dma_writel(v, t, r, #r, __func__, __LINE__)
242 static inline u32 mxs_dma_readl(struct mxs_dma_engine *mxs_dma,
245 BUG_ON(mxs_dma->base == NULL);
246 return readl(mxs_dma->base + reg);
249 static inline void mxs_dma_writel(u32 val, struct mxs_dma_engine *mxs_dma,
252 BUG_ON(mxs_dma->base == NULL);
253 writel(val, mxs_dma->base + reg);
/*
 * Gate/ungate the APBH clock for one DMA channel.
 * enable != 0 writes the CLR address of CTRL0 (ungates the clock);
 * enable == 0 writes the SET address (gates it).
 * NOTE(review): the written mask is (1 << chan_id) without the
 * BP_APBH_CTRL0_CLKGATE_CHANNEL shift defined above — verify against the
 * i.MX28 APBH register layout that this hits the CLKGATE bits.
 */
257 static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
259 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
260 int chan_id = mxs_chan->chan_id;
261 int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
263 /* enable apbh channel clock */
264 mxs_dma_writel(1 << chan_id,
265 mxs_dma, HW_APBHX_CTRL0 + set_clr);
/*
 * Soft-reset one APBH DMA channel by setting its bit in the
 * CHANNEL_CTRL RESET_CHANNEL field (via the SET address).
 */
268 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
270 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
271 int chan_id = mxs_chan->chan_id;
273 mxs_dma_writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
274 mxs_dma, HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
/*
 * Start a prepared DMA chain on one channel: ungate the channel clock,
 * point NXTCMDAR at the first command descriptor (ccw_phys), then
 * increment the channel semaphore to kick off execution.
 */
277 static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
279 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
280 int chan_id = mxs_chan->chan_id;
282 /* clkgate needs to be enabled before writing other registers */
283 mxs_dma_clkgate(mxs_chan, 1);
285 /* set cmd_addr up */
286 mxs_dma_writel(mxs_chan->ccw_phys,
287 mxs_dma, HW_APBHX_CHn_NXTCMDAR(chan_id));
289 /* write 1 to SEMA to kick off the channel */
290 mxs_dma_writel(1, mxs_dma, HW_APBHX_CHn_SEMA(chan_id));
/*
 * Stop using a channel: gate its clock and mark its status as
 * DMA_SUCCESS so a later prepare/submit can proceed.
 */
293 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
295 /* disable apbh channel clock */
296 mxs_dma_clkgate(mxs_chan, 0);
298 mxs_chan->status = DMA_SUCCESS;
302 static inline u32 __mxs_gpmi_readl(struct mxs_gpmi *gpmi,
304 const char *name, const char *fn, int ln)
307 void __iomem *addr = gpmi->gpmi_regs + reg;
310 DBG(3, "%s@%d: Read %08x from %s[%08x]\n", fn, ln, val, name,
311 GPMI_BASE_ADDR + reg);
314 #define mxs_gpmi_readl(t, r) __mxs_gpmi_readl(t, r, #r, __func__, __LINE__)
316 static inline void __mxs_gpmi_writel(u32 val,
317 struct mxs_gpmi *gpmi, unsigned int reg,
318 const char *name, const char *fn, int ln)
320 void __iomem *addr = gpmi->gpmi_regs + reg;
322 DBG(3, "%s@%d: Writing %08x to %s[%08x]\n", fn, ln, val, name,
323 GPMI_BASE_ADDR + reg);
326 #define mxs_gpmi_writel(v, t, r) __mxs_gpmi_writel(v, t, r, #r, __func__, __LINE__)
328 static inline u32 __mxs_bch_readl(struct mxs_gpmi *gpmi,
329 unsigned int reg, const char *name,
330 const char *fn, int ln)
333 void __iomem *addr = gpmi->bch_regs + reg;
336 DBG(3, "%s@%d: Read %08x from %s[%08x]\n", fn, ln, val, name,
337 BCH_BASE_ADDR + reg);
340 #define mxs_bch_readl(t, r) __mxs_bch_readl(t, r, #r, __func__, __LINE__)
342 static inline void __mxs_bch_writel(u32 val,
343 struct mxs_gpmi *gpmi, unsigned int reg,
344 const char *name, const char *fn, int ln)
346 void __iomem *addr = gpmi->bch_regs + reg;
348 DBG(3, "%s@%d: Writing %08x to %s[%08x]\n", fn, ln, val, name,
349 BCH_BASE_ADDR + reg);
352 #define mxs_bch_writel(v, t, r) __mxs_bch_writel(v, t, r, #r, __func__, __LINE__)
354 static inline u32 mxs_gpmi_readl(struct mxs_gpmi *gpmi, unsigned int reg)
356 return readl(gpmi->gpmi_regs + reg);
359 static inline void mxs_gpmi_writel(u32 val,
360 struct mxs_gpmi *gpmi, unsigned int reg)
362 writel(val, gpmi->gpmi_regs + reg);
365 static inline u32 mxs_bch_readl(struct mxs_gpmi *gpmi, unsigned int reg)
367 return readl(gpmi->bch_regs + reg);
370 static inline void mxs_bch_writel(u32 val,
371 struct mxs_gpmi *gpmi, unsigned int reg)
373 writel(val, gpmi->bch_regs + reg);
/*
 * Look up the per-chip DMA channel descriptor for the given index.
 * BUGs on an out-of-range index (bounded by the mxs_chans array size).
 */
377 static inline struct mxs_dma_chan *mxs_gpmi_dma_chan(struct mxs_gpmi *gpmi,
378 unsigned int dma_channel)
380 BUG_ON(dma_channel >= ARRAY_SIZE(gpmi->mxs_dma.mxs_chans));
381 DBG(3, "%s: DMA chan[%d]=%p[%d]\n", __func__,
382 dma_channel, &gpmi->mxs_dma.mxs_chans[dma_channel],
383 gpmi->mxs_dma.mxs_chans[dma_channel].chan_id);
384 return &gpmi->mxs_dma.mxs_chans[dma_channel];
387 #define DUMP_DMA_CONTEXT
389 static int first = 1;
390 #ifdef DUMP_DMA_CONTEXT
392 #define APBH_DMA_PHYS_ADDR (MXS_IO_BASE_ADDR + 0x004000)
394 static void dump_dma_context(struct mxs_gpmi *gpmi, const char *title)
403 DBG(0, "%s: %s\n", __func__, title);
405 DBG(0, "%s: GPMI:\n", __func__);
407 void __iomem *GPMI = gpmi->gpmi_regs;
412 for (i = 0; i < ARRAY_SIZE(old); i++) {
413 u32 val = readl(gpmi->gpmi_regs + i * 0x10);
415 if (first || val != old[i]) {
417 DBG(0, " [%p] 0x%08x\n",
420 DBG(0, " [%p] 0x%08x -> 0x%08x\n",
429 DBG(0, "%s: BCH:\n", __func__);
431 void *BCH = gpmi->bch_regs;
436 for (i = 0; i < ARRAY_SIZE(old); i++) {
437 u32 val = readl(gpmi->bch_regs + i * 0x10);
439 if (first || val != old[i]) {
441 DBG(0, " [%p] 0x%08x\n",
444 DBG(0, " [%p] 0x%08x -> 0x%08x\n",
453 DBG(0, "%s: APBH:\n", __func__);
455 void *APBH = gpmi->dma_regs;
457 static u32 chan[16][7];
461 for (i = 0; i < ARRAY_SIZE(old); i++) {
462 u32 val = readl(gpmi->dma_regs + i * 0x10);
464 if (first || val != old[i]) {
466 DBG(0, " [%p] 0x%08x\n",
469 DBG(0, " [%p] 0x%08x -> 0x%08x\n",
476 for (i = 0; i < ARRAY_SIZE(chan); i++) {
479 printk("CHAN %2d:\n", i);
480 for (j = 0; j < ARRAY_SIZE(chan[i]); j++) {
483 p = q = APBH + 0x100 + i * 0x70 + j * 0x10;
485 val = readl(gpmi->dma_regs + 0x100 + i * 0x70 + j * 0x10);
487 if (first || val != chan[i][j]) {
489 DBG(0, " [%p] 0x%08x\n",
492 DBG(0, " [%p] 0x%08x -> 0x%08x\n",
504 static inline void dump_dma_context(struct mxs_gpmi *gpmi, char *title)
/*
 * One-time GPMI controller init: reset the block if CTRL0 shows it
 * stuck (soft-reset/clock-gate bits, top two bits, still set), force
 * GPMI mode, set ready/busy IRQ polarity, deassert device reset,
 * disable write protection and select BCH ECC mode.
 * NOTE(review): BCH_MODE is set twice (once together with DEV_RESET,
 * once alone) — harmless but redundant as far as this extract shows.
 */
509 static int mxs_gpmi_init_hw(struct mxs_gpmi *gpmi)
511 dump_dma_context(gpmi, "BEFORE INIT");
513 if (mxs_gpmi_readl(gpmi, HW_GPMI_CTRL0) & 0xc0000000) {
514 DBG(0, "%s: Resetting GPMI\n", __func__);
515 mxs_reset_block(gpmi->gpmi_regs);
518 mxs_gpmi_writel(BM_GPMI_CTRL1_GPMI_MODE, gpmi, HW_GPMI_CTRL1_CLR);
519 mxs_gpmi_writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
520 gpmi, HW_GPMI_CTRL1_SET);
522 /* Disable write protection and Select BCH ECC */
523 mxs_gpmi_writel(BM_GPMI_CTRL1_DEV_RESET | BM_GPMI_CTRL1_BCH_MODE,
524 gpmi, HW_GPMI_CTRL1_SET);
526 /* Select BCH ECC. */
527 mxs_gpmi_writel(BM_GPMI_CTRL1_BCH_MODE,
528 gpmi, HW_GPMI_CTRL1_SET);
530 dump_dma_context(gpmi, "AFTER INIT");
/*
 * Initialize one DMA channel: allocate a page of DMA-coherent memory
 * for the command descriptor (ccw) chain, zero it, record the channel
 * id, then reset the channel with its clock temporarily ungated.
 * Returns nonzero on allocation failure (exact value not visible here).
 */
534 static int mxs_dma_chan_init(struct mxs_dma_chan *mxs_chan, int chan_id)
536 mxs_chan->ccw = dma_alloc_coherent(PAGE_SIZE, &mxs_chan->ccw_phys);
537 if (mxs_chan->ccw == NULL)
540 DBG(0, "%s: mxs_chan[%d]=%p ccw=%p\n", __func__,
541 chan_id, mxs_chan, mxs_chan->ccw);
543 memset(mxs_chan->ccw, 0, PAGE_SIZE);
544 mxs_chan->chan_id = chan_id;
546 mxs_dma_clkgate(mxs_chan, 1);
547 mxs_dma_reset_chan(mxs_chan);
548 mxs_dma_clkgate(mxs_chan, 0);
/*
 * Initialize the APBH DMA engine used by the GPMI: ungate the GPMI
 * clock in CLKCTRL, soft-reset the APBH block, enable AHB burst modes,
 * then initialize num_dma_channels channels starting at dma_chan.
 */
552 static int mxs_dma_init(struct mxs_gpmi *gpmi, int dma_chan,
553 int num_dma_channels)
556 struct mxs_dma_engine *mxs_dma = &gpmi->mxs_dma;
559 writel(readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI) & ~BM_CLKCTRL_GPMI_CLKGATE,
560 CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
562 mxs_dma->base = gpmi->dma_regs;
564 ret = mxs_reset_block(mxs_dma->base);
566 printk(KERN_ERR "%s: Failed to reset APBH DMA controller\n", __func__);
570 mxs_dma_writel(BM_APBH_CTRL0_APB_BURST_EN,
571 mxs_dma, HW_APBHX_CTRL0 + MXS_SET_ADDR);
572 mxs_dma_writel(BM_APBH_CTRL0_APB_BURST8_EN,
573 mxs_dma, HW_APBHX_CTRL0 + MXS_SET_ADDR);
575 for (i = 0; i < num_dma_channels; i++) {
576 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
578 mxs_chan->mxs_dma = mxs_dma;
579 ret = mxs_dma_chan_init(mxs_chan, dma_chan + i);
/*
 * Build (or append to) the channel's DMA command-descriptor chain.
 *
 * direction == DMA_NONE means a PIO-word descriptor: `len` words are
 * copied from `buffer` into the ccw's pio_words and no bus transfer is
 * performed (MXS_DMA_CMD_NO_XFER). Otherwise a data descriptor is
 * built for `len` bytes at bus address `buf_dma`; DMA_FROM_DEVICE maps
 * to MXS_DMA_CMD_WRITE (APBH "write" = write to memory) and the other
 * directions to MXS_DMA_CMD_READ.
 *
 * When `append` is nonzero the previous tail descriptor is re-linked:
 * CHAIN is set and IRQ/DEC_SEM/WAIT4END cleared so only the new final
 * descriptor completes the chain.
 *
 * NOTE(review): BUG_ON(mxs_chan->ccw == NULL) dereferences mxs_chan
 * before BUG_ON(mxs_chan == NULL) — the NULL check is useless in this
 * order.
 * On error the chain index is reset and status set to DMA_ERROR.
 */
586 static int mxs_dma_prep_slave(struct mxs_dma_chan *mxs_chan, void *buffer,
587 dma_addr_t buf_dma, int len,
588 enum dma_data_direction direction,
592 int idx = append ? mxs_chan->ccw_idx : 0;
593 struct mxs_dma_ccw *ccw;
595 DBG(1, "%s: mxs_chan=%p status=%d append=%d ccw=%p\n", __func__,
596 mxs_chan, mxs_chan->status, append, mxs_chan->ccw);
598 BUG_ON(mxs_chan->ccw == NULL);
599 BUG_ON(mxs_chan == NULL);
601 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
604 if (mxs_chan->status != DMA_IN_PROGRESS && append) {
609 if (idx >= NUM_CCW) {
610 printk(KERN_ERR "maximum number of segments exceeded: %d > %d\n",
616 mxs_chan->status = DMA_IN_PROGRESS;
620 ccw = &mxs_chan->ccw[idx - 1];
621 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
622 ccw->bits |= CCW_CHAIN;
623 ccw->bits &= ~CCW_IRQ;
624 ccw->bits &= ~CCW_DEC_SEM;
625 ccw->bits &= ~CCW_WAIT4END;
627 DBG(3, "%s: Appending sg to list[%d]@%p: next=0x%08x bits=0x%08x\n",
628 __func__, idx, ccw, ccw->next, ccw->bits);
632 ccw = &mxs_chan->ccw[idx++];
633 if (direction == DMA_NONE) {
637 for (j = 0; j < len; j++)
638 ccw->pio_words[j] = *pio++;
641 DBG(0, "%s: Storing %d PIO words in ccw[%d]@%p:", __func__,
643 for (j = 0; j < len; j++) {
644 printk(" %08x", ccw->pio_words[j]);
649 ccw->bits |= CCW_IRQ;
650 ccw->bits |= CCW_DEC_SEM;
651 ccw->bits |= CCW_WAIT4END;
652 ccw->bits |= CCW_HALT_ON_TERM;
653 ccw->bits |= CCW_TERM_FLUSH;
654 ccw->bits |= BF_CCW(len, PIO_NUM);
655 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
657 if (len > MAX_XFER_BYTES) {
658 printk(KERN_ERR "maximum bytes for sg entry exceeded: %d > %d\n",
659 len, MAX_XFER_BYTES);
664 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
665 ccw->bufaddr = buf_dma;
666 ccw->xfer_bytes = len;
669 ccw->bits |= CCW_CHAIN;
670 ccw->bits |= CCW_HALT_ON_TERM;
671 ccw->bits |= CCW_TERM_FLUSH;
672 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
673 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
676 ccw->bits &= ~CCW_CHAIN;
677 ccw->bits |= CCW_IRQ;
678 ccw->bits |= CCW_DEC_SEM;
679 ccw->bits |= CCW_WAIT4END;
680 DBG(3, "%s: DMA descriptor: ccw=%p next=0x%08x bufadr=%08x xfer_bytes=%08x bits=0x%08x\n",
681 __func__, ccw, ccw->next, ccw->bufaddr,
682 ccw->xfer_bytes, ccw->bits);
685 mxs_chan->ccw_idx = idx;
690 mxs_chan->ccw_idx = 0;
691 mxs_chan->status = DMA_ERROR;
/*
 * Kick off the prepared chain and busy-wait for completion.
 * CTRL1 holds per-channel completion IRQ bits, CTRL2 the error/early-
 * termination bits; both are polled for this channel's bit, then
 * cleared via their CLR addresses. The stat2 massaging folds the
 * upper-half (bits >= MXS_DMA_CHANNELS) error flags into a single
 * error indication per channel. Sets mxs_chan->status to DMA_SUCCESS
 * or DMA_ERROR accordingly.
 */
695 static int mxs_dma_submit(struct mxs_gpmi *gpmi, struct mxs_dma_chan *mxs_chan)
700 int chan_id = mxs_chan->chan_id;
701 u32 chan_mask = 1 << chan_id;
702 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
705 dump_dma_context(gpmi, "BEFORE");
707 mxs_dma_enable_chan(mxs_chan);
709 dump_dma_context(gpmi, "WITHIN");
712 stat1 = mxs_dma_readl(mxs_dma, HW_APBHX_CTRL1);
713 stat2 = mxs_dma_readl(mxs_dma, HW_APBHX_CTRL2);
714 if ((stat1 & chan_mask) || (stat2 & chan_mask)) {
718 DBG(1, "Waiting for DMA channel %d to finish\n",
727 dump_dma_context(gpmi, "AFTER");
729 mxs_dma_writel(chan_mask, mxs_dma, HW_APBHX_CTRL1 + MXS_CLR_ADDR);
730 mxs_dma_writel(chan_mask, mxs_dma, HW_APBHX_CTRL2 + MXS_CLR_ADDR);
731 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) |
732 (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1);
733 if (stat2 & chan_mask) {
734 printk(KERN_ERR "DMA error in channel %d\n", chan_id);
736 } else if (stat1 & chan_mask) {
737 DBG(0, "DMA channel %d finished\n", chan_id);
740 printk(KERN_ERR "DMA channel %d early termination\n", chan_id);
743 mxs_chan->status = ret == 0 ? DMA_SUCCESS : DMA_ERROR;
/* Abort helper: currently just disables the channel (clock gate + status reset). */
747 static void mxs_dma_terminate_all(struct mxs_dma_chan *mxs_chan)
749 mxs_dma_disable_chan(mxs_chan);
/*
 * Busy-poll until (readl(addr) & mask) becomes nonzero or `timeout`
 * expires. Returns 0 on success, -ETIME on timeout. (The loop body
 * that decrements/delays is not visible in this extract.)
 */
752 static int poll_bit(void __iomem *addr, unsigned int mask, long timeout)
754 while (!(readl(addr) & mask)) {
758 return timeout == 0 ? -ETIME : 0;
/*
 * Run the previously prepared DMA chain for the current chip's channel
 * and (apparently when the second argument is nonzero, judging by the
 * callers) wait for BCH completion and acknowledge its IRQ.
 * On error the channel is terminated and the DMA context dumped.
 */
761 static int mxs_gpmi_dma_go(struct mxs_gpmi *gpmi,
765 struct mxs_dma_chan *mxs_chan = mxs_gpmi_dma_chan(gpmi,
768 DBG(1, "> %s\n", __func__);
770 error = mxs_dma_submit(gpmi, mxs_chan);
771 DBG(1, "%s: mxs_dma_submit returned %d\n", __func__,
777 DBG(1, "%s: Waiting for BCH completion\n", __func__);
778 error = poll_bit(gpmi->bch_regs + HW_BCH_CTRL,
779 BM_BCH_CTRL_COMPLETE_IRQ,
781 DBG(1, "%s: poll_bit returned %d\n", __func__,
783 DBG(1, "%s: BCH status %08x\n", __func__,
784 mxs_bch_readl(gpmi, HW_BCH_STATUS0));
785 if (mxs_bch_readl(gpmi, HW_BCH_CTRL) & BM_BCH_CTRL_COMPLETE_IRQ) {
786 DBG(1, "%s: Clearing BCH IRQ\n", __func__);
787 mxs_bch_writel(BM_BCH_CTRL_COMPLETE_IRQ, gpmi, HW_BCH_CTRL_CLR);
794 DBG(1, "< %s: %d\n", __func__, error);
799 struct mxs_dma_chan *mxs_chan = mxs_gpmi_dma_chan(gpmi,
801 dump_dma_context(gpmi, "ERROR");
802 mxs_dma_terminate_all(mxs_chan);
/*
 * Queue one DMA operation on the current chip's channel: first a PIO
 * descriptor carrying `pio_words` words from gpmi->pio_data, then (for
 * a data phase) map `buffer` and append a data descriptor of `length`
 * bytes in direction `dir`. `append` chains onto an existing sequence.
 * For reads the buffer is pre-filled with 0x55 so unwritten bytes are
 * detectable during debugging. On failure the channel is terminated
 * and (for the data phase) the buffer unmapped.
 */
807 int mxs_gpmi_dma_setup(struct mxs_gpmi *gpmi, void *buffer, int length,
808 int pio_words, enum dma_data_direction dir, int append)
812 struct mxs_dma_chan *mxs_chan;
815 mxs_chan = mxs_gpmi_dma_chan(gpmi, gpmi->current_chip);
816 if (mxs_chan == NULL)
819 DBG(1, "%s: buffer=%p len=%u pio=%d append=%d\n", __func__,
820 buffer, length, pio_words, append);
823 ret = mxs_dma_prep_slave(mxs_chan, gpmi->pio_data, ~0,
824 pio_words, DMA_NONE, append);
826 mxs_dma_terminate_all(mxs_chan);
828 "%s: Failed to setup DMA PIO xfer for %d words: %d\n",
829 __func__, pio_words, ret);
839 if (dir == DMA_FROM_DEVICE)
840 memset(buffer, 0x55, length);
842 buf_dma = dma_map_single(buffer, length, dir);
844 DBG(1, "%s: buffer=%p dma_addr=%08x\n", __func__, buffer, buf_dma);
846 ret = mxs_dma_prep_slave(mxs_chan, buffer, buf_dma, length, dir, append);
848 mxs_dma_terminate_all(mxs_chan);
849 DBG(1, "%s: mxs_dma_prep_slave() returned %d\n",
851 dma_unmap_single(buffer, length, dir);
/*
 * Convenience wrapper: set up a PIO + data DMA sequence (no append)
 * and run it to completion with mxs_gpmi_dma_go(gpmi, 0) — i.e.
 * without the BCH completion wait.
 */
856 static int mxs_gpmi_dma_xfer(struct mxs_gpmi *gpmi,
857 void *buffer, int length, int pio_words,
858 enum dma_data_direction dir)
862 ret = mxs_gpmi_dma_setup(gpmi, buffer, length,
866 DBG(1, "%s: mxs_gpmi_dma_setup() returned %d\n",
871 DBG(1, "%s: starting DMA xfer\n", __func__);
872 ret = mxs_gpmi_dma_go(gpmi, 0);
874 DBG(1, "%s: DMA xfer done: %d\n", __func__, ret);
878 /* low level accessor functions */
/*
 * Raw data read from the NAND data register of chip select `cs`:
 * program CTRL0 for an 8-bit READ of `length` bytes and run a
 * 2-PIO-word DMA transfer into `buffer`. The 0x44 pre-fill makes
 * untouched bytes visible when debugging.
 */
879 static int mxs_gpmi_read_data(struct mxs_gpmi *gpmi, int cs,
880 void *buffer, size_t length)
886 DBG(2, "%s: buf=%p len=%d\n", __func__, buffer, length);
888 memset(buffer, 0x44, length);
890 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
891 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
894 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
895 BF_GPMI_CTRL0_CS_V1(cs) |
896 BM_GPMI_CTRL0_WORD_LENGTH |
897 BF_GPMI_CTRL0_ADDRESS(address) |
898 BF_GPMI_CTRL0_XFER_COUNT(length);
899 gpmi->pio_data[1] = 0;
901 ret = mxs_gpmi_dma_xfer(gpmi, buffer, length, 2, DMA_FROM_DEVICE);
905 /* mtd layer interface */
/*
 * nand_chip select_chip callback: just record the active chip select;
 * the CS is encoded into CTRL0 on each subsequent operation.
 */
906 static void mxs_gpmi_select_chip(struct mtd_info *mtd, int cs)
908 struct nand_chip *chip = mtd->priv;
909 struct mxs_gpmi *gpmi = chip->priv;
911 gpmi->current_chip = cs;
/*
 * nand_chip dev_ready callback: sample the per-CS READY_BUSY bit in
 * HW_GPMI_STAT and return 1 when the selected chip is ready.
 */
914 static int mxs_gpmi_dev_ready(struct mtd_info *mtd)
917 struct nand_chip *chip = mtd->priv;
918 struct mxs_gpmi *gpmi = chip->priv;
921 int cs = gpmi->current_chip;
926 DBG(1, "> %s\n", __func__);
928 mask = BF_GPMI_STAT_READY_BUSY(1 << cs);
929 reg = mxs_gpmi_readl(gpmi, HW_GPMI_STAT);
931 ready = !!(reg & mask);
932 DBG(1, "< %s: %d\n", __func__, ready);
/*
 * Swap the factory bad-block mark between its physical location
 * (which, after BCH's bit-level layout, lands mid-byte inside the
 * data area at block_mark_byte/bit_offset) and the first byte of the
 * OOB metadata. Called on both read and write paths so the on-flash
 * page keeps the mark where bad-block scanning expects it.
 */
936 static void mxs_gpmi_swap_bb_mark(struct mxs_gpmi *gpmi,
937 void *payload, void *auxiliary)
939 unsigned char *p = payload + gpmi->block_mark_byte_offset;
940 unsigned char *a = auxiliary;
941 unsigned int bit = gpmi->block_mark_bit_offset;
943 unsigned char from_data;
944 unsigned char from_oob;
947 * Get the byte from the data area that overlays the block mark. Since
948 * the ECC engine applies its own view to the bits in the page, the
949 * physical block mark won't (in general) appear on a byte boundary in
952 from_data = (p[0] >> bit) | (p[1] << (8 - bit));
954 /* Get the byte from the OOB. */
/* NOTE(review): lines writing from_data into the OOB and recomputing
 * the mask for p[1] are missing from this extract; behavior of the
 * second half is inferred, verify against the full source. */
960 mask = (0x1 << bit) - 1;
961 p[0] = (p[0] & mask) | (from_oob << bit);
964 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
/*
 * ecc.read_page callback: ECC-corrected page read.
 * Three chained DMA descriptors: (1) WAIT_FOR_READY on the chip,
 * (2) a READ with BCH decode enabled targeting the bounce buffers
 * (page_buf / oob_buf via 6 PIO words), (3) another WAIT_FOR_READY
 * sized writesize+oobsize. Afterwards the per-chunk BCH status bytes
 * (stored after oobavail in oob_buf) are accumulated into the MTD ECC
 * stats, the bad-block mark is swapped back if enabled, and the page
 * is copied from the bounce buffer into `buf`.
 */
967 static int mxs_gpmi_read_page(struct mtd_info *mtd, struct nand_chip *chip,
971 struct mxs_gpmi *gpmi = chip->priv;
972 int cs = gpmi->current_chip;
980 DBG(3, "%s: read page to buffer %p\n", __func__, buf);
982 buf_dma = dma_map_single(gpmi->page_buf, mtd->writesize,
985 oob_dma = dma_map_single(gpmi->oob_buf, mtd->oobsize,
988 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
989 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
992 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
993 BF_GPMI_CTRL0_CS_V1(cs) |
994 BM_GPMI_CTRL0_WORD_LENGTH |
995 BF_GPMI_CTRL0_ADDRESS(address) |
996 BF_GPMI_CTRL0_XFER_COUNT(0);
997 gpmi->pio_data[1] = 0;
999 ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 2, DMA_NONE, 0);
1004 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
1005 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1006 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__DECODE;
1007 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
1008 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
1010 gpmi->pio_data[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1011 BM_GPMI_CTRL0_WORD_LENGTH |
1012 BF_GPMI_CTRL0_CS_V1(cs) |
1013 BF_GPMI_CTRL0_ADDRESS(address) |
1014 BF_GPMI_CTRL0_XFER_COUNT(mtd->writesize + mtd->oobsize);
1016 gpmi->pio_data[1] = 0;
1018 gpmi->pio_data[2] = BM_GPMI_ECCCTRL_ENABLE_ECC |
1019 BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) |
1020 BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
1022 gpmi->pio_data[3] = mtd->writesize + mtd->oobsize;
1023 gpmi->pio_data[4] = buf_dma;
1024 gpmi->pio_data[5] = oob_dma;
1026 ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 6, DMA_NONE, 1);
1031 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
1032 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1035 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1036 BM_GPMI_CTRL0_WORD_LENGTH |
1037 BF_GPMI_CTRL0_CS_V1(cs) |
1038 BF_GPMI_CTRL0_ADDRESS(address) |
1039 BF_GPMI_CTRL0_XFER_COUNT(mtd->writesize + mtd->oobsize);
1041 gpmi->pio_data[1] = 0;
1043 ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 2, DMA_NONE, 1);
1045 ret = mxs_gpmi_dma_go(gpmi, 1);
1048 dma_unmap_single(gpmi->oob_buf, mtd->oobsize,
1050 dma_unmap_single(gpmi->page_buf, mtd->writesize,
1053 #define STATUS_GOOD 0x00
1054 #define STATUS_ERASED 0xff
1055 #define STATUS_UNCORRECTABLE 0xfe
1056 /* Loop over status bytes, accumulating ECC status. */
1057 struct nand_chip *chip = mtd->priv;
1060 u8 *status = gpmi->oob_buf + mtd->oobavail;
1063 for (i = 0; i < mtd->writesize / chip->ecc.size; i++, status++) {
1064 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1067 if (*status == STATUS_UNCORRECTABLE) {
1071 corrected += *status;
1074 * Propagate ECC status to the owning MTD only when failed or
1075 * corrected times nearly reaches our ECC correction threshold.
/* NOTE(review): the threshold compares against chip->ecc.size (the
 * 512-byte chunk size), not the ECC strength — looks suspicious,
 * confirm against the full source. */
1077 if (failed || corrected >= (chip->ecc.size - 1)) {
1078 DBG(0, "%s: ECC failures: %d\n", __func__, failed);
1079 mtd->ecc_stats.failed += failed;
1080 mtd->ecc_stats.corrected += corrected;
1084 if (gpmi->swap_block_mark)
1085 mxs_gpmi_swap_bb_mark(gpmi, gpmi->page_buf, gpmi->oob_buf);
1087 memcpy(buf, gpmi->page_buf, mtd->writesize);
1090 printk(KERN_ERR "%s: FAILED to read page to buffer %p\n", __func__, buf);
/*
 * ecc.read_oob callback: issue READ0 on the page and read the
 * available OOB bytes into chip->oob_poi (pre-filled with 0xff, or
 * 0xfe when debugging, so unread bytes are identifiable).
 */
1095 static int mxs_gpmi_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1096 int page, int sndcmd)
1098 DBG(3, "%s: reading OOB of page %d\n", __func__, page);
1100 memset(chip->oob_poi, dbg_lvl(0) ? 0xfe : 0xff, mtd->oobsize);
1101 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1102 chip->read_buf(mtd, chip->oob_poi, mtd->oobavail);
/*
 * ecc.write_page callback: copy the caller's page into the bounce
 * buffer, pad the non-available OOB area, optionally swap the
 * bad-block mark, then run a single WRITE descriptor with BCH encode
 * enabled (6 PIO words: CTRL0/COMPARE/ECCCTRL/ECCCOUNT/payload/aux)
 * and wait for BCH completion via mxs_gpmi_dma_go(gpmi, 1).
 */
1106 static void mxs_gpmi_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1110 struct mxs_gpmi *gpmi = chip->priv;
1111 int cs = gpmi->current_chip;
1119 DBG(3, "%s: Writing buffer %p\n", __func__, buf);
1121 memset(gpmi->oob_buf + mtd->oobavail, dbg_lvl(0) ? 0xef : 0xff,
1122 mtd->oobsize - mtd->oobavail);
1125 memcpy(gpmi->page_buf, buf, mtd->writesize);
1127 if (gpmi->swap_block_mark)
1128 mxs_gpmi_swap_bb_mark(gpmi, gpmi->page_buf, gpmi->oob_buf);
1130 buf_dma = dma_map_single(gpmi->page_buf, mtd->writesize,
1133 oob_dma = dma_map_single(gpmi->oob_buf, mtd->oobsize,
1136 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1137 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1138 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__ENCODE;
1139 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
1140 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
1143 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1144 BM_GPMI_CTRL0_WORD_LENGTH |
1145 BF_GPMI_CTRL0_CS_V1(cs) |
1146 BF_GPMI_CTRL0_ADDRESS(address) |
1147 BF_GPMI_CTRL0_XFER_COUNT(0);
1148 gpmi->pio_data[1] = 0;
1150 BM_GPMI_ECCCTRL_ENABLE_ECC |
1151 BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) |
1152 BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
1153 gpmi->pio_data[3] = mtd->writesize + mtd->oobsize;
1154 gpmi->pio_data[4] = buf_dma;
1155 gpmi->pio_data[5] = oob_dma;
1157 ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 6, DMA_NONE, 0);
1159 ret = mxs_gpmi_dma_go(gpmi, 1);
1162 dma_unmap_single(gpmi->oob_buf, mtd->oobsize,
1164 dma_unmap_single(gpmi->page_buf, mtd->writesize,
1167 printk(KERN_ERR "%s: FAILED!\n", __func__);
1171 static int mxs_gpmi_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1178 static void mxs_gpmi_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1181 memcpy(gpmi->page_buf, buf, mtd->writesize);
/*
 * nand_chip read_buf callback: read `len` bytes from the current chip
 * via mxs_gpmi_read_data(). Reads larger than a full page + OOB go
 * through a temporary kzalloc'd bounce buffer (copied back and freed);
 * on allocation failure the destination is filled with 0xee as an
 * error marker instead of crashing.
 */
1186 static void mxs_gpmi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
1188 struct nand_chip *chip = mtd->priv;
1189 struct mxs_gpmi *gpmi = chip->priv;
1193 if (len > mtd->writesize + mtd->oobsize) {
1194 DBG(0, "%s: Allocating temporary buffer\n", __func__);
1195 xfer_buf = kzalloc(len, GFP_KERNEL);
1196 if (xfer_buf == NULL) {
1198 "Failed to allocate %u byte for xfer buffer\n",
1200 memset(buf, 0xee, len);
1206 DBG(3, "%s: reading %u byte to %p(%p)\n", __func__,
1207 len, buf, xfer_buf);
1209 ret = mxs_gpmi_read_data(gpmi, gpmi->current_chip, xfer_buf, len);
1210 if (xfer_buf != buf) {
1212 memcpy(buf, xfer_buf, len);
1216 DBG(1, "< %s %d\n", __func__, ret);
/*
 * nand_chip read_byte callback: perform a one-byte read_buf into the
 * start of the pio_data scratch area and return that byte.
 */
1219 static u_char mxs_gpmi_read_byte(struct mtd_info *mtd)
1221 struct nand_chip *chip = mtd->priv;
1222 struct mxs_gpmi *gpmi = chip->priv;
1223 u_char *buf = (u_char *)gpmi->pio_data;
1225 mxs_gpmi_read_buf(mtd, buf, 1);
/*
 * nand_chip write_buf callback: program CTRL0 for an 8-bit WRITE of
 * `len` bytes to the NAND data register of the current chip, then run
 * a 2-PIO-word DMA transfer from `buf` (const cast away only so the
 * generic DMA helper can take void *; the data is not modified).
 * A fallback path via mxs_gpmi_send_data() is also visible here —
 * which path is compiled in depends on lines missing from this extract.
 */
1229 static void mxs_gpmi_write_buf(struct mtd_info *mtd, const u_char *buf,
1233 struct nand_chip *chip = mtd->priv;
1234 struct mxs_gpmi *gpmi = chip->priv;
1235 void *xfer_buf = (void *)buf; /* cast away the 'const' */
1239 int cs = gpmi->current_chip;
1241 DBG(3, "%s: writing %u byte from %p\n", __func__, len, buf);
1243 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1244 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1247 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1248 BF_GPMI_CTRL0_CS_V1(cs) |
1249 BM_GPMI_CTRL0_WORD_LENGTH |
1250 BF_GPMI_CTRL0_ADDRESS(address) |
1251 BF_GPMI_CTRL0_XFER_COUNT(len);
1252 gpmi->pio_data[1] = 0;
1254 ret = mxs_gpmi_dma_xfer(gpmi, xfer_buf, len, 2, DMA_TO_DEVICE);
1256 ret = mxs_gpmi_send_data(gpmi, gpmi->current_chip, xfer_buf, len);
1259 printk(KERN_ERR "%s: Failed to write %u byte from %p\n", __func__,
/*
 * nand_chip scan_bbt callback: delegate to nand_scan_bbt(), forcing a
 * fresh factory scan with gpmi_bbt_descr when `create_bbt` is set.
 */
1263 static int mxs_gpmi_scan_bbt(struct mtd_info *mtd)
1267 DBG(0, "%s: \n", __func__);
1268 ret = nand_scan_bbt(mtd, create_bbt ? &gpmi_bbt_descr : NULL);
1269 DBG(0, "%s: nand_scan_bbt() returned %d\n", __func__, ret);
/*
 * Send a queued command/address byte sequence to chip select `cs`:
 * a WRITE to the CLE address with ADDRESS_INCREMENT set (so bytes
 * after the first go out as ALE cycles), transferred with 3 PIO words.
 */
1273 static int mxs_gpmi_send_command(struct mxs_gpmi *gpmi, unsigned cs,
1274 void *buffer, unsigned int length)
1280 DBG(1, "%s: Sending NAND command\n", __func__);
1282 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1283 address = BV_GPMI_CTRL0_ADDRESS__NAND_CLE;
1286 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1287 BF_GPMI_CTRL0_CS_V1(cs) |
1288 BM_GPMI_CTRL0_WORD_LENGTH |
1289 BF_GPMI_CTRL0_ADDRESS(address) |
1290 BM_GPMI_CTRL0_ADDRESS_INCREMENT |
1291 BF_GPMI_CTRL0_XFER_COUNT(length);
1293 gpmi->pio_data[1] = 0;
1295 gpmi->pio_data[2] = 0;
1297 error = mxs_gpmi_dma_xfer(gpmi, buffer, length, 3,
1300 printk(KERN_ERR "[%s] DMA error\n", __func__);
/*
 * nand_chip cmd_ctrl callback. MTD delivers each command/address byte
 * with ALE or CLE asserted; these are buffered in cmd_buf. When both
 * latch enables are deasserted the accumulated bytes are flushed to
 * the chip as one DMA operation via mxs_gpmi_send_command().
 */
1305 static void mxs_gpmi_cmdctrl(struct mtd_info *mtd,
1306 int data, unsigned int ctrl)
1309 struct nand_chip *chip = mtd->priv;
1310 struct mxs_gpmi *gpmi = chip->priv;
1313 DBG(2, "%s: data=%04x ctrl=%04x\n", __func__,
1316 * Every operation begins with a command byte and a series of zero or
1317 * more address bytes. These are distinguished by either the Address
1318 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
1319 * asserted. When MTD is ready to execute the command, it will deassert
1320 * both latch enables.
1322 * Rather than run a separate DMA operation for every single byte, we
1323 * queue them up and run a single DMA operation for the entire series
1324 * of command and data bytes.
1326 if ((ctrl & (NAND_ALE | NAND_CLE))) {
1327 if (data != NAND_CMD_NONE) {
1328 DBG(3, "%s: Storing cmd byte %02x\n", __func__, data & 0xff);
1329 gpmi->cmd_buf[gpmi->command_length++] = data;
1334 * If control arrives here, MTD has deasserted both the ALE and CLE,
1335 * which means it's ready to run an operation. Check if we have any
1338 if (gpmi->command_length == 0)
1341 DBG(1, "%s: sending command...\n", __func__);
1342 for (i = 0; i < gpmi->command_length; i++)
1343 DBG(2, " 0x%02x", gpmi->cmd_buf[i]);
1346 ret = mxs_gpmi_send_command(gpmi,
1347 gpmi->current_chip, gpmi->cmd_buf,
1348 gpmi->command_length);
1350 printk(KERN_ERR "[%s] Chip: %u, Error %d\n",
1351 __func__, gpmi->current_chip, ret);
1354 gpmi->command_length = 0;
1355 DBG(1, "%s: ...Finished\n", __func__);
1358 static int mxs_gpmi_set_ecclayout(struct mxs_gpmi *gpmi,
1359 int page_size, int oob_size)
1361 struct nand_chip *chip = gpmi->chip;
1362 struct mtd_info *mtd = gpmi->mtd;
1363 struct nand_ecclayout *layout = &gpmi->ecc_layout;
1364 const int meta_size = 10;
1365 const int block0_size = 512;
1366 const int blockn_size = 512;
1367 const int fl0_nblocks = (mtd->writesize >> 9) - !!block0_size;
1370 chip->ecc.mode = NAND_ECC_HW;
1371 chip->ecc.size = blockn_size;
1372 chip->ecc.layout = layout;
1374 chip->bbt_td = &mxs_gpmi_bbt_main_descr;
1375 chip->bbt_md = &mxs_gpmi_bbt_mirror_descr;
1378 chip->bbt_td->options |= NAND_BBT_WRITE | NAND_BBT_CREATE;
1379 chip->bbt_md->options |= NAND_BBT_WRITE | NAND_BBT_CREATE;
1382 switch (page_size) {
1384 /* default GPMI OOB layout */
1385 layout->eccbytes = 4 * 10 + 9;
1386 gpmi->block0_ecc_strength = 8;
1387 gpmi->blockn_ecc_strength = 8;
1391 if (mtd->oobsize == 128) {
1392 gpmi->block0_ecc_strength = 8;
1393 gpmi->blockn_ecc_strength = 8;
1395 gpmi->block0_ecc_strength = 16;
1396 gpmi->blockn_ecc_strength = 16;
1401 gpmi->block0_ecc_strength = 24;
1402 gpmi->blockn_ecc_strength = 24;
1406 printk(KERN_ERR "unsupported page size: %u\n", page_size);
1411 int chunk0_data_size_in_bits = block0_size * 8;
1412 int chunk0_ecc_size_in_bits = gpmi->block0_ecc_strength * 13;
1413 int chunkn_data_size_in_bits = blockn_size * 8;
1414 int chunkn_ecc_size_in_bits = gpmi->blockn_ecc_strength * 13;
1415 int chunkn_total_size_in_bits = chunkn_data_size_in_bits +
1416 chunkn_ecc_size_in_bits;
1418 /* Compute the bit offset of the block mark within the physical page. */
1419 int block_mark_bit_offset = mtd->writesize * 8;
1421 /* Subtract the metadata bits. */
1422 block_mark_bit_offset -= meta_size * 8;
1424 /* if the first block is metadata only,
1425 * subtract the number of ecc bits of that block
1427 if (block0_size == 0) {
1428 block_mark_bit_offset -= chunk0_ecc_size_in_bits;
1431 * Compute the chunk number (starting at zero) in which the block mark
1434 int block_mark_chunk_number =
1435 block_mark_bit_offset / chunkn_total_size_in_bits;
1438 * Compute the bit offset of the block mark within its chunk, and
1441 int block_mark_chunk_bit_offset = block_mark_bit_offset -
1442 (block_mark_chunk_number * chunkn_total_size_in_bits);
1444 if (block_mark_chunk_bit_offset > chunkn_data_size_in_bits) {
1446 * If control arrives here, the block mark actually appears in
1447			 * the ECC bits of this chunk. This won't work.
1449 printf("Unsupported page geometry (block mark in ECC): %u:%u",
1450 mtd->writesize, mtd->oobsize);
1455 * Now that we know the chunk number in which the block mark appears,
1456 * we can subtract all the ECC bits that appear before it.
1458 block_mark_bit_offset -= block_mark_chunk_number *
1459 chunkn_ecc_size_in_bits;
1462 * We now know the absolute bit offset of the block mark within the
1463 * ECC-based data. We can now compute the byte offset and the bit
1464 * offset within the byte.
1466 gpmi->block_mark_byte_offset = block_mark_bit_offset / 8;
1467 gpmi->block_mark_bit_offset = block_mark_bit_offset % 8;
1469 DBG(0, "NAND geometry:\n");
1470 DBG(0, "page size : %5u byte\n", mtd->writesize);
1471 DBG(0, "oob size : %5u byte\n", mtd->oobsize);
1472 DBG(0, "erase size : %5u byte\n", mtd->erasesize);
1473 DBG(0, "metadata : %5u byte\n", meta_size);
1475 DBG(0, "chunk0 level: %5u\n", gpmi->block0_ecc_strength);
1476 DBG(0, "chunk0 data : %5u bit (%5u byte)\n",
1477 chunk0_data_size_in_bits,
1478 DIV_ROUND_UP(chunk0_data_size_in_bits, 8));
1479 DBG(0, "chunk0 ECC : %5u bit (%5u byte)\n",
1480 chunk0_ecc_size_in_bits,
1481 DIV_ROUND_UP(chunk0_ecc_size_in_bits, 8));
1483 DBG(0, "chunkn level: %5u\n", gpmi->blockn_ecc_strength);
1484 DBG(0, "chunkn data : %5u bit (%5u byte)\n",
1485 chunkn_data_size_in_bits,
1486 DIV_ROUND_UP(chunkn_data_size_in_bits, 8));
1487 DBG(0, "chunkn ECC : %5u bit (%5u byte)\n",
1488 chunkn_ecc_size_in_bits,
1489 DIV_ROUND_UP(chunkn_ecc_size_in_bits, 8));
1490 DBG(0, "BB chunk : %5d\n", block_mark_chunk_number);
1491 DBG(0, "BB byte offs: %5u\n", gpmi->block_mark_byte_offset);
1492 DBG(0, "BB bit offs : %5u\n", gpmi->block_mark_bit_offset);
1495 for (i = 0; i < layout->eccbytes; i++) {
1496 layout->eccpos[i] = mtd->oobsize - i - 1;
1498 layout->oobfree[0].length = meta_size;
1500 chip->ecc.bytes = layout->eccbytes;
1502 DBG(0, "%s: Resetting BCH\n", __func__);
1503 mxs_reset_block(gpmi->bch_regs);
1506 BF_BCH_FLASH0LAYOUT0_NBLOCKS(fl0_nblocks) |
1507 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta_size) |
1508 BF_BCH_FLASH0LAYOUT0_ECC0(gpmi->block0_ecc_strength >> 1) |
1509 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size),
1510 gpmi, HW_BCH_FLASH0LAYOUT0);
1513 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(mtd->writesize + mtd->oobsize) |
1514 BF_BCH_FLASH0LAYOUT1_ECCN(gpmi->blockn_ecc_strength >> 1) |
1515 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size),
1516 gpmi, HW_BCH_FLASH0LAYOUT1);
1518 mxs_bch_writel(0, gpmi, HW_BCH_LAYOUTSELECT);
1520 mxs_bch_writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, gpmi, HW_BCH_CTRL_SET);
1524 int mxs_gpmi_nand_init(struct mtd_info *mtd, struct nand_chip *chip)
1527 struct mxs_gpmi *gpmi;
1529 gpmi = kzalloc(sizeof(struct mxs_gpmi), GFP_KERNEL);
1537 gpmi->chip_count = CONFIG_SYS_MAX_NAND_DEVICE;
1538 gpmi->swap_block_mark = 1;
1540 gpmi->gpmi_regs = __ioremap(GPMI_BASE_ADDR, SZ_4K, 1);
1541 if (gpmi->gpmi_regs == NULL) {
1546 gpmi->bch_regs = __ioremap(BCH_BASE_ADDR, SZ_4K, 1);
1547 if (gpmi->bch_regs == NULL) {
1552 gpmi->dma_regs = __ioremap(APBHDMA_BASE_ADDR, SZ_4K, 1);
1553 if (gpmi->dma_regs == NULL) {
1558 ret = mxs_dma_init(gpmi, CONFIG_SYS_MXS_DMA_CHANNEL,
1559 CONFIG_SYS_MAX_NAND_DEVICE);
1563 ret = mxs_gpmi_init_hw(gpmi);
1569 chip->select_chip = mxs_gpmi_select_chip;
1570 chip->cmd_ctrl = mxs_gpmi_cmdctrl;
1571 chip->dev_ready = mxs_gpmi_dev_ready;
1573 chip->read_byte = mxs_gpmi_read_byte;
1574 chip->read_buf = mxs_gpmi_read_buf;
1575 chip->write_buf = mxs_gpmi_write_buf;
1577 chip->scan_bbt = mxs_gpmi_scan_bbt;
1579 chip->options |= NAND_NO_SUBPAGE_WRITE;
1580 chip->options |= NAND_USE_FLASH_BBT | NAND_USE_FLASH_BBT_NO_OOB;
1582 chip->ecc.read_page = mxs_gpmi_read_page;
1583 chip->ecc.read_oob = mxs_gpmi_read_oob;
1584 chip->ecc.write_page = mxs_gpmi_write_page;
1585 chip->ecc.write_oob = mxs_gpmi_write_oob;
1587 DBG(0, "%s: Scanning for NAND chips\n", __func__);
1588 ret = nand_scan_ident(mtd, gpmi->chip_count);
1590 DBG(0, "%s: Failed to scan for NAND chips\n", __func__);
1593 DBG(0, "%s: pagesize=%d oobsize=%d\n", __func__,
1594 mtd->writesize, mtd->oobsize);
1596 gpmi->page_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
1597 if (gpmi->page_buf == NULL) {
1601 gpmi->oob_buf = gpmi->page_buf + mtd->writesize;
1603 ret = mxs_gpmi_set_ecclayout(gpmi, mtd->writesize, mtd->oobsize);
1605 DBG(0, "%s: Unsupported ECC layout\n", __func__);
1608 DBG(0, "%s: NAND scan succeeded\n", __func__);