1 /*
2  * Freescale GPMI NFC NAND Flash Driver
3  *
4  * Copyright (C) 2010 Freescale Semiconductor, Inc.
5  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  */
18
19 #include <common.h>
20 #include <malloc.h>
21 #include <watchdog.h>
22 #include <linux/err.h>
23 #include <linux/mtd/compat.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/mtd/nand.h>
26 #include <linux/mtd/nand_ecc.h>
27
28 #ifdef CONFIG_MTD_PARTITIONS
29 #include <linux/mtd/partitions.h>
30 #endif
31
32 #include <asm/sizes.h>
33 #include <asm/io.h>
34 #include <asm/errno.h>
35 #include <asm/dma-mapping.h>
36 #include <asm/arch/mx28.h>
37
38 #ifdef CONFIG_JFFS2_NAND
39 #include <jffs2/jffs2.h>
40 #endif
41
42 #include <asm/arch/regs-clkctrl.h>
43 #include <asm/arch/mxs_gpmi-regs.h>
44 #include <asm/arch/mxs_gpmi-bch-regs.h>
45
46 #ifdef _DEBUG
47 static int debug = 1;
48 #define dbg_lvl(l)              (debug > (l))
49 #define DBG(l, fmt...)          do { if (dbg_lvl(l)) printk(KERN_DEBUG fmt); } while (0)
50 #else
51 #define dbg_lvl(n)              0
52 #define DBG(l, fmt...)          do { } while (0)
53 #endif
54
55 #define dma_timeout             1000
56 #define create_bbt              1
57
58 #define MAX_CHIP_COUNT          CONFIG_SYS_MAX_NAND_DEVICE
59 #define COMMAND_BUFFER_SIZE     10
60 #define MAX_PIO_WORDS           16
61
62 /* dmaengine interface */
63 enum dma_status {
64         DMA_SUCCESS,
65         DMA_IN_PROGRESS,
66         DMA_PAUSED,
67         DMA_ERROR,
68 };
69
70 /* MXS APBH DMA controller interface */
71 #define HW_APBHX_CTRL0                          0x000
72 #define BM_APBH_CTRL0_APB_BURST8_EN             (1 << 29)
73 #define BM_APBH_CTRL0_APB_BURST_EN              (1 << 28)
74 #define BP_APBH_CTRL0_CLKGATE_CHANNEL           8
75 #define BP_APBH_CTRL0_RESET_CHANNEL             16
76 #define HW_APBHX_CTRL1                          0x010
77 #define HW_APBHX_CTRL2                          0x020
78 #define HW_APBHX_CHANNEL_CTRL                   0x030
79 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL     16
80 #define HW_APBH_VERSION                         (cpu_is_mx23() ? 0x3f0 : 0x800)
81 #define HW_APBX_VERSION                         0x800
82 #define BP_APBHX_VERSION_MAJOR                  24
83 #define HW_APBHX_CHn_NXTCMDAR(n)                (0x110 + (n) * 0x70)
84 #define HW_APBHX_CHn_SEMA(n)                    (0x140 + (n) * 0x70)
85
86 /*
87  * ccw bits definitions
88  *
89  * COMMAND:             0..1    (2)
90  * CHAIN:               2       (1)
91  * IRQ:                 3       (1)
92  * NAND_LOCK:           4       (1) - not implemented
93  * NAND_WAIT4READY:     5       (1) - not implemented
94  * DEC_SEM:             6       (1)
95  * WAIT4END:            7       (1)
96  * HALT_ON_TERMINATE:   8       (1)
97  * TERMINATE_FLUSH:     9       (1)
98  * RESERVED:            10..11  (2)
99  * PIO_NUM:             12..15  (4)
100  */
101 #define BP_CCW_COMMAND          0
102 #define BM_CCW_COMMAND          (3 << 0)
103 #define CCW_CHAIN               (1 << 2)
104 #define CCW_IRQ                 (1 << 3)
105 #define CCW_DEC_SEM             (1 << 6)
106 #define CCW_WAIT4END            (1 << 7)
107 #define CCW_HALT_ON_TERM        (1 << 8)
108 #define CCW_TERM_FLUSH          (1 << 9)
109 #define BP_CCW_PIO_NUM          12
110 #define BM_CCW_PIO_NUM          (0xf << 12)
111
112 #define BF_CCW(value, field)    (((value) << BP_CCW_##field) & BM_CCW_##field)
113
114 #define MXS_DMA_CMD_NO_XFER     0
115 #define MXS_DMA_CMD_WRITE       1
116 #define MXS_DMA_CMD_READ        2
117 #define MXS_DMA_CMD_DMA_SENSE   3
118
119 struct mxs_dma_ccw {
120         u32             next;
121         u16             bits;
122         u16             xfer_bytes;
123 #define MAX_XFER_BYTES  0xff00
124         u32             bufaddr;
125 #define MXS_PIO_WORDS   16
126         u32             pio_words[MXS_PIO_WORDS];
127 };
128
129 #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
130
131 struct mxs_dma_chan {
132         struct mxs_dma_engine           *mxs_dma;
133         int                             chan_id;
134         struct mxs_dma_ccw              *ccw;
135         int                             ccw_idx;
136         unsigned long                   ccw_phys;
137         enum dma_status                 status;
138 #define MXS_DMA_SG_LOOP                 (1 << 0)
139 };
140
141 #define MXS_DMA_CHANNELS                16
142 #define MXS_DMA_CHANNELS_MASK           0xffff
143
144 struct mxs_dma_engine {
145         int                             dev_id;
146         void __iomem                    *base;
147         struct mxs_dma_chan             mxs_chans[MAX_CHIP_COUNT];
148 };
149
150 struct mxs_gpmi {
151         struct mxs_dma_engine mxs_dma;
152         void __iomem *gpmi_regs;
153         void __iomem *bch_regs;
154         void __iomem *dma_regs;
155
156         unsigned int chip_count;
157
158         struct mtd_partition *parts;
159         unsigned int nr_parts;
160         struct mtd_info *mtd;
161         struct nand_chip *chip;
162         struct nand_ecclayout ecc_layout;
163
164         int current_chip;
165
166         void *page_buf;
167         void *oob_buf;
168         void *data_buf;
169
170         int command_length;
171         u32 pio_data[MAX_PIO_WORDS];
172         u8 cmd_buf[COMMAND_BUFFER_SIZE];
173
174         unsigned block0_ecc_strength:5,
175                 blockn_ecc_strength:5,
176                 swap_block_mark:1,
177                 block_mark_bit_offset:3,
178                 block_mark_byte_offset:14;
179 };
180
181 static uint8_t scan_ff_pattern[] = { 0xff };
182 static struct nand_bbt_descr gpmi_bbt_descr = {
183         .options        = 0,
184         .offs           = 0,
185         .len            = 1,
186         .pattern        = scan_ff_pattern
187 };
188
189 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
190 static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
191
192 static struct nand_bbt_descr mxs_gpmi_bbt_main_descr = {
193         .options = NAND_BBT_LASTBLOCK | NAND_BBT_2BIT |
194                 NAND_BBT_VERSION | NAND_BBT_PERCHIP |
195                 NAND_BBT_NO_OOB,
196         .offs = 0,
197         .len = 4,
198         .veroffs = 4,
199         .maxblocks = 4,
200         .pattern = bbt_pattern,
201 };
202
203 static struct nand_bbt_descr mxs_gpmi_bbt_mirror_descr = {
204         .options = NAND_BBT_LASTBLOCK | NAND_BBT_2BIT |
205                 NAND_BBT_VERSION | NAND_BBT_PERCHIP |
206                 NAND_BBT_NO_OOB,
207         .offs = 0,
208         .len = 4,
209         .veroffs = 4,
210         .maxblocks = 4,
211         .pattern = mirror_pattern,
212 };
213
214 /* MXS DMA implementation */
215 #ifdef _DEBUG
216 static inline u32 __mxs_dma_readl(struct mxs_dma_engine *mxs_dma,
217                                         unsigned int reg,
218                                         const char *name, const char *fn, int ln)
219 {
220         u32 val;
221         void __iomem *addr = mxs_dma->base + reg;
222
223         val = readl(addr);
224         DBG(3, "%s@%d: Read %08x from %s[%08x]\n", fn, ln, val, name,
225                 APBHDMA_BASE_ADDR + reg);
226         return val;
227 }
228 #define mxs_dma_readl(t, r)             __mxs_dma_readl(t, r, #r, __func__, __LINE__)
229
230 static inline void __mxs_dma_writel(u32 val,
231                                 struct mxs_dma_engine *mxs_dma, unsigned int reg,
232                                 const char *name, const char *fn, int ln)
233 {
234         void __iomem *addr = mxs_dma->base + reg;
235
236         DBG(3, "%s@%d: Writing %08x to %s[%08x]\n", fn, ln, val, name,
237                 APBHDMA_BASE_ADDR + reg);
238         writel(val, addr);
239 }
240 #define mxs_dma_writel(v, t, r)         __mxs_dma_writel(v, t, r, #r, __func__, __LINE__)
241 #else
242 static inline u32 mxs_dma_readl(struct mxs_dma_engine *mxs_dma,
243                                 unsigned int reg)
244 {
245         BUG_ON(mxs_dma->base == NULL);
246         return readl(mxs_dma->base + reg);
247 }
248
249 static inline void mxs_dma_writel(u32 val, struct mxs_dma_engine *mxs_dma,
250                                 unsigned int reg)
251 {
252         BUG_ON(mxs_dma->base == NULL);
253         writel(val, mxs_dma->base + reg);
254 }
255 #endif
256
257 static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
258 {
259         struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
260         int chan_id = mxs_chan->chan_id;
261         int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
262
263         /* enable apbh channel clock */
264         mxs_dma_writel(1 << chan_id,
265                 mxs_dma, HW_APBHX_CTRL0 + set_clr);
266 }
267
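/* Soft-reset a single APBH DMA channel via the CHANNEL_CTRL set register. */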
268 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
269 {
270         struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
271         int chan_id = mxs_chan->chan_id;
272
273         mxs_dma_writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
274                 mxs_dma, HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
275 }
276
277 static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
278 {
279         struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
280         int chan_id = mxs_chan->chan_id;
281
282         /* clkgate needs to be enabled before writing other registers */
283         mxs_dma_clkgate(mxs_chan, 1);
284
285         /* set cmd_addr up */
286         mxs_dma_writel(mxs_chan->ccw_phys,
287                 mxs_dma, HW_APBHX_CHn_NXTCMDAR(chan_id));
288
289         /* write 1 to SEMA to kick off the channel */
290         mxs_dma_writel(1, mxs_dma, HW_APBHX_CHn_SEMA(chan_id));
291 }
292
293 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
294 {
295         /* disable apbh channel clock */
296         mxs_dma_clkgate(mxs_chan, 0);
297
298         mxs_chan->status = DMA_SUCCESS;
299 }
300
301 #ifdef _DEBUG
302 static inline u32 __mxs_gpmi_readl(struct mxs_gpmi *gpmi,
303                                         unsigned int reg,
304                                         const char *name, const char *fn, int ln)
305 {
306         u32 val;
307         void __iomem *addr = gpmi->gpmi_regs + reg;
308
309         val = readl(addr);
310         DBG(3, "%s@%d: Read %08x from %s[%08x]\n", fn, ln, val, name,
311                 GPMI_BASE_ADDR + reg);
312         return val;
313 }
314 #define mxs_gpmi_readl(t, r)            __mxs_gpmi_readl(t, r, #r, __func__, __LINE__)
315
316 static inline void __mxs_gpmi_writel(u32 val,
317                                 struct mxs_gpmi *gpmi, unsigned int reg,
318                                 const char *name, const char *fn, int ln)
319 {
320         void __iomem *addr = gpmi->gpmi_regs + reg;
321
322         DBG(3, "%s@%d: Writing %08x to %s[%08x]\n", fn, ln, val, name,
323                 GPMI_BASE_ADDR + reg);
324         writel(val, addr);
325 }
326 #define mxs_gpmi_writel(v, t, r)        __mxs_gpmi_writel(v, t, r, #r, __func__, __LINE__)
327
328 static inline u32 __mxs_bch_readl(struct mxs_gpmi *gpmi,
329                                         unsigned int reg, const char *name,
330                                         const char *fn, int ln)
331 {
332         u32 val;
333         void __iomem *addr = gpmi->bch_regs + reg;
334
335         val = readl(addr);
336         DBG(3, "%s@%d: Read %08x from %s[%08x]\n", fn, ln, val, name,
337                 BCH_BASE_ADDR + reg);
338         return val;
339 }
340 #define mxs_bch_readl(t, r)             __mxs_bch_readl(t, r, #r, __func__, __LINE__)
341
342 static inline void __mxs_bch_writel(u32 val,
343                                 struct mxs_gpmi *gpmi, unsigned int reg,
344                                 const char *name, const char *fn, int ln)
345 {
346         void __iomem *addr = gpmi->bch_regs + reg;
347
348         DBG(3, "%s@%d: Writing %08x to %s[%08x]\n", fn, ln, val, name,
349                 BCH_BASE_ADDR + reg);
350         writel(val, addr);
351 }
352 #define mxs_bch_writel(v, t, r)         __mxs_bch_writel(v, t, r, #r, __func__, __LINE__)
353 #else
354 static inline u32 mxs_gpmi_readl(struct mxs_gpmi *gpmi, unsigned int reg)
355 {
356         return readl(gpmi->gpmi_regs + reg);
357 }
358
359 static inline void mxs_gpmi_writel(u32 val,
360                                 struct mxs_gpmi *gpmi, unsigned int reg)
361 {
362         writel(val, gpmi->gpmi_regs + reg);
363 }
364
365 static inline u32 mxs_bch_readl(struct mxs_gpmi *gpmi, unsigned int reg)
366 {
367         return readl(gpmi->bch_regs + reg);
368 }
369
370 static inline void mxs_bch_writel(u32 val,
371                                 struct mxs_gpmi *gpmi, unsigned int reg)
372 {
373         writel(val, gpmi->bch_regs + reg);
374 }
375 #endif
376
377 static inline struct mxs_dma_chan *mxs_gpmi_dma_chan(struct mxs_gpmi *gpmi,
378                                                 unsigned int dma_channel)
379 {
380         BUG_ON(dma_channel >= ARRAY_SIZE(gpmi->mxs_dma.mxs_chans));
381         DBG(3, "%s: DMA chan[%d]=%p[%d]\n", __func__,
382                 dma_channel, &gpmi->mxs_dma.mxs_chans[dma_channel],
383                 gpmi->mxs_dma.mxs_chans[dma_channel].chan_id);
384         return &gpmi->mxs_dma.mxs_chans[dma_channel];
385 }
386
387 #define DUMP_DMA_CONTEXT
388
389 #ifdef DUMP_DMA_CONTEXT
390 static int first = 1;
391
392 #define APBH_DMA_PHYS_ADDR      (MXS_IO_BASE_ADDR + 0x004000)
393
394 static void dump_dma_context(struct mxs_gpmi *gpmi, const char *title)
395 {
396         void *q;
397         u32 *p;
398         unsigned int i;
399
400         if (!dbg_lvl(3))
401                 return;
402
403         DBG(0, "%s: %s\n", __func__, title);
404
405         DBG(0, "%s: GPMI:\n", __func__);
406         {
407                 void __iomem *GPMI = gpmi->gpmi_regs;
408                 static u32 old[13];
409
410                 p = q = GPMI;
411
412                 for (i = 0; i < ARRAY_SIZE(old); i++) {
413                         u32 val = readl(gpmi->gpmi_regs + i * 0x10);
414
415                         if (first || val != old[i]) {
416                                 if (first)
417                                         DBG(0, "    [%p] 0x%08x\n",
418                                                 p, val);
419                                 else
420                                         DBG(0, "    [%p] 0x%08x -> 0x%08x\n",
421                                                 p, old[i], val);
422                                 old[i] = val;
423                         }
424                         q += 0x10;
425                         p = q;
426                 }
427         }
428
429         DBG(0, "%s: BCH:\n", __func__);
430         {
431                 void *BCH = gpmi->bch_regs;
432                 static u32 old[22];
433
434                 p = q = BCH;
435
436                 for (i = 0; i < ARRAY_SIZE(old); i++) {
437                         u32 val = readl(gpmi->bch_regs + i * 0x10);
438
439                         if (first || val != old[i]) {
440                                 if (first)
441                                         DBG(0, "    [%p] 0x%08x\n",
442                                                 q, val);
443                                 else
444                                         DBG(0, "    [%p] 0x%08x -> 0x%08x\n",
445                                                 q, old[i], val);
446                                 old[i] = val;
447                         }
448                         q += 0x10;
449                         p = q;
450                 }
451         }
452
453         DBG(0, "%s: APBH:\n", __func__);
454         {
455                 void *APBH = gpmi->dma_regs;
456                 static u32 old[7];
457                 static u32 chan[16][7];
458
459                 p = q = APBH;
460
461                 for (i = 0; i < ARRAY_SIZE(old); i++) {
462                         u32 val = readl(gpmi->dma_regs + i * 0x10);
463
464                         if (first || val != old[i]) {
465                                 if (first)
466                                         DBG(0, "    [%p] 0x%08x\n",
467                                                 q, val);
468                                 else
469                                         DBG(0, "    [%p] 0x%08x -> 0x%08x\n",
470                                                 q, old[i], val);
471                                 old[i] = val;
472                         }
473                         q += 0x10;
474                         p = q;
475                 }
476                 for (i = 0; i < ARRAY_SIZE(chan); i++) {
477                         int j;
478
479                         printk("CHAN %2d:\n", i);
480                         for (j = 0; j < ARRAY_SIZE(chan[i]); j++) {
481                                 u32 val;
482
483                                 p = q = APBH + 0x100 + i * 0x70 + j * 0x10;
484
485                                 val = readl(gpmi->dma_regs + 0x100 + i * 0x70 + j * 0x10);
486
487                                 if (first || val != chan[i][j]) {
488                                         if (first)
489                                                 DBG(0, "    [%p] 0x%08x\n",
490                                                         q, val);
491                                         else
492                                                 DBG(0, "    [%p] 0x%08x -> 0x%08x\n",
493                                                         q, chan[i][j], val);
494                                         chan[i][j] = val;
495                                 }
496                                 q += 0x10;
497                                 p = q;
498                         }
499                 }
500         }
501         first = 0;
502 }
503 #else
504 static inline void dump_dma_context(struct mxs_gpmi *gpmi, const char *title)
505 {
506 }
507 #endif
508
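/*
 * Basic GPMI setup: reset the block if it is still soft-reset/clock-gated,
 * set the RDY/BUSY IRQ polarity, release the device write protection and
 * route ECC through the BCH engine.
 */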
509 static int mxs_gpmi_init_hw(struct mxs_gpmi *gpmi)
510 {
511         dump_dma_context(gpmi, "BEFORE INIT");
512
513         if (mxs_gpmi_readl(gpmi, HW_GPMI_CTRL0) & 0xc0000000) {
514                 DBG(0, "%s: Resetting GPMI\n", __func__);
515                 mxs_reset_block(gpmi->gpmi_regs);
516         }
517
518         mxs_gpmi_writel(BM_GPMI_CTRL1_GPMI_MODE, gpmi, HW_GPMI_CTRL1_CLR);
519         mxs_gpmi_writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
520                         gpmi, HW_GPMI_CTRL1_SET);
521
522         /* Disable write protection and select the BCH ECC engine */
523         mxs_gpmi_writel(BM_GPMI_CTRL1_DEV_RESET | BM_GPMI_CTRL1_BCH_MODE,
524                         gpmi, HW_GPMI_CTRL1_SET);
529
530         dump_dma_context(gpmi, "AFTER INIT");
531         return 0;
532 }
533
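/* Allocate and clear the CCW descriptor page for one channel, then reset it. */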
534 static int mxs_dma_chan_init(struct mxs_dma_chan *mxs_chan, int chan_id)
535 {
536         mxs_chan->ccw = dma_alloc_coherent(PAGE_SIZE, &mxs_chan->ccw_phys);
537         if (mxs_chan->ccw == NULL)
538                 return -ENOMEM;
539
540         DBG(0, "%s: mxs_chan[%d]=%p ccw=%p\n", __func__,
541                 chan_id, mxs_chan, mxs_chan->ccw);
542
543         memset(mxs_chan->ccw, 0, PAGE_SIZE);
544         mxs_chan->chan_id = chan_id;
545
546         mxs_dma_clkgate(mxs_chan, 1);
547         mxs_dma_reset_chan(mxs_chan);
548         mxs_dma_clkgate(mxs_chan, 0);
549         return 0;
550 }
551
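/*
 * Ungate the GPMI clock, reset the APBH DMA controller, enable APB burst
 * mode and initialize one DMA channel per NAND chip select.
 */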
552 static int mxs_dma_init(struct mxs_gpmi *gpmi, int dma_chan,
553                         int num_dma_channels)
554 {
555         int ret;
556         struct mxs_dma_engine *mxs_dma = &gpmi->mxs_dma;
557         int i;
558
559         writel(readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI) & ~BM_CLKCTRL_GPMI_CLKGATE,
560                 CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
561
562         mxs_dma->base = gpmi->dma_regs;
563
564         ret = mxs_reset_block(mxs_dma->base);
565         if (ret) {
566                 printk(KERN_ERR "%s: Failed to reset APBH DMA controller\n", __func__);
567                 return ret;
568         }
569
570         mxs_dma_writel(BM_APBH_CTRL0_APB_BURST_EN,
571                 mxs_dma, HW_APBHX_CTRL0 + MXS_SET_ADDR);
572         mxs_dma_writel(BM_APBH_CTRL0_APB_BURST8_EN,
573                 mxs_dma, HW_APBHX_CTRL0 + MXS_SET_ADDR);
574
575         for (i = 0; i < num_dma_channels; i++) {
576                 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
577
578                 mxs_chan->mxs_dma = mxs_dma;
579                 ret = mxs_dma_chan_init(mxs_chan, dma_chan + i);
580                 if (ret)
581                         return ret;
582         }
583         return 0;
584 }
585
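/*
 * Build (or, when 'append' is set, extend) the CCW chain for one transfer.
 * DMA_NONE entries carry only PIO words for the GPMI registers; any other
 * direction describes a data transfer to or from 'buf_dma'.
 */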
586 static int mxs_dma_prep_slave(struct mxs_dma_chan *mxs_chan, void *buffer,
587                                 dma_addr_t buf_dma, int len,
588                                 enum dma_data_direction direction,
589                                 int append)
590 {
591         int ret;
592         int idx = append ? mxs_chan->ccw_idx : 0;
593         struct mxs_dma_ccw *ccw;
594
595         BUG_ON(mxs_chan == NULL);
596         BUG_ON(mxs_chan->ccw == NULL);
597
598         DBG(1, "%s: mxs_chan=%p status=%d append=%d ccw=%p\n", __func__,
599                 mxs_chan, mxs_chan->status, append, mxs_chan->ccw);
600
601         if (mxs_chan->status == DMA_IN_PROGRESS && !append)
602                 return -EBUSY;
603
604         if (mxs_chan->status != DMA_IN_PROGRESS && append) {
605                 ret = -EINVAL;
606                 goto err_out;
607         }
608
609         if (idx >= NUM_CCW) {
610                 printk(KERN_ERR "maximum number of segments exceeded: %d > %d\n",
611                         idx, NUM_CCW);
612                 ret = -EINVAL;
613                 goto err_out;
614         }
615
616         mxs_chan->status = DMA_IN_PROGRESS;
617
618         if (append) {
619                 BUG_ON(idx < 1);
620                 ccw = &mxs_chan->ccw[idx - 1];
621                 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
622                 ccw->bits |= CCW_CHAIN;
623                 ccw->bits &= ~CCW_IRQ;
624                 ccw->bits &= ~CCW_DEC_SEM;
625                 ccw->bits &= ~CCW_WAIT4END;
626
627                 DBG(3, "%s: Appending sg to list[%d]@%p: next=0x%08x bits=0x%08x\n",
628                         __func__, idx, ccw, ccw->next, ccw->bits);
629         } else {
630                 idx = 0;
631         }
632         ccw = &mxs_chan->ccw[idx++];
633         if (direction == DMA_NONE) {
634                 int j;
635                 u32 *pio = buffer;
636
637                 for (j = 0; j < len; j++)
638                         ccw->pio_words[j] = *pio++;
639
640                 if (dbg_lvl(3)) {
641                         DBG(0, "%s: Storing %d PIO words in ccw[%d]@%p:", __func__,
642                                 len, idx - 1, ccw);
643                         for (j = 0; j < len; j++) {
644                                 printk(" %08x", ccw->pio_words[j]);
645                         }
646                         printk("\n");
647                 }
648                 ccw->bits = 0;
649                 ccw->bits |= CCW_IRQ;
650                 ccw->bits |= CCW_DEC_SEM;
651                 ccw->bits |= CCW_WAIT4END;
652                 ccw->bits |= CCW_HALT_ON_TERM;
653                 ccw->bits |= CCW_TERM_FLUSH;
654                 ccw->bits |= BF_CCW(len, PIO_NUM);
655                 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
656         } else {
657                 if (len > MAX_XFER_BYTES) {
658                         printk(KERN_ERR "maximum bytes for sg entry exceeded: %d > %d\n",
659                                 len, MAX_XFER_BYTES);
660                         ret = -EINVAL;
661                         goto err_out;
662                 }
663
664                 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
665                 ccw->bufaddr = buf_dma;
666                 ccw->xfer_bytes = len;
667
668                 ccw->bits = 0;
669                 ccw->bits |= CCW_IRQ;
670                 ccw->bits |= CCW_DEC_SEM;
671                 ccw->bits |= CCW_WAIT4END;
672                 ccw->bits |= CCW_HALT_ON_TERM;
673                 ccw->bits |= CCW_TERM_FLUSH;
674                 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
675                                 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
676                                 COMMAND);
680                 DBG(3, "%s: DMA descriptor: ccw=%p next=0x%08x bufadr=%08x xfer_bytes=%08x bits=0x%08x\n",
681                         __func__, ccw, ccw->next, ccw->bufaddr,
682                         ccw->xfer_bytes, ccw->bits);
683         }
684
685         mxs_chan->ccw_idx = idx;
686
687         return 0;
688
689 err_out:
690         mxs_chan->ccw_idx = 0;
691         mxs_chan->status = DMA_ERROR;
692         return ret;
693 }
694
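/*
 * Kick off the prepared CCW chain and busy-wait for the channel's
 * completion or error bit in HW_APBHX_CTRL1/CTRL2.
 */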
695 static int mxs_dma_submit(struct mxs_gpmi *gpmi, struct mxs_dma_chan *mxs_chan)
696 {
697         int ret;
698         int first = 1;
699         u32 stat1, stat2;
700         int chan_id = mxs_chan->chan_id;
701         u32 chan_mask = 1 << chan_id;
702         struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
703         long timeout = 1000;
704
705         dump_dma_context(gpmi, "BEFORE");
706
707         mxs_dma_enable_chan(mxs_chan);
708
709         dump_dma_context(gpmi, "WITHIN");
710
711         while (1) {
712                 stat1 = mxs_dma_readl(mxs_dma, HW_APBHX_CTRL1);
713                 stat2 = mxs_dma_readl(mxs_dma, HW_APBHX_CTRL2);
714                 if ((stat1 & chan_mask) || (stat2 & chan_mask)) {
715                         break;
716                 }
717                 if (first) {
718                         DBG(1, "Waiting for DMA channel %d to finish\n",
719                                 chan_id);
720                         first = 0;
721                 }
722                 if (timeout-- < 0)
723                         return -ETIME;
724                 udelay(100);
725         }
726
727         dump_dma_context(gpmi, "AFTER");
728
729         mxs_dma_writel(chan_mask, mxs_dma, HW_APBHX_CTRL1 + MXS_CLR_ADDR);
730         mxs_dma_writel(chan_mask, mxs_dma, HW_APBHX_CTRL2 + MXS_CLR_ADDR);
731         stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) |
732                 (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1);
733         if (stat2 & chan_mask) {
734                 printk(KERN_ERR "DMA error in channel %d\n", chan_id);
735                 ret = -EIO;
736         } else if (stat1 & chan_mask) {
737                 DBG(0, "DMA channel %d finished\n", chan_id);
738                 ret = 0;
739         } else {
740                 printk(KERN_ERR "DMA channel %d early termination\n", chan_id);
741                 ret = -EINVAL;
742         }
743         mxs_chan->status = ret == 0 ? DMA_SUCCESS : DMA_ERROR;
744         return ret;
745 }
746
747 static void mxs_dma_terminate_all(struct mxs_dma_chan *mxs_chan)
748 {
749         mxs_dma_disable_chan(mxs_chan);
750 }
751
752 static int poll_bit(void __iomem *addr, unsigned int mask, long timeout)
753 {
754         while (!(readl(addr) & mask)) {
755                 if (timeout-- <= 0)
756                         return -ETIME;
757                 udelay(1000);
758         }
759         return 0;
760 }
760
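/*
 * Run the DMA chain prepared for the current chip and optionally wait for
 * the BCH engine to signal completion.
 */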
761 static int mxs_gpmi_dma_go(struct mxs_gpmi *gpmi,
762                         int wait_for_bch)
763 {
764         int error;
765         struct mxs_dma_chan *mxs_chan = mxs_gpmi_dma_chan(gpmi,
766                                                         gpmi->current_chip);
767
768         DBG(1, "> %s\n", __func__);
769
770         error = mxs_dma_submit(gpmi, mxs_chan);
771         DBG(1, "%s: mxs_dma_submit returned %d\n", __func__,
772                 error);
773         if (error)
774                 goto err;
775
776         if (wait_for_bch) {
777                 DBG(1, "%s: Waiting for BCH completion\n", __func__);
778                 error = poll_bit(gpmi->bch_regs + HW_BCH_CTRL,
779                                 BM_BCH_CTRL_COMPLETE_IRQ,
780                                 dma_timeout);
781                 DBG(1, "%s: poll_bit returned %d\n", __func__,
782                         error);
783                 DBG(1, "%s: BCH status %08x\n", __func__,
784                         mxs_bch_readl(gpmi, HW_BCH_STATUS0));
785                 if (mxs_bch_readl(gpmi, HW_BCH_CTRL) & BM_BCH_CTRL_COMPLETE_IRQ) {
786                         DBG(1, "%s: Clearing BCH IRQ\n", __func__);
787                         mxs_bch_writel(BM_BCH_CTRL_COMPLETE_IRQ, gpmi, HW_BCH_CTRL_CLR);
788                 }
789
790                 if (error)
791                         goto err;
792         }
793 out:
794         DBG(1, "< %s: %d\n", __func__, error);
795         return error;
796
797 err:
798         {
799                 struct mxs_dma_chan *mxs_chan = mxs_gpmi_dma_chan(gpmi,
800                                                                 gpmi->current_chip);
801                 dump_dma_context(gpmi, "ERROR");
802                 mxs_dma_terminate_all(mxs_chan);
803         }
804         goto out;
805 }
806
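/*
 * Prepare a DMA chain consisting of an optional PIO stage (pio_words taken
 * from gpmi->pio_data) followed by an optional data stage for 'buffer'.
 */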
807 int mxs_gpmi_dma_setup(struct mxs_gpmi *gpmi, void *buffer, int length,
808                 int pio_words, enum dma_data_direction dir, int append)
809 {
811         int ret;
812         struct mxs_dma_chan *mxs_chan;
813         dma_addr_t buf_dma;
814
815         mxs_chan = mxs_gpmi_dma_chan(gpmi, gpmi->current_chip);
816         if (mxs_chan == NULL)
817                 return -EINVAL;
818
819         DBG(1, "%s: buffer=%p len=%u pio=%d append=%d\n", __func__,
820                 buffer, length, pio_words, append);
821
822         if (pio_words) {
823                 ret = mxs_dma_prep_slave(mxs_chan, gpmi->pio_data, ~0,
824                                         pio_words, DMA_NONE, append);
825                 if (ret) {
826                         mxs_dma_terminate_all(mxs_chan);
827                         printk(KERN_ERR
828                                 "%s: Failed to setup DMA PIO xfer for %d words: %d\n",
829                                 __func__, pio_words, ret);
830                         return ret;
831                 }
832                 if (buffer == NULL)
833                         return ret;
834
835                 append = 1;
836         }
837
838 #if 0
839         if (dir == DMA_FROM_DEVICE)
840                 memset(buffer, 0x55, length);
841 #endif
842         buf_dma = dma_map_single(buffer, length, dir);
843
844         DBG(1, "%s: buffer=%p dma_addr=%08x\n", __func__, buffer, buf_dma);
845
846         ret = mxs_dma_prep_slave(mxs_chan, buffer, buf_dma, length, dir, append);
847         if (ret) {
848                 mxs_dma_terminate_all(mxs_chan);
849                 DBG(1, "%s: mxs_dma_prep_slave() returned %d\n",
850                         __func__, ret);
851                 dma_unmap_single(buffer, length, dir);
852         }
853         return ret;
854 }
855
856 static int mxs_gpmi_dma_xfer(struct mxs_gpmi *gpmi,
857                         void *buffer, int length, int pio_words,
858                         enum dma_data_direction dir)
859 {
860         int ret;
861
862         ret = mxs_gpmi_dma_setup(gpmi, buffer, length,
863                                 pio_words, dir, 0);
864
865         if (ret) {
866                 DBG(1, "%s: mxs_gpmi_dma_setup() returned %d\n",
867                         __func__, ret);
868                 return ret;
869         }
870
871         DBG(1, "%s: starting DMA xfer\n", __func__);
872         ret = mxs_gpmi_dma_go(gpmi, 0);
873
874         DBG(1, "%s: DMA xfer done: %d\n", __func__, ret);
875         return ret;
876 }
877
878 /* low level accessor functions */
879 static int mxs_gpmi_read_data(struct mxs_gpmi *gpmi, int cs,
880                         void *buffer, size_t length)
881 {
882         int ret;
883         u32 command_mode;
884         u32 address;
885
886         DBG(2, "%s: buf=%p len=%u\n", __func__, buffer, (unsigned int)length);
887
888         memset(buffer, 0x44, length);
889
890         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
891         address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
892
893         gpmi->pio_data[0] =
894                 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
895                 BF_GPMI_CTRL0_CS_V1(cs) |
896                 BM_GPMI_CTRL0_WORD_LENGTH |
897                 BF_GPMI_CTRL0_ADDRESS(address) |
898                 BF_GPMI_CTRL0_XFER_COUNT(length);
899         gpmi->pio_data[1] = 0;
900
901         ret = mxs_gpmi_dma_xfer(gpmi, buffer, length, 2, DMA_FROM_DEVICE);
902         return ret;
903 }
904
905 /* mtd layer interface */
906 static void mxs_gpmi_select_chip(struct mtd_info *mtd, int cs)
907 {
908         struct nand_chip *chip = mtd->priv;
909         struct mxs_gpmi *gpmi = chip->priv;
910
911         gpmi->current_chip = cs;
912 }
913
914 static int mxs_gpmi_dev_ready(struct mtd_info *mtd)
915 {
916         int ready;
917         struct nand_chip *chip = mtd->priv;
918         struct mxs_gpmi *gpmi = chip->priv;
919         u32 mask;
920         u32 reg;
921         int cs = gpmi->current_chip;
922
923         if (cs < 0)
924                 return 0;
925
926         DBG(1, "> %s\n", __func__);
927
928         mask = BF_GPMI_STAT_READY_BUSY(1 << cs);
929         reg = mxs_gpmi_readl(gpmi, HW_GPMI_STAT);
930
931         ready = !!(reg & mask);
932         DBG(1, "< %s: %d\n", __func__, ready);
933         return ready;
934 }
935
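/*
 * Swap the data byte that physically overlays the factory bad block mark
 * with the first OOB byte, compensating for the BCH on-flash page layout.
 */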
936 static void mxs_gpmi_swap_bb_mark(struct mxs_gpmi *gpmi,
937                                 void *payload, void *auxiliary)
938 {
939         unsigned char *p = payload + gpmi->block_mark_byte_offset;
940         unsigned char *a = auxiliary;
941         unsigned int bit = gpmi->block_mark_bit_offset;
942         unsigned char mask;
943         unsigned char from_data;
944         unsigned char from_oob;
945
946         /*
947          * Get the byte from the data area that overlays the block mark. Since
948          * the ECC engine applies its own view to the bits in the page, the
949          * physical block mark won't (in general) appear on a byte boundary in
950          * the data.
951          */
952         from_data = (p[0] >> bit) | (p[1] << (8 - bit));
953
954         /* Get the byte from the OOB. */
955         from_oob = a[0];
956
957         /* Swap them. */
958         a[0] = from_data;
959
960         mask = (0x1 << bit) - 1;
961         p[0] = (p[0] & mask) | (from_oob << bit);
962
963         mask = ~0 << bit;
964         p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
965 }
966
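/*
 * ECC page read: wait for ready, transfer page + OOB through the BCH
 * decoder, wait for ready again, then evaluate the per-chunk status bytes
 * left in the auxiliary (OOB) buffer.
 */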
967 static int mxs_gpmi_read_page(struct mtd_info *mtd, struct nand_chip *chip,
968                         uint8_t *buf)
969 {
970         int ret = -1;
971         struct mxs_gpmi *gpmi = chip->priv;
972         int cs = gpmi->current_chip;
973         u32 command_mode;
974         u32 address;
975         u32 ecc_command;
976         u32 buffer_mask;
977         dma_addr_t buf_dma;
978         dma_addr_t oob_dma;
979
980         DBG(3, "%s: read page to buffer %p\n", __func__, buf);
981
982         buf_dma = dma_map_single(gpmi->page_buf, mtd->writesize,
983                                 DMA_FROM_DEVICE);
984
985         oob_dma = dma_map_single(gpmi->oob_buf, mtd->oobsize,
986                                 DMA_FROM_DEVICE);
987
988         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
989         address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
990
991         gpmi->pio_data[0] =
992                 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
993                 BF_GPMI_CTRL0_CS_V1(cs) |
994                 BM_GPMI_CTRL0_WORD_LENGTH |
995                 BF_GPMI_CTRL0_ADDRESS(address) |
996                 BF_GPMI_CTRL0_XFER_COUNT(0);
997         gpmi->pio_data[1] = 0;
998
999         ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 2, DMA_NONE, 0);
1000         if (ret) {
1001                 goto unmap;
1002         }
1003
1004         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
1005         address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1006         ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__DECODE;
1007         buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
1008                 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
1009
1010         gpmi->pio_data[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1011                 BM_GPMI_CTRL0_WORD_LENGTH |
1012                 BF_GPMI_CTRL0_CS_V1(cs) |
1013                 BF_GPMI_CTRL0_ADDRESS(address) |
1014                 BF_GPMI_CTRL0_XFER_COUNT(mtd->writesize + mtd->oobsize);
1015
1016         gpmi->pio_data[1] = 0;
1017
1018         gpmi->pio_data[2] = BM_GPMI_ECCCTRL_ENABLE_ECC |
1019                 BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) |
1020                 BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
1021
1022         gpmi->pio_data[3] = mtd->writesize + mtd->oobsize;
1023         gpmi->pio_data[4] = buf_dma;
1024         gpmi->pio_data[5] = oob_dma;
1025
1026         ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 6, DMA_NONE, 1);
1027         if (ret) {
1028                 goto unmap;
1029         }
1030
1031         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
1032         address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1033
1034         gpmi->pio_data[0] =
1035                 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1036                 BM_GPMI_CTRL0_WORD_LENGTH |
1037                 BF_GPMI_CTRL0_CS_V1(cs) |
1038                 BF_GPMI_CTRL0_ADDRESS(address) |
1039                 BF_GPMI_CTRL0_XFER_COUNT(mtd->writesize + mtd->oobsize);
1040
1041         gpmi->pio_data[1] = 0;
1042
1043         ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 2, DMA_NONE, 1);
1044         if (ret == 0) {
1045                 ret = mxs_gpmi_dma_go(gpmi, 1);
1046         }
1047 unmap:
1048         dma_unmap_single(gpmi->oob_buf, mtd->oobsize,
1049                         DMA_FROM_DEVICE);
1050         dma_unmap_single(gpmi->page_buf, mtd->writesize,
1051                         DMA_FROM_DEVICE);
1052         {
1053 #define STATUS_GOOD             0x00
1054 #define STATUS_ERASED           0xff
1055 #define STATUS_UNCORRECTABLE    0xfe
1056                 /* Loop over status bytes, accumulating ECC status. */
1057                 struct nand_chip *chip = mtd->priv;
1058                 int failed = 0;
1059                 int corrected = 0;
1060                 u8 *status = gpmi->oob_buf + mtd->oobavail;
1061                 int i;
1062
1063                 for (i = 0; i < mtd->writesize / chip->ecc.size; i++, status++) {
1064                         if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1065                                 continue;
1066
1067                         if (*status == STATUS_UNCORRECTABLE) {
1068                                 failed++;
1069                                 continue;
1070                         }
1071                         corrected += *status;
1072                 }
1073                 /*
1074                  * Propagate the ECC status to the owning MTD only when there were
1075                  * failures or the corrected count approaches the correction threshold.
1076                  */
1077                 if (failed || corrected >= (chip->ecc.size - 1)) {
1078                         DBG(0, "%s: ECC failures: %d\n", __func__, failed);
1079                         mtd->ecc_stats.failed += failed;
1080                         mtd->ecc_stats.corrected += corrected;
1081                 }
1082         }
1083         if (ret == 0) {
1084                 if (gpmi->swap_block_mark)
1085                         mxs_gpmi_swap_bb_mark(gpmi, gpmi->page_buf, gpmi->oob_buf);
1086                 if (buf) {
1087                         memcpy(buf, gpmi->page_buf, mtd->writesize);
1088                 }
1089         } else {
1090                 printk(KERN_ERR "%s: FAILED to read page to buffer %p\n", __func__, buf);
1091         }
1092         return ret;
1093 }
1094
1095 static int mxs_gpmi_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1096                         int page, int sndcmd)
1097 {
1098         DBG(3, "%s: reading OOB of page %d\n", __func__, page);
1099
1100         memset(chip->oob_poi, dbg_lvl(0) ? 0xfe : 0xff, mtd->oobsize);
1101         chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1102         chip->read_buf(mtd, chip->oob_poi, mtd->oobavail);
1103         return 1;
1104 }
1105
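/*
 * ECC page write: the page and OOB buffers are handed to the BCH encoder
 * in a single PIO-only DMA descriptor.
 */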
1106 static void mxs_gpmi_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1107                                 const uint8_t *buf)
1108 {
1109         int ret = -1;
1110         struct mxs_gpmi *gpmi = chip->priv;
1111         int cs = gpmi->current_chip;
1112         u32 command_mode;
1113         u32 address;
1114         u32 ecc_command;
1115         u32 buffer_mask;
1116         dma_addr_t buf_dma;
1117         dma_addr_t oob_dma;
1118
1119         DBG(3, "%s: Writing buffer %p\n", __func__, buf);
1120
1121         memset(gpmi->oob_buf + mtd->oobavail, dbg_lvl(0) ? 0xef : 0xff,
1122                 mtd->oobsize - mtd->oobavail);
1123
1124         if (buf)
1125                 memcpy(gpmi->page_buf, buf, mtd->writesize);
1126
1127         if (gpmi->swap_block_mark)
1128                 mxs_gpmi_swap_bb_mark(gpmi, gpmi->page_buf, gpmi->oob_buf);
1129
1130         buf_dma = dma_map_single(gpmi->page_buf, mtd->writesize,
1131                                 DMA_TO_DEVICE);
1132
1133         oob_dma = dma_map_single(gpmi->oob_buf, mtd->oobsize,
1134                                 DMA_TO_DEVICE);
1135
1136         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1137         address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1138         ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__ENCODE;
1139         buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
1140                                 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
1141
1142         gpmi->pio_data[0] =
1143                 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1144                 BM_GPMI_CTRL0_WORD_LENGTH |
1145                 BF_GPMI_CTRL0_CS_V1(cs) |
1146                 BF_GPMI_CTRL0_ADDRESS(address) |
1147                 BF_GPMI_CTRL0_XFER_COUNT(0);
1148         gpmi->pio_data[1] = 0;
1149         gpmi->pio_data[2] =
1150                 BM_GPMI_ECCCTRL_ENABLE_ECC |
1151                 BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) |
1152                 BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
1153         gpmi->pio_data[3] = mtd->writesize + mtd->oobsize;
1154         gpmi->pio_data[4] = buf_dma;
1155         gpmi->pio_data[5] = oob_dma;
1156
1157         ret = mxs_gpmi_dma_setup(gpmi, NULL, 0, 6, DMA_NONE, 0);
1158         if (ret == 0) {
1159                 ret = mxs_gpmi_dma_go(gpmi, 1);
1160         }
1161
1162         dma_unmap_single(gpmi->oob_buf, mtd->oobsize,
1163                         DMA_TO_DEVICE);
1164         dma_unmap_single(gpmi->page_buf, mtd->writesize,
1165                         DMA_TO_DEVICE);
1166         if (ret) {
1167                 printk(KERN_ERR "%s: FAILED!\n", __func__);
1168         }
1169 }
1170
1171 static int mxs_gpmi_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1172                         int page)
1173 {
1174         return -EINVAL;
1175 }
1176
1177 #if 0
1178 static void mxs_gpmi_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1179                                 const uint8_t *buf)
1180 {
1181         memcpy(gpmi->page_buf, buf, mtd->writesize);
1182
1183 }
1184 #endif
1185
1186 static void mxs_gpmi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
1187 {
1188         struct nand_chip *chip = mtd->priv;
1189         struct mxs_gpmi *gpmi = chip->priv;
1190         void *xfer_buf;
1191         int ret = 0;
1192
1193         if (len > mtd->writesize + mtd->oobsize) {
1194                 DBG(0, "%s: Allocating temporary buffer\n", __func__);
1195                 xfer_buf = kzalloc(len, GFP_KERNEL);
1196                 if (xfer_buf == NULL) {
1197                         printk(KERN_ERR
1198                                 "Failed to allocate %u byte for xfer buffer\n",
1199                                 len);
1200                         memset(buf, 0xee, len);
1201                         return;
1202                 }
1202         } else {
1203                 xfer_buf = buf;
1204         }
1205
1206         DBG(3, "%s: reading %u byte to %p(%p)\n", __func__,
1207                 len, buf, xfer_buf);
1208
1209         ret = mxs_gpmi_read_data(gpmi, gpmi->current_chip, xfer_buf, len);
1210         if (xfer_buf != buf) {
1211                 if (ret == 0) {
1212                         memcpy(buf, xfer_buf, len);
1213                 }
1214                 kfree(xfer_buf);
1215         }
1216         DBG(1, "< %s %d\n", __func__, ret);
1217 }
1218
1219 static u_char mxs_gpmi_read_byte(struct mtd_info *mtd)
1220 {
1221         struct nand_chip *chip = mtd->priv;
1222         struct mxs_gpmi *gpmi = chip->priv;
1223         u_char *buf = (u_char *)gpmi->pio_data;
1224
1225         mxs_gpmi_read_buf(mtd, buf, 1);
1226         return *buf;
1227 }
1228
1229 static void mxs_gpmi_write_buf(struct mtd_info *mtd, const u_char *buf,
1230                         int len)
1231 {
1232         int ret;
1233         struct nand_chip *chip = mtd->priv;
1234         struct mxs_gpmi *gpmi = chip->priv;
1235         void *xfer_buf = (void *)buf; /* cast away the 'const' */
1236 #if 1
1237         u32 command_mode;
1238         u32 address;
1239         int cs = gpmi->current_chip;
1240
1241         DBG(3, "%s: writing %u byte from %p\n", __func__, len, buf);
1242
1243         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1244         address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
1245
1246         gpmi->pio_data[0] =
1247                 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1248                 BF_GPMI_CTRL0_CS_V1(cs) |
1249                 BM_GPMI_CTRL0_WORD_LENGTH |
1250                 BF_GPMI_CTRL0_ADDRESS(address) |
1251                 BF_GPMI_CTRL0_XFER_COUNT(len);
1252         gpmi->pio_data[1] = 0;
1253
1254         ret = mxs_gpmi_dma_xfer(gpmi, xfer_buf, len, 2, DMA_TO_DEVICE);
1255 #else
1256         ret = mxs_gpmi_send_data(gpmi, gpmi->current_chip, xfer_buf, len);
1257 #endif
1258         if (ret)
1259                 printk(KERN_ERR "%s: Failed to write %u byte from %p\n", __func__,
1260                         len, buf);
1261 }
1262
1263 static int mxs_gpmi_scan_bbt(struct mtd_info *mtd)
1264 {
1265         int ret;
1266
1267         DBG(0, "%s: \n", __func__);
1268         ret = nand_scan_bbt(mtd, create_bbt ? &gpmi_bbt_descr : NULL);
1269         DBG(0, "%s: nand_scan_bbt() returned %d\n", __func__, ret);
1270         return ret;
1271 }
1272
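/*
 * Send the queued command/address bytes in one DMA transfer with CLE
 * asserted and address auto-increment enabled.
 */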
1273 static int mxs_gpmi_send_command(struct mxs_gpmi *gpmi, unsigned cs,
1274                         void *buffer, unsigned int length)
1275 {
1276         int error;
1277         u32 command_mode;
1278         u32 address;
1279
1280         DBG(1, "%s: Sending NAND command\n", __func__);
1281
1282         command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
1283         address = BV_GPMI_CTRL0_ADDRESS__NAND_CLE;
1284
1285         gpmi->pio_data[0] =
1286                 BF_GPMI_CTRL0_COMMAND_MODE(command_mode) |
1287                 BF_GPMI_CTRL0_CS_V1(cs) |
1288                 BM_GPMI_CTRL0_WORD_LENGTH |
1289                 BF_GPMI_CTRL0_ADDRESS(address) |
1290                 BM_GPMI_CTRL0_ADDRESS_INCREMENT |
1291                 BF_GPMI_CTRL0_XFER_COUNT(length);
1292
1293         gpmi->pio_data[1] = 0;
1294
1295         gpmi->pio_data[2] = 0;
1296
1297         error = mxs_gpmi_dma_xfer(gpmi, buffer, length, 3,
1298                                 DMA_TO_DEVICE);
1299         if (error)
1300                 printk(KERN_ERR "[%s] DMA error\n", __func__);
1301
1302         return error;
1303 }
1304
1305 static void mxs_gpmi_cmdctrl(struct mtd_info *mtd,
1306                         int data, unsigned int ctrl)
1307 {
1308         int ret;
1309         struct nand_chip *chip = mtd->priv;
1310         struct mxs_gpmi *gpmi = chip->priv;
1311         unsigned int i;
1312
1313         DBG(2, "%s: data=%04x ctrl=%04x\n", __func__,
1314                 data, ctrl);
1315         /*
1316          * Every operation begins with a command byte and a series of zero or
1317          * more address bytes. These are distinguished by either the Address
1318          * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
1319          * asserted. When MTD is ready to execute the command, it will deassert
1320          * both latch enables.
1321          *
1322          * Rather than run a separate DMA operation for every single byte, we
1323          * queue them up and run a single DMA operation for the entire series
1324          * of command and data bytes.
1325          */
1326         if ((ctrl & (NAND_ALE | NAND_CLE))) {
1327                 if (data != NAND_CMD_NONE) {
1328                         DBG(3, "%s: Storing cmd byte %02x\n", __func__, data & 0xff);
1329                         gpmi->cmd_buf[gpmi->command_length++] = data;
1330                 }
1331                 return;
1332         }
1333         /*
1334          * If control arrives here, MTD has deasserted both the ALE and CLE,
1335          * which means it's ready to run an operation. Check if we have any
1336          * bytes to send.
1337          */
1338         if (gpmi->command_length == 0)
1339                 return;
1340
1341         DBG(1, "%s: sending command...\n", __func__);
1342         for (i = 0; i < gpmi->command_length; i++)
1343                 DBG(2, " 0x%02x", gpmi->cmd_buf[i]);
1344         DBG(2, "\n");
1345
1346         ret = mxs_gpmi_send_command(gpmi,
1347                         gpmi->current_chip, gpmi->cmd_buf,
1348                         gpmi->command_length);
1349         if (ret) {
1350                 printk(KERN_ERR "[%s] Chip: %u, Error %d\n",
1351                         __func__, gpmi->current_chip, ret);
1352         }
1353
1354         gpmi->command_length = 0;
1355         DBG(1, "%s: ...Finished\n", __func__);
1356 }
1357
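/*
 * Derive the BCH geometry (chunk sizes, ECC strength, bad block mark
 * position) from the page/OOB size and program the BCH layout registers.
 */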
1358 static int mxs_gpmi_set_ecclayout(struct mxs_gpmi *gpmi,
1359                                 int page_size, int oob_size)
1360 {
1361         struct nand_chip *chip = gpmi->chip;
1362         struct mtd_info *mtd = gpmi->mtd;
1363         struct nand_ecclayout *layout = &gpmi->ecc_layout;
1364         const int meta_size = 10;
1365         const int block0_size = 512;
1366         const int blockn_size = 512;
1367         const int fl0_nblocks = (mtd->writesize >> 9) - !!block0_size;
1368         int i;
1369
1370         chip->ecc.mode = NAND_ECC_HW;
1371         chip->ecc.size = blockn_size;
1372         chip->ecc.layout = layout;
1373
1374         chip->bbt_td = &mxs_gpmi_bbt_main_descr;
1375         chip->bbt_md = &mxs_gpmi_bbt_mirror_descr;
1376
1377         if (create_bbt) {
1378                 chip->bbt_td->options |= NAND_BBT_WRITE | NAND_BBT_CREATE;
1379                 chip->bbt_md->options |= NAND_BBT_WRITE | NAND_BBT_CREATE;
1380         }
1381
1382         switch (page_size) {
1383         case 2048:
1384                 /* default GPMI OOB layout */
1385                 layout->eccbytes = 4 * 10 + 9;
1386                 gpmi->block0_ecc_strength = 8;
1387                 gpmi->blockn_ecc_strength = 8;
1388                 break;
1389
1390         case 4096:
1391                 if (mtd->oobsize == 128) {
1392                         gpmi->block0_ecc_strength = 8;
1393                         gpmi->blockn_ecc_strength = 8;
1394                 } else {
1395                         gpmi->block0_ecc_strength = 16;
1396                         gpmi->blockn_ecc_strength = 16;
1397                 }
1398                 break;
1399
1400         case 8192:
1401                 gpmi->block0_ecc_strength = 24;
1402                 gpmi->blockn_ecc_strength = 24;
1403                 break;
1404
1405         default:
1406                 printk(KERN_ERR "unsupported page size: %u\n", page_size);
1407                 return -EINVAL;
1408         }
1409
1410         {
1411                 int chunk0_data_size_in_bits = block0_size * 8;
1412                 int chunk0_ecc_size_in_bits  = gpmi->block0_ecc_strength * 13;
1413                 int chunkn_data_size_in_bits = blockn_size * 8;
1414                 int chunkn_ecc_size_in_bits  = gpmi->blockn_ecc_strength * 13;
1415                 int chunkn_total_size_in_bits = chunkn_data_size_in_bits +
1416                         chunkn_ecc_size_in_bits;
1417
1418                 /* Compute the bit offset of the block mark within the physical page. */
1419                 int block_mark_bit_offset = mtd->writesize * 8;
1420
1421                 /* Subtract the metadata bits. */
1422                 block_mark_bit_offset -= meta_size * 8;
1423
1424                 /* if the first block is metadata only,
1425                  * subtract the number of ecc bits of that block
1426                  */
1427                 if (block0_size == 0) {
1428                         block_mark_bit_offset -= chunk0_ecc_size_in_bits;
1429                 }
1430                 /*
1431                  * Compute the chunk number (starting at zero) in which the block mark
1432                  * appears.
1433                  */
1434                 int block_mark_chunk_number =
1435                         block_mark_bit_offset / chunkn_total_size_in_bits;
1436
1437                 /*
1438                  * Compute the bit offset of the block mark within its chunk, and
1439                  * validate it.
1440                  */
1441                 int block_mark_chunk_bit_offset = block_mark_bit_offset -
1442                         (block_mark_chunk_number * chunkn_total_size_in_bits);
1443
1444                 if (block_mark_chunk_bit_offset > chunkn_data_size_in_bits) {
1445                         /*
1446                          * If control arrives here, the block mark actually appears in
1447                          * the ECC bits of this chunk. This won't work.
1448                          */
1449                         printf("Unsupported page geometry (block mark in ECC): %u:%u\n",
1450                                 mtd->writesize, mtd->oobsize);
1451                         return -EINVAL;
1452                 }
1453
1454                 /*
1455                  * Now that we know the chunk number in which the block mark appears,
1456                  * we can subtract all the ECC bits that appear before it.
1457                  */
1458                 block_mark_bit_offset -= block_mark_chunk_number *
1459                         chunkn_ecc_size_in_bits;
1460
1461                 /*
1462                  * We now know the absolute bit offset of the block mark within the
1463                  * ECC-based data. We can now compute the byte offset and the bit
1464                  * offset within the byte.
1465                  */
1466                 gpmi->block_mark_byte_offset = block_mark_bit_offset / 8;
1467                 gpmi->block_mark_bit_offset = block_mark_bit_offset % 8;
1468
1469                 DBG(0, "NAND geometry:\n");
1470                 DBG(0, "page size   : %5u byte\n", mtd->writesize);
1471                 DBG(0, "oob size    : %5u byte\n", mtd->oobsize);
1472                 DBG(0, "erase size  : %5u byte\n", mtd->erasesize);
1473                 DBG(0, "metadata    : %5u byte\n", meta_size);
1474                 DBG(0, "ECC:\n");
1475                 DBG(0, "chunk0 level: %5u\n", gpmi->block0_ecc_strength);
1476                 DBG(0, "chunk0 data : %5u bit (%5u byte)\n",
1477                         chunk0_data_size_in_bits,
1478                         DIV_ROUND_UP(chunk0_data_size_in_bits, 8));
1479                 DBG(0, "chunk0 ECC  : %5u bit (%5u byte)\n",
1480                         chunk0_ecc_size_in_bits,
1481                         DIV_ROUND_UP(chunk0_ecc_size_in_bits, 8));
1482
1483                 DBG(0, "chunkn level: %5u\n", gpmi->blockn_ecc_strength);
1484                 DBG(0, "chunkn data : %5u bit (%5u byte)\n",
1485                         chunkn_data_size_in_bits,
1486                         DIV_ROUND_UP(chunkn_data_size_in_bits, 8));
1487                 DBG(0, "chunkn ECC  : %5u bit (%5u byte)\n",
1488                         chunkn_ecc_size_in_bits,
1489                         DIV_ROUND_UP(chunkn_ecc_size_in_bits, 8));
1490                 DBG(0, "BB chunk    : %5d\n", block_mark_chunk_number);
1491                 DBG(0, "BB byte offs: %5u\n", gpmi->block_mark_byte_offset);
1492                 DBG(0, "BB bit offs : %5u\n", gpmi->block_mark_bit_offset);
1493         }
1494
1495         for (i = 0; i < layout->eccbytes; i++) {
1496                 layout->eccpos[i] = mtd->oobsize - i - 1;
1497         }
1498         layout->oobfree[0].length = meta_size;
1499
1500         chip->ecc.bytes = layout->eccbytes;
1501
1502         DBG(0, "%s: Resetting BCH\n", __func__);
1503         mxs_reset_block(gpmi->bch_regs);
1504
1505         mxs_bch_writel(
1506                 BF_BCH_FLASH0LAYOUT0_NBLOCKS(fl0_nblocks) |
1507                 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta_size) |
1508                 BF_BCH_FLASH0LAYOUT0_ECC0(gpmi->block0_ecc_strength >> 1) |
1509                 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size),
1510                 gpmi, HW_BCH_FLASH0LAYOUT0);
1511
1512         mxs_bch_writel(
1513                 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(mtd->writesize + mtd->oobsize) |
1514                 BF_BCH_FLASH0LAYOUT1_ECCN(gpmi->blockn_ecc_strength >> 1) |
1515                 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size),
1516                 gpmi, HW_BCH_FLASH0LAYOUT1);
1517
1518         mxs_bch_writel(0, gpmi, HW_BCH_LAYOUTSELECT);
1519
1520         mxs_bch_writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, gpmi, HW_BCH_CTRL_SET);
1521         return 0;
1522 }
1523
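/*
 * Driver entry point: map the GPMI, BCH and APBH register ranges, set up
 * the DMA channels, hook up the nand_chip callbacks, scan for chips and
 * configure the ECC layout for the detected page geometry.
 */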
1524 int mxs_gpmi_nand_init(struct mtd_info *mtd, struct nand_chip *chip)
1525 {
1526         int ret;
1527         struct mxs_gpmi *gpmi;
1528
1529         gpmi = kzalloc(sizeof(struct mxs_gpmi), GFP_KERNEL);
1530         if (gpmi == NULL) {
1531                 ret = -ENOMEM;
1532                 return ret;
1533         }
1534         gpmi->mtd = mtd;
1535         gpmi->chip = chip;
1536
1537         gpmi->chip_count = CONFIG_SYS_MAX_NAND_DEVICE;
1538         gpmi->swap_block_mark = 1;
1539
1540         gpmi->gpmi_regs = __ioremap(GPMI_BASE_ADDR, SZ_4K, 1);
1541         if (gpmi->gpmi_regs == NULL) {
1542                 ret = -ENOMEM;
1543                 goto out;
1544         }
1545
1546         gpmi->bch_regs = __ioremap(BCH_BASE_ADDR, SZ_4K, 1);
1547         if (gpmi->bch_regs == NULL) {
1548                 ret = -ENOMEM;
1549                 goto out;
1550         }
1551
1552         gpmi->dma_regs = __ioremap(APBHDMA_BASE_ADDR, SZ_4K, 1);
1553         if (gpmi->dma_regs == NULL) {
1554                 ret = -ENOMEM;
1555                 goto out;
1556         }
1557
1558         ret = mxs_dma_init(gpmi, CONFIG_SYS_MXS_DMA_CHANNEL,
1559                         CONFIG_SYS_MAX_NAND_DEVICE);
1560         if (ret)
1561                 goto out;
1562
1563         ret = mxs_gpmi_init_hw(gpmi);
1564         if (ret)
1565                 goto out;
1566
1567         chip->priv = gpmi;
1568
1569         chip->select_chip = mxs_gpmi_select_chip;
1570         chip->cmd_ctrl = mxs_gpmi_cmdctrl;
1571         chip->dev_ready = mxs_gpmi_dev_ready;
1572
1573         chip->read_byte = mxs_gpmi_read_byte;
1574         chip->read_buf = mxs_gpmi_read_buf;
1575         chip->write_buf = mxs_gpmi_write_buf;
1576
1577         chip->scan_bbt = mxs_gpmi_scan_bbt;
1578
1579         chip->options |= NAND_NO_SUBPAGE_WRITE;
1580         chip->options |= NAND_USE_FLASH_BBT | NAND_USE_FLASH_BBT_NO_OOB;
1581
1582         chip->ecc.read_page = mxs_gpmi_read_page;
1583         chip->ecc.read_oob = mxs_gpmi_read_oob;
1584         chip->ecc.write_page = mxs_gpmi_write_page;
1585         chip->ecc.write_oob = mxs_gpmi_write_oob;
1586
1587         DBG(0, "%s: Scanning for NAND chips\n", __func__);
1588         ret = nand_scan_ident(mtd, gpmi->chip_count);
1589         if (ret) {
1590                 DBG(0, "%s: Failed to scan for NAND chips\n", __func__);
1591                 goto out;
1592         }
1593         DBG(0, "%s: pagesize=%d oobsize=%d\n", __func__,
1594                 mtd->writesize, mtd->oobsize);
1595
1596         gpmi->page_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
1597         if (gpmi->page_buf == NULL) {
1598                 ret = -ENOMEM;
1599                 goto out;
1600         }
1601         gpmi->oob_buf = gpmi->page_buf + mtd->writesize;
1602
1603         ret = mxs_gpmi_set_ecclayout(gpmi, mtd->writesize, mtd->oobsize);
1604         if (ret) {
1605                 DBG(0, "%s: Unsupported ECC layout\n", __func__);
1606                 kfree(gpmi->page_buf);
1607                 goto out;
1608         }
1608         DBG(0, "%s: NAND scan succeeded\n", __func__);
1609         return 0;
1610 out:
1611         kfree(gpmi);
1612         return ret;
1613 }