2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 #include <linux/interrupt.h>
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/wait.h>
23 #include <linux/mutex.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/module.h>
29 MODULE_LICENSE("GPL");
31 #define DENALI_NAND_NAME "denali-nand"
34 * We define a macro here that combines all interrupts this driver uses into
35 * a single constant value, for convenience.
37 #define DENALI_IRQ_ALL (INTR__DMA_CMD_COMP | \
38 INTR__ECC_TRANSACTION_DONE | \
40 INTR__PROGRAM_FAIL | \
42 INTR__PROGRAM_COMP | \
49 * indicates whether or not the internal value for the flash bank is
52 #define CHIP_SELECT_INVALID -1
55 * The bus interface clock, clk_x, is phase aligned with the core clock. The
56 * clk_x is an integral multiple N of the core clk. The value N is configured
57 * at IP delivery time, and its available value is 4, 5, or 6. We need to align
58 * to the largest value to make it work with any possible configuration.
60 #define DENALI_CLK_X_MULT 6
63 * this macro allows us to convert from an MTD structure to our own
64 * device context (denali) structure.
66 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
68 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
72 * These constants are defined by the driver to enable common driver
73 * configuration options.
75 #define SPARE_ACCESS 0x41
76 #define MAIN_ACCESS 0x42
77 #define MAIN_SPARE_ACCESS 0x43
80 #define DENALI_WRITE 0x100
83 * this is a helper macro that allows us to
84 * format the bank into the proper bits for the controller
86 #define BANK(x) ((x) << 24)
88 /* forward declarations */
89 static void clear_interrupts(struct denali_nand_info *denali);
90 static uint32_t wait_for_irq(struct denali_nand_info *denali,
92 static void denali_irq_enable(struct denali_nand_info *denali,
94 static uint32_t read_interrupt_status(struct denali_nand_info *denali);
97 * Certain operations for the denali NAND controller use an indexed mode to
98 * read/write data. The operation is performed by writing the address value
99 * of the command to the device memory followed by the data. This function
100 * abstracts this common operation.
102 static void index_addr(struct denali_nand_info *denali,
103 uint32_t address, uint32_t data)
105 iowrite32(address, denali->flash_mem);
106 iowrite32(data, denali->flash_mem + 0x10);
109 /* Perform an indexed read of the device */
110 static void index_addr_read_data(struct denali_nand_info *denali,
111 uint32_t address, uint32_t *pdata)
113 iowrite32(address, denali->flash_mem);
114 *pdata = ioread32(denali->flash_mem + 0x10);
118 * We need to buffer some data for some of the NAND core routines.
119 * The operations manage buffering that data.
121 static void reset_buf(struct denali_nand_info *denali)
123 denali->buf.head = denali->buf.tail = 0;
126 static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
128 denali->buf.buf[denali->buf.tail++] = byte;
131 /* reads the status of the device */
132 static void read_status(struct denali_nand_info *denali)
136 /* initialize the data buffer to store status */
139 cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
141 write_byte_to_buf(denali, NAND_STATUS_WP);
143 write_byte_to_buf(denali, 0);
146 /* resets a specific device connected to the core */
147 static void reset_bank(struct denali_nand_info *denali)
150 uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT;
152 clear_interrupts(denali);
154 iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
156 irq_status = wait_for_irq(denali, irq_mask);
158 if (irq_status & INTR__TIME_OUT)
159 dev_err(denali->dev, "reset bank failed.\n");
162 /* Reset the flash controller */
163 static uint16_t denali_nand_reset(struct denali_nand_info *denali)
167 for (i = 0; i < denali->max_banks; i++)
168 iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
169 denali->flash_reg + INTR_STATUS(i));
171 for (i = 0; i < denali->max_banks; i++) {
172 iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
173 while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
174 (INTR__RST_COMP | INTR__TIME_OUT)))
176 if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
179 "NAND Reset operation timed out on bank %d\n", i);
182 for (i = 0; i < denali->max_banks; i++)
183 iowrite32(INTR__RST_COMP | INTR__TIME_OUT,
184 denali->flash_reg + INTR_STATUS(i));
190 * Use the configuration feature register to determine the maximum number of
191 * banks that the hardware supports.
193 static void detect_max_banks(struct denali_nand_info *denali)
195 uint32_t features = ioread32(denali->flash_reg + FEATURES);
197 denali->max_banks = 1 << (features & FEATURES__N_BANKS);
199 /* the encoding changed from rev 5.0 to 5.1 */
200 if (denali->revision < 0x0501)
201 denali->max_banks <<= 1;
204 static void denali_set_intr_modes(struct denali_nand_info *denali,
208 iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
210 iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
214 * validation function to verify that the controlling software is making
217 static inline bool is_flash_bank_valid(int flash_bank)
219 return flash_bank >= 0 && flash_bank < 4;
222 static void denali_irq_init(struct denali_nand_info *denali)
227 /* Disable global interrupts */
228 denali_set_intr_modes(denali, false);
230 int_mask = DENALI_IRQ_ALL;
232 /* Clear all status bits */
233 for (i = 0; i < denali->max_banks; ++i)
234 iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
236 denali_irq_enable(denali, int_mask);
239 static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
241 denali_set_intr_modes(denali, false);
244 static void denali_irq_enable(struct denali_nand_info *denali,
249 for (i = 0; i < denali->max_banks; ++i)
250 iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
254 * This function only returns when an interrupt that this driver cares about
255 * occurs. This is to reduce the overhead of servicing interrupts
257 static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
259 return read_interrupt_status(denali) & DENALI_IRQ_ALL;
262 /* Interrupts are cleared by writing a 1 to the appropriate status bit */
263 static inline void clear_interrupt(struct denali_nand_info *denali,
266 uint32_t intr_status_reg;
268 intr_status_reg = INTR_STATUS(denali->flash_bank);
270 iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
273 static void clear_interrupts(struct denali_nand_info *denali)
277 spin_lock_irq(&denali->irq_lock);
279 status = read_interrupt_status(denali);
280 clear_interrupt(denali, status);
282 denali->irq_status = 0x0;
283 spin_unlock_irq(&denali->irq_lock);
286 static uint32_t read_interrupt_status(struct denali_nand_info *denali)
288 uint32_t intr_status_reg;
290 intr_status_reg = INTR_STATUS(denali->flash_bank);
292 return ioread32(denali->flash_reg + intr_status_reg);
296 * This is the interrupt service routine. It handles all interrupts
297 * sent to this device. Note that on CE4100, this is a shared interrupt.
299 static irqreturn_t denali_isr(int irq, void *dev_id)
301 struct denali_nand_info *denali = dev_id;
303 irqreturn_t result = IRQ_NONE;
305 spin_lock(&denali->irq_lock);
307 /* check to see if a valid NAND chip has been selected. */
308 if (is_flash_bank_valid(denali->flash_bank)) {
310 * check to see if controller generated the interrupt,
311 * since this is a shared interrupt
313 irq_status = denali_irq_detected(denali);
314 if (irq_status != 0) {
315 /* handle interrupt */
316 /* first acknowledge it */
317 clear_interrupt(denali, irq_status);
319 * store the status in the device context for someone
322 denali->irq_status |= irq_status;
323 /* notify anyone who cares that it happened */
324 complete(&denali->complete);
325 /* tell the OS that we've handled this */
326 result = IRQ_HANDLED;
329 spin_unlock(&denali->irq_lock);
333 static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
335 unsigned long comp_res;
336 uint32_t intr_status;
337 unsigned long timeout = msecs_to_jiffies(1000);
341 wait_for_completion_timeout(&denali->complete, timeout);
342 spin_lock_irq(&denali->irq_lock);
343 intr_status = denali->irq_status;
345 if (intr_status & irq_mask) {
346 denali->irq_status &= ~irq_mask;
347 spin_unlock_irq(&denali->irq_lock);
348 /* our interrupt was detected */
353 * these are not the interrupts you are looking for -
356 spin_unlock_irq(&denali->irq_lock);
357 } while (comp_res != 0);
361 pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
362 intr_status, irq_mask);
370 * This helper function setups the registers for ECC and whether or not
371 * the spare area will be transferred.
373 static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
376 int ecc_en_flag, transfer_spare_flag;
378 /* set ECC, transfer spare bits if needed */
379 ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
380 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
382 /* Enable spare area/ECC per user's request. */
383 iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
384 iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
388 * sends a pipeline command operation to the controller. See the Denali NAND
389 * controller's user guide for more information (section 4.2.3.6).
391 static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
392 bool ecc_en, bool transfer_spare,
393 int access_type, int op)
398 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
400 clear_interrupts(denali);
402 addr = BANK(denali->flash_bank) | denali->page;
404 if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
405 cmd = MODE_01 | addr;
406 iowrite32(cmd, denali->flash_mem);
407 } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
408 /* read spare area */
409 cmd = MODE_10 | addr;
410 index_addr(denali, cmd, access_type);
412 cmd = MODE_01 | addr;
413 iowrite32(cmd, denali->flash_mem);
414 } else if (op == DENALI_READ) {
415 /* setup page read request for access type */
416 cmd = MODE_10 | addr;
417 index_addr(denali, cmd, access_type);
419 cmd = MODE_01 | addr;
420 iowrite32(cmd, denali->flash_mem);
425 /* helper function that simply writes a buffer to the flash */
426 static int write_data_to_flash_mem(struct denali_nand_info *denali,
427 const uint8_t *buf, int len)
433 * verify that the len is a multiple of 4.
434 * see comment in read_data_from_flash_mem()
436 BUG_ON((len % 4) != 0);
438 /* write the data to the flash memory */
439 buf32 = (uint32_t *)buf;
440 for (i = 0; i < len / 4; i++)
441 iowrite32(*buf32++, denali->flash_mem + 0x10);
442 return i * 4; /* intent is to return the number of bytes read */
445 /* helper function that simply reads a buffer from the flash */
446 static int read_data_from_flash_mem(struct denali_nand_info *denali,
447 uint8_t *buf, int len)
453 * we assume that len will be a multiple of 4, if not it would be nice
454 * to know about it ASAP rather than have random failures...
455 * This assumption is based on the fact that this function is designed
456 * to be used to read flash pages, which are typically multiples of 4.
458 BUG_ON((len % 4) != 0);
460 /* transfer the data from the flash */
461 buf32 = (uint32_t *)buf;
462 for (i = 0; i < len / 4; i++)
463 *buf32++ = ioread32(denali->flash_mem + 0x10);
464 return i * 4; /* intent is to return the number of bytes read */
467 /* writes OOB data to the device */
468 static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
470 struct denali_nand_info *denali = mtd_to_denali(mtd);
472 uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL;
477 if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
478 DENALI_WRITE) == PASS) {
479 write_data_to_flash_mem(denali, buf, mtd->oobsize);
481 /* wait for operation to complete */
482 irq_status = wait_for_irq(denali, irq_mask);
484 if (irq_status == 0) {
485 dev_err(denali->dev, "OOB write failed\n");
489 dev_err(denali->dev, "unable to send pipeline command\n");
495 /* reads OOB data from the device */
496 static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
498 struct denali_nand_info *denali = mtd_to_denali(mtd);
499 uint32_t irq_mask = INTR__LOAD_COMP;
500 uint32_t irq_status, addr, cmd;
504 if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
505 DENALI_READ) == PASS) {
506 read_data_from_flash_mem(denali, buf, mtd->oobsize);
509 * wait for command to be accepted
510 * can always use status0 bit as the
511 * mask is identical for each bank.
513 irq_status = wait_for_irq(denali, irq_mask);
516 dev_err(denali->dev, "page on OOB timeout %d\n",
520 * We set the device back to MAIN_ACCESS here as I observed
521 * instability with the controller if you do a block erase
522 * and the last transaction was a SPARE_ACCESS. Block erase
523 * is reliable (according to the MTD test infrastructure)
524 * if you are in MAIN_ACCESS.
526 addr = BANK(denali->flash_bank) | denali->page;
527 cmd = MODE_10 | addr;
528 index_addr(denali, cmd, MAIN_ACCESS);
532 static int denali_check_erased_page(struct mtd_info *mtd,
533 struct nand_chip *chip, uint8_t *buf,
534 unsigned long uncor_ecc_flags,
535 unsigned int max_bitflips)
537 uint8_t *ecc_code = chip->buffers->ecccode;
538 int ecc_steps = chip->ecc.steps;
539 int ecc_size = chip->ecc.size;
540 int ecc_bytes = chip->ecc.bytes;
543 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
548 for (i = 0; i < ecc_steps; i++) {
549 if (!(uncor_ecc_flags & BIT(i)))
552 stat = nand_check_erased_ecc_chunk(buf, ecc_size,
557 mtd->ecc_stats.failed++;
559 mtd->ecc_stats.corrected += stat;
560 max_bitflips = max_t(unsigned int, max_bitflips, stat);
564 ecc_code += ecc_bytes;
570 static int denali_hw_ecc_fixup(struct mtd_info *mtd,
571 struct denali_nand_info *denali,
572 unsigned long *uncor_ecc_flags)
574 struct nand_chip *chip = mtd_to_nand(mtd);
575 int bank = denali->flash_bank;
577 unsigned int max_bitflips;
579 ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank));
580 ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
582 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
584 * This flag is set when uncorrectable error occurs at least in
585 * one ECC sector. We can not know "how many sectors", or
586 * "which sector(s)". We need erase-page check for all sectors.
588 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
592 max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
595 * The register holds the maximum of per-sector corrected bitflips.
596 * This is suitable for the return value of the ->read_page() callback.
597 * Unfortunately, we can not know the total number of corrected bits in
598 * the page. Increase the stats by max_bitflips. (compromised solution)
600 mtd->ecc_stats.corrected += max_bitflips;
605 #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
606 #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
607 #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
608 #define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
609 #define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
610 #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
612 static int denali_sw_ecc_fixup(struct mtd_info *mtd,
613 struct denali_nand_info *denali,
614 unsigned long *uncor_ecc_flags, uint8_t *buf)
616 unsigned int ecc_size = denali->nand.ecc.size;
617 unsigned int bitflips = 0;
618 unsigned int max_bitflips = 0;
619 uint32_t err_addr, err_cor_info;
620 unsigned int err_byte, err_sector, err_device;
621 uint8_t err_cor_value;
622 unsigned int prev_sector = 0;
624 /* read the ECC errors. we'll ignore them for now */
625 denali_set_intr_modes(denali, false);
628 err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS);
629 err_sector = ECC_SECTOR(err_addr);
630 err_byte = ECC_BYTE(err_addr);
632 err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO);
633 err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
634 err_device = ECC_ERR_DEVICE(err_cor_info);
636 /* reset the bitflip counter when crossing ECC sector */
637 if (err_sector != prev_sector)
640 if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
642 * Check later if this is a real ECC error, or
645 *uncor_ecc_flags |= BIT(err_sector);
646 } else if (err_byte < ecc_size) {
648 * If err_byte is larger than ecc_size, means error
649 * happened in OOB, so we ignore it. It's no need for
650 * us to correct it err_device is represented the NAND
651 * error bits are happened in if there are more than
652 * one NAND connected.
655 unsigned int flips_in_byte;
657 offset = (err_sector * ecc_size + err_byte) *
658 denali->devnum + err_device;
660 /* correct the ECC error */
661 flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
662 buf[offset] ^= err_cor_value;
663 mtd->ecc_stats.corrected += flips_in_byte;
664 bitflips += flips_in_byte;
666 max_bitflips = max(max_bitflips, bitflips);
669 prev_sector = err_sector;
670 } while (!ECC_LAST_ERR(err_cor_info));
673 * Once handle all ecc errors, controller will trigger a
674 * ECC_TRANSACTION_DONE interrupt, so here just wait for
675 * a while for this interrupt
677 while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE))
679 clear_interrupts(denali);
680 denali_set_intr_modes(denali, true);
685 /* programs the controller to either enable/disable DMA transfers */
686 static void denali_enable_dma(struct denali_nand_info *denali, bool en)
688 iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
689 ioread32(denali->flash_reg + DMA_ENABLE);
692 static void denali_setup_dma64(struct denali_nand_info *denali, int op)
695 const int page_count = 1;
696 uint64_t addr = denali->buf.dma_buf;
698 mode = MODE_10 | BANK(denali->flash_bank) | denali->page;
700 /* DMA is a three step process */
703 * 1. setup transfer type, interrupt when complete,
704 * burst len = 64 bytes, the number of pages
706 index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);
708 /* 2. set memory low address */
709 index_addr(denali, mode, addr);
711 /* 3. set memory high address */
712 index_addr(denali, mode, addr >> 32);
715 static void denali_setup_dma32(struct denali_nand_info *denali, int op)
718 const int page_count = 1;
719 uint32_t addr = denali->buf.dma_buf;
721 mode = MODE_10 | BANK(denali->flash_bank);
723 /* DMA is a four step process */
725 /* 1. setup transfer type and # of pages */
726 index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
728 /* 2. set memory high address bits 23:8 */
729 index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
731 /* 3. set memory low address bits 23:8 */
732 index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
734 /* 4. interrupt when complete, burst len = 64 bytes */
735 index_addr(denali, mode | 0x14000, 0x2400);
738 static void denali_setup_dma(struct denali_nand_info *denali, int op)
740 if (denali->caps & DENALI_CAP_DMA_64BIT)
741 denali_setup_dma64(denali, op);
743 denali_setup_dma32(denali, op);
747 * writes a page. user specifies type, and this function handles the
748 * configuration details.
750 static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
751 const uint8_t *buf, int page, bool raw_xfer)
753 struct denali_nand_info *denali = mtd_to_denali(mtd);
754 dma_addr_t addr = denali->buf.dma_buf;
755 size_t size = mtd->writesize + mtd->oobsize;
757 uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
763 * if it is a raw xfer, we want to disable ecc and send the spare area.
764 * !raw_xfer - enable ecc
765 * raw_xfer - transfer spare
767 setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
769 /* copy buffer into DMA buffer */
770 memcpy(denali->buf.buf, buf, mtd->writesize);
773 /* transfer the data to the spare area */
774 memcpy(denali->buf.buf + mtd->writesize,
779 dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
781 clear_interrupts(denali);
782 denali_enable_dma(denali, true);
784 denali_setup_dma(denali, DENALI_WRITE);
786 /* wait for operation to complete */
787 irq_status = wait_for_irq(denali, irq_mask);
789 if (irq_status == 0) {
790 dev_err(denali->dev, "timeout on write_page (type = %d)\n",
795 denali_enable_dma(denali, false);
796 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
801 /* NAND core entry points */
804 * this is the callback that the NAND core calls to write a page. Since
805 * writing a page with ECC or without is similar, all the work is done
806 * by write_page above.
808 static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
809 const uint8_t *buf, int oob_required, int page)
812 * for regular page writes, we let HW handle all the ECC
813 * data written to the device.
815 return write_page(mtd, chip, buf, page, false);
819 * This is the callback that the NAND core calls to write a page without ECC.
820 * raw access is similar to ECC page writes, so all the work is done in the
821 * write_page() function above.
823 static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
824 const uint8_t *buf, int oob_required,
828 * for raw page writes, we want to disable ECC and simply write
829 * whatever data is in the buffer.
831 return write_page(mtd, chip, buf, page, true);
834 static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
837 return write_oob_data(mtd, chip->oob_poi, page);
840 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
843 read_oob_data(mtd, chip->oob_poi, page);
848 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
849 uint8_t *buf, int oob_required, int page)
851 struct denali_nand_info *denali = mtd_to_denali(mtd);
852 dma_addr_t addr = denali->buf.dma_buf;
853 size_t size = mtd->writesize + mtd->oobsize;
855 uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ?
856 INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR :
857 INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR;
858 unsigned long uncor_ecc_flags = 0;
863 setup_ecc_for_xfer(denali, true, false);
865 denali_enable_dma(denali, true);
866 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
868 clear_interrupts(denali);
869 denali_setup_dma(denali, DENALI_READ);
871 /* wait for operation to complete */
872 irq_status = wait_for_irq(denali, irq_mask);
874 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
876 memcpy(buf, denali->buf.buf, mtd->writesize);
878 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
879 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
880 else if (irq_status & INTR__ECC_ERR)
881 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
882 denali_enable_dma(denali, false);
887 if (uncor_ecc_flags) {
888 read_oob_data(mtd, chip->oob_poi, denali->page);
890 stat = denali_check_erased_page(mtd, chip, buf,
891 uncor_ecc_flags, stat);
897 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
898 uint8_t *buf, int oob_required, int page)
900 struct denali_nand_info *denali = mtd_to_denali(mtd);
901 dma_addr_t addr = denali->buf.dma_buf;
902 size_t size = mtd->writesize + mtd->oobsize;
903 uint32_t irq_mask = INTR__DMA_CMD_COMP;
907 setup_ecc_for_xfer(denali, false, true);
908 denali_enable_dma(denali, true);
910 dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
912 clear_interrupts(denali);
913 denali_setup_dma(denali, DENALI_READ);
915 /* wait for operation to complete */
916 wait_for_irq(denali, irq_mask);
918 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
920 denali_enable_dma(denali, false);
922 memcpy(buf, denali->buf.buf, mtd->writesize);
923 memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
928 static uint8_t denali_read_byte(struct mtd_info *mtd)
930 struct denali_nand_info *denali = mtd_to_denali(mtd);
931 uint8_t result = 0xff;
933 if (denali->buf.head < denali->buf.tail)
934 result = denali->buf.buf[denali->buf.head++];
939 static void denali_select_chip(struct mtd_info *mtd, int chip)
941 struct denali_nand_info *denali = mtd_to_denali(mtd);
943 spin_lock_irq(&denali->irq_lock);
944 denali->flash_bank = chip;
945 spin_unlock_irq(&denali->irq_lock);
948 static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
953 static int denali_erase(struct mtd_info *mtd, int page)
955 struct denali_nand_info *denali = mtd_to_denali(mtd);
957 uint32_t cmd, irq_status;
959 clear_interrupts(denali);
961 /* setup page read request for access type */
962 cmd = MODE_10 | BANK(denali->flash_bank) | page;
963 index_addr(denali, cmd, 0x1);
965 /* wait for erase to complete or failure to occur */
966 irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL);
968 return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
971 static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
974 struct denali_nand_info *denali = mtd_to_denali(mtd);
979 case NAND_CMD_STATUS:
982 case NAND_CMD_READID:
986 * sometimes ManufactureId read from register is not right
987 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
988 * So here we send READID cmd to NAND insteand
990 addr = MODE_11 | BANK(denali->flash_bank);
991 index_addr(denali, addr | 0, 0x90);
992 index_addr(denali, addr | 1, col);
993 for (i = 0; i < 8; i++) {
994 index_addr_read_data(denali, addr | 2, &id);
995 write_byte_to_buf(denali, id);
1001 case NAND_CMD_READOOB:
1002 /* TODO: Read OOB data */
1005 pr_err(": unsupported command received 0x%x\n", cmd);
1010 #define DIV_ROUND_DOWN_ULL(ll, d) \
1011 ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
1013 static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
1014 const struct nand_data_interface *conf)
1016 struct denali_nand_info *denali = mtd_to_denali(mtd);
1017 const struct nand_sdr_timings *timings;
1018 unsigned long t_clk;
1019 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
1020 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
1021 int addr_2_data_mask;
1024 timings = nand_get_sdr_timings(conf);
1025 if (IS_ERR(timings))
1026 return PTR_ERR(timings);
1028 /* clk_x period in picoseconds */
1029 t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
1033 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
1036 /* tREA -> ACC_CLKS */
1037 acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk);
1038 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
1040 tmp = ioread32(denali->flash_reg + ACC_CLKS);
1041 tmp &= ~ACC_CLKS__VALUE;
1043 iowrite32(tmp, denali->flash_reg + ACC_CLKS);
1045 /* tRWH -> RE_2_WE */
1046 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk);
1047 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
1049 tmp = ioread32(denali->flash_reg + RE_2_WE);
1050 tmp &= ~RE_2_WE__VALUE;
1052 iowrite32(tmp, denali->flash_reg + RE_2_WE);
1054 /* tRHZ -> RE_2_RE */
1055 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk);
1056 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
1058 tmp = ioread32(denali->flash_reg + RE_2_RE);
1059 tmp &= ~RE_2_RE__VALUE;
1061 iowrite32(tmp, denali->flash_reg + RE_2_RE);
1063 /* tWHR -> WE_2_RE */
1064 we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
1065 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
1067 tmp = ioread32(denali->flash_reg + TWHR2_AND_WE_2_RE);
1068 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
1070 iowrite32(tmp, denali->flash_reg + TWHR2_AND_WE_2_RE);
1072 /* tADL -> ADDR_2_DATA */
1074 /* for older versions, ADDR_2_DATA is only 6 bit wide */
1075 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
1076 if (denali->revision < 0x0501)
1077 addr_2_data_mask >>= 1;
1079 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk);
1080 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
1082 tmp = ioread32(denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
1083 tmp &= ~addr_2_data_mask;
1085 iowrite32(tmp, denali->flash_reg + TCWAW_AND_ADDR_2_DATA);
1087 /* tREH, tWH -> RDWR_EN_HI_CNT */
1088 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
1090 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
1092 tmp = ioread32(denali->flash_reg + RDWR_EN_HI_CNT);
1093 tmp &= ~RDWR_EN_HI_CNT__VALUE;
1095 iowrite32(tmp, denali->flash_reg + RDWR_EN_HI_CNT);
1097 /* tRP, tWP -> RDWR_EN_LO_CNT */
1098 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min),
1100 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
1102 rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT);
1103 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
1104 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
1106 tmp = ioread32(denali->flash_reg + RDWR_EN_LO_CNT);
1107 tmp &= ~RDWR_EN_LO_CNT__VALUE;
1109 iowrite32(tmp, denali->flash_reg + RDWR_EN_LO_CNT);
1111 /* tCS, tCEA -> CS_SETUP_CNT */
1112 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo,
1113 (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks,
1115 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
1117 tmp = ioread32(denali->flash_reg + CS_SETUP_CNT);
1118 tmp &= ~CS_SETUP_CNT__VALUE;
1120 iowrite32(tmp, denali->flash_reg + CS_SETUP_CNT);
1125 /* Initialization code to bring the device up to a known good state */
1126 static void denali_hw_init(struct denali_nand_info *denali)
1129 * The REVISION register may not be reliable. Platforms are allowed to
1132 if (!denali->revision)
1134 swab16(ioread32(denali->flash_reg + REVISION));
1137 * tell driver how many bit controller will skip before
1138 * writing ECC code in OOB, this register may be already
1139 * set by firmware. So we read this value out.
1140 * if this value is 0, just let it be.
1142 denali->bbtskipbytes = ioread32(denali->flash_reg +
1143 SPARE_AREA_SKIP_BYTES);
1144 detect_max_banks(denali);
1145 denali_nand_reset(denali);
1146 iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1147 iowrite32(CHIP_EN_DONT_CARE__FLAG,
1148 denali->flash_reg + CHIP_ENABLE_DONT_CARE);
1150 iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1152 /* Should set value for these registers when init */
1153 iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1154 iowrite32(1, denali->flash_reg + ECC_ENABLE);
1155 denali_irq_init(denali);
/*
 * Compute the number of ECC bytes needed for a BCH code of the given
 * step size and strength. The Denali controller requires ecc.bytes to
 * be a multiple of 2, so round up to whole 16-bit parity words.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	int ecc_bits = strength * fls(step_size * 8);

	return DIV_ROUND_UP(ecc_bits, 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
1165 static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
1166 struct denali_nand_info *denali)
1168 int oobavail = mtd->oobsize - denali->bbtskipbytes;
1172 * If .size and .strength are already set (usually by DT),
1173 * check if they are supported by this controller.
1175 if (chip->ecc.size && chip->ecc.strength)
1176 return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
1179 * We want .size and .strength closest to the chip's requirement
1180 * unless NAND_ECC_MAXIMIZE is requested.
1182 if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
1183 ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
1188 /* Max ECC strength is the last thing we can do */
1189 return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
1192 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
1193 struct mtd_oob_region *oobregion)
1195 struct denali_nand_info *denali = mtd_to_denali(mtd);
1196 struct nand_chip *chip = mtd_to_nand(mtd);
1201 oobregion->offset = denali->bbtskipbytes;
1202 oobregion->length = chip->ecc.total;
1207 static int denali_ooblayout_free(struct mtd_info *mtd, int section,
1208 struct mtd_oob_region *oobregion)
1210 struct denali_nand_info *denali = mtd_to_denali(mtd);
1211 struct nand_chip *chip = mtd_to_nand(mtd);
1216 oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
1217 oobregion->length = mtd->oobsize - oobregion->offset;
1222 static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
1223 .ecc = denali_ooblayout_ecc,
1224 .free = denali_ooblayout_free,
/* signature patterns identifying the main and mirror bad block tables */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1230 static struct nand_bbt_descr bbt_main_descr = {
1231 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1232 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1237 .pattern = bbt_pattern,
1240 static struct nand_bbt_descr bbt_mirror_descr = {
1241 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1242 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1247 .pattern = mirror_pattern,
1250 /* initialize driver data structures */
1251 static void denali_drv_init(struct denali_nand_info *denali)
1254 * the completion object will be used to notify
1255 * the callee that the interrupt is done
1257 init_completion(&denali->complete);
1260 * the spinlock will be used to synchronize the ISR with any
1261 * element that might be access shared data (interrupt status)
1263 spin_lock_init(&denali->irq_lock);
1265 /* indicate that MTD has not selected a valid bank yet */
1266 denali->flash_bank = CHIP_SELECT_INVALID;
1268 /* initialize our irq_status variable to indicate no interrupts */
1269 denali->irq_status = 0;
1272 static int denali_multidev_fixup(struct denali_nand_info *denali)
1274 struct nand_chip *chip = &denali->nand;
1275 struct mtd_info *mtd = nand_to_mtd(chip);
1278 * Support for multi device:
1279 * When the IP configuration is x16 capable and two x8 chips are
1280 * connected in parallel, DEVICES_CONNECTED should be set to 2.
1281 * In this case, the core framework knows nothing about this fact,
1282 * so we should tell it the _logical_ pagesize and anything necessary.
1284 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1287 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
1288 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
1290 if (denali->devnum == 0) {
1292 iowrite32(1, denali->flash_reg + DEVICES_CONNECTED);
1295 if (denali->devnum == 1)
1298 if (denali->devnum != 2) {
1299 dev_err(denali->dev, "unsupported number of devices %d\n",
1304 /* 2 chips in parallel */
1306 mtd->erasesize <<= 1;
1307 mtd->writesize <<= 1;
1309 chip->chipsize <<= 1;
1310 chip->page_shift += 1;
1311 chip->phys_erase_shift += 1;
1312 chip->bbt_erase_shift += 1;
1313 chip->chip_shift += 1;
1314 chip->pagemask <<= 1;
1315 chip->ecc.size <<= 1;
1316 chip->ecc.bytes <<= 1;
1317 chip->ecc.strength <<= 1;
1318 denali->bbtskipbytes <<= 1;
1323 int denali_init(struct denali_nand_info *denali)
1325 struct nand_chip *chip = &denali->nand;
1326 struct mtd_info *mtd = nand_to_mtd(chip);
1329 /* allocate a temporary buffer for nand_scan_ident() */
1330 denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
1331 GFP_DMA | GFP_KERNEL);
1332 if (!denali->buf.buf)
1335 mtd->dev.parent = denali->dev;
1336 denali_hw_init(denali);
1337 denali_drv_init(denali);
1339 /* Request IRQ after all the hardware initialization is finished */
1340 ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
1341 IRQF_SHARED, DENALI_NAND_NAME, denali);
1343 dev_err(denali->dev, "Unable to request IRQ\n");
1347 /* now that our ISR is registered, we can enable interrupts */
1348 denali_set_intr_modes(denali, true);
1349 nand_set_flash_node(chip, denali->dev->of_node);
1350 /* Fallback to the default name if DT did not give "label" property */
1352 mtd->name = "denali-nand";
1354 /* register the driver with the NAND core subsystem */
1355 chip->select_chip = denali_select_chip;
1356 chip->cmdfunc = denali_cmdfunc;
1357 chip->read_byte = denali_read_byte;
1358 chip->waitfunc = denali_waitfunc;
1359 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
1360 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
1362 /* clk rate info is needed for setup_data_interface */
1363 if (denali->clk_x_rate)
1364 chip->setup_data_interface = denali_setup_data_interface;
1367 * scan for NAND devices attached to the controller
1368 * this is the first stage in a two step process to register
1369 * with the nand subsystem
1371 ret = nand_scan_ident(mtd, denali->max_banks, NULL);
1373 goto failed_req_irq;
1375 /* allocate the right size buffer now */
1376 devm_kfree(denali->dev, denali->buf.buf);
1377 denali->buf.buf = devm_kzalloc(denali->dev,
1378 mtd->writesize + mtd->oobsize,
1380 if (!denali->buf.buf) {
1382 goto failed_req_irq;
1385 ret = dma_set_mask(denali->dev,
1386 DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ?
1389 dev_err(denali->dev, "No usable DMA configuration\n");
1390 goto failed_req_irq;
1393 denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
1394 mtd->writesize + mtd->oobsize,
1396 if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
1397 dev_err(denali->dev, "Failed to map DMA buffer\n");
1399 goto failed_req_irq;
1403 * second stage of the NAND scan
1404 * this stage requires information regarding ECC and
1405 * bad block management.
1408 /* Bad block management */
1409 chip->bbt_td = &bbt_main_descr;
1410 chip->bbt_md = &bbt_mirror_descr;
1412 /* skip the scan for now until we have OOB read and write support */
1413 chip->bbt_options |= NAND_BBT_USE_FLASH;
1414 chip->options |= NAND_SKIP_BBTSCAN;
1415 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
1417 /* no subpage writes on denali */
1418 chip->options |= NAND_NO_SUBPAGE_WRITE;
1420 ret = denali_ecc_setup(mtd, chip, denali);
1422 dev_err(denali->dev, "Failed to setup ECC settings.\n");
1423 goto failed_req_irq;
1426 dev_dbg(denali->dev,
1427 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1428 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1430 iowrite32(chip->ecc.strength, denali->flash_reg + ECC_CORRECTION);
1431 iowrite32(mtd->erasesize / mtd->writesize,
1432 denali->flash_reg + PAGES_PER_BLOCK);
1433 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
1434 denali->flash_reg + DEVICE_WIDTH);
1435 iowrite32(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
1436 iowrite32(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
1438 iowrite32(chip->ecc.size, denali->flash_reg + CFG_DATA_BLOCK_SIZE);
1439 iowrite32(chip->ecc.size, denali->flash_reg + CFG_LAST_DATA_BLOCK_SIZE);
1440 /* chip->ecc.steps is set by nand_scan_tail(); not available here */
1441 iowrite32(mtd->writesize / chip->ecc.size,
1442 denali->flash_reg + CFG_NUM_DATA_BLOCKS);
1444 mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
1446 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1447 chip->ecc.read_page = denali_read_page;
1448 chip->ecc.read_page_raw = denali_read_page_raw;
1449 chip->ecc.write_page = denali_write_page;
1450 chip->ecc.write_page_raw = denali_write_page_raw;
1451 chip->ecc.read_oob = denali_read_oob;
1452 chip->ecc.write_oob = denali_write_oob;
1453 chip->erase = denali_erase;
1455 ret = denali_multidev_fixup(denali);
1457 goto failed_req_irq;
1459 ret = nand_scan_tail(mtd);
1461 goto failed_req_irq;
1463 ret = mtd_device_register(mtd, NULL, 0);
1465 dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
1466 goto failed_req_irq;
1471 denali_irq_cleanup(denali->irq, denali);
1475 EXPORT_SYMBOL(denali_init);
1477 /* driver exit point */
1478 void denali_remove(struct denali_nand_info *denali)
1480 struct mtd_info *mtd = nand_to_mtd(&denali->nand);
1482 * Pre-compute DMA buffer size to avoid any problems in case
1483 * nand_release() ever changes in a way that mtd->writesize and
1484 * mtd->oobsize are not reliable after this call.
1486 int bufsize = mtd->writesize + mtd->oobsize;
1489 denali_irq_cleanup(denali->irq, denali);
1490 dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
1493 EXPORT_SYMBOL(denali_remove);