3 * This is the generic MTD driver for NAND flash devices. It should be
4 * capable of working with almost all NAND chips currently available.
6 * Additional technical information is available on
7 * http://www.linux-mtd.infradead.org/doc/nand.html
9 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
10 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
13 * David Woodhouse for adding multichip support
15 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
16 * rework for 2K page size chips
19 * Enable cached programming for 2k page size chips
20 * Check, if mtd->ecctype should be set to MTD_ECC_HW
21 * if we have HW ECC support.
22 * BBT table is not serialized, has to be fixed
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License version 2 as
26 * published by the Free Software Foundation.
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/module.h>
33 #include <linux/delay.h>
34 #include <linux/errno.h>
35 #include <linux/err.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
39 #include <linux/nmi.h>
40 #include <linux/types.h>
41 #include <linux/mtd/mtd.h>
42 #include <linux/mtd/nand.h>
43 #include <linux/mtd/nand_ecc.h>
44 #include <linux/mtd/nand_bch.h>
45 #include <linux/interrupt.h>
46 #include <linux/bitops.h>
48 #include <linux/mtd/partitions.h>
51 static int nand_get_device(struct mtd_info *mtd, int new_state);
53 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
54 struct mtd_oob_ops *ops);
56 /* Define default oob placement schemes for large and small page devices */
/* Report the ECC byte regions of the default small-page OOB layout. */
57 static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
58 struct mtd_oob_region *oobregion)
60 struct nand_chip *chip = mtd_to_nand(mtd);
61 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* First ECC region: the 4 bytes at the very start of the OOB area. */
67 oobregion->offset = 0;
68 oobregion->length = 4;
/*
 * Second ECC region: the remaining ECC bytes start at offset 6,
 * presumably skipping the bad-block-marker bytes in between —
 * TODO(review): confirm against the section handling not visible here.
 */
70 oobregion->offset = 6;
71 oobregion->length = ecc->total - 4;
/* Report the free (non-ECC, non-BBM) OOB byte regions for small-page chips. */
77 static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
78 struct mtd_oob_region *oobregion)
/* 16-byte OOB (512B page): free area is the upper half ... */
83 if (mtd->oobsize == 16) {
87 oobregion->length = 8;
88 oobregion->offset = 8;
/* ... 8-byte OOB (256B page): only 2 free bytes remain. */
90 oobregion->length = 2;
/*
 * Offsets 3 and 6 below belong to section-dependent branches whose
 * conditions are not visible in this view — verify against the full file.
 */
92 oobregion->offset = 3;
94 oobregion->offset = 6;
/* Default OOB layout operations for small-page (256/512 byte) devices. */
100 const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
101 .ecc = nand_ooblayout_ecc_sp,
102 .free = nand_ooblayout_free_sp,
104 EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
/* Report the ECC byte region of the default large-page OOB layout. */
106 static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
107 struct mtd_oob_region *oobregion)
109 struct nand_chip *chip = mtd_to_nand(mtd);
110 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* All ECC bytes are packed at the very end of the OOB area. */
115 oobregion->length = ecc->total;
116 oobregion->offset = mtd->oobsize - oobregion->length;
/* Report the free OOB byte region of the default large-page layout. */
121 static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
122 struct mtd_oob_region *oobregion)
124 struct nand_chip *chip = mtd_to_nand(mtd);
125 struct nand_ecc_ctrl *ecc = &chip->ecc;
/*
 * Everything between the 2 bad-block-marker bytes at the front and the
 * ECC bytes packed at the end is available to the user.
 */
130 oobregion->length = mtd->oobsize - ecc->total - 2;
131 oobregion->offset = 2;
/* Default OOB layout operations for large-page (2KiB+) devices. */
136 const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
137 .ecc = nand_ooblayout_ecc_lp,
138 .free = nand_ooblayout_free_lp,
140 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
143 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
144 * are placed at a fixed offset.
146 static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
147 struct mtd_oob_region *oobregion)
149 struct nand_chip *chip = mtd_to_nand(mtd);
150 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* Fixed historical ECC offsets keyed on OOB size (e.g. 64 -> 40, 128 -> 80). */
155 switch (mtd->oobsize) {
157 oobregion->offset = 40;
160 oobregion->offset = 80;
/* Reject layouts where the ECC bytes would overrun the OOB area. */
166 oobregion->length = ecc->total;
167 if (oobregion->offset + oobregion->length > mtd->oobsize)
/*
 * Free-OOB regions for the old fixed-offset Hamming layout: one region
 * before the ECC bytes and one after them (hence two sections).
 */
173 static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
174 struct mtd_oob_region *oobregion)
176 struct nand_chip *chip = mtd_to_nand(mtd);
177 struct nand_ecc_ctrl *ecc = &chip->ecc;
/* Only sections 0 and 1 exist for this layout. */
180 if (section < 0 || section > 1)
183 switch (mtd->oobsize) {
/* Section 0: bytes between the bad-block marker and the ECC offset. */
195 oobregion->offset = 2;
196 oobregion->length = ecc_offset - 2;
/* Section 1: everything after the ECC bytes up to the end of the OOB. */
198 oobregion->offset = ecc_offset + ecc->total;
199 oobregion->length = mtd->oobsize - oobregion->offset;
/* OOB layout operations for the legacy fixed-offset 1-bit Hamming layout. */
205 const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
206 .ecc = nand_ooblayout_ecc_lp_hamming,
207 .free = nand_ooblayout_free_lp_hamming,
/*
 * Validate that an (offset, length) pair is aligned to the erase-block
 * size; used by erase/lock/unlock entry points before touching hardware.
 */
210 static int check_offs_len(struct mtd_info *mtd,
211 loff_t ofs, uint64_t len)
213 struct nand_chip *chip = mtd_to_nand(mtd);
216 /* Start address must align on block boundary */
217 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
218 pr_debug("%s: unaligned address\n", __func__);
222 /* Length must align on block boundary */
223 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
224 pr_debug("%s: length not block aligned\n", __func__);
232 * nand_release_device - [GENERIC] release chip
233 * @mtd: MTD device structure
235 * Release chip lock and wake up anyone waiting on the device.
237 static void nand_release_device(struct mtd_info *mtd)
239 struct nand_chip *chip = mtd_to_nand(mtd);
241 /* Release the controller and the chip */
242 spin_lock(&chip->controller->lock);
/* Clearing ->active lets another chip sharing this controller take over. */
243 chip->controller->active = NULL;
244 chip->state = FL_READY;
/* Wake waiters queued in nand_get_device() on the controller wait queue. */
245 wake_up(&chip->controller->wq);
246 spin_unlock(&chip->controller->lock);
250 * nand_read_byte - [DEFAULT] read one byte from the chip
251 * @mtd: MTD device structure
253 * Default read function for 8bit buswidth
255 static uint8_t nand_read_byte(struct mtd_info *mtd)
257 struct nand_chip *chip = mtd_to_nand(mtd);
/* Single MMIO byte read from the chip's data register. */
258 return readb(chip->IO_ADDR_R);
262 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
263 * @mtd: MTD device structure
265 * Default read function for 16bit buswidth with endianness conversion.
268 static uint8_t nand_read_byte16(struct mtd_info *mtd)
270 struct nand_chip *chip = mtd_to_nand(mtd);
/* Read a 16-bit word and return only the low byte (after LE conversion). */
271 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
275 * nand_read_word - [DEFAULT] read one word from the chip
276 * @mtd: MTD device structure
278 * Default read function for 16bit buswidth without endianness conversion.
280 static u16 nand_read_word(struct mtd_info *mtd)
282 struct nand_chip *chip = mtd_to_nand(mtd);
/* Raw 16-bit MMIO read; caller handles any byte ordering. */
283 return readw(chip->IO_ADDR_R);
287 * nand_select_chip - [DEFAULT] control CE line
288 * @mtd: MTD device structure
289 * @chipnr: chipnumber to select, -1 for deselect
291 * Default select function for 1 chip devices.
293 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
295 struct nand_chip *chip = mtd_to_nand(mtd);
/*
 * Issue a "no command" control cycle to toggle the CE line; the
 * chipnr switch/case handling is not visible in this view.
 */
299 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
310 * nand_write_byte - [DEFAULT] write single byte to chip
311 * @mtd: MTD device structure
312 * @byte: value to write
314 * Default function to write a byte to I/O[7:0]
316 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
318 struct nand_chip *chip = mtd_to_nand(mtd);
/* Delegate to the buffer writer with a one-byte buffer. */
320 chip->write_buf(mtd, &byte, 1);
324 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
325 * @mtd: MTD device structure
326 * @byte: value to write
328 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
330 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
332 struct nand_chip *chip = mtd_to_nand(mtd);
/* Zero-extend the byte so I/O[15:8] are driven to 0 (see note below). */
333 uint16_t word = byte;
336 * It's not entirely clear what should happen to I/O[15:8] when writing
337 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
339 * When the host supports a 16-bit bus width, only data is
340 * transferred at the 16-bit width. All address and command line
341 * transfers shall use only the lower 8-bits of the data bus. During
342 * command transfers, the host may place any value on the upper
343 * 8-bits of the data bus. During address transfers, the host shall
344 * set the upper 8-bits of the data bus to 00h.
346 * One user of the write_byte callback is nand_onfi_set_features. The
347 * four parameters are specified to be written to I/O[7:0], but this is
348 * neither an address nor a command transfer. Let's assume a 0 on the
349 * upper I/O lines is OK.
/* Write the full 16-bit word through the regular buffer writer. */
351 chip->write_buf(mtd, (uint8_t *)&word, 2);
355 * nand_write_buf - [DEFAULT] write buffer to chip
356 * @mtd: MTD device structure
358 * @len: number of bytes to write
360 * Default write function for 8bit buswidth.
362 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
364 struct nand_chip *chip = mtd_to_nand(mtd);
/* Repeated 8-bit MMIO writes to the fixed data-register address. */
366 iowrite8_rep(chip->IO_ADDR_W, buf, len);
370 * nand_read_buf - [DEFAULT] read chip data into buffer
371 * @mtd: MTD device structure
372 * @buf: buffer to store date
373 * @len: number of bytes to read
375 * Default read function for 8bit buswidth.
377 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
379 struct nand_chip *chip = mtd_to_nand(mtd);
/* Repeated 8-bit MMIO reads from the fixed data-register address. */
381 ioread8_rep(chip->IO_ADDR_R, buf, len);
385 * nand_write_buf16 - [DEFAULT] write buffer to chip
386 * @mtd: MTD device structure
388 * @len: number of bytes to write
390 * Default write function for 16bit buswidth.
392 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
394 struct nand_chip *chip = mtd_to_nand(mtd);
395 u16 *p = (u16 *) buf;
/* len is in bytes; transfer len/2 16-bit words (odd trailing byte dropped). */
397 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
401 * nand_read_buf16 - [DEFAULT] read chip data into buffer
402 * @mtd: MTD device structure
403 * @buf: buffer to store date
404 * @len: number of bytes to read
406 * Default read function for 16bit buswidth.
408 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
410 struct nand_chip *chip = mtd_to_nand(mtd);
411 u16 *p = (u16 *) buf;
/* len is in bytes; transfer len/2 16-bit words (odd trailing byte dropped). */
413 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
417 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
418 * @mtd: MTD device structure
419 * @ofs: offset from device start
421 * Check, if the block is bad.
423 static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
425 int page, page_end, res;
426 struct nand_chip *chip = mtd_to_nand(mtd);
/* Some chips put the marker in the last page of the block, not the first. */
429 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
430 ofs += mtd->erasesize - mtd->writesize;
432 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
/* Optionally also scan the second page, as some vendors mark both. */
433 page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
435 for (; page < page_end; page++) {
436 res = chip->ecc.read_oob(mtd, chip, page);
/* Bad-block marker byte lives at chip->badblockpos in the OOB buffer. */
440 bad = chip->oob_poi[chip->badblockpos];
/* Fast path: with 8 significant bits any non-0xFF marker means bad. */
442 if (likely(chip->badblockbits == 8))
/* Otherwise tolerate bitflips: bad only if too few bits are still set. */
445 res = hweight8(bad) < chip->badblockbits;
454 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
455 * @mtd: MTD device structure
456 * @ofs: offset from device start
458 * This is the default implementation, which can be overridden by a hardware
459 * specific driver. It provides the details for writing a bad block marker to a
462 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
464 struct nand_chip *chip = mtd_to_nand(mtd);
465 struct mtd_oob_ops ops;
/* Two zero bytes: marker payload written over the BBM position(s). */
466 uint8_t buf[2] = { 0, 0 };
467 int ret = 0, res, i = 0;
469 memset(&ops, 0, sizeof(ops));
471 ops.ooboffs = chip->badblockpos;
472 if (chip->options & NAND_BUSWIDTH_16) {
/* 16-bit chips: access must be word-aligned, so write both bytes. */
473 ops.ooboffs &= ~0x01;
474 ops.len = ops.ooblen = 2;
476 ops.len = ops.ooblen = 1;
478 ops.mode = MTD_OPS_PLACE_OOB;
480 /* Write to first/last page(s) if necessary */
481 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
482 ofs += mtd->erasesize - mtd->writesize;
484 res = nand_do_write_oob(mtd, ofs, &ops);
/* Optionally repeat the marker in the following page (SCAN2NDPAGE). */
489 ofs += mtd->writesize;
490 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
496 * nand_block_markbad_lowlevel - mark a block bad
497 * @mtd: MTD device structure
498 * @ofs: offset from device start
500 * This function performs the generic NAND bad block marking steps (i.e., bad
501 * block table(s) and/or marker(s)). We only allow the hardware driver to
502 * specify how to write bad block markers to OOB (chip->block_markbad).
504 * We try operations in the following order:
505 * (1) erase the affected block, to allow OOB marker to be written cleanly
506 * (2) write bad block marker to OOB area of affected block (unless flag
507 * NAND_BBT_NO_OOB_BBM is present)
509 * Note that we retain the first error encountered in (2) or (3), finish the
510 * procedures, and dump the error in the end.
512 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
514 struct nand_chip *chip = mtd_to_nand(mtd);
517 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
518 struct erase_info einfo;
520 /* Attempt erase before marking OOB */
521 memset(&einfo, 0, sizeof(einfo));
524 einfo.len = 1ULL << chip->phys_erase_shift;
/* Best-effort erase; failure is ignored since the block is bad anyway. */
525 nand_erase_nand(mtd, &einfo, 0);
527 /* Write bad block marker to OOB */
/* Take the chip lock around the driver's block_markbad callback. */
528 nand_get_device(mtd, FL_WRITING);
529 ret = chip->block_markbad(mtd, ofs);
530 nand_release_device(mtd);
533 /* Mark block bad in BBT */
535 res = nand_markbbad_bbt(mtd, ofs);
541 mtd->ecc_stats.badblocks++;
547 * nand_check_wp - [GENERIC] check if the chip is write protected
548 * @mtd: MTD device structure
550 * Check, if the device is write protected. The function expects, that the
551 * device is already selected.
553 static int nand_check_wp(struct mtd_info *mtd)
555 struct nand_chip *chip = mtd_to_nand(mtd);
557 /* Broken xD cards report WP despite being writable */
558 if (chip->options & NAND_BROKEN_XD)
561 /* Check the WP bit */
562 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
/* STATUS bit set means writable, so return 1 (protected) when it is clear. */
563 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
567 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
568 * @mtd: MTD device structure
569 * @ofs: offset from device start
571 * Check if the block is marked as reserved.
573 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
575 struct nand_chip *chip = mtd_to_nand(mtd);
579 /* Return info from the table */
/* Only meaningful when a bad-block table exists (guard not visible here). */
580 return nand_isreserved_bbt(mtd, ofs);
584 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
585 * @mtd: MTD device structure
586 * @ofs: offset from device start
587 * @allowbbt: 1, if its allowed to access the bbt area
589 * Check, if the block is bad. Either by reading the bad block table or
590 * calling of the scan function.
592 static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
594 struct nand_chip *chip = mtd_to_nand(mtd);
/* No BBT: fall back to scanning the on-flash marker via block_bad(). */
597 return chip->block_bad(mtd, ofs);
599 /* Return info from the table */
600 return nand_isbad_bbt(mtd, ofs, allowbbt);
604 * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
605 * @mtd: MTD device structure
608 * Helper function for nand_wait_ready used when needing to wait in interrupt
611 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
613 struct nand_chip *chip = mtd_to_nand(mtd);
616 /* Wait for the device to get ready */
/* Busy-poll (no sleeping allowed in interrupt/panic context). */
617 for (i = 0; i < timeo; i++) {
618 if (chip->dev_ready(mtd))
/* Keep the soft-lockup detector quiet during the long poll. */
620 touch_softlockup_watchdog();
626 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
627 * @mtd: MTD device structure
629 * Wait for the ready pin after a command, and warn if a timeout occurs.
631 void nand_wait_ready(struct mtd_info *mtd)
633 struct nand_chip *chip = mtd_to_nand(mtd);
/* Timeout budget in milliseconds; reused below as a jiffies deadline. */
634 unsigned long timeo = 400;
/* Cannot sleep in interrupt or panic context: busy-poll instead. */
636 if (in_interrupt() || oops_in_progress)
637 return panic_nand_wait_ready(mtd, timeo);
639 /* Wait until command is processed or timeout occurs */
640 timeo = jiffies + msecs_to_jiffies(timeo);
642 if (chip->dev_ready(mtd))
645 } while (time_before(jiffies, timeo));
/* Re-check once after timeout to avoid a false warning on a late ready. */
647 if (!chip->dev_ready(mtd))
648 pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
650 EXPORT_SYMBOL_GPL(nand_wait_ready);
653 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
654 * @mtd: MTD device structure
655 * @timeo: Timeout in ms
657 * Wait for status ready (i.e. command done) or timeout.
659 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
661 register struct nand_chip *chip = mtd_to_nand(mtd);
663 timeo = jiffies + msecs_to_jiffies(timeo);
/* Poll the chip's STATUS register until the READY bit is set. */
665 if ((chip->read_byte(mtd) & NAND_STATUS_READY))
667 touch_softlockup_watchdog();
668 } while (time_before(jiffies, timeo));
672 * nand_command - [DEFAULT] Send command to NAND device
673 * @mtd: MTD device structure
674 * @command: the command to be sent
675 * @column: the column address for this command, -1 if none
676 * @page_addr: the page address for this command, -1 if none
678 * Send command to NAND device. This function is used for small page devices
679 * (512 Bytes per page).
681 static void nand_command(struct mtd_info *mtd, unsigned int command,
682 int column, int page_addr)
684 register struct nand_chip *chip = mtd_to_nand(mtd);
685 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
687 /* Write out the command to the device */
/*
 * SEQIN (page program setup) on small-page chips needs a pointer
 * command first to select the region (main area halves or OOB).
 */
688 if (command == NAND_CMD_SEQIN) {
691 if (column >= mtd->writesize) {
/* OOB area --> READOOB, column rebased to the OOB region. */
693 column -= mtd->writesize;
694 readcmd = NAND_CMD_READOOB;
695 } else if (column < 256) {
696 /* First 256 bytes --> READ0 */
697 readcmd = NAND_CMD_READ0;
/* Second 256 bytes --> READ1. */
700 readcmd = NAND_CMD_READ1;
702 chip->cmd_ctrl(mtd, readcmd, ctrl);
703 ctrl &= ~NAND_CTRL_CHANGE;
705 chip->cmd_ctrl(mtd, command, ctrl);
707 /* Address cycle, when necessary */
708 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
709 /* Serially input address */
711 /* Adjust columns for 16 bit buswidth */
/* Column is a byte offset; on x16 parts addressing is per 16-bit word. */
712 if (chip->options & NAND_BUSWIDTH_16 &&
713 !nand_opcode_8bits(command))
715 chip->cmd_ctrl(mtd, column, ctrl);
716 ctrl &= ~NAND_CTRL_CHANGE;
718 if (page_addr != -1) {
/* Row address, 8 bits per cycle, LSB first. */
719 chip->cmd_ctrl(mtd, page_addr, ctrl);
720 ctrl &= ~NAND_CTRL_CHANGE;
721 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
722 /* One more address cycle for devices > 32MiB */
723 if (chip->chipsize > (32 << 20))
724 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
726 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
729 * Program and erase have their own busy handlers status and sequential
/* Commands below need no ready/busy handling here (fall through/return). */
734 case NAND_CMD_PAGEPROG:
735 case NAND_CMD_ERASE1:
736 case NAND_CMD_ERASE2:
738 case NAND_CMD_STATUS:
739 case NAND_CMD_READID:
740 case NAND_CMD_SET_FEATURES:
/* NOTE(review): case label(s) for the branch below (likely RESET with no
 * dev_ready pin) are not visible in this view of the file. */
746 udelay(chip->chip_delay);
747 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
748 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
750 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
751 /* EZ-NAND can take upto 250ms as per ONFi v4.0 */
752 nand_wait_status_ready(mtd, 250);
755 /* This applies to read commands */
758 * If we don't have access to the busy pin, we apply the given
761 if (!chip->dev_ready) {
762 udelay(chip->chip_delay);
767 * Apply this short delay always to ensure that we do wait tWB in
768 * any case on any machine.
772 nand_wait_ready(mtd);
/*
 * Wait the change-column-setup time (tCCS) after a RNDIN/RNDOUT command,
 * unless the controller already handles it in hardware.
 */
775 static void nand_ccs_delay(struct nand_chip *chip)
778 * The controller already takes care of waiting for tCCS when the RNDIN
779 * or RNDOUT command is sent, return directly.
781 if (!(chip->options & NAND_WAIT_TCCS))
785 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
786 * (which should be safe for all NANDs).
/* tCCS_min is stored in picoseconds; convert to ns for ndelay(). */
788 if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
789 ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
795 * nand_command_lp - [DEFAULT] Send command to NAND large page device
796 * @mtd: MTD device structure
797 * @command: the command to be sent
798 * @column: the column address for this command, -1 if none
799 * @page_addr: the page address for this command, -1 if none
801 * Send command to NAND device. This is the version for the new large page
802 * devices. We don't have the separate regions as we have in the small page
803 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
805 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
806 int column, int page_addr)
808 register struct nand_chip *chip = mtd_to_nand(mtd);
810 /* Emulate NAND_CMD_READOOB */
/* Large-page chips have no READOOB: read the OOB via READ0 at an offset. */
811 if (command == NAND_CMD_READOOB) {
812 column += mtd->writesize;
813 command = NAND_CMD_READ0;
816 /* Command latch cycle */
817 chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
819 if (column != -1 || page_addr != -1) {
820 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
822 /* Serially input address */
824 /* Adjust columns for 16 bit buswidth */
/* Byte column -> word column on x16 parts (data transfers only). */
825 if (chip->options & NAND_BUSWIDTH_16 &&
826 !nand_opcode_8bits(command))
828 chip->cmd_ctrl(mtd, column, ctrl);
829 ctrl &= ~NAND_CTRL_CHANGE;
831 /* Only output a single addr cycle for 8bits opcodes. */
832 if (!nand_opcode_8bits(command))
833 chip->cmd_ctrl(mtd, column >> 8, ctrl);
835 if (page_addr != -1) {
/* Row address cycles, LSB first. */
836 chip->cmd_ctrl(mtd, page_addr, ctrl);
837 chip->cmd_ctrl(mtd, page_addr >> 8,
838 NAND_NCE | NAND_ALE);
839 /* One more address cycle for devices > 128MiB */
840 if (chip->chipsize > (128 << 20))
841 chip->cmd_ctrl(mtd, page_addr >> 16,
842 NAND_NCE | NAND_ALE);
845 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
848 * Program and erase have their own busy handlers status, sequential
849 * in and status need no delay.
/* Commands that need no extra ready/busy handling here. */
853 case NAND_CMD_CACHEDPROG:
854 case NAND_CMD_PAGEPROG:
855 case NAND_CMD_ERASE1:
856 case NAND_CMD_ERASE2:
858 case NAND_CMD_STATUS:
859 case NAND_CMD_READID:
860 case NAND_CMD_SET_FEATURES:
/* RNDIN path: only the column-setup (tCCS) delay is required. */
864 nand_ccs_delay(chip);
/* NOTE(review): the case label for the branch below (likely RESET with
 * no dev_ready pin) is not visible in this view of the file. */
870 udelay(chip->chip_delay);
871 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
872 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
873 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
874 NAND_NCE | NAND_CTRL_CHANGE);
875 /* EZ-NAND can take upto 250ms as per ONFi v4.0 */
876 nand_wait_status_ready(mtd, 250);
879 case NAND_CMD_RNDOUT:
880 /* No ready / busy check necessary */
/* RNDOUT is a two-cycle command: issue the RNDOUTSTART confirm. */
881 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
882 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
883 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
884 NAND_NCE | NAND_CTRL_CHANGE);
886 nand_ccs_delay(chip);
/* READ0 is also two-cycle on large-page chips: issue READSTART. */
890 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
891 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
892 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
893 NAND_NCE | NAND_CTRL_CHANGE);
895 /* This applies to read commands */
898 * If we don't have access to the busy pin, we apply the given
901 if (!chip->dev_ready) {
902 udelay(chip->chip_delay);
908 * Apply this short delay always to ensure that we do wait tWB in
909 * any case on any machine.
913 nand_wait_ready(mtd);
917 * panic_nand_get_device - [GENERIC] Get chip for selected access
918 * @chip: the nand chip descriptor
919 * @mtd: MTD device structure
920 * @new_state: the state which is requested
922 * Used when in panic, no locks are taken.
924 static void panic_nand_get_device(struct nand_chip *chip,
925 struct mtd_info *mtd, int new_state)
927 /* Hardware controller shared among independent devices */
/* Grab ownership unconditionally: in panic there are no other runners. */
928 chip->controller->active = chip;
929 chip->state = new_state;
933 * nand_get_device - [GENERIC] Get chip for selected access
934 * @mtd: MTD device structure
935 * @new_state: the state which is requested
937 * Get the device and lock it for exclusive access
940 nand_get_device(struct mtd_info *mtd, int new_state)
942 struct nand_chip *chip = mtd_to_nand(mtd);
943 spinlock_t *lock = &chip->controller->lock;
944 wait_queue_head_t *wq = &chip->controller->wq;
945 DECLARE_WAITQUEUE(wait, current);
949 /* Hardware controller shared among independent devices */
/* Claim the controller if nobody owns it yet. */
950 if (!chip->controller->active)
951 chip->controller->active = chip;
/* We own the controller and the chip is idle: take it. */
953 if (chip->controller->active == chip && chip->state == FL_READY) {
954 chip->state = new_state;
/* Suspend may proceed if the active chip is already suspended. */
958 if (new_state == FL_PM_SUSPENDED) {
959 if (chip->controller->active->state == FL_PM_SUSPENDED) {
960 chip->state = FL_PM_SUSPENDED;
/* Otherwise sleep on the controller wait queue and retry
 * (loop/retry structure not fully visible in this view). */
965 set_current_state(TASK_UNINTERRUPTIBLE);
966 add_wait_queue(wq, &wait);
969 remove_wait_queue(wq, &wait);
974 * panic_nand_wait - [GENERIC] wait until the command is done
975 * @mtd: MTD device structure
976 * @chip: NAND chip structure
979 * Wait for command done. This is a helper function for nand_wait used when
980 * we are in interrupt context. May happen when in panic and trying to write
981 * an oops through mtdoops.
983 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
/* Busy-poll: prefer the ready pin when available, else the STATUS byte. */
987 for (i = 0; i < timeo; i++) {
988 if (chip->dev_ready) {
989 if (chip->dev_ready(mtd))
992 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1000 * nand_wait - [DEFAULT] wait until the command is done
1001 * @mtd: MTD device structure
1002 * @chip: NAND chip structure
1004 * Wait for command done. This applies to erase and program only.
1006 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
/* 400 ms budget, later converted into a jiffies deadline. */
1010 unsigned long timeo = 400;
1013 * Apply this short delay always to ensure that we do wait tWB in any
1014 * case on any machine.
/* Put the chip into STATUS-read mode for the polling below. */
1018 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1020 if (in_interrupt() || oops_in_progress)
1021 panic_nand_wait(mtd, chip, timeo);
1023 timeo = jiffies + msecs_to_jiffies(timeo);
1025 if (chip->dev_ready) {
1026 if (chip->dev_ready(mtd))
1029 if (chip->read_byte(mtd) & NAND_STATUS_READY)
1033 } while (time_before(jiffies, timeo));
/* Final STATUS read is the function's return value (program/erase result). */
1036 status = (int)chip->read_byte(mtd);
1037 /* This can happen if in case of timeout or buggy dev_ready */
1038 WARN_ON(!(status & NAND_STATUS_READY));
1043 * nand_reset_data_interface - Reset data interface and timings
1044 * @chip: The NAND chip
1045 * @chipnr: Internal die id
1047 * Reset the Data interface and timings to ONFI mode 0.
1049 * Returns 0 for success or negative error code otherwise.
1051 static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
1053 struct mtd_info *mtd = nand_to_mtd(chip);
1054 const struct nand_data_interface *conf;
/* Nothing to do for drivers without a configurable data interface. */
1057 if (!chip->setup_data_interface)
1061 * The ONFI specification says:
1063 * To transition from NV-DDR or NV-DDR2 to the SDR data
1064 * interface, the host shall use the Reset (FFh) command
1065 * using SDR timing mode 0. A device in any timing mode is
1066 * required to recognize Reset (FFh) command issued in SDR
1070 * Configure the data interface in SDR mode and set the
1071 * timings to timing mode 0.
1074 conf = nand_get_default_data_interface();
1075 ret = chip->setup_data_interface(mtd, chipnr, conf);
1077 pr_err("Failed to configure data interface to SDR timing mode 0\n");
1083 * nand_setup_data_interface - Setup the best data interface and timings
1084 * @chip: The NAND chip
1085 * @chipnr: Internal die id
1087 * Find and configure the best data interface and NAND timings supported by
1088 * the chip and the driver.
1089 * First tries to retrieve supported timing modes from ONFI information,
1090 * and if the NAND chip does not support ONFI, relies on the
1091 * ->onfi_timing_mode_default specified in the nand_ids table.
1093 * Returns 0 for success or negative error code otherwise.
1095 static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1097 struct mtd_info *mtd = nand_to_mtd(chip);
1100 if (!chip->setup_data_interface || !chip->data_interface)
1104 * Ensure the timing mode has been changed on the chip side
1105 * before changing timings on the controller side.
1107 if (chip->onfi_version) {
1108 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1109 chip->onfi_timing_mode_default,
/* Tell the chip (SET FEATURES, timing-mode address) first ... */
1112 ret = chip->onfi_set_features(mtd, chip,
1113 ONFI_FEATURE_ADDR_TIMING_MODE,
/* ... then reconfigure the controller to the matching timings. */
1119 ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
1125 * nand_init_data_interface - find the best data interface and timings
1126 * @chip: The NAND chip
1128 * Find the best data interface and NAND timings supported by the chip
1130 * First tries to retrieve supported timing modes from ONFI information,
1131 * and if the NAND chip does not support ONFI, relies on the
1132 * ->onfi_timing_mode_default specified in the nand_ids table. After this
1133 * function nand_chip->data_interface is initialized with the best timing mode
1136 * Returns 0 for success or negative error code otherwise.
1138 static int nand_init_data_interface(struct nand_chip *chip)
1140 struct mtd_info *mtd = nand_to_mtd(chip);
1141 int modes, mode, ret;
1143 if (!chip->setup_data_interface)
1147 * First try to identify the best timings from ONFI parameters and
1148 * if the NAND does not support ONFI, fallback to the default ONFI
1151 modes = onfi_get_async_timing_mode(chip);
1152 if (modes == ONFI_TIMING_MODE_UNKNOWN) {
1153 if (!chip->onfi_timing_mode_default)
/* Treat all modes up to the default as candidates (bitmask of modes). */
1156 modes = GENMASK(chip->onfi_timing_mode_default, 0);
/* Owned allocation; released by nand_release_data_interface(). */
1159 chip->data_interface = kzalloc(sizeof(*chip->data_interface),
1161 if (!chip->data_interface)
/* Try the fastest supported mode first, falling back to slower ones. */
1164 for (mode = fls(modes) - 1; mode >= 0; mode--) {
1165 ret = onfi_init_data_interface(chip, chip->data_interface,
1166 NAND_SDR_IFACE, mode);
1170 /* Pass -1 to only */
/* Dry-run: ask the controller whether it can do these timings. */
1171 ret = chip->setup_data_interface(mtd,
1172 NAND_DATA_IFACE_CHECK_ONLY,
1173 chip->data_interface);
1175 chip->onfi_timing_mode_default = mode;
/* Free the data-interface description allocated by nand_init_data_interface(). */
1183 static void nand_release_data_interface(struct nand_chip *chip)
1185 kfree(chip->data_interface);
1189 * nand_reset - Reset and initialize a NAND device
1190 * @chip: The NAND chip
1191 * @chipnr: Internal die id
1193 * Returns 0 for success or negative error code otherwise
1195 int nand_reset(struct nand_chip *chip, int chipnr)
1197 struct mtd_info *mtd = nand_to_mtd(chip);
/* Drop back to SDR timing mode 0 so RESET is issued at safe timings. */
1200 ret = nand_reset_data_interface(chip, chipnr);
1205 * The CS line has to be released before we can apply the new NAND
1206 * interface settings, hence this weird ->select_chip() dance.
1208 chip->select_chip(mtd, chipnr);
1209 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1210 chip->select_chip(mtd, -1);
/* Re-select and negotiate the best timings for normal operation. */
1212 chip->select_chip(mtd, chipnr);
1213 ret = nand_setup_data_interface(chip, chipnr);
1214 chip->select_chip(mtd, -1);
1222 * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1224 * @ofs: offset to start unlock from
1225 * @len: length to unlock
1226 * @invert: when = 0, unlock the range of blocks within the lower and
1227 * upper boundary address
1228 * when = 1, unlock the range of blocks outside the boundaries
1229 * of the lower and upper boundary address
1231 * Returs unlock status.
1233 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
1234 uint64_t len, int invert)
1238 struct nand_chip *chip = mtd_to_nand(mtd);
1240 /* Submit address of first page to unlock */
1241 page = ofs >> chip->page_shift;
1242 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
1244 /* Submit address of last page to unlock */
1245 page = (ofs + len) >> chip->page_shift;
/* The invert flag is encoded into the low bit of the UNLOCK2 page address. */
1246 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
1247 (page | invert) & chip->pagemask);
1249 /* Call wait ready function */
1250 status = chip->waitfunc(mtd, chip);
1251 /* See if device thinks it succeeded */
1252 if (status & NAND_STATUS_FAIL) {
1253 pr_debug("%s: error status = 0x%08x\n",
1262 * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
1264 * @ofs: offset to start unlock from
1265 * @len: length to unlock
1267 * Returns unlock status.
1269 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1273 struct nand_chip *chip = mtd_to_nand(mtd);
1275 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1276 __func__, (unsigned long long)ofs, len);
/* Range must be erase-block aligned. */
1278 if (check_offs_len(mtd, ofs, len))
1281 /* Align to last block address if size addresses end of the device */
1282 if (ofs + len == mtd->size)
1283 len -= mtd->erasesize;
/* Take exclusive ownership of the chip for the whole operation. */
1285 nand_get_device(mtd, FL_UNLOCKING);
1287 /* Shift to get chip number */
1288 chipnr = ofs >> chip->chip_shift;
1292 * If we want to check the WP through READ STATUS and check the bit 7
1293 * we must reset the chip
1294 * some operation can also clear the bit 7 of status register
1295 * eg. erase/program a locked block
1297 nand_reset(chip, chipnr);
1299 chip->select_chip(mtd, chipnr);
1301 /* Check, if it is write protected */
1302 if (nand_check_wp(mtd)) {
1303 pr_debug("%s: device is write protected!\n",
/* invert=0: unlock the blocks inside [ofs, ofs+len). */
1309 ret = __nand_unlock(mtd, ofs, len, 0);
/* Deselect and release the chip on all paths. */
1312 chip->select_chip(mtd, -1);
1313 nand_release_device(mtd);
1317 EXPORT_SYMBOL(nand_unlock);
1320 * nand_lock - [REPLACEABLE] locks all blocks present in the device
1322 * @ofs: offset to start unlock from
1323 * @len: length to unlock
1325 * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1326 * have this feature, but it allows only to lock all blocks, not for specified
1327 * range for block. Implementing 'lock' feature by making use of 'unlock', for
1330 * Returns lock status.
1332 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1335 int chipnr, status, page;
1336 struct nand_chip *chip = mtd_to_nand(mtd);
1338 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1339 __func__, (unsigned long long)ofs, len);
/* Range must be erase-block aligned. */
1341 if (check_offs_len(mtd, ofs, len))
1344 nand_get_device(mtd, FL_LOCKING);
1346 /* Shift to get chip number */
1347 chipnr = ofs >> chip->chip_shift;
1351 * If we want to check the WP through READ STATUS and check the bit 7
1352 * we must reset the chip
1353 * some operation can also clear the bit 7 of status register
1354 * eg. erase/program a locked block
1356 nand_reset(chip, chipnr);
1358 chip->select_chip(mtd, chipnr);
1360 /* Check, if it is write protected */
1361 if (nand_check_wp(mtd)) {
1362 pr_debug("%s: device is write protected!\n",
1364 status = MTD_ERASE_FAILED;
1369 /* Submit address of first page to lock */
/* LOCK locks the whole device (per the header comment above) ... */
1370 page = ofs >> chip->page_shift;
1371 chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
1373 /* Call wait ready function */
1374 status = chip->waitfunc(mtd, chip);
1375 /* See if device thinks it succeeded */
1376 if (status & NAND_STATUS_FAIL) {
1377 pr_debug("%s: error status = 0x%08x\n",
/* ... then unlock everything outside the requested range (invert=1). */
1383 ret = __nand_unlock(mtd, ofs, len, 0x1);
1386 chip->select_chip(mtd, -1);
1387 nand_release_device(mtd);
1391 EXPORT_SYMBOL(nand_lock);
1394 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1395 * @buf: buffer to test
1396 * @len: buffer length
1397 * @bitflips_threshold: maximum number of bitflips
1399 * Check if a buffer contains only 0xff, which means the underlying region
1400 * has been erased and is ready to be programmed.
1401 * The bitflips_threshold specify the maximum number of bitflips before
1402 * considering the region is not erased.
1403 * Note: The logic of this function has been extracted from the memweight
1404 * implementation, except that nand_check_erased_buf function exit before
1405 * testing the whole buffer if the number of bitflips exceed the
1406 * bitflips_threshold value.
1408 * Returns a positive number of bitflips less than or equal to
1409 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1412 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1414 const unsigned char *bitmap = buf;
1418 for (; len && ((uintptr_t)bitmap) % sizeof(long);
1420 weight = hweight8(*bitmap);
1421 bitflips += BITS_PER_BYTE - weight;
1422 if (unlikely(bitflips > bitflips_threshold))
1426 for (; len >= sizeof(long);
1427 len -= sizeof(long), bitmap += sizeof(long)) {
1428 unsigned long d = *((unsigned long *)bitmap);
1431 weight = hweight_long(d);
1432 bitflips += BITS_PER_LONG - weight;
1433 if (unlikely(bitflips > bitflips_threshold))
1437 for (; len > 0; len--, bitmap++) {
1438 weight = hweight8(*bitmap);
1439 bitflips += BITS_PER_BYTE - weight;
1440 if (unlikely(bitflips > bitflips_threshold))
1448 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1450 * @data: data buffer to test
1451 * @datalen: data length
1453 * @ecclen: ECC length
1454 * @extraoob: extra OOB buffer
1455 * @extraooblen: extra OOB length
1456 * @bitflips_threshold: maximum number of bitflips
1458 * Check if a data buffer and its associated ECC and OOB data contains only
1459 * 0xff pattern, which means the underlying region has been erased and is
1460 * ready to be programmed.
1461 * The bitflips_threshold specify the maximum number of bitflips before
1462 * considering the region as not erased.
1465 * 1/ ECC algorithms are working on pre-defined block sizes which are usually
1466 * different from the NAND page size. When fixing bitflips, ECC engines will
1467 * report the number of errors per chunk, and the NAND core infrastructure
1468 * expect you to return the maximum number of bitflips for the whole page.
1469 * This is why you should always use this function on a single chunk and
1470 * not on the whole page. After checking each chunk you should update your
1471 * max_bitflips value accordingly.
1472 * 2/ When checking for bitflips in erased pages you should not only check
1473 * the payload data but also their associated ECC data, because a user might
1474 * have programmed almost all bits to 1 but a few. In this case, we
1475 * shouldn't consider the chunk as erased, and checking ECC bytes prevent
1477 * 3/ The extraoob argument is optional, and should be used if some of your OOB
1478 * data are protected by the ECC engine.
1479 * It could also be used if you support subpages and want to attach some
1480 * extra OOB data to an ECC chunk.
1482 * Returns a positive number of bitflips less than or equal to
1483 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1484 * threshold. In case of success, the passed buffers are filled with 0xff.
1486 int nand_check_erased_ecc_chunk(void *data, int datalen,
1487 void *ecc, int ecclen,
1488 void *extraoob, int extraooblen,
1489 int bitflips_threshold)
1491 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1493 data_bitflips = nand_check_erased_buf(data, datalen,
1494 bitflips_threshold);
1495 if (data_bitflips < 0)
1496 return data_bitflips;
1498 bitflips_threshold -= data_bitflips;
1500 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1501 if (ecc_bitflips < 0)
1502 return ecc_bitflips;
1504 bitflips_threshold -= ecc_bitflips;
1506 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1507 bitflips_threshold);
1508 if (extraoob_bitflips < 0)
1509 return extraoob_bitflips;
1512 memset(data, 0xff, datalen);
1515 memset(ecc, 0xff, ecclen);
1517 if (extraoob_bitflips)
1518 memset(extraoob, 0xff, extraooblen);
1520 return data_bitflips + ecc_bitflips + extraoob_bitflips;
1522 EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
1525 * nand_read_page_raw - [INTERN] read raw page data without ecc
1526 * @mtd: mtd info structure
1527 * @chip: nand chip info structure
1528 * @buf: buffer to store read data
1529 * @oob_required: caller requires OOB data read to chip->oob_poi
1530 * @page: page number to read
1532 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1534 int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1535 uint8_t *buf, int oob_required, int page)
1537 chip->read_buf(mtd, buf, mtd->writesize);
1539 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1542 EXPORT_SYMBOL(nand_read_page_raw);
1545 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1546 * @mtd: mtd info structure
1547 * @chip: nand chip info structure
1548 * @buf: buffer to store read data
1549 * @oob_required: caller requires OOB data read to chip->oob_poi
1550 * @page: page number to read
1552 * We need a special oob layout and handling even when OOB isn't used.
1554 static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1555 struct nand_chip *chip, uint8_t *buf,
1556 int oob_required, int page)
1558 int eccsize = chip->ecc.size;
1559 int eccbytes = chip->ecc.bytes;
1560 uint8_t *oob = chip->oob_poi;
1563 for (steps = chip->ecc.steps; steps > 0; steps--) {
1564 chip->read_buf(mtd, buf, eccsize);
1567 if (chip->ecc.prepad) {
1568 chip->read_buf(mtd, oob, chip->ecc.prepad);
1569 oob += chip->ecc.prepad;
1572 chip->read_buf(mtd, oob, eccbytes);
1575 if (chip->ecc.postpad) {
1576 chip->read_buf(mtd, oob, chip->ecc.postpad);
1577 oob += chip->ecc.postpad;
1581 size = mtd->oobsize - (oob - chip->oob_poi);
1583 chip->read_buf(mtd, oob, size);
1589 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1590 * @mtd: mtd info structure
1591 * @chip: nand chip info structure
1592 * @buf: buffer to store read data
1593 * @oob_required: caller requires OOB data read to chip->oob_poi
1594 * @page: page number to read
1596 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1597 uint8_t *buf, int oob_required, int page)
1599 int i, eccsize = chip->ecc.size, ret;
1600 int eccbytes = chip->ecc.bytes;
1601 int eccsteps = chip->ecc.steps;
1603 uint8_t *ecc_calc = chip->buffers->ecccalc;
1604 uint8_t *ecc_code = chip->buffers->ecccode;
1605 unsigned int max_bitflips = 0;
1607 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1609 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1610 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1612 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1617 eccsteps = chip->ecc.steps;
1620 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1623 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1625 mtd->ecc_stats.failed++;
1627 mtd->ecc_stats.corrected += stat;
1628 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1631 return max_bitflips;
1635 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1636 * @mtd: mtd info structure
1637 * @chip: nand chip info structure
1638 * @data_offs: offset of requested data within the page
1639 * @readlen: data length
1640 * @bufpoi: buffer to store read data
1641 * @page: page number to read
/*
 * NOTE(review): this excerpt is elided (non-consecutive source lines).
 * FIX: line 1682 previously read "§ion" — the HTML entity "&sect;" had
 * been mis-rendered as the section sign, corrupting the "&section"
 * argument. Restored to pass the address of the local 'int section'
 * declared at 1652, as mtd_ooblayout_find_eccregion() expects.
 */
1643 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1644 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1647 int start_step, end_step, num_steps, ret;
1649 int data_col_addr, i, gaps = 0;
1650 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1651 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1652 int index, section = 0;
1653 unsigned int max_bitflips = 0;
1654 struct mtd_oob_region oobregion = { };
1656 /* Column address within the page aligned to ECC size (256bytes) */
1657 start_step = data_offs / chip->ecc.size;
1658 end_step = (data_offs + readlen - 1) / chip->ecc.size;
1659 num_steps = end_step - start_step + 1;
1660 index = start_step * chip->ecc.bytes;
1662 /* Data size aligned to ECC ecc.size */
1663 datafrag_len = num_steps * chip->ecc.size;
1664 eccfrag_len = num_steps * chip->ecc.bytes;
1666 data_col_addr = start_step * chip->ecc.size;
1667 /* If we read not a page aligned data */
1668 if (data_col_addr != 0)
1669 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1671 p = bufpoi + data_col_addr;
1672 chip->read_buf(mtd, p, datafrag_len);
/* Software-calculate ECC for just the steps covered by the sub-read. */
1675 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1676 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1679 * The performance is faster if we position offsets according to
1680 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1682 ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
1686 if (oobregion.length < eccfrag_len)
/* Gapped layout: fall back to reading the whole OOB area. */
1690 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
1691 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1694 * Send the command to read the particular ECC bytes take care
1695 * about buswidth alignment in read_buf.
1697 aligned_pos = oobregion.offset & ~(busw - 1);
1698 aligned_len = eccfrag_len;
1699 if (oobregion.offset & (busw - 1))
1701 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
1705 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
1706 mtd->writesize + aligned_pos, -1);
1707 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
1710 ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
1711 chip->oob_poi, index, eccfrag_len);
1715 p = bufpoi + data_col_addr;
/* Correct each step; on -EBADMSG, re-check as a possibly-erased chunk. */
1716 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1719 stat = chip->ecc.correct(mtd, p,
1720 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1721 if (stat == -EBADMSG &&
1722 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1723 /* check for empty pages with bitflips */
1724 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1725 &chip->buffers->ecccode[i],
1728 chip->ecc.strength);
1732 mtd->ecc_stats.failed++;
1734 mtd->ecc_stats.corrected += stat;
1735 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1738 return max_bitflips;
1742 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
1743 * @mtd: mtd info structure
1744 * @chip: nand chip info structure
1745 * @buf: buffer to store read data
1746 * @oob_required: caller requires OOB data read to chip->oob_poi
1747 * @page: page number to read
1749 * Not for syndrome calculating ECC controllers which need a special oob layout.
1751 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1752 uint8_t *buf, int oob_required, int page)
1754 int i, eccsize = chip->ecc.size, ret;
1755 int eccbytes = chip->ecc.bytes;
1756 int eccsteps = chip->ecc.steps;
1758 uint8_t *ecc_calc = chip->buffers->ecccalc;
1759 uint8_t *ecc_code = chip->buffers->ecccode;
1760 unsigned int max_bitflips = 0;
1762 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1763 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1764 chip->read_buf(mtd, p, eccsize);
1765 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1767 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1769 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1774 eccsteps = chip->ecc.steps;
1777 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1780 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1781 if (stat == -EBADMSG &&
1782 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1783 /* check for empty pages with bitflips */
1784 stat = nand_check_erased_ecc_chunk(p, eccsize,
1785 &ecc_code[i], eccbytes,
1787 chip->ecc.strength);
1791 mtd->ecc_stats.failed++;
1793 mtd->ecc_stats.corrected += stat;
1794 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1797 return max_bitflips;
1801 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
1802 * @mtd: mtd info structure
1803 * @chip: nand chip info structure
1804 * @buf: buffer to store read data
1805 * @oob_required: caller requires OOB data read to chip->oob_poi
1806 * @page: page number to read
1808 * Hardware ECC for large page chips, require OOB to be read first. For this
1809 * ECC mode, the write_page method is re-used from ECC_HW. These methods
1810 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
1811 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
1812 * the data area, by overwriting the NAND manufacturer bad block markings.
1814 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1815 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1817 int i, eccsize = chip->ecc.size, ret;
1818 int eccbytes = chip->ecc.bytes;
1819 int eccsteps = chip->ecc.steps;
1821 uint8_t *ecc_code = chip->buffers->ecccode;
1822 uint8_t *ecc_calc = chip->buffers->ecccalc;
1823 unsigned int max_bitflips = 0;
1825 /* Read the OOB area first */
1826 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1827 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1828 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1830 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1835 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1838 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1839 chip->read_buf(mtd, p, eccsize);
1840 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1842 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1843 if (stat == -EBADMSG &&
1844 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1845 /* check for empty pages with bitflips */
1846 stat = nand_check_erased_ecc_chunk(p, eccsize,
1847 &ecc_code[i], eccbytes,
1849 chip->ecc.strength);
1853 mtd->ecc_stats.failed++;
1855 mtd->ecc_stats.corrected += stat;
1856 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1859 return max_bitflips;
1863 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
1864 * @mtd: mtd info structure
1865 * @chip: nand chip info structure
1866 * @buf: buffer to store read data
1867 * @oob_required: caller requires OOB data read to chip->oob_poi
1868 * @page: page number to read
1870 * The hw generator calculates the error syndrome automatically. Therefore we
1871 * need a special oob layout and handling.
1873 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1874 uint8_t *buf, int oob_required, int page)
1876 int i, eccsize = chip->ecc.size;
1877 int eccbytes = chip->ecc.bytes;
1878 int eccsteps = chip->ecc.steps;
1879 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
1881 uint8_t *oob = chip->oob_poi;
1882 unsigned int max_bitflips = 0;
1884 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1887 chip->ecc.hwctl(mtd, NAND_ECC_READ);
1888 chip->read_buf(mtd, p, eccsize);
1890 if (chip->ecc.prepad) {
1891 chip->read_buf(mtd, oob, chip->ecc.prepad);
1892 oob += chip->ecc.prepad;
1895 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
1896 chip->read_buf(mtd, oob, eccbytes);
1897 stat = chip->ecc.correct(mtd, p, oob, NULL);
1901 if (chip->ecc.postpad) {
1902 chip->read_buf(mtd, oob, chip->ecc.postpad);
1903 oob += chip->ecc.postpad;
1906 if (stat == -EBADMSG &&
1907 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1908 /* check for empty pages with bitflips */
1909 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1913 chip->ecc.strength);
1917 mtd->ecc_stats.failed++;
1919 mtd->ecc_stats.corrected += stat;
1920 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1924 /* Calculate remaining oob bytes */
1925 i = mtd->oobsize - (oob - chip->oob_poi);
1927 chip->read_buf(mtd, oob, i);
1929 return max_bitflips;
1933 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
1934 * @mtd: mtd info structure
1935 * @oob: oob destination address
1936 * @ops: oob ops structure
1937 * @len: size of oob to transfer
1939 static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
1940 struct mtd_oob_ops *ops, size_t len)
1942 struct nand_chip *chip = mtd_to_nand(mtd);
1945 switch (ops->mode) {
1947 case MTD_OPS_PLACE_OOB:
1949 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
1952 case MTD_OPS_AUTO_OOB:
1953 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
1965 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
1966 * @mtd: MTD device structure
1967 * @retry_mode: the retry mode to use
1969 * Some vendors supply a special command to shift the Vt threshold, to be used
1970 * when there are too many bitflips in a page (i.e., ECC error). After setting
1971 * a new threshold, the host should retry reading the page.
1973 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
1975 struct nand_chip *chip = mtd_to_nand(mtd);
1977 pr_debug("setting READ RETRY mode %d\n", retry_mode);
1979 if (retry_mode >= chip->read_retries)
1982 if (!chip->setup_read_retry)
1985 return chip->setup_read_retry(mtd, retry_mode);
1989 * nand_do_read_ops - [INTERN] Read data with ECC
1990 * @mtd: MTD device structure
1991 * @from: offset to read from
1992 * @ops: oob ops structure
1994 * Internal function. Called with chip held.
1996 static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1997 struct mtd_oob_ops *ops)
1999 int chipnr, page, realpage, col, bytes, aligned, oob_required;
2000 struct nand_chip *chip = mtd_to_nand(mtd);
2002 uint32_t readlen = ops->len;
2003 uint32_t oobreadlen = ops->ooblen;
2004 uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2006 uint8_t *bufpoi, *oob, *buf;
2008 unsigned int max_bitflips = 0;
2010 bool ecc_fail = false;
2012 chipnr = (int)(from >> chip->chip_shift);
2013 chip->select_chip(mtd, chipnr);
2015 realpage = (int)(from >> chip->page_shift);
2016 page = realpage & chip->pagemask;
2018 col = (int)(from & (mtd->writesize - 1));
2022 oob_required = oob ? 1 : 0;
2025 unsigned int ecc_failures = mtd->ecc_stats.failed;
2027 bytes = min(mtd->writesize - col, readlen);
2028 aligned = (bytes == mtd->writesize);
2032 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2033 use_bufpoi = !virt_addr_valid(buf) ||
2034 !IS_ALIGNED((unsigned long)buf,
2039 /* Is the current page in the buffer? */
2040 if (realpage != chip->pagebuf || oob) {
2041 bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
2043 if (use_bufpoi && aligned)
2044 pr_debug("%s: using read bounce buffer for buf@%p\n",
2048 if (nand_standard_page_accessors(&chip->ecc))
2049 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
2052 * Now read the page into the buffer. Absent an error,
2053 * the read methods return max bitflips per ecc step.
2055 if (unlikely(ops->mode == MTD_OPS_RAW))
2056 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
2059 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
2061 ret = chip->ecc.read_subpage(mtd, chip,
2065 ret = chip->ecc.read_page(mtd, chip, bufpoi,
2066 oob_required, page);
2069 /* Invalidate page cache */
2074 /* Transfer not aligned data */
2076 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2077 !(mtd->ecc_stats.failed - ecc_failures) &&
2078 (ops->mode != MTD_OPS_RAW)) {
2079 chip->pagebuf = realpage;
2080 chip->pagebuf_bitflips = ret;
2082 /* Invalidate page cache */
2085 memcpy(buf, chip->buffers->databuf + col, bytes);
2088 if (unlikely(oob)) {
2089 int toread = min(oobreadlen, max_oobsize);
2092 oob = nand_transfer_oob(mtd,
2094 oobreadlen -= toread;
2098 if (chip->options & NAND_NEED_READRDY) {
2099 /* Apply delay or wait for ready/busy pin */
2100 if (!chip->dev_ready)
2101 udelay(chip->chip_delay);
2103 nand_wait_ready(mtd);
2106 if (mtd->ecc_stats.failed - ecc_failures) {
2107 if (retry_mode + 1 < chip->read_retries) {
2109 ret = nand_setup_read_retry(mtd,
2114 /* Reset failures; retry */
2115 mtd->ecc_stats.failed = ecc_failures;
2118 /* No more retry modes; real failure */
2124 max_bitflips = max_t(unsigned int, max_bitflips, ret);
2126 memcpy(buf, chip->buffers->databuf + col, bytes);
2128 max_bitflips = max_t(unsigned int, max_bitflips,
2129 chip->pagebuf_bitflips);
2134 /* Reset to retry mode 0 */
2136 ret = nand_setup_read_retry(mtd, 0);
2145 /* For subsequent reads align to page boundary */
2147 /* Increment page address */
2150 page = realpage & chip->pagemask;
2151 /* Check, if we cross a chip boundary */
2154 chip->select_chip(mtd, -1);
2155 chip->select_chip(mtd, chipnr);
2158 chip->select_chip(mtd, -1);
2160 ops->retlen = ops->len - (size_t) readlen;
2162 ops->oobretlen = ops->ooblen - oobreadlen;
2170 return max_bitflips;
2174 * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
2175 * @mtd: MTD device structure
2176 * @from: offset to read from
2177 * @len: number of bytes to read
2178 * @retlen: pointer to variable to store the number of read bytes
2179 * @buf: the databuffer to put data
2181 * Get hold of the chip and call nand_do_read.
2183 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2184 size_t *retlen, uint8_t *buf)
2186 struct mtd_oob_ops ops;
2189 nand_get_device(mtd, FL_READING);
2190 memset(&ops, 0, sizeof(ops));
2193 ops.mode = MTD_OPS_PLACE_OOB;
2194 ret = nand_do_read_ops(mtd, from, &ops);
2195 *retlen = ops.retlen;
2196 nand_release_device(mtd);
2201 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2202 * @mtd: mtd info structure
2203 * @chip: nand chip info structure
2204 * @page: page number to read
2206 int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2208 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
2209 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2212 EXPORT_SYMBOL(nand_read_oob_std);
2215 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2217 * @mtd: mtd info structure
2218 * @chip: nand chip info structure
2219 * @page: page number to read
2221 int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2224 int length = mtd->oobsize;
2225 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2226 int eccsize = chip->ecc.size;
2227 uint8_t *bufpoi = chip->oob_poi;
2228 int i, toread, sndrnd = 0, pos;
2230 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
2231 for (i = 0; i < chip->ecc.steps; i++) {
2233 pos = eccsize + i * (eccsize + chunk);
2234 if (mtd->writesize > 512)
2235 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
2237 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
2240 toread = min_t(int, length, chunk);
2241 chip->read_buf(mtd, bufpoi, toread);
2246 chip->read_buf(mtd, bufpoi, length);
2250 EXPORT_SYMBOL(nand_read_oob_syndrome);
2253 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2254 * @mtd: mtd info structure
2255 * @chip: nand chip info structure
2256 * @page: page number to write
2258 int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
2261 const uint8_t *buf = chip->oob_poi;
2262 int length = mtd->oobsize;
2264 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
2265 chip->write_buf(mtd, buf, length);
2266 /* Send command to program the OOB data */
2267 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2269 status = chip->waitfunc(mtd, chip);
2271 return status & NAND_STATUS_FAIL ? -EIO : 0;
2273 EXPORT_SYMBOL(nand_write_oob_std);
2276 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2277 * with syndrome - only for large page flash
2278 * @mtd: mtd info structure
2279 * @chip: nand chip info structure
2280 * @page: page number to write
2282 int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2285 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2286 int eccsize = chip->ecc.size, length = mtd->oobsize;
2287 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
2288 const uint8_t *bufpoi = chip->oob_poi;
2291 * data-ecc-data-ecc ... ecc-oob
2293 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2295 if (!chip->ecc.prepad && !chip->ecc.postpad) {
2296 pos = steps * (eccsize + chunk);
2301 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
2302 for (i = 0; i < steps; i++) {
2304 if (mtd->writesize <= 512) {
2305 uint32_t fill = 0xFFFFFFFF;
2309 int num = min_t(int, len, 4);
2310 chip->write_buf(mtd, (uint8_t *)&fill,
2315 pos = eccsize + i * (eccsize + chunk);
2316 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
2320 len = min_t(int, length, chunk);
2321 chip->write_buf(mtd, bufpoi, len);
2326 chip->write_buf(mtd, bufpoi, length);
2328 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2329 status = chip->waitfunc(mtd, chip);
2331 return status & NAND_STATUS_FAIL ? -EIO : 0;
2333 EXPORT_SYMBOL(nand_write_oob_syndrome);
2336 * nand_do_read_oob - [INTERN] NAND read out-of-band
2337 * @mtd: MTD device structure
2338 * @from: offset to read from
2339 * @ops: oob operations description structure
2341 * NAND read out-of-band data from the spare area.
2343 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2344 struct mtd_oob_ops *ops)
2346 int page, realpage, chipnr;
2347 struct nand_chip *chip = mtd_to_nand(mtd);
2348 struct mtd_ecc_stats stats;
2349 int readlen = ops->ooblen;
2351 uint8_t *buf = ops->oobbuf;
2354 pr_debug("%s: from = 0x%08Lx, len = %i\n",
2355 __func__, (unsigned long long)from, readlen);
2357 stats = mtd->ecc_stats;
2359 len = mtd_oobavail(mtd, ops);
2361 if (unlikely(ops->ooboffs >= len)) {
2362 pr_debug("%s: attempt to start read outside oob\n",
2367 /* Do not allow reads past end of device */
2368 if (unlikely(from >= mtd->size ||
2369 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2370 (from >> chip->page_shift)) * len)) {
2371 pr_debug("%s: attempt to read beyond end of device\n",
2376 chipnr = (int)(from >> chip->chip_shift);
2377 chip->select_chip(mtd, chipnr);
2379 /* Shift to get page */
2380 realpage = (int)(from >> chip->page_shift);
2381 page = realpage & chip->pagemask;
2384 if (ops->mode == MTD_OPS_RAW)
2385 ret = chip->ecc.read_oob_raw(mtd, chip, page);
2387 ret = chip->ecc.read_oob(mtd, chip, page);
2392 len = min(len, readlen);
2393 buf = nand_transfer_oob(mtd, buf, ops, len);
2395 if (chip->options & NAND_NEED_READRDY) {
2396 /* Apply delay or wait for ready/busy pin */
2397 if (!chip->dev_ready)
2398 udelay(chip->chip_delay);
2400 nand_wait_ready(mtd);
2407 /* Increment page address */
2410 page = realpage & chip->pagemask;
2411 /* Check, if we cross a chip boundary */
2414 chip->select_chip(mtd, -1);
2415 chip->select_chip(mtd, chipnr);
2418 chip->select_chip(mtd, -1);
2420 ops->oobretlen = ops->ooblen - readlen;
2425 if (mtd->ecc_stats.failed - stats.failed)
2428 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2432 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2433 * @mtd: MTD device structure
2434 * @from: offset to read from
2435 * @ops: oob operation description structure
2437 * NAND read data and/or out-of-band data.
2439 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2440 struct mtd_oob_ops *ops)
2446 /* Do not allow reads past end of device */
2447 if (ops->datbuf && (from + ops->len) > mtd->size) {
2448 pr_debug("%s: attempt to read beyond end of device\n",
2453 if (ops->mode != MTD_OPS_PLACE_OOB &&
2454 ops->mode != MTD_OPS_AUTO_OOB &&
2455 ops->mode != MTD_OPS_RAW)
2458 nand_get_device(mtd, FL_READING);
2461 ret = nand_do_read_oob(mtd, from, ops);
2463 ret = nand_do_read_ops(mtd, from, ops);
2465 nand_release_device(mtd);
2471 * nand_write_page_raw - [INTERN] raw page write function
2472 * @mtd: mtd info structure
2473 * @chip: nand chip info structure
2475 * @oob_required: must write chip->oob_poi to OOB
2476 * @page: page number to write
2478 * Not for syndrome calculating ECC controllers, which use a special oob layout.
2480 int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2481 const uint8_t *buf, int oob_required, int page)
2483 chip->write_buf(mtd, buf, mtd->writesize);
2485 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2489 EXPORT_SYMBOL(nand_write_page_raw);
2492 * nand_write_page_raw_syndrome - [INTERN] raw page write function
2493 * @mtd: mtd info structure
2494 * @chip: nand chip info structure
2496 * @oob_required: must write chip->oob_poi to OOB
2497 * @page: page number to write
2499 * We need a special oob layout and handling even when ECC isn't checked.
2501 static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2502 struct nand_chip *chip,
2503 const uint8_t *buf, int oob_required,
2506 int eccsize = chip->ecc.size;
2507 int eccbytes = chip->ecc.bytes;
2508 uint8_t *oob = chip->oob_poi;
2511 for (steps = chip->ecc.steps; steps > 0; steps--) {
2512 chip->write_buf(mtd, buf, eccsize);
2515 if (chip->ecc.prepad) {
2516 chip->write_buf(mtd, oob, chip->ecc.prepad);
2517 oob += chip->ecc.prepad;
2520 chip->write_buf(mtd, oob, eccbytes);
2523 if (chip->ecc.postpad) {
2524 chip->write_buf(mtd, oob, chip->ecc.postpad);
2525 oob += chip->ecc.postpad;
2529 size = mtd->oobsize - (oob - chip->oob_poi);
2531 chip->write_buf(mtd, oob, size);
2536 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2537 * @mtd: mtd info structure
2538 * @chip: nand chip info structure
2540 * @oob_required: must write chip->oob_poi to OOB
2541 * @page: page number to write
2543 static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2544 const uint8_t *buf, int oob_required,
2547 int i, eccsize = chip->ecc.size, ret;
2548 int eccbytes = chip->ecc.bytes;
2549 int eccsteps = chip->ecc.steps;
2550 uint8_t *ecc_calc = chip->buffers->ecccalc;
2551 const uint8_t *p = buf;
2553 /* Software ECC calculation */
2554 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2555 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2557 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2562 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2566 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2567 * @mtd: mtd info structure
2568 * @chip: nand chip info structure
2570 * @oob_required: must write chip->oob_poi to OOB
2571 * @page: page number to write
2573 static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2574 const uint8_t *buf, int oob_required,
2577 int i, eccsize = chip->ecc.size, ret;
2578 int eccbytes = chip->ecc.bytes;
2579 int eccsteps = chip->ecc.steps;
2580 uint8_t *ecc_calc = chip->buffers->ecccalc;
2581 const uint8_t *p = buf;
2583 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2584 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2585 chip->write_buf(mtd, p, eccsize);
2586 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2589 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2594 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2601 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2602 * @mtd: mtd info structure
2603 * @chip: nand chip info structure
2604 * @offset: column address of subpage within the page
2605 * @data_len: data length
 * @buf: page-sized data buffer (untouched subpages already 0xFF-padded)
2607 * @oob_required: must write chip->oob_poi to OOB
2608 * @page: page number to write
2610 static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2611 struct nand_chip *chip, uint32_t offset,
2612 uint32_t data_len, const uint8_t *buf,
2613 int oob_required, int page)
2615 uint8_t *oob_buf = chip->oob_poi;
2616 uint8_t *ecc_calc = chip->buffers->ecccalc;
2617 int ecc_size = chip->ecc.size;
2618 int ecc_bytes = chip->ecc.bytes;
2619 int ecc_steps = chip->ecc.steps;
 /* Range of ECC steps actually covered by the caller's data */
2620 uint32_t start_step = offset / ecc_size;
2621 uint32_t end_step = (offset + data_len - 1) / ecc_size;
 /* Per-step OOB share; assumes oobsize splits evenly across ECC steps */
2622 int oob_bytes = mtd->oobsize / ecc_steps;
2625 for (step = 0; step < ecc_steps; step++) {
2626 /* configure controller for WRITE access */
2627 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2629 /* write data (untouched subpages already masked by 0xFF) */
2630 chip->write_buf(mtd, buf, ecc_size);
2632 /* mask ECC of un-touched subpages by padding 0xFF */
 /* all-0xFF ECC leaves those OOB cells unprogrammed on the device */
2633 if ((step < start_step) || (step > end_step))
2634 memset(ecc_calc, 0xff, ecc_bytes);
2636 chip->ecc.calculate(mtd, buf, ecc_calc);
2638 /* mask OOB of un-touched subpages by padding 0xFF */
2639 /* if oob_required, preserve OOB metadata of written subpage */
2640 if (!oob_required || (step < start_step) || (step > end_step))
2641 memset(oob_buf, 0xff, oob_bytes);
2644 ecc_calc += ecc_bytes;
2645 oob_buf += oob_bytes;
2648 /* copy calculated ECC for whole page to chip->buffer->oob */
2649 /* this includes the masked value (0xFF) for unwritten subpages */
2650 ecc_calc = chip->buffers->ecccalc;
2651 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2656 /* write OOB buffer to NAND device */
2657 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2664 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
2665 * @mtd: mtd info structure
2666 * @chip: nand chip info structure
 * @buf: data buffer to write (one full page)
2668 * @oob_required: must write chip->oob_poi to OOB
2669 * @page: page number to write
2671 * The hw generator calculates the error syndrome automatically. Therefore we
2672 * need a special oob layout and handling.
2674 static int nand_write_page_syndrome(struct mtd_info *mtd,
2675 struct nand_chip *chip,
2676 const uint8_t *buf, int oob_required,
2679 int i, eccsize = chip->ecc.size;
2680 int eccbytes = chip->ecc.bytes;
2681 int eccsteps = chip->ecc.steps;
2682 const uint8_t *p = buf;
2683 uint8_t *oob = chip->oob_poi;
 /* Interleaved layout per step: data chunk, optional prepad, ECC bytes,
  * optional postpad — written in device order. */
2685 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2687 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2688 chip->write_buf(mtd, p, eccsize);
2690 if (chip->ecc.prepad) {
2691 chip->write_buf(mtd, oob, chip->ecc.prepad);
2692 oob += chip->ecc.prepad;
 /* ECC is generated into oob_poi so the caller's view matches the chip */
2695 chip->ecc.calculate(mtd, p, oob);
2696 chip->write_buf(mtd, oob, eccbytes);
2699 if (chip->ecc.postpad) {
2700 chip->write_buf(mtd, oob, chip->ecc.postpad);
2701 oob += chip->ecc.postpad;
2705 /* Calculate remaining oob bytes */
2706 i = mtd->oobsize - (oob - chip->oob_poi);
2708 chip->write_buf(mtd, oob, i);
2714 * nand_write_page - write one page
2715 * @mtd: MTD device structure
2716 * @chip: NAND chip descriptor
2717 * @offset: address offset within the page
2718 * @data_len: length of actual data to be written
2719 * @buf: the data to write
2720 * @oob_required: must write chip->oob_poi to OOB
2721 * @page: page number to write
2722 * @cached: cached programming
2723 * @raw: use _raw version of write_page
2725 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2726 uint32_t offset, int data_len, const uint8_t *buf,
2727 int oob_required, int page, int cached, int raw)
2729 int status, subpage;
 /* Subpage path only when the controller allows it and the request does
  * not cover a full, aligned page. */
2731 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
2732 chip->ecc.write_subpage)
2733 subpage = offset || (data_len < mtd->writesize);
2737 if (nand_standard_page_accessors(&chip->ecc))
2738 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page)
 /* Dispatch: raw bypasses ECC, subpage writes a partial page, otherwise
  * use the full-page ECC writer. */
2741 status = chip->ecc.write_page_raw(mtd, chip, buf,
2742 oob_required, page);
2744 status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
2745 buf, oob_required, page);
2747 status = chip->ecc.write_page(mtd, chip, buf, oob_required,
2754 * Cached programming disabled for now. Not sure if it's worth the
2755 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
2759 if (!cached || !NAND_HAS_CACHEPROG(chip)) {
2761 if (nand_standard_page_accessors(&chip->ecc))
2762 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
2763 status = chip->waitfunc(mtd, chip);
2765 * See if operation failed and additional status checks are
2768 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
2769 status = chip->errstat(mtd, chip, FL_WRITING, status,
2772 if (status & NAND_STATUS_FAIL)
 /* Cached-programming path: issue CACHEDPROG then wait */
2775 chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
2776 status = chip->waitfunc(mtd, chip);
2783 * nand_fill_oob - [INTERN] Transfer client buffer to oob
2784 * @mtd: MTD device structure
2785 * @oob: oob data buffer
2786 * @len: oob data write length
2787 * @ops: oob ops structure
 *
 * Stages caller-supplied OOB bytes into chip->oob_poi according to the
 * operation mode; untouched OOB bytes are left at 0xFF so they stay
 * unprogrammed on the device.
2789 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2790 struct mtd_oob_ops *ops)
2792 struct nand_chip *chip = mtd_to_nand(mtd);
2796 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2797 * data from a previous OOB read.
2799 memset(chip->oob_poi, 0xff, mtd->oobsize);
2801 switch (ops->mode) {
 /* Caller supplies raw OOB bytes at an absolute offset within the OOB */
2803 case MTD_OPS_PLACE_OOB:
2805 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
 /* Scatter caller bytes into the free OOB regions per the ooblayout */
2808 case MTD_OPS_AUTO_OOB:
2809 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
/*
 * NOTALIGNED() - nonzero if @x is not aligned to the chip's subpage size.
 * Relies on chip->subpagesize being a power of two and on a 'chip'
 * pointer being in scope at the expansion site.  The argument is
 * parenthesized so expression arguments (e.g. containing ==, ?:, |)
 * expand with the intended precedence.
 */
#define NOTALIGNED(x) (((x) & (chip->subpagesize - 1)) != 0)
2823 * nand_do_write_ops - [INTERN] NAND write with ECC
2824 * @mtd: MTD device structure
2825 * @to: offset to write to
2826 * @ops: oob operations description structure
2828 * NAND write with ECC.
2830 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2831 struct mtd_oob_ops *ops)
2833 int chipnr, realpage, page, blockmask, column;
2834 struct nand_chip *chip = mtd_to_nand(mtd);
2835 uint32_t writelen = ops->len;
2837 uint32_t oobwritelen = ops->ooblen;
2838 uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2840 uint8_t *oob = ops->oobbuf;
2841 uint8_t *buf = ops->datbuf;
2843 int oob_required = oob ? 1 : 0;
2849 /* Reject writes, which are not page aligned */
 /* Alignment is enforced at subpage granularity (see NOTALIGNED) */
2850 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
2851 pr_notice("%s: attempt to write non page aligned data\n",
2856 column = to & (mtd->writesize - 1);
2858 chipnr = (int)(to >> chip->chip_shift);
2859 chip->select_chip(mtd, chipnr);
2861 /* Check, if it is write protected */
2862 if (nand_check_wp(mtd)) {
2867 realpage = (int)(to >> chip->page_shift);
2868 page = realpage & chip->pagemask;
2869 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2871 /* Invalidate the page cache, when we write to the cached page */
2872 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
2873 ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
2876 /* Don't allow multipage oob writes with offset */
2877 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
 /* Main loop: program one NAND page per iteration */
2883 int bytes = mtd->writesize;
 /* Cached programming is only attempted when more data follows and we
  * are not on the last page of the erase block. */
2884 int cached = writelen > bytes && page != blockmask;
2885 uint8_t *wbuf = buf;
2887 int part_pagewr = (column || writelen < mtd->writesize);
2891 else if (chip->options & NAND_USE_BOUNCE_BUFFER)
 /* DMA-capable controllers need a virt-addressable, aligned buffer */
2892 use_bufpoi = !virt_addr_valid(buf) ||
2893 !IS_ALIGNED((unsigned long)buf,
2898 /* Partial page write?, or need to use bounce buffer */
2900 pr_debug("%s: using write bounce buffer for buf@%p\n",
 /* Pad the bounce buffer with 0xFF so untouched bytes stay unprogrammed */
2904 bytes = min_t(int, bytes - column, writelen);
2906 memset(chip->buffers->databuf, 0xff, mtd->writesize);
2907 memcpy(&chip->buffers->databuf[column], buf, bytes);
2908 wbuf = chip->buffers->databuf;
2911 if (unlikely(oob)) {
2912 size_t len = min(oobwritelen, oobmaxlen);
2913 oob = nand_fill_oob(mtd, oob, len, ops);
2916 /* We still need to erase leftover OOB data */
2917 memset(chip->oob_poi, 0xff, mtd->oobsize);
2920 ret = nand_write_page(mtd, chip, column, bytes, wbuf,
2921 oob_required, page, cached,
2922 (ops->mode == MTD_OPS_RAW));
2934 page = realpage & chip->pagemask;
2935 /* Check, if we cross a chip boundary */
 /* Deselect, then select the next die */
2938 chip->select_chip(mtd, -1);
2939 chip->select_chip(mtd, chipnr);
2943 ops->retlen = ops->len - writelen;
2945 ops->oobretlen = ops->ooblen;
2948 chip->select_chip(mtd, -1);
2953 * panic_nand_write - [MTD Interface] NAND write with ECC
2954 * @mtd: MTD device structure
2955 * @to: offset to write to
2956 * @len: number of bytes to write
2957 * @retlen: pointer to variable to store the number of written bytes
2958 * @buf: the data to write
2960 * NAND write with ECC. Used when performing writes in interrupt context, this
2961 * may for example be called by mtdoops when writing an oops while in panic.
2963 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2964 size_t *retlen, const uint8_t *buf)
2966 struct nand_chip *chip = mtd_to_nand(mtd);
2967 struct mtd_oob_ops ops;
2970 /* Wait for the device to get ready */
2971 panic_nand_wait(mtd, chip, 400);
2973 /* Grab the device */
 /* Cannot sleep in panic context, so this bypasses the normal
  * nand_get_device() locking path. */
2974 panic_nand_get_device(chip, mtd, FL_WRITING);
2976 memset(&ops, 0, sizeof(ops));
2978 ops.datbuf = (uint8_t *)buf;
 /* Data-only write: no OOB buffer is supplied */
2979 ops.mode = MTD_OPS_PLACE_OOB;
2981 ret = nand_do_write_ops(mtd, to, &ops);
2983 *retlen = ops.retlen;
2988 * nand_write - [MTD Interface] NAND write with ECC
2989 * @mtd: MTD device structure
2990 * @to: offset to write to
2991 * @len: number of bytes to write
2992 * @retlen: pointer to variable to store the number of written bytes
2993 * @buf: the data to write
2995 * NAND write with ECC.
2997 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2998 size_t *retlen, const uint8_t *buf)
3000 struct mtd_oob_ops ops;
 /* Serialize against other users of the device for the whole write */
3003 nand_get_device(mtd, FL_WRITING);
3004 memset(&ops, 0, sizeof(ops));
3006 ops.datbuf = (uint8_t *)buf;
 /* Data-only write: no OOB buffer is supplied */
3007 ops.mode = MTD_OPS_PLACE_OOB;
3008 ret = nand_do_write_ops(mtd, to, &ops);
3009 *retlen = ops.retlen;
3010 nand_release_device(mtd);
3015 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
3016 * @mtd: MTD device structure
3017 * @to: offset to write to
3018 * @ops: oob operation description structure
3020 * NAND write out-of-band.
3022 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
3023 struct mtd_oob_ops *ops)
3025 int chipnr, page, status, len;
3026 struct nand_chip *chip = mtd_to_nand(mtd);
3028 pr_debug("%s: to = 0x%08x, len = %i\n",
3029 __func__, (unsigned int)to, (int)ops->ooblen);
 /* Available OOB bytes per page for this op mode (raw vs. auto layout) */
3031 len = mtd_oobavail(mtd, ops);
3033 /* Do not allow write past end of page */
3034 if ((ops->ooboffs + ops->ooblen) > len) {
3035 pr_debug("%s: attempt to write past end of page\n",
3040 if (unlikely(ops->ooboffs >= len)) {
3041 pr_debug("%s: attempt to start write outside oob\n",
3046 /* Do not allow write past end of device */
3047 if (unlikely(to >= mtd->size ||
3048 ops->ooboffs + ops->ooblen >
3049 ((mtd->size >> chip->page_shift) -
3050 (to >> chip->page_shift)) * len)) {
3051 pr_debug("%s: attempt to write beyond end of device\n",
3056 chipnr = (int)(to >> chip->chip_shift);
3059 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
3060 * of my DiskOnChip 2000 test units) will clear the whole data page too
3061 * if we don't do this. I have no clue why, but I seem to have 'fixed'
3062 * it in the doc2000 driver in August 1999. dwmw2.
3064 nand_reset(chip, chipnr);
3066 chip->select_chip(mtd, chipnr);
3068 /* Shift to get page */
3069 page = (int)(to >> chip->page_shift);
3071 /* Check, if it is write protected */
3072 if (nand_check_wp(mtd)) {
3073 chip->select_chip(mtd, -1);
3077 /* Invalidate the page cache, if we write to the cached page */
3078 if (page == chip->pagebuf)
 /* Stage caller OOB bytes into chip->oob_poi (rest is 0xFF-padded) */
3081 nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3083 if (ops->mode == MTD_OPS_RAW)
3084 status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3086 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3088 chip->select_chip(mtd, -1);
3093 ops->oobretlen = ops->ooblen;
3099 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3100 * @mtd: MTD device structure
3101 * @to: offset to write to
3102 * @ops: oob operation description structure
 *
 * Top-level entry: validates the range, takes the device lock, then
 * dispatches to the OOB-only or data(+OOB) write helper.
3104 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3105 struct mtd_oob_ops *ops)
3107 int ret = -ENOTSUPP;
3111 /* Do not allow writes past end of device */
3112 if (ops->datbuf && (to + ops->len) > mtd->size) {
3113 pr_debug("%s: attempt to write beyond end of device\n",
3118 nand_get_device(mtd, FL_WRITING);
 /* Unsupported modes fall through and keep ret = -ENOTSUPP */
3120 switch (ops->mode) {
3121 case MTD_OPS_PLACE_OOB:
3122 case MTD_OPS_AUTO_OOB:
3131 ret = nand_do_write_oob(mtd, to, ops);
3133 ret = nand_do_write_ops(mtd, to, ops);
3136 nand_release_device(mtd);
3141 * single_erase - [GENERIC] NAND standard block erase command function
3142 * @mtd: MTD device structure
3143 * @page: the page address of the block which will be erased
3145 * Standard erase command for NAND chips. Returns NAND status.
3147 static int single_erase(struct mtd_info *mtd, int page)
3149 struct nand_chip *chip = mtd_to_nand(mtd);
3150 /* Send commands to erase a block */
 /* ERASE1 latches the block address, ERASE2 starts the operation */
3151 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
3152 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
 /* Block until the chip is ready and return its status byte */
3154 return chip->waitfunc(mtd, chip);
3158 * nand_erase - [MTD Interface] erase block(s)
3159 * @mtd: MTD device structure
3160 * @instr: erase instruction
3162 * Erase one or more blocks.
3164 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
 /* Thin wrapper: allowbbt = 0, i.e. the BBT area must not be erased */
3166 return nand_erase_nand(mtd, instr, 0);
3170 * nand_erase_nand - [INTERN] erase block(s)
3171 * @mtd: MTD device structure
3172 * @instr: erase instruction
3173 * @allowbbt: allow erasing the bbt area
3175 * Erase one or more blocks.
3177 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3180 int page, status, pages_per_block, ret, chipnr;
3181 struct nand_chip *chip = mtd_to_nand(mtd);
3184 pr_debug("%s: start = 0x%012llx, len = %llu\n",
3185 __func__, (unsigned long long)instr->addr,
3186 (unsigned long long)instr->len);
 /* Reject requests not aligned to erase-block boundaries */
3188 if (check_offs_len(mtd, instr->addr, instr->len))
3191 /* Grab the lock and see if the device is available */
3192 nand_get_device(mtd, FL_ERASING);
3194 /* Shift to get first page */
3195 page = (int)(instr->addr >> chip->page_shift);
3196 chipnr = (int)(instr->addr >> chip->chip_shift);
3198 /* Calculate pages in each block */
3199 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3201 /* Select the NAND device */
3202 chip->select_chip(mtd, chipnr);
3204 /* Check, if it is write protected */
3205 if (nand_check_wp(mtd)) {
3206 pr_debug("%s: device is write protected!\n",
3208 instr->state = MTD_ERASE_FAILED;
3212 /* Loop through the pages */
3215 instr->state = MTD_ERASING;
3218 /* Check if we have a bad block, we do not erase bad blocks! */
 /* allowbbt permits erasing the BBT area itself (BBT maintenance) */
3219 if (nand_block_checkbad(mtd, ((loff_t) page) <<
3220 chip->page_shift, allowbbt)) {
3221 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3223 instr->state = MTD_ERASE_FAILED;
3228 * Invalidate the page cache, if we erase the block which
3229 * contains the current cached page.
3231 if (page <= chip->pagebuf && chip->pagebuf <
3232 (page + pages_per_block))
3235 status = chip->erase(mtd, page & chip->pagemask);
3238 * See if operation failed and additional status checks are
3241 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
3242 status = chip->errstat(mtd, chip, FL_ERASING,
3245 /* See if block erase succeeded */
3246 if (status & NAND_STATUS_FAIL) {
3247 pr_debug("%s: failed erase, page 0x%08x\n",
3249 instr->state = MTD_ERASE_FAILED;
 /* Record the failing address for the caller */
3251 ((loff_t)page << chip->page_shift);
3255 /* Increment page address and decrement length */
3256 len -= (1ULL << chip->phys_erase_shift);
3257 page += pages_per_block;
3259 /* Check, if we cross a chip boundary */
3260 if (len && !(page & chip->pagemask)) {
 /* Deselect current die, then select the next one */
3262 chip->select_chip(mtd, -1);
3263 chip->select_chip(mtd, chipnr);
3266 instr->state = MTD_ERASE_DONE;
3270 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3272 /* Deselect and wake up anyone waiting on the device */
3273 chip->select_chip(mtd, -1);
3274 nand_release_device(mtd);
3276 /* Do call back function */
3278 mtd_erase_callback(instr);
3280 /* Return more or less happy */
3285 * nand_sync - [MTD Interface] sync
3286 * @mtd: MTD device structure
3288 * Sync is actually a wait for chip ready function.
3290 static void nand_sync(struct mtd_info *mtd)
3292 pr_debug("%s: called\n", __func__);
3294 /* Grab the lock and see if the device is available */
 /* Acquiring the lock blocks until any in-flight operation finishes;
  * releasing it immediately afterwards is the whole "sync". */
3295 nand_get_device(mtd, FL_SYNCING);
3296 /* Release it and go back */
3297 nand_release_device(mtd);
3301 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3302 * @mtd: MTD device structure
3303 * @offs: offset relative to mtd start
3305 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3307 struct nand_chip *chip = mtd_to_nand(mtd);
3308 int chipnr = (int)(offs >> chip->chip_shift);
3311 /* Select the NAND device */
 /* FL_READING: the bad-block check may need to read markers from flash */
3312 nand_get_device(mtd, FL_READING);
3313 chip->select_chip(mtd, chipnr);
 /* allowbbt = 0: the BBT area itself is reported as-is */
3315 ret = nand_block_checkbad(mtd, offs, 0);
3317 chip->select_chip(mtd, -1);
3318 nand_release_device(mtd);
3324 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3325 * @mtd: MTD device structure
3326 * @ofs: offset relative to mtd start
3328 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3332 ret = nand_block_isbad(mtd, ofs);
3334 /* If it was bad already, return success and do nothing */