/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

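/*
 * JEDEC device IDs that get special handling in the fixup tables below.
 * The SST49LF parts are firmware-hub style flashes (hence the fwh_lock
 * fixups); the Atmel AT49BV6416 powers up with all of its sectors locked.
 */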
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

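/*
 * The rev B SST39VF parts use the shorter 0x555/0x2AA unlock addresses
 * and a 0x50 sector-erase opcode in place of the usual 0x30, hence the
 * separate fixup below.
 */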
static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

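/*
 * A note on the EraseRegionInfo words patched below: per CFI, bits 31..16
 * hold the sector size in units of 256 bytes and bits 15..0 hold the
 * number of sectors minus one.
 */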
static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8 bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

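/* Device-specific fixups applied to chips probed in CFI mode. */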
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where
	 * we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most of the cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

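/*
 * Main entry point: build an mtd_info for a chip speaking the AMD/Fujitsu
 * (0x0002) command set.  Command sets 0x0006 and 0x0701 are aliased to
 * this routine further down.
 */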
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->flags    = MTD_CAP_NORFLASH;
	mtd->name     = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}

struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by bits toggling or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

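/*
 * Wait (or suspend a pending erase) until the chip can accept an operation
 * of type 'mode'.  Must be called with chip->mutex held; may drop and
 * retake it while sleeping.
 */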
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

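/*
 * The SecSi (Secured Silicon) region is entered with the AA/55/88 unlock
 * sequence and left again with AA/55/90 followed by a 0x00 cycle, as done
 * below.
 */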
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

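/*
 * A single word program is the 3-cycle sequence AA/55/A0 at the unlock
 * addresses followed by the data word written to its target address; the
 * toggle bits then indicate completion.
 */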
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip doesn't have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

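/*
 * Word-programming front end: read-modify-write is used for the unaligned
 * head and tail so that only whole bus words are ever programmed, and the
 * loop walks across chip boundaries as needed.
 */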
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

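/*
 * Buffered programming sequence, as issued below: AA/55 unlock, 0x25
 * (Write to Buffer) at the sector address, a word count minus one, the
 * data words themselves, then 0x29 (Program Buffer to Flash) to confirm.
 */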
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/*
	 * Recovery from write-buffer programming failures requires
	 * the write-to-buffer-reset sequence.  Since the last part
	 * of the sequence also works as a normal reset, we can run
	 * the same commands regardless of why we are here.
	 * See e.g.
	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
	 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
	       __func__, adr);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}

/*
 * Wait for the flash chip to become ready to write data
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thinks the chip is idle, and no toggle bits
	 * are changing, then the chip is actually idle for sure.
	 */
	if (chip->state == FL_READY && chip_ready(map, adr))
		return 0;

	/*
	 * Try several times to reset the chip and then wait for it
	 * to become idle. The upper limit of a few milliseconds of
	 * delay isn't a big problem: the kernel is dying anyway. It
	 * is more important to save the messages.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* send the reset command */
		map_write(map, CMD(0xF0), chip->start);

		/* wait for the chip to become ready */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, adr))
				return 0;

			udelay(1);
		}

		retries--;
	}

	/* the chip never became ready */
	return -EBUSY;
}

/*
 * Write out one word of data to a single flash chip during a kernel panic
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 *
 * The implementation of this routine is intentionally similar to
 * do_write_oneword(), in order to ease code maintenance.
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret = 0;
	int i;

	adr += chip->start;

	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
			__func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, adr))
			break;

		udelay(1);
	}

	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}

op_done:
	DISABLE_VPP(map);
	return ret;
}

/*
 * Write out some data during a kernel panic
 *
 * This is used by the mtdoops driver to save the dying messages from a
 * kernel which has panic'd.
 *
 * This routine ignores all of the locking used throughout the rest of the
 * driver, in order to ensure that the data gets written out no matter what
 * state this driver (and the flash chip itself) was in when the kernel crashed.
 *
 * The implementation of this routine is intentionally similar to
 * cfi_amdstd_write_words(), in order to ease code maintenance.
 */
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, chipstart;
	int ret = 0;
	int chipnum;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus aligned, do the first byte write */
	if (ofs & (map_bankwidth(map) - 1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
		if (ret)
			return ret;

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;

			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map) - 1)) {
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
		if (ret)
			return ret;

		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

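/*
 * A sector erase is the six-cycle sequence AA/55/80/AA/55 followed by the
 * sector-erase opcode (cfi->sector_erase_cmd, usually 0x30) written to the
 * address of the sector itself.
 */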
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

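/*
 * Atmel softlock handling: the AA/55/80/AA/55 prefix followed by 0x40 at
 * the sector address locks a sector; a single AA cycle at the first unlock
 * address followed by 0x70 at the sector address unlocks it again.
 */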
2112 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2113 unsigned long adr, int len, void *thunk)
2115 struct cfi_private *cfi = map->fldrv_priv;
2118 mutex_lock(&chip->mutex);
2119 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2122 chip->state = FL_LOCKING;
2124 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2126 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2127 cfi->device_type, NULL);
2128 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2129 cfi->device_type, NULL);
2130 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2131 cfi->device_type, NULL);
2132 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2133 cfi->device_type, NULL);
2134 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2135 cfi->device_type, NULL);
2136 map_write(map, CMD(0x40), chip->start + adr);
2138 chip->state = FL_READY;
2139 put_chip(map, chip, adr + chip->start);
2140 ret = 0;
2142 out_unlock:
2143 mutex_unlock(&chip->mutex);
2144 return ret;
2145 }
2147 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2148 unsigned long adr, int len, void *thunk)
2150 struct cfi_private *cfi = map->fldrv_priv;
2151 int ret;
2153 mutex_lock(&chip->mutex);
2154 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2155 if (ret)
2156 goto out_unlock;
2157 chip->state = FL_UNLOCKING;
2159 pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
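/*
 * Unlocking needs only a single 0xAA unlock cycle followed by 0x70
 * written to the sector address.
 */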
2161 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2162 cfi->device_type, NULL);
2163 map_write(map, CMD(0x70), adr);
2165 chip->state = FL_READY;
2166 put_chip(map, chip, adr + chip->start);
2167 ret = 0;
2169 out_unlock:
2170 mutex_unlock(&chip->mutex);
2171 return ret;
2172 }
2174 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2176 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2179 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2181 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2184 /*
2185 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2186 */
2188 struct ppb_lock {
2189 struct flchip *chip;
2190 loff_t offset;
2191 int locked;
2192 };
2194 #define MAX_SECTORS 512
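/*
 * Fixed upper bound on the number of sectors whose PPB state can be
 * snapshotted while cfi_ppb_unlock() temporarily unlocks the chip.
 */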
2196 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
2197 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
2198 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
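/*
 * These cookies are passed through cfi_varsize_frob()'s thunk argument
 * so that a single callback, do_ppb_xxlock(), can implement lock,
 * unlock and get-lock-status, e.g.:
 *
 *   cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
 *                    DO_XXLOCK_ONEBLOCK_LOCK);
 */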
2200 static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2201 struct flchip *chip,
2202 unsigned long adr, int len, void *thunk)
2204 struct cfi_private *cfi = map->fldrv_priv;
2205 unsigned long timeo;
2206 int ret;
2208 mutex_lock(&chip->mutex);
2209 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2210 if (ret) {
2211 mutex_unlock(&chip->mutex);
2212 return ret;
2213 }
2215 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2217 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2218 cfi->device_type, NULL);
2219 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2220 cfi->device_type, NULL);
2221 /* PPB entry command */
2222 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2223 cfi->device_type, NULL);
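/*
 * Once the PPB command set has been entered: writing 0xA0 then 0x00 at
 * a sector address programs that sector's persistent protection bit,
 * 0x80/0x30 erases all PPBs at once, and a plain read at the sector
 * address returns the current PPB state.
 */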
2225 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2226 chip->state = FL_LOCKING;
2227 map_write(map, CMD(0xA0), chip->start + adr);
2228 map_write(map, CMD(0x00), chip->start + adr);
2229 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2230 /*
2231 * Unlocking one specific sector is not supported, so we
2232 * have to unlock all sectors of this device instead.
2233 */
2234 chip->state = FL_UNLOCKING;
2235 map_write(map, CMD(0x80), chip->start);
2236 map_write(map, CMD(0x30), chip->start);
2237 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2238 chip->state = FL_JEDEC_QUERY;
2239 /* Query returns 0 for locked, 1 for unlocked; invert so ret=1 means locked */
2240 ret = !cfi_read_query(map, adr);
2244 /*
2245 * Unlocking all sectors can take quite a while, so wait for the chip to become ready again
2246 */
2247 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
2248 for (;;) {
2249 if (chip_ready(map, adr))
2250 break;
2252 if (time_after(jiffies, timeo)) {
2253 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2254 ret = -EIO;
2255 break;
2256 }
2258 UDELAY(map, chip, adr, 1);
2259 }
2261 /* Exit the PPB command set, back to read array mode */
2262 map_write(map, CMD(0x90), chip->start);
2263 map_write(map, CMD(0x00), chip->start);
2265 chip->state = FL_READY;
2266 put_chip(map, chip, adr + chip->start);
2267 mutex_unlock(&chip->mutex);
2269 return ret;
2270 }
2272 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2273 uint64_t len)
2275 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2276 DO_XXLOCK_ONEBLOCK_LOCK);
2277 }
2279 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2280 uint64_t len)
2282 struct mtd_erase_region_info *regions = mtd->eraseregions;
2283 struct map_info *map = mtd->priv;
2284 struct cfi_private *cfi = map->fldrv_priv;
2285 struct ppb_lock *sect;
2286 unsigned long adr;
2287 loff_t offset;
2288 uint64_t length;
2289 int chipnum;
2290 int i;
2291 int sectors;
2292 int ret;
2294 /*
2295 * PPB unlocking always unlocks all sectors of the flash chip.
2296 * We need to re-lock all previously locked sectors. So let's
2297 * first check the locking status of all sectors and save
2298 * it for future use.
2299 */
2300 sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
2301 if (!sect)
2302 return -ENOMEM;
2304 /*
2305 * This code to walk all sectors is a slightly modified version
2306 * of the cfi_varsize_frob() code.
2307 */
2308 i = 0;
2309 chipnum = 0;
2310 adr = 0;
2311 sectors = 0;
2312 offset = 0;
2313 length = mtd->size;
2315 while (length) {
2316 int size = regions[i].erasesize;
2318 /*
2319 * Only test sectors that shall not be unlocked. The other
2320 * sectors shall be unlocked anyway, so let's keep their locking
2321 * status at "unlocked" (locked=0) for the final re-locking.
2322 */
2323 if ((adr < ofs) || (adr >= (ofs + len))) {
2324 sect[sectors].chip = &cfi->chips[chipnum];
2325 sect[sectors].offset = offset;
2326 sect[sectors].locked = do_ppb_xxlock(
2327 map, &cfi->chips[chipnum], adr, 0,
2328 DO_XXLOCK_ONEBLOCK_GETLOCK);
2329 }
2331 adr += size;
2332 offset += size;
2333 length -= size;
2335 if (offset == regions[i].offset + size * regions[i].numblocks)
2336 i++;
2338 if (adr >> cfi->chipshift) {
2339 adr = 0;
2340 chipnum++;
2342 if (chipnum >= cfi->numchips)
2343 break;
2344 }
2346 sectors++;
2347 if (sectors >= MAX_SECTORS) {
2348 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2349 MAX_SECTORS);
2350 kfree(sect);
2351 return -EINVAL;
2352 }
2353 }
2355 /* Now unlock the whole chip */
2356 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2357 DO_XXLOCK_ONEBLOCK_UNLOCK);
2358 if (ret) {
2359 kfree(sect);
2360 return ret;
2361 }
2363 /*
2364 * PPB unlocking always unlocks all sectors of the flash chip.
2365 * We need to re-lock all previously locked sectors.
2366 */
2367 for (i = 0; i < sectors; i++) {
2368 if (sect[i].locked)
2369 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
2370 DO_XXLOCK_ONEBLOCK_LOCK);
2371 }
2373 kfree(sect);
2374 return ret;
2375 }
2377 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2378 uint64_t len)
2380 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2381 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2382 }
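/*
 * Note: cfi_varsize_frob() stops at the first non-zero callback result,
 * so cfi_ppb_is_locked() reports 1 as soon as any sector in the range
 * is found locked.
 */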
2384 static void cfi_amdstd_sync (struct mtd_info *mtd)
2386 struct map_info *map = mtd->priv;
2387 struct cfi_private *cfi = map->fldrv_priv;
2388 int i;
2389 struct flchip *chip;
2390 int ret = 0;
2391 DECLARE_WAITQUEUE(wait, current);
2393 for (i = 0; !ret && i < cfi->numchips; i++) {
2394 chip = &cfi->chips[i];
2396 retry:
2397 mutex_lock(&chip->mutex);
2399 switch (chip->state) {
2400 case FL_READY:
2401 case FL_STATUS:
2402 case FL_CFI_QUERY:
2403 case FL_JEDEC_QUERY:
2404 chip->oldstate = chip->state;
2405 chip->state = FL_SYNCING;
2406 /* No need to wake_up() on this state change -
2407 * as the whole point is that nobody can do anything
2408 * with the chip now anyway.
2409 */
2410 case FL_SYNCING:
2411 mutex_unlock(&chip->mutex);
2412 break;
2414 default:
2415 /* Not an idle state */
2416 set_current_state(TASK_UNINTERRUPTIBLE);
2417 add_wait_queue(&chip->wq, &wait);
2419 mutex_unlock(&chip->mutex);
2421 schedule();
2423 remove_wait_queue(&chip->wq, &wait);
2425 goto retry;
2426 }
2427 }
2429 /* Unlock the chips again */
2431 for (i--; i >= 0; i--) {
2432 chip = &cfi->chips[i];
2434 mutex_lock(&chip->mutex);
2436 if (chip->state == FL_SYNCING) {
2437 chip->state = chip->oldstate;
2438 wake_up(&chip->wq);
2440 mutex_unlock(&chip->mutex);
2441 }
2442 }
2445 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2447 struct map_info *map = mtd->priv;
2448 struct cfi_private *cfi = map->fldrv_priv;
2449 int i;
2450 struct flchip *chip;
2451 int ret = 0;
2453 for (i = 0; !ret && i < cfi->numchips; i++) {
2454 chip = &cfi->chips[i];
2456 mutex_lock(&chip->mutex);
2458 switch (chip->state) {
2459 case FL_READY:
2460 case FL_STATUS:
2461 case FL_CFI_QUERY:
2462 case FL_JEDEC_QUERY:
2463 chip->oldstate = chip->state;
2464 chip->state = FL_PM_SUSPENDED;
2465 /* No need to wake_up() on this state change -
2466 * as the whole point is that nobody can do anything
2467 * with the chip now anyway.
2468 */
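/* deliberate fall-through: freshly suspended chips also take the break below */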
2469 case FL_PM_SUSPENDED:
2470 break;
2472 default:
2473 ret = -EAGAIN;
2474 break;
2475 }
2476 mutex_unlock(&chip->mutex);
2479 /* Unlock the chips again */
2481 if (ret) {
2482 for (i--; i >= 0; i--) {
2483 chip = &cfi->chips[i];
2485 mutex_lock(&chip->mutex);
2487 if (chip->state == FL_PM_SUSPENDED) {
2488 chip->state = chip->oldstate;
2489 wake_up(&chip->wq);
2491 mutex_unlock(&chip->mutex);
2492 }
2493 }
2495 return ret;
2496 }
2499 static void cfi_amdstd_resume(struct mtd_info *mtd)
2501 struct map_info *map = mtd->priv;
2502 struct cfi_private *cfi = map->fldrv_priv;
2503 int i;
2504 struct flchip *chip;
2506 for (i = 0; i < cfi->numchips; i++) {
2508 chip = &cfi->chips[i];
2510 mutex_lock(&chip->mutex);
2512 if (chip->state == FL_PM_SUSPENDED) {
2513 chip->state = FL_READY;
2514 map_write(map, CMD(0xF0), chip->start);
2515 wake_up(&chip->wq);
2516 } else
2518 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2520 mutex_unlock(&chip->mutex);
2521 }
2522 }
2525 /*
2526 * Ensure that the flash device is put back into read array mode before
2527 * unloading the driver or rebooting. On some systems, rebooting while
2528 * the flash is in query/program/erase mode will prevent the CPU from
2529 * fetching the bootloader code, requiring a hard reset or power cycle.
2530 */
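/*
 * 0xF0 is the AMD reset/read-array command; writing it to each chip
 * below returns the device to normal read mode.
 */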
2531 static int cfi_amdstd_reset(struct mtd_info *mtd)
2533 struct map_info *map = mtd->priv;
2534 struct cfi_private *cfi = map->fldrv_priv;
2535 int i, ret;
2536 struct flchip *chip;
2538 for (i = 0; i < cfi->numchips; i++) {
2540 chip = &cfi->chips[i];
2542 mutex_lock(&chip->mutex);
2544 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2545 if (!ret) {
2546 map_write(map, CMD(0xF0), chip->start);
2547 chip->state = FL_SHUTDOWN;
2548 put_chip(map, chip, chip->start);
2549 }
2551 mutex_unlock(&chip->mutex);
2552 }
2554 return 0;
2555 }
2558 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2559 void *v)
2561 struct mtd_info *mtd;
2563 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2564 cfi_amdstd_reset(mtd);
2565 return NOTIFY_DONE;
2566 }
2569 static void cfi_amdstd_destroy(struct mtd_info *mtd)
2571 struct map_info *map = mtd->priv;
2572 struct cfi_private *cfi = map->fldrv_priv;
2574 cfi_amdstd_reset(mtd);
2575 unregister_reboot_notifier(&mtd->reboot_notifier);
2576 kfree(cfi->cmdset_priv);
2577 kfree(cfi->cfiq);
2578 kfree(cfi);
2579 kfree(mtd->eraseregions);
2580 }
2582 MODULE_LICENSE("GPL");
2583 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2584 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
2585 MODULE_ALIAS("cfi_cmdset_0006");
2586 MODULE_ALIAS("cfi_cmdset_0701");