/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};
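
/*
 * A note on the fixup mechanism (our understanding of the generic CFI
 * code, not spelled out here): cfi_fixup() walks a table like the one
 * above and invokes every entry whose manufacturer/device id pair
 * matches the probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as
 * wildcards.
 */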

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table picks all the cases where we
         * know that is true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i < cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
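
/*
 * Illustrative usage sketch (not part of this driver): a map driver does
 * not normally call cfi_cmdset_0001() directly; it typically gets here
 * via the generic CFI probe, along these lines:
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *      if (mtd)
 *              add_mtd_device(mtd);
 *
 * where my_map is a hypothetical, already initialised struct map_info.
 */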

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        /* printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips); */

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

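        /*
         * Per the CFI spec, each EraseRegionInfo word packs two fields:
         * bits 0-15 hold (number of erase blocks - 1) and bits 16-31
         * hold the block size in units of 256 bytes, which is what the
         * shift/mask arithmetic below unpacks.
         */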
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
                        MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                        MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
                        mtd->flags |= MTD_PROGRAM_REGIONS;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, MTD_PROGREGION_SIZE(mtd),
                               MTD_PROGREGION_CTRLMODE_VALID(mtd),
                               MTD_PROGREGION_CTRLMODE_INVALID(mtd));
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
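                /*
                 * Note: numparts is effectively assumed to be a power of
                 * two here, so __ffs(numparts) == log2(numparts).  E.g. a
                 * 64 MiB chip (chipshift == 26) with 4 hw partitions
                 * gives partshift == 24, i.e. 16 MiB per virtual chip.
                 */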
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
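        /* SR.7 (0x80) is the Intel status register's WSM-ready bit; the
           0x01 bit is used below as the partition write status indication
           on multi-partition chips (our reading of the code, not a
           datasheet citation) */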
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have the possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
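                /* fall through: the chip is now ready for a new command */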

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within a XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
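                /* 0xff is the Intel 'Read Array' command: put the chip
                   back into normal read mode */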
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
        UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken about the add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}
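
/*
 * Illustrative (hypothetical) client usage of the point/unpoint pair,
 * e.g. by a filesystem wanting zero-copy access to NOR contents:
 *
 *      u_char *va;
 *      size_t retlen;
 *
 *      if (mtd->point && !mtd->point(mtd, ofs, len, &retlen, &va)) {
 *              ... read directly from va[0 .. retlen-1] ...
 *              mtd->unpoint(mtd, va, ofs, retlen);
 *      }
 *
 * Note that retlen may come back smaller than len if the range spans
 * several chips.
 */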

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) from POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

1252 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1253                                      unsigned long adr, map_word datum, int mode)
1254 {
1255         struct cfi_private *cfi = map->fldrv_priv;
1256         map_word status, status_OK, write_cmd;
1257         unsigned long timeo;
1258         int z, ret=0;
1259
1260         adr += chip->start;
1261
1262         /* Let's determine those according to the interleave only once */
1263         status_OK = CMD(0x80);
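             /* 0x40 is the standard word-program opcode; command-set
                0x0200 parts use 0x41 instead, and 0xc0 programs the
                OTP protection registers rather than the main array. */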
1264         switch (mode) {
1265         case FL_WRITING:
1266                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1267                 break;
1268         case FL_OTP_WRITE:
1269                 write_cmd = CMD(0xc0);
1270                 break;
1271         default:
1272                 return -EINVAL;
1273         }
1274
1275         spin_lock(chip->mutex);
1276         ret = get_chip(map, chip, adr, mode);
1277         if (ret) {
1278                 spin_unlock(chip->mutex);
1279                 return ret;
1280         }
1281
1282         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1283         ENABLE_VPP(map);
1284         xip_disable(map, chip, adr);
1285         map_write(map, write_cmd, adr);
1286         map_write(map, datum, adr);
1287         chip->state = mode;
1288
1289         INVALIDATE_CACHE_UDELAY(map, chip,
1290                                 adr, map_bankwidth(map),
1291                                 chip->word_write_time);
1292
1293         timeo = jiffies + (HZ/2);
1294         z = 0;
1295         for (;;) {
1296                 if (chip->state != mode) {
1297                         /* Someone's suspended the write. Sleep */
1298                         DECLARE_WAITQUEUE(wait, current);
1299
1300                         set_current_state(TASK_UNINTERRUPTIBLE);
1301                         add_wait_queue(&chip->wq, &wait);
1302                         spin_unlock(chip->mutex);
1303                         schedule();
1304                         remove_wait_queue(&chip->wq, &wait);
1305                         timeo = jiffies + (HZ / 2); /* FIXME */
1306                         spin_lock(chip->mutex);
1307                         continue;
1308                 }
1309
1310                 status = map_read(map, adr);
1311                 if (map_word_andequal(map, status, status_OK, status_OK))
1312                         break;
1313
1314                 /* OK Still waiting */
1315                 if (time_after(jiffies, timeo)) {
1316                         map_write(map, CMD(0x70), adr);
1317                         chip->state = FL_STATUS;
1318                         xip_enable(map, chip, adr);
1319                         printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1320                         ret = -EIO;
1321                         goto out;
1322                 }
1323
1324                 /* Latency issues. Drop the lock, wait a while and retry */
1325                 z++;
1326                 UDELAY(map, chip, adr, 1);
1327         }
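             /* Adaptive polling: if the write completed before the
                first status check (z == 0), shorten the expected
                word-write time (floor of 1); if it needed more than
                one extra poll, lengthen it. */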
1328         if (!z) {
1329                 chip->word_write_time--;
1330                 if (!chip->word_write_time)
1331                         chip->word_write_time = 1;
1332         }
1333         if (z > 1)
1334                 chip->word_write_time++;
1335
1336         /* Done and happy. */
1337         chip->state = FL_STATUS;
1338
1339         /* check for errors */
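             /* 0x1a masks SR.1 (block locked), SR.3 (VPP low) and
                SR.4 (program failure). */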
1340         if (map_word_bitsset(map, status, CMD(0x1a))) {
1341                 unsigned long chipstatus = MERGESTATUS(status);
1342
1343                 /* reset status */
1344                 map_write(map, CMD(0x50), adr);
1345                 map_write(map, CMD(0x70), adr);
1346                 xip_enable(map, chip, adr);
1347
1348                 if (chipstatus & 0x02) {
1349                         ret = -EROFS;
1350                 } else if (chipstatus & 0x08) {
1351                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1352                         ret = -EIO;
1353                 } else {
1354                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1355                         ret = -EINVAL;
1356                 }
1357
1358                 goto out;
1359         }
1360
1361         xip_enable(map, chip, adr);
1362  out:   put_chip(map, chip, adr);
1363         spin_unlock(chip->mutex);
1364         return ret;
1365 }
1366
1367
1368 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1369 {
1370         struct map_info *map = mtd->priv;
1371         struct cfi_private *cfi = map->fldrv_priv;
1372         int ret = 0;
1373         int chipnum;
1374         unsigned long ofs;
1375
1376         *retlen = 0;
1377         if (!len)
1378                 return 0;
1379
1380         chipnum = to >> cfi->chipshift;
1381         ofs = to  - (chipnum << cfi->chipshift);
1382
1383         /* If it's not bus-aligned, do the first byte write */
1384         if (ofs & (map_bankwidth(map)-1)) {
1385                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1386                 int gap = ofs - bus_ofs;
1387                 int n;
1388                 map_word datum;
1389
1390                 n = min_t(int, len, map_bankwidth(map)-gap);
1391                 datum = map_word_ff(map);
1392                 datum = map_word_load_partial(map, datum, buf, gap, n);
1393
1394                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1395                                                bus_ofs, datum, FL_WRITING);
1396                 if (ret)
1397                         return ret;
1398
1399                 len -= n;
1400                 ofs += n;
1401                 buf += n;
1402                 (*retlen) += n;
1403
1404                 if (ofs >> cfi->chipshift) {
1405                         chipnum ++;
1406                         ofs = 0;
1407                         if (chipnum == cfi->numchips)
1408                                 return 0;
1409                 }
1410         }
1411
1412         while(len >= map_bankwidth(map)) {
1413                 map_word datum = map_word_load(map, buf);
1414
1415                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1416                                        ofs, datum, FL_WRITING);
1417                 if (ret)
1418                         return ret;
1419
1420                 ofs += map_bankwidth(map);
1421                 buf += map_bankwidth(map);
1422                 (*retlen) += map_bankwidth(map);
1423                 len -= map_bankwidth(map);
1424
1425                 if (ofs >> cfi->chipshift) {
1426                         chipnum ++;
1427                         ofs = 0;
1428                         if (chipnum == cfi->numchips)
1429                                 return 0;
1430                 }
1431         }
1432
1433         if (len & (map_bankwidth(map)-1)) {
1434                 map_word datum;
1435
1436                 datum = map_word_ff(map);
1437                 datum = map_word_load_partial(map, datum, buf, 0, len);
1438
1439                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1440                                        ofs, datum, FL_WRITING);
1441                 if (ret)
1442                         return ret;
1443
1444                 (*retlen) += len;
1445         }
1446
1447         return 0;
1448 }
1449
1450
1451 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1452                                     unsigned long adr, const struct kvec **pvec,
1453                                     unsigned long *pvec_seek, int len)
1454 {
1455         struct cfi_private *cfi = map->fldrv_priv;
1456         map_word status, status_OK, write_cmd, datum;
1457         unsigned long cmd_adr, timeo;
1458         int wbufsize, z, ret=0, word_gap, words;
1459         const struct kvec *vec;
1460         unsigned long vec_seek;
1461
1462         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1463         adr += chip->start;
1464         cmd_adr = adr & ~(wbufsize-1);
1465
1466         /* Let's determine this according to the interleave only once */
1467         status_OK = CMD(0x80);
1468         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1469
1470         spin_lock(chip->mutex);
1471         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1472         if (ret) {
1473                 spin_unlock(chip->mutex);
1474                 return ret;
1475         }
1476
1477         XIP_INVAL_CACHED_RANGE(map, adr, len);
1478         ENABLE_VPP(map);
1479         xip_disable(map, chip, cmd_adr);
1480
1481         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1482            [...], the device will not accept any more Write to Buffer commands".
1483            So we must check here and reset those bits if they're set. Otherwise
1484            we're just pissing in the wind */
1485         if (chip->state != FL_STATUS)
1486                 map_write(map, CMD(0x70), cmd_adr);
1487         status = map_read(map, cmd_adr);
1488         if (map_word_bitsset(map, status, CMD(0x30))) {
1489                 xip_enable(map, chip, cmd_adr);
1490                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1491                 xip_disable(map, chip, cmd_adr);
1492                 map_write(map, CMD(0x50), cmd_adr);
1493                 map_write(map, CMD(0x70), cmd_adr);
1494         }
1495
1496         chip->state = FL_WRITING_TO_BUFFER;
1497
1498         z = 0;
1499         for (;;) {
1500                 map_write(map, write_cmd, cmd_adr);
1501
1502                 status = map_read(map, cmd_adr);
1503                 if (map_word_andequal(map, status, status_OK, status_OK))
1504                         break;
1505
1506                 UDELAY(map, chip, cmd_adr, 1);
1507
1508                 if (++z > 20) {
1509                         /* Argh. Not ready for write to buffer */
1510                         map_word Xstatus;
1511                         map_write(map, CMD(0x70), cmd_adr);
1512                         chip->state = FL_STATUS;
1513                         Xstatus = map_read(map, cmd_adr);
1514                         /* Odd. Clear status bits */
1515                         map_write(map, CMD(0x50), cmd_adr);
1516                         map_write(map, CMD(0x70), cmd_adr);
1517                         xip_enable(map, chip, cmd_adr);
1518                         printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1519                                map->name, status.x[0], Xstatus.x[0]);
1520                         ret = -EIO;
1521                         goto out;
1522                 }
1523         }
1524
1525         /* Figure out the number of words to write */
1526         word_gap = (-adr & (map_bankwidth(map)-1));
1527         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1528         if (!word_gap) {
1529                 words--;
1530         } else {
1531                 word_gap = map_bankwidth(map) - word_gap;
1532                 adr -= word_gap;
1533                 datum = map_word_ff(map);
1534         }
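             /* e.g. bankwidth 4, adr 0x1002, len 6: word_gap ends up
                2, adr is aligned down to 0x1000, the partial words are
                padded with 0xff, and "words" (here 1) is exactly the
                count-minus-one value the chip expects. */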
1535
1536         /* Write length of data to come */
1537         map_write(map, CMD(words), cmd_adr);
1538
1539         /* Write data */
1540         vec = *pvec;
1541         vec_seek = *pvec_seek;
1542         do {
1543                 int n = map_bankwidth(map) - word_gap;
1544                 if (n > vec->iov_len - vec_seek)
1545                         n = vec->iov_len - vec_seek;
1546                 if (n > len)
1547                         n = len;
1548
1549                 if (!word_gap && len < map_bankwidth(map))
1550                         datum = map_word_ff(map);
1551
1552                 datum = map_word_load_partial(map, datum,
1553                                               vec->iov_base + vec_seek,
1554                                               word_gap, n);
1555
1556                 len -= n;
1557                 word_gap += n;
1558                 if (!len || word_gap == map_bankwidth(map)) {
1559                         map_write(map, datum, adr);
1560                         adr += map_bankwidth(map);
1561                         word_gap = 0;
1562                 }
1563
1564                 vec_seek += n;
1565                 if (vec_seek == vec->iov_len) {
1566                         vec++;
1567                         vec_seek = 0;
1568                 }
1569         } while (len);
1570         *pvec = vec;
1571         *pvec_seek = vec_seek;
1572
1573         /* GO GO GO */
1574         map_write(map, CMD(0xd0), cmd_adr);
1575         chip->state = FL_WRITING;
1576
1577         INVALIDATE_CACHE_UDELAY(map, chip,
1578                                 cmd_adr, len,
1579                                 chip->buffer_write_time);
1580
1581         timeo = jiffies + (HZ/2);
1582         z = 0;
1583         for (;;) {
1584                 if (chip->state != FL_WRITING) {
1585                         /* Someone's suspended the write. Sleep */
1586                         DECLARE_WAITQUEUE(wait, current);
1587                         set_current_state(TASK_UNINTERRUPTIBLE);
1588                         add_wait_queue(&chip->wq, &wait);
1589                         spin_unlock(chip->mutex);
1590                         schedule();
1591                         remove_wait_queue(&chip->wq, &wait);
1592                         timeo = jiffies + (HZ / 2); /* FIXME */
1593                         spin_lock(chip->mutex);
1594                         continue;
1595                 }
1596
1597                 status = map_read(map, cmd_adr);
1598                 if (map_word_andequal(map, status, status_OK, status_OK))
1599                         break;
1600
1601                 /* OK Still waiting */
1602                 if (time_after(jiffies, timeo)) {
1603                         map_write(map, CMD(0x70), cmd_adr);
1604                         chip->state = FL_STATUS;
1605                         xip_enable(map, chip, cmd_adr);
1606                         printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1607                         ret = -EIO;
1608                         goto out;
1609                 }
1610
1611                 /* Latency issues. Drop the lock, wait a while and retry */
1612                 z++;
1613                 UDELAY(map, chip, cmd_adr, 1);
1614         }
1615         if (!z) {
1616                 chip->buffer_write_time--;
1617                 if (!chip->buffer_write_time)
1618                         chip->buffer_write_time = 1;
1619         }
1620         if (z > 1)
1621                 chip->buffer_write_time++;
1622
1623         /* Done and happy. */
1624         chip->state = FL_STATUS;
1625
1626         /* check for errors */
1627         if (map_word_bitsset(map, status, CMD(0x1a))) {
1628                 unsigned long chipstatus = MERGESTATUS(status);
1629
1630                 /* reset status */
1631                 map_write(map, CMD(0x50), cmd_adr);
1632                 map_write(map, CMD(0x70), cmd_adr);
1633                 xip_enable(map, chip, cmd_adr);
1634
1635                 if (chipstatus & 0x02) {
1636                         ret = -EROFS;
1637                 } else if (chipstatus & 0x08) {
1638                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1639                         ret = -EIO;
1640                 } else {
1641                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1642                         ret = -EINVAL;
1643                 }
1644
1645                 goto out;
1646         }
1647
1648         xip_enable(map, chip, cmd_adr);
1649  out:   put_chip(map, chip, cmd_adr);
1650         spin_unlock(chip->mutex);
1651         return ret;
1652 }
1653
1654 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1655                                 unsigned long count, loff_t to, size_t *retlen)
1656 {
1657         struct map_info *map = mtd->priv;
1658         struct cfi_private *cfi = map->fldrv_priv;
1659         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1660         int ret = 0;
1661         int chipnum;
1662         unsigned long ofs, vec_seek, i;
1663         size_t len = 0;
1664
1665         for (i = 0; i < count; i++)
1666                 len += vecs[i].iov_len;
1667
1668         *retlen = 0;
1669         if (!len)
1670                 return 0;
1671
1672         chipnum = to >> cfi->chipshift;
1673         ofs = to - (chipnum << cfi->chipshift);
1674         vec_seek = 0;
1675
1676         do {
1677                 /* We must not cross write block boundaries */
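                     /* e.g. wbufsize 32 and ofs ending in 0x1c leave
                        size = 4, so this chunk stops exactly at the
                        write-buffer boundary. */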
1678                 int size = wbufsize - (ofs & (wbufsize-1));
1679
1680                 if (size > len)
1681                         size = len;
1682                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1683                                       ofs, &vecs, &vec_seek, size);
1684                 if (ret)
1685                         return ret;
1686
1687                 ofs += size;
1688                 (*retlen) += size;
1689                 len -= size;
1690
1691                 if (ofs >> cfi->chipshift) {
1692                         chipnum ++;
1693                         ofs = 0;
1694                         if (chipnum == cfi->numchips)
1695                                 return 0;
1696                 }
1697         } while (len);
1698
1699         return 0;
1700 }
1701
1702 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1703                                        size_t len, size_t *retlen, const u_char *buf)
1704 {
1705         struct kvec vec;
1706
1707         vec.iov_base = (void *) buf;
1708         vec.iov_len = len;
1709
1710         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1711 }
1712
1713 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1714                                       unsigned long adr, int len, void *thunk)
1715 {
1716         struct cfi_private *cfi = map->fldrv_priv;
1717         map_word status, status_OK;
1718         unsigned long timeo;
1719         int retries = 3;
1720         DECLARE_WAITQUEUE(wait, current);
1721         int ret = 0;
1722
1723         adr += chip->start;
1724
1725         /* Let's determine this according to the interleave only once */
1726         status_OK = CMD(0x80);
1727
1728  retry:
1729         spin_lock(chip->mutex);
1730         ret = get_chip(map, chip, adr, FL_ERASING);
1731         if (ret) {
1732                 spin_unlock(chip->mutex);
1733                 return ret;
1734         }
1735
1736         XIP_INVAL_CACHED_RANGE(map, adr, len);
1737         ENABLE_VPP(map);
1738         xip_disable(map, chip, adr);
1739
1740         /* Clear the status register first */
1741         map_write(map, CMD(0x50), adr);
1742
1743         /* Now erase */
1744         map_write(map, CMD(0x20), adr);
1745         map_write(map, CMD(0xD0), adr);
1746         chip->state = FL_ERASING;
1747         chip->erase_suspended = 0;
1748
1749         INVALIDATE_CACHE_UDELAY(map, chip,
1750                                 adr, len,
1751                                 chip->erase_time*1000/2);
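             /* (erase_time above is the typical block-erase time,
                apparently in ms given the *1000 conversion to
                microseconds; wait half of it before the first poll.) */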
1752
1753         /* FIXME. Use a timer to check this, and return immediately. */
1754         /* Once the state machine's known to be working I'll do that */
1755
1756         timeo = jiffies + (HZ*20);
1757         for (;;) {
1758                 if (chip->state != FL_ERASING) {
1759                         /* Someone's suspended the erase. Sleep */
1760                         set_current_state(TASK_UNINTERRUPTIBLE);
1761                         add_wait_queue(&chip->wq, &wait);
1762                         spin_unlock(chip->mutex);
1763                         schedule();
1764                         remove_wait_queue(&chip->wq, &wait);
1765                         spin_lock(chip->mutex);
1766                         continue;
1767                 }
1768                 if (chip->erase_suspended) {
1769                         /* This erase was suspended and resumed.
1770                            Adjust the timeout */
1771                         timeo = jiffies + (HZ*20); /* FIXME */
1772                         chip->erase_suspended = 0;
1773                 }
1774
1775                 status = map_read(map, adr);
1776                 if (map_word_andequal(map, status, status_OK, status_OK))
1777                         break;
1778
1779                 /* OK Still waiting */
1780                 if (time_after(jiffies, timeo)) {
1781                         map_write(map, CMD(0x70), adr);
1782                         chip->state = FL_STATUS;
1783                         xip_enable(map, chip, adr);
1784                         printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1785                         ret = -EIO;
1786                         goto out;
1787                 }
1788
1789                 /* Latency issues. Drop the lock, wait a while and retry */
1790                 UDELAY(map, chip, adr, 1000000/HZ);
1791         }
1792
1793         /* We've broken this before. It doesn't hurt to be safe */
1794         map_write(map, CMD(0x70), adr);
1795         chip->state = FL_STATUS;
1796         status = map_read(map, adr);
1797
1798         /* check for errors */
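             /* 0x3a masks SR.1 (locked), SR.3 (VPP low), SR.4 and
                SR.5; SR.4 and SR.5 together mean a bad command
                sequence, SR.5 alone an erase failure. */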
1799         if (map_word_bitsset(map, status, CMD(0x3a))) {
1800                 unsigned long chipstatus = MERGESTATUS(status);
1801
1802                 /* Reset the error bits */
1803                 map_write(map, CMD(0x50), adr);
1804                 map_write(map, CMD(0x70), adr);
1805                 xip_enable(map, chip, adr);
1806
1807                 if ((chipstatus & 0x30) == 0x30) {
1808                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1809                         ret = -EINVAL;
1810                 } else if (chipstatus & 0x02) {
1811                         /* Protection bit set */
1812                         ret = -EROFS;
1813                 } else if (chipstatus & 0x8) {
1814                         /* Voltage */
1815                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1816                         ret = -EIO;
1817                 } else if (chipstatus & 0x20 && retries--) {
1818                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1819                         timeo = jiffies + HZ;
1820                         put_chip(map, chip, adr);
1821                         spin_unlock(chip->mutex);
1822                         goto retry;
1823                 } else {
1824                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1825                         ret = -EIO;
1826                 }
1827
1828                 goto out;
1829         }
1830
1831         xip_enable(map, chip, adr);
1832  out:   put_chip(map, chip, adr);
1833         spin_unlock(chip->mutex);
1834         return ret;
1835 }
1836
1837 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1838 {
1839         unsigned long ofs, len;
1840         int ret;
1841
1842         ofs = instr->addr;
1843         len = instr->len;
1844
1845         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1846         if (ret)
1847                 return ret;
1848
1849         instr->state = MTD_ERASE_DONE;
1850         mtd_erase_callback(instr);
1851
1852         return 0;
1853 }
1854
1855 static void cfi_intelext_sync (struct mtd_info *mtd)
1856 {
1857         struct map_info *map = mtd->priv;
1858         struct cfi_private *cfi = map->fldrv_priv;
1859         int i;
1860         struct flchip *chip;
1861         int ret = 0;
1862
1863         for (i=0; !ret && i<cfi->numchips; i++) {
1864                 chip = &cfi->chips[i];
1865
1866                 spin_lock(chip->mutex);
1867                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1868
1869                 if (!ret) {
1870                         chip->oldstate = chip->state;
1871                         chip->state = FL_SYNCING;
1872                         /* No need to wake_up() on this state change -
1873                          * as the whole point is that nobody can do anything
1874                          * with the chip now anyway.
1875                          */
1876                 }
1877                 spin_unlock(chip->mutex);
1878         }
1879
1880         /* Unlock the chips again */
1881
1882         for (i--; i >=0; i--) {
1883                 chip = &cfi->chips[i];
1884
1885                 spin_lock(chip->mutex);
1886
1887                 if (chip->state == FL_SYNCING) {
1888                         chip->state = chip->oldstate;
1889                         chip->oldstate = FL_READY;
1890                         wake_up(&chip->wq);
1891                 }
1892                 spin_unlock(chip->mutex);
1893         }
1894 }
1895
1896 #ifdef DEBUG_LOCK_BITS
1897 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1898                                                 struct flchip *chip,
1899                                                 unsigned long adr,
1900                                                 int len, void *thunk)
1901 {
1902         struct cfi_private *cfi = map->fldrv_priv;
1903         int status, ofs_factor = cfi->interleave * cfi->device_type;
1904
1905         adr += chip->start;
1906         xip_disable(map, chip, adr+(2*ofs_factor));
1907         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1908         chip->state = FL_JEDEC_QUERY;
1909         status = cfi_read_query(map, adr+(2*ofs_factor));
1910         xip_enable(map, chip, 0);
1911         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1912                adr, status);
1913         return 0;
1914 }
1915 #endif
1916
1917 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1918 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
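/* Cookies passed through cfi_varsize_frob() as the "thunk" argument to
   tell do_xxlock_oneblock() which operation to perform. */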
1919
1920 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1921                                        unsigned long adr, int len, void *thunk)
1922 {
1923         struct cfi_private *cfi = map->fldrv_priv;
1924         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1925         map_word status, status_OK;
1926         unsigned long timeo = jiffies + HZ;
1927         int ret;
1928
1929         adr += chip->start;
1930
1931         /* Let's determine this according to the interleave only once */
1932         status_OK = CMD(0x80);
1933
1934         spin_lock(chip->mutex);
1935         ret = get_chip(map, chip, adr, FL_LOCKING);
1936         if (ret) {
1937                 spin_unlock(chip->mutex);
1938                 return ret;
1939         }
1940
1941         ENABLE_VPP(map);
1942         xip_disable(map, chip, adr);
1943
1944         map_write(map, CMD(0x60), adr);
1945         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1946                 map_write(map, CMD(0x01), adr);
1947                 chip->state = FL_LOCKING;
1948         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1949                 map_write(map, CMD(0xD0), adr);
1950                 chip->state = FL_UNLOCKING;
1951         } else
1952                 BUG();
1953
1954         /*
1955          * If Instant Individual Block Locking supported then no need
1956          * to delay.
1957          */
1958
1959         if (!extp || !(extp->FeatureSupport & (1 << 5)))
1960                 UDELAY(map, chip, adr, 1000000/HZ);
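                     /* (1000000/HZ us above is one jiffy: there is no
                        point polling faster than the scheduler tick.) */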
1961
1962         /* FIXME. Use a timer to check this, and return immediately. */
1963         /* Once the state machine's known to be working I'll do that */
1964
1965         timeo = jiffies + (HZ*20);
1966         for (;;) {
1967
1968                 status = map_read(map, adr);
1969                 if (map_word_andequal(map, status, status_OK, status_OK))
1970                         break;
1971
1972                 /* OK Still waiting */
1973                 if (time_after(jiffies, timeo)) {
1974                         map_write(map, CMD(0x70), adr);
1975                         chip->state = FL_STATUS;
1976                         xip_enable(map, chip, adr);
1977                         printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1978                         put_chip(map, chip, adr);
1979                         spin_unlock(chip->mutex);
1980                         return -EIO;
1981                 }
1982
1983                 /* Latency issues. Drop the lock, wait a while and retry */
1984                 UDELAY(map, chip, adr, 1);
1985         }
1986
1987         /* Done and happy. */
1988         chip->state = FL_STATUS;
1989         xip_enable(map, chip, adr);
1990         put_chip(map, chip, adr);
1991         spin_unlock(chip->mutex);
1992         return 0;
1993 }
1994
1995 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1996 {
1997         int ret;
1998
1999 #ifdef DEBUG_LOCK_BITS
2000         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2001                __FUNCTION__, (unsigned long long)ofs, len);
2002         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2003                 ofs, len, 0);
2004 #endif
2005
2006         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2007                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2008
2009 #ifdef DEBUG_LOCK_BITS
2010         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2011                __FUNCTION__, ret);
2012         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2013                 ofs, len, 0);
2014 #endif
2015
2016         return ret;
2017 }
2018
2019 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2020 {
2021         int ret;
2022
2023 #ifdef DEBUG_LOCK_BITS
2024         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2025                __FUNCTION__, (unsigned long long)ofs, len);
2026         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2027                 ofs, len, 0);
2028 #endif
2029
2030         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2031                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2032
2033 #ifdef DEBUG_LOCK_BITS
2034         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2035                __FUNCTION__, ret);
2036         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2037                 ofs, len, 0);
2038 #endif
2039
2040         return ret;
2041 }
2042
2043 #ifdef CONFIG_MTD_OTP
2044
2045 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2046                         u_long data_offset, u_char *buf, u_int size,
2047                         u_long prot_offset, u_int groupno, u_int groupsize);
2048
2049 static int __xipram
2050 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2051             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2052 {
2053         struct cfi_private *cfi = map->fldrv_priv;
2054         int ret;
2055
2056         spin_lock(chip->mutex);
2057         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2058         if (ret) {
2059                 spin_unlock(chip->mutex);
2060                 return ret;
2061         }
2062
2063         /* let's ensure we're not reading back cached data from array mode */
2064         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2065
2066         xip_disable(map, chip, chip->start);
2067         if (chip->state != FL_JEDEC_QUERY) {
2068                 map_write(map, CMD(0x90), chip->start);
2069                 chip->state = FL_JEDEC_QUERY;
2070         }
2071         map_copy_from(map, buf, chip->start + offset, size);
2072         xip_enable(map, chip, chip->start);
2073
2074         /* then ensure we don't keep OTP data in the cache */
2075         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2076
2077         put_chip(map, chip, chip->start);
2078         spin_unlock(chip->mutex);
2079         return 0;
2080 }
2081
2082 static int
2083 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2084              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2085 {
2086         int ret;
2087
2088         while (size) {
2089                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2090                 int gap = offset - bus_ofs;
2091                 int n = min_t(int, size, map_bankwidth(map)-gap);
2092                 map_word datum = map_word_ff(map);
2093
2094                 datum = map_word_load_partial(map, datum, buf, gap, n);
2095                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2096                 if (ret)
2097                         return ret;
2098
2099                 offset += n;
2100                 buf += n;
2101                 size -= n;
2102         }
2103
2104         return 0;
2105 }
2106
2107 static int
2108 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2109             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2110 {
2111         struct cfi_private *cfi = map->fldrv_priv;
2112         map_word datum;
2113
2114         /* make sure area matches group boundaries */
2115         if (size != grpsz)
2116                 return -EXDEV;
2117
2118         datum = map_word_ff(map);
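             /* Locking is itself an OTP program operation: clearing
                bit "grpno" in the protection lock word locks that
                group permanently, since OTP bits only go 1 -> 0. */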
2119         datum = map_word_clr(map, datum, CMD(1 << grpno));
2120         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2121 }
2122
2123 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2124                                  size_t *retlen, u_char *buf,
2125                                  otp_op_t action, int user_regs)
2126 {
2127         struct map_info *map = mtd->priv;
2128         struct cfi_private *cfi = map->fldrv_priv;
2129         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2130         struct flchip *chip;
2131         struct cfi_intelext_otpinfo *otp;
2132         u_long devsize, reg_prot_offset, data_offset;
2133         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2134         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2135         int ret;
2136
2137         *retlen = 0;
2138
2139         /* Check that we actually have some OTP registers */
2140         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2141                 return -ENODATA;
2142
2143         /* we need real chips here not virtual ones */
2144         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2145         chip_step = devsize >> cfi->chipshift;
2146         chip_num = 0;
2147
2148         /* Some chips have OTP located in the _top_ partition only.
2149            For example: Intel 28F256L18T (T means top-parameter device) */
2150         if (cfi->mfr == MANUFACTURER_INTEL) {
2151                 switch (cfi->id) {
2152                 case 0x880b:
2153                 case 0x880c:
2154                 case 0x880d:
2155                         chip_num = chip_step - 1;
2156                 }
2157         }
2158
2159         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2160                 chip = &cfi->chips[chip_num];
2161                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2162
2163                 /* first OTP region */
2164                 field = 0;
2165                 reg_prot_offset = extp->ProtRegAddr;
2166                 reg_fact_groups = 1;
2167                 reg_fact_size = 1 << extp->FactProtRegSize;
2168                 reg_user_groups = 1;
2169                 reg_user_size = 1 << extp->UserProtRegSize;
2170
2171                 while (len > 0) {
2172                         /* flash geometry fixup */
2173                         data_offset = reg_prot_offset + 1;
2174                         data_offset *= cfi->interleave * cfi->device_type;
2175                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2176                         reg_fact_size *= cfi->interleave;
2177                         reg_user_size *= cfi->interleave;
2178
2179                         if (user_regs) {
2180                                 groups = reg_user_groups;
2181                                 groupsize = reg_user_size;
2182                                 /* skip over factory reg area */
2183                                 groupno = reg_fact_groups;
2184                                 data_offset += reg_fact_groups * reg_fact_size;
2185                         } else {
2186                                 groups = reg_fact_groups;
2187                                 groupsize = reg_fact_size;
2188                                 groupno = 0;
2189                         }
2190
2191                         while (len > 0 && groups > 0) {
2192                                 if (!action) {
2193                                         /*
2194                                          * Special case: if action is NULL
2195                                          * we fill buf with otp_info records.
2196                                          */
2197                                         struct otp_info *otpinfo;
2198                                         map_word lockword;
2199                                         len -= sizeof(struct otp_info);
2200                                         if (len <= 0)
2201                                                 return -ENOSPC;
2202                                         ret = do_otp_read(map, chip,
2203                                                           reg_prot_offset,
2204                                                           (u_char *)&lockword,
2205                                                           map_bankwidth(map),
2206                                                           0, 0,  0);
2207                                         if (ret)
2208                                                 return ret;
2209                                         otpinfo = (struct otp_info *)buf;
2210                                         otpinfo->start = from;
2211                                         otpinfo->length = groupsize;
2212                                         otpinfo->locked =
2213                                            !map_word_bitsset(map, lockword,
2214                                                              CMD(1 << groupno));
2215                                         from += groupsize;
2216                                         buf += sizeof(*otpinfo);
2217                                         *retlen += sizeof(*otpinfo);
2218                                 } else if (from >= groupsize) {
2219                                         from -= groupsize;
2220                                         data_offset += groupsize;
2221                                 } else {
2222                                         int size = groupsize;
2223                                         data_offset += from;
2224                                         size -= from;
2225                                         from = 0;
2226                                         if (size > len)
2227                                                 size = len;
2228                                         ret = action(map, chip, data_offset,
2229                                                      buf, size, reg_prot_offset,
2230                                                      groupno, groupsize);
2231                                         if (ret < 0)
2232                                                 return ret;
2233                                         buf += size;
2234                                         len -= size;
2235                                         *retlen += size;
2236                                         data_offset += size;
2237                                 }
2238                                 groupno++;
2239                                 groups--;
2240                         }
2241
2242                         /* next OTP region */
2243                         if (++field == extp->NumProtectionFields)
2244                                 break;
2245                         reg_prot_offset = otp->ProtRegAddr;
2246                         reg_fact_groups = otp->FactGroups;
2247                         reg_fact_size = 1 << otp->FactProtRegSize;
2248                         reg_user_groups = otp->UserGroups;
2249                         reg_user_size = 1 << otp->UserProtRegSize;
2250                         otp++;
2251                 }
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2258                                            size_t len, size_t *retlen,
2259                                             u_char *buf)
2260 {
2261         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2262                                      buf, do_otp_read, 0);
2263 }
2264
2265 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2266                                            size_t len, size_t *retlen,
2267                                             u_char *buf)
2268 {
2269         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2270                                      buf, do_otp_read, 1);
2271 }
2272
2273 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2274                                             size_t len, size_t *retlen,
2275                                              u_char *buf)
2276 {
2277         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2278                                      buf, do_otp_write, 1);
2279 }
2280
2281 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2282                                            loff_t from, size_t len)
2283 {
2284         size_t retlen;
2285         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2286                                      NULL, do_otp_lock, 1);
2287 }
2288
2289 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2290                                            struct otp_info *buf, size_t len)
2291 {
2292         size_t retlen;
2293         int ret;
2294
2295         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2296         return ret ? : retlen;
2297 }
2298
2299 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2300                                            struct otp_info *buf, size_t len)
2301 {
2302         size_t retlen;
2303         int ret;
2304
2305         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2306         return ret ? : retlen;
2307 }
2308
2309 #endif
2310
2311 static int cfi_intelext_suspend(struct mtd_info *mtd)
2312 {
2313         struct map_info *map = mtd->priv;
2314         struct cfi_private *cfi = map->fldrv_priv;
2315         int i;
2316         struct flchip *chip;
2317         int ret = 0;
2318
2319         for (i=0; !ret && i<cfi->numchips; i++) {
2320                 chip = &cfi->chips[i];
2321
2322                 spin_lock(chip->mutex);
2323
2324                 switch (chip->state) {
2325                 case FL_READY:
2326                 case FL_STATUS:
2327                 case FL_CFI_QUERY:
2328                 case FL_JEDEC_QUERY:
2329                         if (chip->oldstate == FL_READY) {
2330                                 chip->oldstate = chip->state;
2331                                 chip->state = FL_PM_SUSPENDED;
2332                                 /* No need to wake_up() on this state change -
2333                                  * as the whole point is that nobody can do anything
2334                                  * with the chip now anyway.
2335                                  */
2336                         } else {
2337                                 /* There seems to be an operation pending. We must wait for it. */
2338                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2339                                 ret = -EAGAIN;
2340                         }
2341                         break;
2342                 default:
2343                         /* Should we actually wait? Once upon a time these routines weren't
2344                            allowed to. Or should we return -EAGAIN, because the upper layers
2345                            ought to have already shut down anything which was using the device
2346                            anyway? The latter for now. */
2347                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2348                         ret = -EAGAIN;
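                             /* fall through */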
2349                 case FL_PM_SUSPENDED:
2350                         break;
2351                 }
2352                 spin_unlock(chip->mutex);
2353         }
2354
2355         /* Unlock the chips again */
2356
2357         if (ret) {
2358                 for (i--; i >=0; i--) {
2359                         chip = &cfi->chips[i];
2360
2361                         spin_lock(chip->mutex);
2362
2363                         if (chip->state == FL_PM_SUSPENDED) {
2364                                 /* No need to force it into a known state here,
2365                                    because we're returning failure, and it didn't
2366                                    get power cycled */
2367                                 chip->state = chip->oldstate;
2368                                 chip->oldstate = FL_READY;
2369                                 wake_up(&chip->wq);
2370                         }
2371                         spin_unlock(chip->mutex);
2372                 }
2373         }
2374
2375         return ret;
2376 }
2377
2378 static void cfi_intelext_resume(struct mtd_info *mtd)
2379 {
2380         struct map_info *map = mtd->priv;
2381         struct cfi_private *cfi = map->fldrv_priv;
2382         int i;
2383         struct flchip *chip;
2384
2385         for (i=0; i<cfi->numchips; i++) {
2386
2387                 chip = &cfi->chips[i];
2388
2389                 spin_lock(chip->mutex);
2390
2391                 /* Go to known state. Chip may have been power cycled */
2392                 if (chip->state == FL_PM_SUSPENDED) {
2393                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2394                         chip->oldstate = chip->state = FL_READY;
2395                         wake_up(&chip->wq);
2396                 }
2397
2398                 spin_unlock(chip->mutex);
2399         }
2400 }
2401
2402 static int cfi_intelext_reset(struct mtd_info *mtd)
2403 {
2404         struct map_info *map = mtd->priv;
2405         struct cfi_private *cfi = map->fldrv_priv;
2406         int i, ret;
2407
2408         for (i=0; i < cfi->numchips; i++) {
2409                 struct flchip *chip = &cfi->chips[i];
2410
2411                 /* force the completion of any ongoing operation
2412                    and switch to array mode so any bootloader in
2413                    flash is accessible for soft reboot. */
2414                 spin_lock(chip->mutex);
2415                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2416                 if (!ret) {
2417                         map_write(map, CMD(0xff), chip->start);
2418                         chip->state = FL_READY;
2419                 }
2420                 spin_unlock(chip->mutex);
2421         }
2422
2423         return 0;
2424 }
2425
2426 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2427                                void *v)
2428 {
2429         struct mtd_info *mtd;
2430
2431         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2432         cfi_intelext_reset(mtd);
2433         return NOTIFY_DONE;
2434 }
2435
2436 static void cfi_intelext_destroy(struct mtd_info *mtd)
2437 {
2438         struct map_info *map = mtd->priv;
2439         struct cfi_private *cfi = map->fldrv_priv;
2440         cfi_intelext_reset(mtd);
2441         unregister_reboot_notifier(&mtd->reboot_notifier);
2442         kfree(cfi->cmdset_priv);
2443         kfree(cfi->cfiq);
2444         kfree(cfi->chips[0].priv);
2445         kfree(cfi);
2446         kfree(mtd->eraseregions);
2447 }
2448
2449 static char im_name_0001[] = "cfi_cmdset_0001";
2450 static char im_name_0003[] = "cfi_cmdset_0003";
2451 static char im_name_0200[] = "cfi_cmdset_0200";
2452
2453 static int __init cfi_intelext_init(void)
2454 {
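             /* Command sets 0x0003 and 0x0200 are compatible Intel
                variants handled by the same code, so all three names
                resolve to cfi_cmdset_0001. */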
2455         inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2456         inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2457         inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2458         return 0;
2459 }
2460
2461 static void __exit cfi_intelext_exit(void)
2462 {
2463         inter_module_unregister(im_name_0001);
2464         inter_module_unregister(im_name_0003);
2465         inter_module_unregister(im_name_0200);
2466 }
2467
2468 module_init(cfi_intelext_init);
2469 module_exit(cfi_intelext_exit);
2470
2471 MODULE_LICENSE("GPL");
2472 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2473 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");