/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {   /* bits 2-3 sit between lock-down and EFA bits */
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table picks all the cases where we
         * know that is true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

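        /*
         * The extended query table has a variable-length tail.  Read the
         * fixed part first, work out how much extra space the tail needs,
         * and if the buffer turns out to be too small, free it and retry
         * with the larger size (the need_more/again labels below).
         */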
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i < cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
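                /*
                 * CFI erase region info word: bits 0-15 hold the number of
                 * erase blocks minus one, bits 16-31 the block size in units
                 * of 256 bytes.  The shift/mask below recovers the block
                 * size in bytes.
                 */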
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
                        MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                        MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
                        mtd->flags |= MTD_PROGRAM_REGIONS;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, MTD_PROGREGION_SIZE(mtd),
                               MTD_PROGREGION_CTRLMODE_VALID(mtd),
                               MTD_PROGREGION_CTRLMODE_INVALID(mtd));
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
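                /* Note: this also assumes the number of hw partitions is a
                   power of two, so that __ffs(numparts) == log2(numparts). */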
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have the possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it out in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

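                /* Fall through: the chip is now ready */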
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

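                /* Otherwise fall through and sleep */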
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending, the flash erase or write operation is suspended, array mode is
 * restored and interrupts are unmasked.  Task scheduling might also happen
 * at that point.  The CPU eventually returns from the interrupt or the
 * call to schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
        UDELAY(map, chip, cmd_adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why no special care
 * is taken over the add_wait_queue() or schedule() calls within the few
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
 * flash in array mode, so many of the cases therein are never executed
 * and XIP is not disturbed.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
do {  \
        spin_unlock(chip->mutex);  \
        INVALIDATE_CACHED_RANGE(map, adr, len);  \
        cfi_udelay(usec);  \
        spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

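                /* Clamp this transfer to the end of the current chip */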
                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum, int mode)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK, write_cmd;
        unsigned long timeo;
        int z, ret=0;

        adr += chip->start;

        /* Let's determine those according to the interleave only once */
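        /* SR.7 (0x80) = WSM ready; CMD() replicates it across the interleave */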
1260         status_OK = CMD(0x80);
1261         switch (mode) {
1262         case FL_WRITING:
1263                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1264                 break;
1265         case FL_OTP_WRITE:
1266                 write_cmd = CMD(0xc0);
1267                 break;
1268         default:
1269                 return -EINVAL;
1270         }
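        /*
         * 0x40 is the standard Intel word-program command (0x41 when the
         * CFI primary command set ID is 0x0200), and 0xC0 programs the
         * protection (OTP) registers.  status_OK tests SR.7, the write
         * state machine "ready" bit, replicated across each interleaved
         * device by CMD().
         */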
1271
1272         spin_lock(chip->mutex);
1273         ret = get_chip(map, chip, adr, mode);
1274         if (ret) {
1275                 spin_unlock(chip->mutex);
1276                 return ret;
1277         }
1278
1279         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1280         ENABLE_VPP(map);
1281         xip_disable(map, chip, adr);
1282         map_write(map, write_cmd, adr);
1283         map_write(map, datum, adr);
1284         chip->state = mode;
1285
1286         INVALIDATE_CACHE_UDELAY(map, chip, adr,
1287                                 adr, map_bankwidth(map),
1288                                 chip->word_write_time);
1289
1290         timeo = jiffies + (HZ/2);
1291         z = 0;
1292         for (;;) {
1293                 if (chip->state != mode) {
1294                         /* Someone's suspended the write. Sleep */
1295                         DECLARE_WAITQUEUE(wait, current);
1296
1297                         set_current_state(TASK_UNINTERRUPTIBLE);
1298                         add_wait_queue(&chip->wq, &wait);
1299                         spin_unlock(chip->mutex);
1300                         schedule();
1301                         remove_wait_queue(&chip->wq, &wait);
1302                         timeo = jiffies + (HZ / 2); /* FIXME */
1303                         spin_lock(chip->mutex);
1304                         continue;
1305                 }
1306
1307                 status = map_read(map, adr);
1308                 if (map_word_andequal(map, status, status_OK, status_OK))
1309                         break;
1310
1311                 /* OK Still waiting */
1312                 if (time_after(jiffies, timeo)) {
1313                         map_write(map, CMD(0x70), adr);
1314                         chip->state = FL_STATUS;
1315                         xip_enable(map, chip, adr);
1316                         printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1317                         ret = -EIO;
1318                         goto out;
1319                 }
1320
1321                 /* Latency issues. Drop the lock, wait a while and retry */
1322                 z++;
1323                 UDELAY(map, chip, adr, 1);
1324         }
1325         if (!z) {
1326                 chip->word_write_time--;
1327                 if (!chip->word_write_time)
1328                         chip->word_write_time = 1;
1329         }
1330         if (z > 1)
1331                 chip->word_write_time++;
1332
1333         /* Done and happy. */
1334         chip->state = FL_STATUS;
1335
1336         /* check for errors */
1337         if (map_word_bitsset(map, status, CMD(0x1a))) {
1338                 unsigned long chipstatus = MERGESTATUS(status);
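                /*
                 * Mask 0x1a covers SR.4 (program error), SR.3 (VPP low)
                 * and SR.1 (block locked).  MERGESTATUS() reduces the
                 * map_word status to a single chip-status value so the
                 * individual bits can be tested below.
                 */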
1339
1340                 /* reset status */
1341                 map_write(map, CMD(0x50), adr);
1342                 map_write(map, CMD(0x70), adr);
1343                 xip_enable(map, chip, adr);
1344
1345                 if (chipstatus & 0x02) {
1346                         ret = -EROFS;
1347                 } else if (chipstatus & 0x08) {
1348                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1349                         ret = -EIO;
1350                 } else {
1351                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1352                         ret = -EINVAL;
1353                 }
1354
1355                 goto out;
1356         }
1357
1358         xip_enable(map, chip, adr);
1359  out:   put_chip(map, chip, adr);
1360         spin_unlock(chip->mutex);
1361         return ret;
1362 }
1363
1364
1365 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1366 {
1367         struct map_info *map = mtd->priv;
1368         struct cfi_private *cfi = map->fldrv_priv;
1369         int ret = 0;
1370         int chipnum;
1371         unsigned long ofs;
1372
1373         *retlen = 0;
1374         if (!len)
1375                 return 0;
1376
1377         chipnum = to >> cfi->chipshift;
1378         ofs = to  - (chipnum << cfi->chipshift);
1379
1380         /* If it's not bus-aligned, do the first byte write */
1381         if (ofs & (map_bankwidth(map)-1)) {
1382                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1383                 int gap = ofs - bus_ofs;
1384                 int n;
1385                 map_word datum;
1386
1387                 n = min_t(int, len, map_bankwidth(map)-gap);
1388                 datum = map_word_ff(map);
1389                 datum = map_word_load_partial(map, datum, buf, gap, n);
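                /*
                 * Padding with 0xFF is safe on NOR flash: programming can
                 * only clear bits, so the 0xFF filler leaves the bytes
                 * outside [gap, gap+n) in their current state.
                 */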
1390
1391                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1392                                                bus_ofs, datum, FL_WRITING);
1393                 if (ret)
1394                         return ret;
1395
1396                 len -= n;
1397                 ofs += n;
1398                 buf += n;
1399                 (*retlen) += n;
1400
1401                 if (ofs >> cfi->chipshift) {
1402                         chipnum ++;
1403                         ofs = 0;
1404                         if (chipnum == cfi->numchips)
1405                                 return 0;
1406                 }
1407         }
1408
1409         while(len >= map_bankwidth(map)) {
1410                 map_word datum = map_word_load(map, buf);
1411
1412                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1413                                        ofs, datum, FL_WRITING);
1414                 if (ret)
1415                         return ret;
1416
1417                 ofs += map_bankwidth(map);
1418                 buf += map_bankwidth(map);
1419                 (*retlen) += map_bankwidth(map);
1420                 len -= map_bankwidth(map);
1421
1422                 if (ofs >> cfi->chipshift) {
1423                         chipnum ++;
1424                         ofs = 0;
1425                         if (chipnum == cfi->numchips)
1426                                 return 0;
1427                 }
1428         }
1429
1430         if (len & (map_bankwidth(map)-1)) {
1431                 map_word datum;
1432
1433                 datum = map_word_ff(map);
1434                 datum = map_word_load_partial(map, datum, buf, 0, len);
1435
1436                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1437                                        ofs, datum, FL_WRITING);
1438                 if (ret)
1439                         return ret;
1440
1441                 (*retlen) += len;
1442         }
1443
1444         return 0;
1445 }
1446
1447
1448 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1449                                     unsigned long adr, const struct kvec **pvec,
1450                                     unsigned long *pvec_seek, int len)
1451 {
1452         struct cfi_private *cfi = map->fldrv_priv;
1453         map_word status, status_OK, write_cmd, datum;
1454         unsigned long cmd_adr, timeo;
1455         int wbufsize, z, ret=0, word_gap, words;
1456         const struct kvec *vec;
1457         unsigned long vec_seek;
1458
1459         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1460         adr += chip->start;
1461         cmd_adr = adr & ~(wbufsize-1);
1462
1463         /* Let's determine this according to the interleave only once */
1464         status_OK = CMD(0x80);
1465         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
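        /*
         * E.g. a 28FxxxJ3 part reports MaxBufWriteSize == 5, i.e. a
         * 32-byte write buffer, so two interleaved chips give wbufsize
         * == 64 and cmd_adr above is rounded down to that buffer
         * boundary.  0xE8 is the Write-to-Buffer setup command (0xE9
         * for the cmdset-0200 variant).
         */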
1466
1467         spin_lock(chip->mutex);
1468         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1469         if (ret) {
1470                 spin_unlock(chip->mutex);
1471                 return ret;
1472         }
1473
1474         XIP_INVAL_CACHED_RANGE(map, adr, len);
1475         ENABLE_VPP(map);
1476         xip_disable(map, chip, cmd_adr);
1477
1478         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1479            [...], the device will not accept any more Write to Buffer commands".
1480            So we must check here and reset those bits if they're set. Otherwise
1481            we're just pissing in the wind */
1482         if (chip->state != FL_STATUS) {
1483                 map_write(map, CMD(0x70), cmd_adr);
1484                 chip->state = FL_STATUS;
1485         }
1486         status = map_read(map, cmd_adr);
1487         if (map_word_bitsset(map, status, CMD(0x30))) {
1488                 xip_enable(map, chip, cmd_adr);
1489                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1490                 xip_disable(map, chip, cmd_adr);
1491                 map_write(map, CMD(0x50), cmd_adr);
1492                 map_write(map, CMD(0x70), cmd_adr);
1493         }
1494
1495         chip->state = FL_WRITING_TO_BUFFER;
1496
1497         z = 0;
1498         for (;;) {
1499                 map_write(map, write_cmd, cmd_adr);
1500
1501                 status = map_read(map, cmd_adr);
1502                 if (map_word_andequal(map, status, status_OK, status_OK))
1503                         break;
1504
1505                 UDELAY(map, chip, cmd_adr, 1);
1506
1507                 if (++z > 20) {
1508                         /* Argh. Not ready for write to buffer */
1509                         map_word Xstatus;
1510                         map_write(map, CMD(0x70), cmd_adr);
1511                         chip->state = FL_STATUS;
1512                         Xstatus = map_read(map, cmd_adr);
1513                         /* Odd. Clear status bits */
1514                         map_write(map, CMD(0x50), cmd_adr);
1515                         map_write(map, CMD(0x70), cmd_adr);
1516                         xip_enable(map, chip, cmd_adr);
1517                         printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1518                                map->name, status.x[0], Xstatus.x[0]);
1519                         ret = -EIO;
1520                         goto out;
1521                 }
1522         }
1523
1524         /* Figure out the number of words to write */
1525         word_gap = (-adr & (map_bankwidth(map)-1));
1526         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1527         if (!word_gap) {
1528                 words--;
1529         } else {
1530                 word_gap = map_bankwidth(map) - word_gap;
1531                 adr -= word_gap;
1532                 datum = map_word_ff(map);
1533         }
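        /*
         * The chip expects the word count minus one.  E.g. with
         * bankwidth 4, adr ending in 0x06 and len == 10: the initial
         * word_gap is 2, so words = (10 - 2 + 3) / 4 = 2, which is
         * correct for the three bus words (offsets 0x04-0x0f) the loop
         * below actually transfers.
         */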
1534
1535         /* Write length of data to come */
1536         map_write(map, CMD(words), cmd_adr);
1537
1538         /* Write data */
1539         vec = *pvec;
1540         vec_seek = *pvec_seek;
1541         do {
1542                 int n = map_bankwidth(map) - word_gap;
1543                 if (n > vec->iov_len - vec_seek)
1544                         n = vec->iov_len - vec_seek;
1545                 if (n > len)
1546                         n = len;
1547
1548                 if (!word_gap && len < map_bankwidth(map))
1549                         datum = map_word_ff(map);
1550
1551                 datum = map_word_load_partial(map, datum,
1552                                               vec->iov_base + vec_seek,
1553                                               word_gap, n);
1554
1555                 len -= n;
1556                 word_gap += n;
1557                 if (!len || word_gap == map_bankwidth(map)) {
1558                         map_write(map, datum, adr);
1559                         adr += map_bankwidth(map);
1560                         word_gap = 0;
1561                 }
1562
1563                 vec_seek += n;
1564                 if (vec_seek == vec->iov_len) {
1565                         vec++;
1566                         vec_seek = 0;
1567                 }
1568         } while (len);
1569         *pvec = vec;
1570         *pvec_seek = vec_seek;
1571
1572         /* GO GO GO */
1573         map_write(map, CMD(0xd0), cmd_adr);
1574         chip->state = FL_WRITING;
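        /*
         * 0xD0 is the confirm opcode; the same code also confirms block
         * erase and resumes suspended operations elsewhere in this
         * driver.
         */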
1575
1576         INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
1577                                 adr, len,
1578                                 chip->buffer_write_time);
1579
1580         timeo = jiffies + (HZ/2);
1581         z = 0;
1582         for (;;) {
1583                 if (chip->state != FL_WRITING) {
1584                         /* Someone's suspended the write. Sleep */
1585                         DECLARE_WAITQUEUE(wait, current);
1586                         set_current_state(TASK_UNINTERRUPTIBLE);
1587                         add_wait_queue(&chip->wq, &wait);
1588                         spin_unlock(chip->mutex);
1589                         schedule();
1590                         remove_wait_queue(&chip->wq, &wait);
1591                         timeo = jiffies + (HZ / 2); /* FIXME */
1592                         spin_lock(chip->mutex);
1593                         continue;
1594                 }
1595
1596                 status = map_read(map, cmd_adr);
1597                 if (map_word_andequal(map, status, status_OK, status_OK))
1598                         break;
1599
1600                 /* OK Still waiting */
1601                 if (time_after(jiffies, timeo)) {
1602                         map_write(map, CMD(0x70), cmd_adr);
1603                         chip->state = FL_STATUS;
1604                         xip_enable(map, chip, cmd_adr);
1605                         printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1606                         ret = -EIO;
1607                         goto out;
1608                 }
1609
1610                 /* Latency issues. Drop the lock, wait a while and retry */
1611                 z++;
1612                 UDELAY(map, chip, cmd_adr, 1);
1613         }
1614         if (!z) {
1615                 chip->buffer_write_time--;
1616                 if (!chip->buffer_write_time)
1617                         chip->buffer_write_time = 1;
1618         }
1619         if (z > 1)
1620                 chip->buffer_write_time++;
1621
1622         /* Done and happy. */
1623         chip->state = FL_STATUS;
1624
1625         /* check for errors */
1626         if (map_word_bitsset(map, status, CMD(0x1a))) {
1627                 unsigned long chipstatus = MERGESTATUS(status);
1628
1629                 /* reset status */
1630                 map_write(map, CMD(0x50), cmd_adr);
1631                 map_write(map, CMD(0x70), cmd_adr);
1632                 xip_enable(map, chip, cmd_adr);
1633
1634                 if (chipstatus & 0x02) {
1635                         ret = -EROFS;
1636                 } else if (chipstatus & 0x08) {
1637                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1638                         ret = -EIO;
1639                 } else {
1640                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1641                         ret = -EINVAL;
1642                 }
1643
1644                 goto out;
1645         }
1646
1647         xip_enable(map, chip, cmd_adr);
1648  out:   put_chip(map, chip, cmd_adr);
1649         spin_unlock(chip->mutex);
1650         return ret;
1651 }
1652
1653 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1654                                 unsigned long count, loff_t to, size_t *retlen)
1655 {
1656         struct map_info *map = mtd->priv;
1657         struct cfi_private *cfi = map->fldrv_priv;
1658         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1659         int ret = 0;
1660         int chipnum;
1661         unsigned long ofs, vec_seek, i;
1662         size_t len = 0;
1663
1664         for (i = 0; i < count; i++)
1665                 len += vecs[i].iov_len;
1666
1667         *retlen = 0;
1668         if (!len)
1669                 return 0;
1670
1671         chipnum = to >> cfi->chipshift;
1672         ofs = to - (chipnum << cfi->chipshift);
1673         vec_seek = 0;
1674
1675         do {
1676                 /* We must not cross write block boundaries */
1677                 int size = wbufsize - (ofs & (wbufsize-1));
1678
1679                 if (size > len)
1680                         size = len;
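                /*
                 * E.g. with a 32-byte buffer, a write starting at ofs
                 * 0x1c is clipped to 4 bytes, so each do_write_buffer()
                 * call stays within one buffer-aligned block.
                 */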
1681                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1682                                       ofs, &vecs, &vec_seek, size);
1683                 if (ret)
1684                         return ret;
1685
1686                 ofs += size;
1687                 (*retlen) += size;
1688                 len -= size;
1689
1690                 if (ofs >> cfi->chipshift) {
1691                         chipnum ++;
1692                         ofs = 0;
1693                         if (chipnum == cfi->numchips)
1694                                 return 0;
1695                 }
1696
1697                 /* Be nice and reschedule with the chip in a usable state for other
1698                    processes. */
1699                 cond_resched();
1700
1701         } while (len);
1702
1703         return 0;
1704 }
1705
1706 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1707                                        size_t len, size_t *retlen, const u_char *buf)
1708 {
1709         struct kvec vec;
1710
1711         vec.iov_base = (void *) buf;
1712         vec.iov_len = len;
1713
1714         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1715 }
1716
1717 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1718                                       unsigned long adr, int len, void *thunk)
1719 {
1720         struct cfi_private *cfi = map->fldrv_priv;
1721         map_word status, status_OK;
1722         unsigned long timeo;
1723         int retries = 3;
1724         DECLARE_WAITQUEUE(wait, current);
1725         int ret = 0;
1726
1727         adr += chip->start;
1728
1729         /* Let's determine this according to the interleave only once */
1730         status_OK = CMD(0x80);
1731
1732  retry:
1733         spin_lock(chip->mutex);
1734         ret = get_chip(map, chip, adr, FL_ERASING);
1735         if (ret) {
1736                 spin_unlock(chip->mutex);
1737                 return ret;
1738         }
1739
1740         XIP_INVAL_CACHED_RANGE(map, adr, len);
1741         ENABLE_VPP(map);
1742         xip_disable(map, chip, adr);
1743
1744         /* Clear the status register first */
1745         map_write(map, CMD(0x50), adr);
1746
1747         /* Now erase */
1748         map_write(map, CMD(0x20), adr);
1749         map_write(map, CMD(0xD0), adr);
1750         chip->state = FL_ERASING;
1751         chip->erase_suspended = 0;
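        /*
         * The 0x20/0xD0 pair above is the two-cycle block-erase command
         * (erase setup, then erase confirm), issued at any address
         * within the block.
         */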
1752
1753         INVALIDATE_CACHE_UDELAY(map, chip, adr,
1754                                 adr, len,
1755                                 chip->erase_time*1000/2);
1756
1757         /* FIXME. Use a timer to check this, and return immediately. */
1758         /* Once the state machine's known to be working I'll do that */
1759
1760         timeo = jiffies + (HZ*20);
1761         for (;;) {
1762                 if (chip->state != FL_ERASING) {
1763                         /* Someone's suspended the erase. Sleep */
1764                         set_current_state(TASK_UNINTERRUPTIBLE);
1765                         add_wait_queue(&chip->wq, &wait);
1766                         spin_unlock(chip->mutex);
1767                         schedule();
1768                         remove_wait_queue(&chip->wq, &wait);
1769                         spin_lock(chip->mutex);
1770                         continue;
1771                 }
1772                 if (chip->erase_suspended) {
1773                         /* This erase was suspended and resumed.
1774                            Adjust the timeout */
1775                         timeo = jiffies + (HZ*20); /* FIXME */
1776                         chip->erase_suspended = 0;
1777                 }
1778
1779                 status = map_read(map, adr);
1780                 if (map_word_andequal(map, status, status_OK, status_OK))
1781                         break;
1782
1783                 /* OK Still waiting */
1784                 if (time_after(jiffies, timeo)) {
1785                         map_write(map, CMD(0x70), adr);
1786                         chip->state = FL_STATUS;
1787                         xip_enable(map, chip, adr);
1788                         printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1789                         ret = -EIO;
1790                         goto out;
1791                 }
1792
1793                 /* Latency issues. Drop the lock, wait a while and retry */
1794                 UDELAY(map, chip, adr, 1000000/HZ);
1795         }
1796
1797         /* We've broken this before. It doesn't hurt to be safe */
1798         map_write(map, CMD(0x70), adr);
1799         chip->state = FL_STATUS;
1800         status = map_read(map, adr);
1801
1802         /* check for errors */
1803         if (map_word_bitsset(map, status, CMD(0x3a))) {
1804                 unsigned long chipstatus = MERGESTATUS(status);
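                /*
                 * Mask 0x3a adds SR.5 (erase error) to the program-time
                 * checks: SR.4+SR.5 together (0x30) signal a bad command
                 * sequence, SR.3 (0x08) a VPP fault and SR.1 (0x02) a
                 * locked block.
                 */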
1805
1806                 /* Reset the error bits */
1807                 map_write(map, CMD(0x50), adr);
1808                 map_write(map, CMD(0x70), adr);
1809                 xip_enable(map, chip, adr);
1810
1811                 if ((chipstatus & 0x30) == 0x30) {
1812                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1813                         ret = -EINVAL;
1814                 } else if (chipstatus & 0x02) {
1815                         /* Protection bit set */
1816                         ret = -EROFS;
1817                 } else if (chipstatus & 0x8) {
1818                         /* Voltage */
1819                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1820                         ret = -EIO;
1821                 } else if (chipstatus & 0x20 && retries--) {
1822                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1823                         timeo = jiffies + HZ;
1824                         put_chip(map, chip, adr);
1825                         spin_unlock(chip->mutex);
1826                         goto retry;
1827                 } else {
1828                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1829                         ret = -EIO;
1830                 }
1831
1832                 goto out;
1833         }
1834
1835         xip_enable(map, chip, adr);
1836  out:   put_chip(map, chip, adr);
1837         spin_unlock(chip->mutex);
1838         return ret;
1839 }
1840
1841 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1842 {
1843         unsigned long ofs, len;
1844         int ret;
1845
1846         ofs = instr->addr;
1847         len = instr->len;
1848
1849         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1850         if (ret)
1851                 return ret;
1852
1853         instr->state = MTD_ERASE_DONE;
1854         mtd_erase_callback(instr);
1855
1856         return 0;
1857 }
1858
1859 static void cfi_intelext_sync (struct mtd_info *mtd)
1860 {
1861         struct map_info *map = mtd->priv;
1862         struct cfi_private *cfi = map->fldrv_priv;
1863         int i;
1864         struct flchip *chip;
1865         int ret = 0;
1866
1867         for (i=0; !ret && i<cfi->numchips; i++) {
1868                 chip = &cfi->chips[i];
1869
1870                 spin_lock(chip->mutex);
1871                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1872
1873                 if (!ret) {
1874                         chip->oldstate = chip->state;
1875                         chip->state = FL_SYNCING;
1876                         /* No need to wake_up() on this state change -
1877                          * as the whole point is that nobody can do anything
1878                          * with the chip now anyway.
1879                          */
1880                 }
1881                 spin_unlock(chip->mutex);
1882         }
1883
1884         /* Unlock the chips again */
1885
1886         for (i--; i >=0; i--) {
1887                 chip = &cfi->chips[i];
1888
1889                 spin_lock(chip->mutex);
1890
1891                 if (chip->state == FL_SYNCING) {
1892                         chip->state = chip->oldstate;
1893                         chip->oldstate = FL_READY;
1894                         wake_up(&chip->wq);
1895                 }
1896                 spin_unlock(chip->mutex);
1897         }
1898 }
1899
1900 #ifdef DEBUG_LOCK_BITS
1901 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1902                                                 struct flchip *chip,
1903                                                 unsigned long adr,
1904                                                 int len, void *thunk)
1905 {
1906         struct cfi_private *cfi = map->fldrv_priv;
1907         int status, ofs_factor = cfi->interleave * cfi->device_type;
1908
1909         adr += chip->start;
1910         xip_disable(map, chip, adr+(2*ofs_factor));
1911         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1912         chip->state = FL_JEDEC_QUERY;
1913         status = cfi_read_query(map, adr+(2*ofs_factor));
1914         xip_enable(map, chip, 0);
1915         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1916                adr, status);
1917         return 0;
1918 }
1919 #endif
1920
1921 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1922 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1923
1924 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1925                                        unsigned long adr, int len, void *thunk)
1926 {
1927         struct cfi_private *cfi = map->fldrv_priv;
1928         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1929         map_word status, status_OK;
1930         unsigned long timeo = jiffies + HZ;
1931         int ret;
1932
1933         adr += chip->start;
1934
1935         /* Let's determine this according to the interleave only once */
1936         status_OK = CMD(0x80);
1937
1938         spin_lock(chip->mutex);
1939         ret = get_chip(map, chip, adr, FL_LOCKING);
1940         if (ret) {
1941                 spin_unlock(chip->mutex);
1942                 return ret;
1943         }
1944
1945         ENABLE_VPP(map);
1946         xip_disable(map, chip, adr);
1947
1948         map_write(map, CMD(0x60), adr);
1949         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1950                 map_write(map, CMD(0x01), adr);
1951                 chip->state = FL_LOCKING;
1952         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1953                 map_write(map, CMD(0xD0), adr);
1954                 chip->state = FL_UNLOCKING;
1955         } else
1956                 BUG();
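        /*
         * Lock manipulation is a two-cycle sequence: 0x60 (lock setup)
         * followed by 0x01 to set, or 0xD0 to clear, the block lock bit.
         */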
1957
1958         /*
1959          * If Instant Individual Block Locking supported then no need
1960          * to delay.
1961          */
1962
1963         if (!extp || !(extp->FeatureSupport & (1 << 5)))
1964                 UDELAY(map, chip, adr, 1000000/HZ);
1965
1966         /* FIXME. Use a timer to check this, and return immediately. */
1967         /* Once the state machine's known to be working I'll do that */
1968
1969         timeo = jiffies + (HZ*20);
1970         for (;;) {
1971
1972                 status = map_read(map, adr);
1973                 if (map_word_andequal(map, status, status_OK, status_OK))
1974                         break;
1975
1976                 /* OK Still waiting */
1977                 if (time_after(jiffies, timeo)) {
1978                         map_write(map, CMD(0x70), adr);
1979                         chip->state = FL_STATUS;
1980                         xip_enable(map, chip, adr);
1981                         printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1982                         put_chip(map, chip, adr);
1983                         spin_unlock(chip->mutex);
1984                         return -EIO;
1985                 }
1986
1987                 /* Latency issues. Drop the lock, wait a while and retry */
1988                 UDELAY(map, chip, adr, 1);
1989         }
1990
1991         /* Done and happy. */
1992         chip->state = FL_STATUS;
1993         xip_enable(map, chip, adr);
1994         put_chip(map, chip, adr);
1995         spin_unlock(chip->mutex);
1996         return 0;
1997 }
1998
1999 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2000 {
2001         int ret;
2002
2003 #ifdef DEBUG_LOCK_BITS
2004         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2005                __FUNCTION__, (unsigned long long)ofs, len);
2006         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2007                 ofs, len, 0);
2008 #endif
2009
2010         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2011                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2012
2013 #ifdef DEBUG_LOCK_BITS
2014         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2015                __FUNCTION__, ret);
2016         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2017                 ofs, len, 0);
2018 #endif
2019
2020         return ret;
2021 }
2022
2023 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2024 {
2025         int ret;
2026
2027 #ifdef DEBUG_LOCK_BITS
2028         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2029                __FUNCTION__, (unsigned long long)ofs, len);
2030         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2031                 ofs, len, 0);
2032 #endif
2033
2034         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2035                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2036
2037 #ifdef DEBUG_LOCK_BITS
2038         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2039                __FUNCTION__, ret);
2040         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2041                 ofs, len, 0);
2042 #endif
2043
2044         return ret;
2045 }
2046
2047 #ifdef CONFIG_MTD_OTP
2048
2049 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2050                         u_long data_offset, u_char *buf, u_int size,
2051                         u_long prot_offset, u_int groupno, u_int groupsize);
2052
2053 static int __xipram
2054 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2055             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2056 {
2057         struct cfi_private *cfi = map->fldrv_priv;
2058         int ret;
2059
2060         spin_lock(chip->mutex);
2061         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2062         if (ret) {
2063                 spin_unlock(chip->mutex);
2064                 return ret;
2065         }
2066
2067         /* let's ensure we're not reading back cached data from array mode */
2068         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2069
2070         xip_disable(map, chip, chip->start);
2071         if (chip->state != FL_JEDEC_QUERY) {
2072                 map_write(map, CMD(0x90), chip->start);
2073                 chip->state = FL_JEDEC_QUERY;
2074         }
2075         map_copy_from(map, buf, chip->start + offset, size);
2076         xip_enable(map, chip, chip->start);
2077
2078         /* then ensure we don't keep OTP data in the cache */
2079         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2080
2081         put_chip(map, chip, chip->start);
2082         spin_unlock(chip->mutex);
2083         return 0;
2084 }
2085
2086 static int
2087 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2088              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2089 {
2090         int ret;
2091
2092         while (size) {
2093                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2094                 int gap = offset - bus_ofs;
2095                 int n = min_t(int, size, map_bankwidth(map)-gap);
2096                 map_word datum = map_word_ff(map);
2097
2098                 datum = map_word_load_partial(map, datum, buf, gap, n);
2099                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2100                 if (ret)
2101                         return ret;
2102
2103                 offset += n;
2104                 buf += n;
2105                 size -= n;
2106         }
2107
2108         return 0;
2109 }
2110
2111 static int
2112 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2113             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2114 {
2115         struct cfi_private *cfi = map->fldrv_priv;
2116         map_word datum;
2117
2118         /* make sure area matches group boundaries */
2119         if (size != grpsz)
2120                 return -EXDEV;
2121
2122         datum = map_word_ff(map);
2123         datum = map_word_clr(map, datum, CMD(1 << grpno));
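        /*
         * The protection-lock word programs from 1 to 0 and can never be
         * set back, so clearing just bit grpno locks that group
         * permanently while leaving the other groups untouched.
         */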
2124         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2125 }
2126
2127 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2128                                  size_t *retlen, u_char *buf,
2129                                  otp_op_t action, int user_regs)
2130 {
2131         struct map_info *map = mtd->priv;
2132         struct cfi_private *cfi = map->fldrv_priv;
2133         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2134         struct flchip *chip;
2135         struct cfi_intelext_otpinfo *otp;
2136         u_long devsize, reg_prot_offset, data_offset;
2137         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2138         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2139         int ret;
2140
2141         *retlen = 0;
2142
2143         /* Check that we actually have some OTP registers */
2144         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2145                 return -ENODATA;
2146
2147         /* we need real chips here not virtual ones */
2148         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2149         chip_step = devsize >> cfi->chipshift;
2150         chip_num = 0;
2151
2152         /* Some chips have OTP located in the _top_ partition only.
2153            For example: Intel 28F256L18T (T means top-parameter device) */
2154         if (cfi->mfr == MANUFACTURER_INTEL) {
2155                 switch (cfi->id) {
2156                 case 0x880b:
2157                 case 0x880c:
2158                 case 0x880d:
2159                         chip_num = chip_step - 1;
2160                 }
2161         }
2162
2163         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2164                 chip = &cfi->chips[chip_num];
2165                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2166
2167                 /* first OTP region */
2168                 field = 0;
2169                 reg_prot_offset = extp->ProtRegAddr;
2170                 reg_fact_groups = 1;
2171                 reg_fact_size = 1 << extp->FactProtRegSize;
2172                 reg_user_groups = 1;
2173                 reg_user_size = 1 << extp->UserProtRegSize;
2174
2175                 while (len > 0) {
2176                         /* flash geometry fixup */
2177                         data_offset = reg_prot_offset + 1;
2178                         data_offset *= cfi->interleave * cfi->device_type;
2179                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2180                         reg_fact_size *= cfi->interleave;
2181                         reg_user_size *= cfi->interleave;
2182
2183                         if (user_regs) {
2184                                 groups = reg_user_groups;
2185                                 groupsize = reg_user_size;
2186                                 /* skip over factory reg area */
2187                                 groupno = reg_fact_groups;
2188                                 data_offset += reg_fact_groups * reg_fact_size;
2189                         } else {
2190                                 groups = reg_fact_groups;
2191                                 groupsize = reg_fact_size;
2192                                 groupno = 0;
2193                         }
2194
2195                         while (len > 0 && groups > 0) {
2196                                 if (!action) {
2197                                         /*
2198                                          * Special case: if action is NULL
2199                                          * we fill buf with otp_info records.
2200                                          */
2201                                         struct otp_info *otpinfo;
2202                                         map_word lockword;
2203                                         if (len <= sizeof(struct otp_info))
2204                                                 return -ENOSPC;
2205                                         len -= sizeof(struct otp_info);
2206                                         ret = do_otp_read(map, chip,
2207                                                           reg_prot_offset,
2208                                                           (u_char *)&lockword,
2209                                                           map_bankwidth(map),
2210                                                           0, 0,  0);
2211                                         if (ret)
2212                                                 return ret;
2213                                         otpinfo = (struct otp_info *)buf;
2214                                         otpinfo->start = from;
2215                                         otpinfo->length = groupsize;
2216                                         otpinfo->locked =
2217                                            !map_word_bitsset(map, lockword,
2218                                                              CMD(1 << groupno));
2219                                         from += groupsize;
2220                                         buf += sizeof(*otpinfo);
2221                                         *retlen += sizeof(*otpinfo);
2222                                 } else if (from >= groupsize) {
2223                                         from -= groupsize;
2224                                         data_offset += groupsize;
2225                                 } else {
2226                                         int size = groupsize;
2227                                         data_offset += from;
2228                                         size -= from;
2229                                         from = 0;
2230                                         if (size > len)
2231                                                 size = len;
2232                                         ret = action(map, chip, data_offset,
2233                                                      buf, size, reg_prot_offset,
2234                                                      groupno, groupsize);
2235                                         if (ret < 0)
2236                                                 return ret;
2237                                         buf += size;
2238                                         len -= size;
2239                                         *retlen += size;
2240                                         data_offset += size;
2241                                 }
2242                                 groupno++;
2243                                 groups--;
2244                         }
2245
2246                         /* next OTP region */
2247                         if (++field == extp->NumProtectionFields)
2248                                 break;
2249                         reg_prot_offset = otp->ProtRegAddr;
2250                         reg_fact_groups = otp->FactGroups;
2251                         reg_fact_size = 1 << otp->FactProtRegSize;
2252                         reg_user_groups = otp->UserGroups;
2253                         reg_user_size = 1 << otp->UserProtRegSize;
2254                         otp++;
2255                 }
2256         }
2257
2258         return 0;
2259 }
2260
2261 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2262                                            size_t len, size_t *retlen,
2263                                             u_char *buf)
2264 {
2265         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2266                                      buf, do_otp_read, 0);
2267 }
2268
2269 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2270                                            size_t len, size_t *retlen,
2271                                             u_char *buf)
2272 {
2273         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2274                                      buf, do_otp_read, 1);
2275 }
2276
2277 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2278                                             size_t len, size_t *retlen,
2279                                              u_char *buf)
2280 {
2281         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2282                                      buf, do_otp_write, 1);
2283 }
2284
2285 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2286                                            loff_t from, size_t len)
2287 {
2288         size_t retlen;
2289         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2290                                      NULL, do_otp_lock, 1);
2291 }
2292
2293 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2294                                            struct otp_info *buf, size_t len)
2295 {
2296         size_t retlen;
2297         int ret;
2298
2299         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
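        /* GNU "?:" shorthand: return ret if non-zero, else retlen */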
2300         return ret ? : retlen;
2301 }
2302
2303 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2304                                            struct otp_info *buf, size_t len)
2305 {
2306         size_t retlen;
2307         int ret;
2308
2309         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2310         return ret ? : retlen;
2311 }
2312
2313 #endif
2314
2315 static int cfi_intelext_suspend(struct mtd_info *mtd)
2316 {
2317         struct map_info *map = mtd->priv;
2318         struct cfi_private *cfi = map->fldrv_priv;
2319         int i;
2320         struct flchip *chip;
2321         int ret = 0;
2322
2323         for (i=0; !ret && i<cfi->numchips; i++) {
2324                 chip = &cfi->chips[i];
2325
2326                 spin_lock(chip->mutex);
2327
2328                 switch (chip->state) {
2329                 case FL_READY:
2330                 case FL_STATUS:
2331                 case FL_CFI_QUERY:
2332                 case FL_JEDEC_QUERY:
2333                         if (chip->oldstate == FL_READY) {
2334                                 chip->oldstate = chip->state;
2335                                 chip->state = FL_PM_SUSPENDED;
2336                                 /* No need to wake_up() on this state change -
2337                                  * as the whole point is that nobody can do anything
2338                                  * with the chip now anyway.
2339                                  */
2340                         } else {
2341                                 /* There seems to be an operation pending. We must wait for it. */
2342                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2343                                 ret = -EAGAIN;
2344                         }
2345                         break;
2346                 default:
2347                         /* Should we actually wait? Once upon a time these routines weren't
2348                            allowed to. Or should we return -EAGAIN, because the upper layers
2349                            ought to have already shut down anything which was using the device
2350                            anyway? The latter for now. */
2351                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2352                         ret = -EAGAIN;
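                        /* fall through */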
2353                 case FL_PM_SUSPENDED:
2354                         break;
2355                 }
2356                 spin_unlock(chip->mutex);
2357         }
2358
2359         /* Unlock the chips again */
2360
2361         if (ret) {
2362                 for (i--; i >=0; i--) {
2363                         chip = &cfi->chips[i];
2364
2365                         spin_lock(chip->mutex);
2366
2367                         if (chip->state == FL_PM_SUSPENDED) {
2368                                 /* No need to force it into a known state here,
2369                                    because we're returning failure, and it didn't
2370                                    get power cycled */
2371                                 chip->state = chip->oldstate;
2372                                 chip->oldstate = FL_READY;
2373                                 wake_up(&chip->wq);
2374                         }
2375                         spin_unlock(chip->mutex);
2376                 }
2377         }
2378
2379         return ret;
2380 }
2381
2382 static void cfi_intelext_resume(struct mtd_info *mtd)
2383 {
2384         struct map_info *map = mtd->priv;
2385         struct cfi_private *cfi = map->fldrv_priv;
2386         int i;
2387         struct flchip *chip;
2388
2389         for (i=0; i<cfi->numchips; i++) {
2390
2391                 chip = &cfi->chips[i];
2392
2393                 spin_lock(chip->mutex);
2394
2395                 /* Go to known state. Chip may have been power cycled */
2396                 if (chip->state == FL_PM_SUSPENDED) {
2397                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2398                         chip->oldstate = chip->state = FL_READY;
2399                         wake_up(&chip->wq);
2400                 }
2401
2402                 spin_unlock(chip->mutex);
2403         }
2404 }
2405
2406 static int cfi_intelext_reset(struct mtd_info *mtd)
2407 {
2408         struct map_info *map = mtd->priv;
2409         struct cfi_private *cfi = map->fldrv_priv;
2410         int i, ret;
2411
2412         for (i=0; i < cfi->numchips; i++) {
2413                 struct flchip *chip = &cfi->chips[i];
2414
2415                 /* force the completion of any ongoing operation
2416                    and switch to array mode so any bootloader in
2417                    flash is accessible for soft reboot. */
2418                 spin_lock(chip->mutex);
2419                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2420                 if (!ret) {
2421                         map_write(map, CMD(0xff), chip->start);
2422                         chip->state = FL_READY;
2423                 }
2424                 spin_unlock(chip->mutex);
2425         }
2426
2427         return 0;
2428 }
2429
2430 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2431                                void *v)
2432 {
2433         struct mtd_info *mtd;
2434
2435         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2436         cfi_intelext_reset(mtd);
2437         return NOTIFY_DONE;
2438 }
2439
2440 static void cfi_intelext_destroy(struct mtd_info *mtd)
2441 {
2442         struct map_info *map = mtd->priv;
2443         struct cfi_private *cfi = map->fldrv_priv;
2444         cfi_intelext_reset(mtd);
2445         unregister_reboot_notifier(&mtd->reboot_notifier);
2446         kfree(cfi->cmdset_priv);
2447         kfree(cfi->cfiq);
2448         kfree(cfi->chips[0].priv);
2449         kfree(cfi);
2450         kfree(mtd->eraseregions);
2451 }
2452
2453 MODULE_LICENSE("GPL");
2454 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2455 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2456 MODULE_ALIAS("cfi_cmdset_0003");
2457 MODULE_ALIAS("cfi_cmdset_0200");