/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging: turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
#define AT49BV640D      0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {   /* bits 2-3 are reserved/unknown */
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It looks like the device IDs are as
         * well.  This table picks all the cases where we know
         * that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
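
/*
 * For reference: cfi_fixup() (defined in cfi_util.c, not in this file)
 * walks one of the tables above and invokes every entry whose
 * manufacturer/device IDs match the probed chip, with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards.  A minimal sketch of that loop,
 * assuming its current semantics:
 *
 *      for (f = fixups; f->fixup; f++) {
 *              if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                  (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *                      f->fixup(mtd, f->param);
 *      }
 */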

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
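
/*
 * Note on the function above: the extended query table has a
 * variable-length tail, so we first read sizeof(*extp) bytes, walk the
 * variable fields to work out how much more is needed, and whenever the
 * buffer turns out too small we free it and re-read the whole table at
 * the larger size ("goto again"), giving up past a 4 KiB sanity limit.
 */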

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
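
/*
 * Usage sketch (illustrative, not taken from this file): a map driver
 * that has ioremapped its NOR window typically reaches the code above
 * indirectly via the generic CFI probe, e.g.:
 *
 *      struct mtd_info *mtd = do_map_probe("cfi_probe", map);
 *      if (mtd)
 *              add_mtd_device(mtd);
 *
 * do_map_probe() identifies the primary vendor command set from the CFI
 * query data; IDs 0x0001, 0x0003 and 0x0200 all resolve to
 * cfi_cmdset_0001() thanks to the aliases above.
 */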

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
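
/*
 * Worked example (hypothetical numbers): a single 8 MiB chip
 * (cfi->chipshift = 23) that advertises 4 identical hardware partitions
 * gives partshift = 23 - __ffs(4) = 21, so the chip is re-described as
 * four virtual chips of 1 << 21 bytes (2 MiB) each, all pointing at the
 * same flchip_shared structure for write/erase arbitration.
 */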

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}
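
/*
 * Return contract of chip_ready() as implemented above: 0 means the chip
 * is now in a state compatible with 'mode' and the caller may proceed;
 * -EAGAIN means chip->mutex was dropped and re-taken, so the caller must
 * re-evaluate the chip state and retry; -EIO means the chip would not
 * suspend or respond in time and should be treated as stuck.
 */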

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
                           || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
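
/*
 * Illustrative scenario for the ownership rules above: partition A is
 * erasing (it owns shared->writing and shared->erasing) and partition B
 * wants to write.  B finds A as the 'contender', asks chip_ready() to
 * suspend A's erase, then takes over shared->writing.  When B calls
 * put_chip() below, ownership flows back to A so the suspended erase can
 * be resumed.
 */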

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time, reset_timeo;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
                if (chip->erase_suspended || chip->write_suspended)  {
                        /* Suspend has occurred while sleeping: reset timeout */
                        timeo = reset_timeo;
                        chip->erase_suspended = 0;
                        chip->write_suspended = 0;
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}
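
/*
 * Timeout accounting in the function above: 'timeo' counts microseconds
 * and starts at 8x the typical operation time reported by the chip (or
 * 500 ms if none was given).  Waits of at least one timer tick are done
 * with msleep(), shorter remainders with udelay(1), and the budget is
 * reset whenever a suspend/resume cycle interrupted the operation.
 */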

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
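
/*
 * WAIT_TIMEOUT is the no-invalidation variant: it polls for completion at
 * 'adr' with a zero-length invalidation range, which suits operations
 * that don't modify array data the CPU may have cached (it is used later
 * in this file by paths such as block lock/unlock).
 */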
1222
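/*
 * Worked example (illustrative only, timing values assumed): for a word
 * write with chip_op_time = 256us and HZ = 100, the loop above starts with
 *
 *	timeo      = 256 * 8 = 2048us	(budget: 8x the expected delay)
 *	sleep_time = 256 / 2 = 128us	(first wait: half the expected delay)
 *
 * Since 128us is well below one scheduler tick (1000000/HZ = 10000us), the
 * loop busy-waits in 1us udelay() steps; only long operations such as
 * block erases, whose sleep_time spans at least a tick, use msleep().
 */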
1223
1224 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1225 {
1226         unsigned long cmd_addr;
1227         struct cfi_private *cfi = map->fldrv_priv;
1228         int ret = 0;
1229
1230         adr += chip->start;
1231
1232         /* Ensure cmd read/writes are aligned. */
1233         cmd_addr = adr & ~(map_bankwidth(map)-1);
1234
1235         spin_lock(chip->mutex);
1236
1237         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1238
1239         if (!ret) {
1240                 if (chip->state != FL_POINT && chip->state != FL_READY)
1241                         map_write(map, CMD(0xff), cmd_addr);
1242
1243                 chip->state = FL_POINT;
1244                 chip->ref_point_counter++;
1245         }
1246         spin_unlock(chip->mutex);
1247
1248         return ret;
1249 }
1250
1251 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1252                 size_t *retlen, void **virt, resource_size_t *phys)
1253 {
1254         struct map_info *map = mtd->priv;
1255         struct cfi_private *cfi = map->fldrv_priv;
1256         unsigned long ofs, last_end = 0;
1257         int chipnum;
1258         int ret = 0;
1259
1260         if (!map->virt || (from + len > mtd->size))
1261                 return -EINVAL;
1262
1263         /* Now lock the chip(s) to POINT state */
1264
1265         /* ofs: offset within the first chip at which the first read should start */
1266         chipnum = (from >> cfi->chipshift);
1267         ofs = from - (chipnum << cfi->chipshift);
1268
1269         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1270         *retlen = 0;
1271         if (phys)
1272                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1273
1274         while (len) {
1275                 unsigned long thislen;
1276
1277                 if (chipnum >= cfi->numchips)
1278                         break;
1279
1280                 /* We cannot point across chips that are virtually disjoint */
1281                 if (!last_end)
1282                         last_end = cfi->chips[chipnum].start;
1283                 else if (cfi->chips[chipnum].start != last_end)
1284                         break;
1285
1286                 if ((len + ofs - 1) >> cfi->chipshift)
1287                         thislen = (1<<cfi->chipshift) - ofs;
1288                 else
1289                         thislen = len;
1290
1291                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1292                 if (ret)
1293                         break;
1294
1295                 *retlen += thislen;
1296                 len -= thislen;
1297
1298                 ofs = 0;
1299                 last_end += 1 << cfi->chipshift;
1300                 chipnum++;
1301         }
1302         return 0;
1303 }
1304
1305 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1306 {
1307         struct map_info *map = mtd->priv;
1308         struct cfi_private *cfi = map->fldrv_priv;
1309         unsigned long ofs;
1310         int chipnum;
1311
1312         /* Now release the chip(s) from POINT state */
1313
1314         /* ofs: offset within the first chip at which the first read should start */
1315         chipnum = (from >> cfi->chipshift);
1316         ofs = from - (chipnum << cfi->chipshift);
1317
1318         while (len) {
1319                 unsigned long thislen;
1320                 struct flchip *chip;
1321
1322                 if (chipnum >= cfi->numchips)
1323                         break;
1324                 chip = &cfi->chips[chipnum];
1325
1326                 if ((len + ofs - 1) >> cfi->chipshift)
1327                         thislen = (1<<cfi->chipshift) - ofs;
1328                 else
1329                         thislen = len;
1330
1331                 spin_lock(chip->mutex);
1332                 if (chip->state == FL_POINT) {
1333                         chip->ref_point_counter--;
1334                         if (chip->ref_point_counter == 0)
1335                                 chip->state = FL_READY;
1336                 } else
1337                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1338
1339                 put_chip(map, chip, chip->start);
1340                 spin_unlock(chip->mutex);
1341
1342                 len -= thislen;
1343                 ofs = 0;
1344                 chipnum++;
1345         }
1346 }
1347
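/*
 * Usage sketch (hypothetical caller, not part of this file): MTD clients
 * reach the two handlers above through the mtd_info methods, e.g.
 *
 *	void *virt;
 *	size_t retlen;
 *
 *	if (!mtd->point(mtd, from, len, &retlen, &virt, NULL)) {
 *		memcpy(dest, virt, retlen);	(dest: the caller's buffer)
 *		mtd->unpoint(mtd, from, retlen);
 *	}
 *
 * The ref_point_counter above makes nested points on the same chip safe:
 * the chip only drops back to FL_READY when the last point is released.
 */
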
1348 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1349 {
1350         unsigned long cmd_addr;
1351         struct cfi_private *cfi = map->fldrv_priv;
1352         int ret;
1353
1354         adr += chip->start;
1355
1356         /* Ensure cmd read/writes are aligned. */
1357         cmd_addr = adr & ~(map_bankwidth(map)-1);
1358
1359         spin_lock(chip->mutex);
1360         ret = get_chip(map, chip, cmd_addr, FL_READY);
1361         if (ret) {
1362                 spin_unlock(chip->mutex);
1363                 return ret;
1364         }
1365
1366         if (chip->state != FL_POINT && chip->state != FL_READY) {
1367                 map_write(map, CMD(0xff), cmd_addr);
1368
1369                 chip->state = FL_READY;
1370         }
1371
1372         map_copy_from(map, buf, adr, len);
1373
1374         put_chip(map, chip, cmd_addr);
1375
1376         spin_unlock(chip->mutex);
1377         return 0;
1378 }
1379
1380 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1381 {
1382         struct map_info *map = mtd->priv;
1383         struct cfi_private *cfi = map->fldrv_priv;
1384         unsigned long ofs;
1385         int chipnum;
1386         int ret = 0;
1387
1388         /* ofs: offset within the first chip at which the first read should start */
1389         chipnum = (from >> cfi->chipshift);
1390         ofs = from - (chipnum << cfi->chipshift);
1391
1392         *retlen = 0;
1393
1394         while (len) {
1395                 unsigned long thislen;
1396
1397                 if (chipnum >= cfi->numchips)
1398                         break;
1399
1400                 if ((len + ofs - 1) >> cfi->chipshift)
1401                         thislen = (1<<cfi->chipshift) - ofs;
1402                 else
1403                         thislen = len;
1404
1405                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1406                 if (ret)
1407                         break;
1408
1409                 *retlen += thislen;
1410                 len -= thislen;
1411                 buf += thislen;
1412
1413                 ofs = 0;
1414                 chipnum++;
1415         }
1416         return ret;
1417 }
1418
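/*
 * Worked example (illustrative, geometry assumed): with 16MiB chips
 * (cfi->chipshift = 24), a read of len = 0x30000 at from = 0xffc000
 * splits as
 *
 *	chipnum = 0xffc000 >> 24	= 0
 *	ofs     = 0xffc000 - (0 << 24)	= 0xffc000
 *	thislen = (1 << 24) - 0xffc000	= 0x4000	(tail of chip 0)
 *
 * after which the loop continues with chipnum = 1, ofs = 0 for the
 * remaining 0x2c000 bytes. The point, write and erase paths all use
 * the same chip-spanning split.
 */
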
1419 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1420                                      unsigned long adr, map_word datum, int mode)
1421 {
1422         struct cfi_private *cfi = map->fldrv_priv;
1423         map_word status, write_cmd;
1424         int ret=0;
1425
1426         adr += chip->start;
1427
1428         switch (mode) {
1429         case FL_WRITING:
1430                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1431                 break;
1432         case FL_OTP_WRITE:
1433                 write_cmd = CMD(0xc0);
1434                 break;
1435         default:
1436                 return -EINVAL;
1437         }
1438
1439         spin_lock(chip->mutex);
1440         ret = get_chip(map, chip, adr, mode);
1441         if (ret) {
1442                 spin_unlock(chip->mutex);
1443                 return ret;
1444         }
1445
1446         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1447         ENABLE_VPP(map);
1448         xip_disable(map, chip, adr);
1449         map_write(map, write_cmd, adr);
1450         map_write(map, datum, adr);
1451         chip->state = mode;
1452
1453         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1454                                    adr, map_bankwidth(map),
1455                                    chip->word_write_time);
1456         if (ret) {
1457                 xip_enable(map, chip, adr);
1458                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1459                 goto out;
1460         }
1461
1462         /* check for errors */
1463         status = map_read(map, adr);
1464         if (map_word_bitsset(map, status, CMD(0x1a))) {
1465                 unsigned long chipstatus = MERGESTATUS(status);
1466
1467                 /* reset status */
1468                 map_write(map, CMD(0x50), adr);
1469                 map_write(map, CMD(0x70), adr);
1470                 xip_enable(map, chip, adr);
1471
1472                 if (chipstatus & 0x02) {
1473                         ret = -EROFS;
1474                 } else if (chipstatus & 0x08) {
1475                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1476                         ret = -EIO;
1477                 } else {
1478                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1479                         ret = -EINVAL;
1480                 }
1481
1482                 goto out;
1483         }
1484
1485         xip_enable(map, chip, adr);
1486  out:   put_chip(map, chip, adr);
1487         spin_unlock(chip->mutex);
1488         return ret;
1489 }
1490
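/*
 * Command-cycle sketch of the word program above (illustrative; 0x41 is
 * the variant used when the primary command set ID is 0x0200):
 *
 *	map_write(map, CMD(0x40), adr);		Word Program setup
 *	map_write(map, datum, adr);		data cycle starts programming
 *	    ... poll the status register until SR.7 (ready) is set ...
 *	map_write(map, CMD(0x50), adr);		Clear Status (error path only)
 *	map_write(map, CMD(0x70), adr);		back to Read Status mode
 */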
1491
1492 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1493 {
1494         struct map_info *map = mtd->priv;
1495         struct cfi_private *cfi = map->fldrv_priv;
1496         int ret = 0;
1497         int chipnum;
1498         unsigned long ofs;
1499
1500         *retlen = 0;
1501         if (!len)
1502                 return 0;
1503
1504         chipnum = to >> cfi->chipshift;
1505         ofs = to - (chipnum << cfi->chipshift);
1506
1507         /* If it's not bus-aligned, do the first byte write */
1508         if (ofs & (map_bankwidth(map)-1)) {
1509                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1510                 int gap = ofs - bus_ofs;
1511                 int n;
1512                 map_word datum;
1513
1514                 n = min_t(int, len, map_bankwidth(map)-gap);
1515                 datum = map_word_ff(map);
1516                 datum = map_word_load_partial(map, datum, buf, gap, n);
1517
1518                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1519                                                bus_ofs, datum, FL_WRITING);
1520                 if (ret)
1521                         return ret;
1522
1523                 len -= n;
1524                 ofs += n;
1525                 buf += n;
1526                 (*retlen) += n;
1527
1528                 if (ofs >> cfi->chipshift) {
1529                         chipnum++;
1530                         ofs = 0;
1531                         if (chipnum == cfi->numchips)
1532                                 return 0;
1533                 }
1534         }
1535
1536         while (len >= map_bankwidth(map)) {
1537                 map_word datum = map_word_load(map, buf);
1538
1539                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1540                                        ofs, datum, FL_WRITING);
1541                 if (ret)
1542                         return ret;
1543
1544                 ofs += map_bankwidth(map);
1545                 buf += map_bankwidth(map);
1546                 (*retlen) += map_bankwidth(map);
1547                 len -= map_bankwidth(map);
1548
1549                 if (ofs >> cfi->chipshift) {
1550                         chipnum++;
1551                         ofs = 0;
1552                         if (chipnum == cfi->numchips)
1553                                 return 0;
1554                 }
1555         }
1556
1557         if (len & (map_bankwidth(map)-1)) {
1558                 map_word datum;
1559
1560                 datum = map_word_ff(map);
1561                 datum = map_word_load_partial(map, datum, buf, 0, len);
1562
1563                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1564                                        ofs, datum, FL_WRITING);
1565                 if (ret)
1566                         return ret;
1567
1568                 (*retlen) += len;
1569         }
1570
1571         return 0;
1572 }
1573
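/*
 * Worked example (illustrative) of the unaligned-head path above on a
 * 4-byte-wide bus: with to = 0x1002 and len = 8,
 *
 *	bus_ofs = 0x1002 & ~3		= 0x1000
 *	gap     = 0x1002 - 0x1000	= 2
 *	n       = min(8, 4 - 2)		= 2
 *
 * so one map_word holding 0xff filler plus the first two payload bytes
 * is programmed at 0x1000. Programming can only clear bits, so the 0xff
 * filler leaves the other bytes of that word untouched; the main loop
 * then continues bus-aligned at 0x1004.
 */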
1574
1575 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1576                                     unsigned long adr, const struct kvec **pvec,
1577                                     unsigned long *pvec_seek, int len)
1578 {
1579         struct cfi_private *cfi = map->fldrv_priv;
1580         map_word status, write_cmd, datum;
1581         unsigned long cmd_adr;
1582         int ret, wbufsize, word_gap, words;
1583         const struct kvec *vec;
1584         unsigned long vec_seek;
1585         unsigned long initial_adr;
1586         int initial_len = len;
1587
1588         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1589         adr += chip->start;
1590         initial_adr = adr;
1591         cmd_adr = adr & ~(wbufsize-1);
1592
1593         /* Let's determine this according to the interleave only once */
1594         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1595
1596         spin_lock(chip->mutex);
1597         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1598         if (ret) {
1599                 spin_unlock(chip->mutex);
1600                 return ret;
1601         }
1602
1603         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1604         ENABLE_VPP(map);
1605         xip_disable(map, chip, cmd_adr);
1606
1607         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1608            [...], the device will not accept any more Write to Buffer commands".
1609            So we must check here and reset those bits if they're set. Otherwise
1610            we're just pissing in the wind */
1611         if (chip->state != FL_STATUS) {
1612                 map_write(map, CMD(0x70), cmd_adr);
1613                 chip->state = FL_STATUS;
1614         }
1615         status = map_read(map, cmd_adr);
1616         if (map_word_bitsset(map, status, CMD(0x30))) {
1617                 xip_enable(map, chip, cmd_adr);
1618                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1619                 xip_disable(map, chip, cmd_adr);
1620                 map_write(map, CMD(0x50), cmd_adr);
1621                 map_write(map, CMD(0x70), cmd_adr);
1622         }
1623
1624         chip->state = FL_WRITING_TO_BUFFER;
1625         map_write(map, write_cmd, cmd_adr);
1626         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1627         if (ret) {
1628                 /* Argh. Not ready for write to buffer */
1629                 map_word Xstatus = map_read(map, cmd_adr);
1630                 map_write(map, CMD(0x70), cmd_adr);
1631                 chip->state = FL_STATUS;
1632                 status = map_read(map, cmd_adr);
1633                 map_write(map, CMD(0x50), cmd_adr);
1634                 map_write(map, CMD(0x70), cmd_adr);
1635                 xip_enable(map, chip, cmd_adr);
1636                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1637                                 map->name, Xstatus.x[0], status.x[0]);
1638                 goto out;
1639         }
1640
1641         /* Figure out the number of words to write */
1642         word_gap = (-adr & (map_bankwidth(map)-1));
1643         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1644         if (!word_gap) {
1645                 words--;
1646         } else {
1647                 word_gap = map_bankwidth(map) - word_gap;
1648                 adr -= word_gap;
1649                 datum = map_word_ff(map);
1650         }
1651
1652         /* Write length of data to come */
1653         map_write(map, CMD(words), cmd_adr);
1654
1655         /* Write data */
1656         vec = *pvec;
1657         vec_seek = *pvec_seek;
1658         do {
1659                 int n = map_bankwidth(map) - word_gap;
1660                 if (n > vec->iov_len - vec_seek)
1661                         n = vec->iov_len - vec_seek;
1662                 if (n > len)
1663                         n = len;
1664
1665                 if (!word_gap && len < map_bankwidth(map))
1666                         datum = map_word_ff(map);
1667
1668                 datum = map_word_load_partial(map, datum,
1669                                               vec->iov_base + vec_seek,
1670                                               word_gap, n);
1671
1672                 len -= n;
1673                 word_gap += n;
1674                 if (!len || word_gap == map_bankwidth(map)) {
1675                         map_write(map, datum, adr);
1676                         adr += map_bankwidth(map);
1677                         word_gap = 0;
1678                 }
1679
1680                 vec_seek += n;
1681                 if (vec_seek == vec->iov_len) {
1682                         vec++;
1683                         vec_seek = 0;
1684                 }
1685         } while (len);
1686         *pvec = vec;
1687         *pvec_seek = vec_seek;
1688
1689         /* GO GO GO */
1690         map_write(map, CMD(0xd0), cmd_adr);
1691         chip->state = FL_WRITING;
1692
1693         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1694                                    initial_adr, initial_len,
1695                                    chip->buffer_write_time);
1696         if (ret) {
1697                 map_write(map, CMD(0x70), cmd_adr);
1698                 chip->state = FL_STATUS;
1699                 xip_enable(map, chip, cmd_adr);
1700                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1701                 goto out;
1702         }
1703
1704         /* check for errors */
1705         status = map_read(map, cmd_adr);
1706         if (map_word_bitsset(map, status, CMD(0x1a))) {
1707                 unsigned long chipstatus = MERGESTATUS(status);
1708
1709                 /* reset status */
1710                 map_write(map, CMD(0x50), cmd_adr);
1711                 map_write(map, CMD(0x70), cmd_adr);
1712                 xip_enable(map, chip, cmd_adr);
1713
1714                 if (chipstatus & 0x02) {
1715                         ret = -EROFS;
1716                 } else if (chipstatus & 0x08) {
1717                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1718                         ret = -EIO;
1719                 } else {
1720                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1721                         ret = -EINVAL;
1722                 }
1723
1724                 goto out;
1725         }
1726
1727         xip_enable(map, chip, cmd_adr);
1728  out:   put_chip(map, chip, cmd_adr);
1729         spin_unlock(chip->mutex);
1730         return ret;
1731 }
1732
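/*
 * Protocol sketch for the buffered write above (illustrative):
 *
 *	map_write(map, write_cmd, cmd_adr);	0xe8 (0xe9 for P_ID 0x0200)
 *	    ... poll until SR.7 reports the buffer is available ...
 *	map_write(map, CMD(words), cmd_adr);	word count, 0-based
 *	    ... one map_word data cycle per word, padded with 0xff ...
 *	map_write(map, CMD(0xd0), cmd_adr);	confirm: start programming
 *	    ... poll status, then check SR.1/SR.3/SR.4 for errors ...
 *
 * The chip expects the word count minus one, which is why the aligned
 * (!word_gap) branch above decrements "words".
 */
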
1733 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1734                                 unsigned long count, loff_t to, size_t *retlen)
1735 {
1736         struct map_info *map = mtd->priv;
1737         struct cfi_private *cfi = map->fldrv_priv;
1738         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1739         int ret = 0;
1740         int chipnum;
1741         unsigned long ofs, vec_seek, i;
1742         size_t len = 0;
1743
1744         for (i = 0; i < count; i++)
1745                 len += vecs[i].iov_len;
1746
1747         *retlen = 0;
1748         if (!len)
1749                 return 0;
1750
1751         chipnum = to >> cfi->chipshift;
1752         ofs = to - (chipnum << cfi->chipshift);
1753         vec_seek = 0;
1754
1755         do {
1756                 /* We must not cross write block boundaries */
1757                 int size = wbufsize - (ofs & (wbufsize-1));
1758
1759                 if (size > len)
1760                         size = len;
1761                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1762                                       ofs, &vecs, &vec_seek, size);
1763                 if (ret)
1764                         return ret;
1765
1766                 ofs += size;
1767                 (*retlen) += size;
1768                 len -= size;
1769
1770                 if (ofs >> cfi->chipshift) {
1771                         chipnum++;
1772                         ofs = 0;
1773                         if (chipnum == cfi->numchips)
1774                                 return 0;
1775                 }
1776
1777                 /* Be nice and reschedule with the chip in a usable state for other
1778                    processes. */
1779                 cond_resched();
1780
1781         } while (len);
1782
1783         return 0;
1784 }
1785
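/*
 * Worked example (illustrative): with wbufsize = 32, a 40-byte write at
 * to = 0x1c becomes three do_write_buffer() calls, so that no chunk
 * crosses a write-buffer boundary:
 *
 *	size = 32 - (0x1c & 31) = 4	bytes 0x1c..0x1f
 *	size = 32			bytes 0x20..0x3f
 *	size = 4			bytes 0x40..0x43
 *
 * vecs and vec_seek carry the iovec cursor across the calls, so
 * scattered source buffers are packed into the chip buffer seamlessly.
 */
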
1786 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1787                                        size_t len, size_t *retlen, const u_char *buf)
1788 {
1789         struct kvec vec;
1790
1791         vec.iov_base = (void *) buf;
1792         vec.iov_len = len;
1793
1794         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1795 }
1796
1797 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1798                                       unsigned long adr, int len, void *thunk)
1799 {
1800         struct cfi_private *cfi = map->fldrv_priv;
1801         map_word status;
1802         int retries = 3;
1803         int ret;
1804
1805         adr += chip->start;
1806
1807  retry:
1808         spin_lock(chip->mutex);
1809         ret = get_chip(map, chip, adr, FL_ERASING);
1810         if (ret) {
1811                 spin_unlock(chip->mutex);
1812                 return ret;
1813         }
1814
1815         XIP_INVAL_CACHED_RANGE(map, adr, len);
1816         ENABLE_VPP(map);
1817         xip_disable(map, chip, adr);
1818
1819         /* Clear the status register first */
1820         map_write(map, CMD(0x50), adr);
1821
1822         /* Now erase */
1823         map_write(map, CMD(0x20), adr);
1824         map_write(map, CMD(0xD0), adr);
1825         chip->state = FL_ERASING;
1826         chip->erase_suspended = 0;
1827
1828         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1829                                    adr, len,
1830                                    chip->erase_time);
1831         if (ret) {
1832                 map_write(map, CMD(0x70), adr);
1833                 chip->state = FL_STATUS;
1834                 xip_enable(map, chip, adr);
1835                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1836                 goto out;
1837         }
1838
1839         /* We've broken this before. It doesn't hurt to be safe */
1840         map_write(map, CMD(0x70), adr);
1841         chip->state = FL_STATUS;
1842         status = map_read(map, adr);
1843
1844         /* check for errors */
1845         if (map_word_bitsset(map, status, CMD(0x3a))) {
1846                 unsigned long chipstatus = MERGESTATUS(status);
1847
1848                 /* Reset the error bits */
1849                 map_write(map, CMD(0x50), adr);
1850                 map_write(map, CMD(0x70), adr);
1851                 xip_enable(map, chip, adr);
1852
1853                 if ((chipstatus & 0x30) == 0x30) {
1854                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1855                         ret = -EINVAL;
1856                 } else if (chipstatus & 0x02) {
1857                         /* Protection bit set */
1858                         ret = -EROFS;
1859                 } else if (chipstatus & 0x8) {
1860                         /* Voltage */
1861                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1862                         ret = -EIO;
1863                 } else if (chipstatus & 0x20 && retries--) {
1864                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1865                         put_chip(map, chip, adr);
1866                         spin_unlock(chip->mutex);
1867                         goto retry;
1868                 } else {
1869                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1870                         ret = -EIO;
1871                 }
1872
1873                 goto out;
1874         }
1875
1876         xip_enable(map, chip, adr);
1877  out:   put_chip(map, chip, adr);
1878         spin_unlock(chip->mutex);
1879         return ret;
1880 }
1881
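/*
 * Status decode used by the erase path above (sketch; SR bit names from
 * the Intel command-set status register):
 *
 *	SR.1 (0x02)	block locked		-> -EROFS
 *	SR.3 (0x08)	VPP low			-> -EIO
 *	SR.4|SR.5 (0x30 both set)		-> bad command sequence, -EINVAL
 *	SR.5 (0x20)	erase failed		-> retried up to 3 times, then -EIO
 *
 * MERGESTATUS() folds the per-chip status words of an interleaved bus
 * into a single value, so the checks work for any interleave.
 */
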
1882 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1883 {
1884         unsigned long ofs, len;
1885         int ret;
1886
1887         ofs = instr->addr;
1888         len = instr->len;
1889
1890         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1891         if (ret)
1892                 return ret;
1893
1894         instr->state = MTD_ERASE_DONE;
1895         mtd_erase_callback(instr);
1896
1897         return 0;
1898 }
1899
1900 static void cfi_intelext_sync (struct mtd_info *mtd)
1901 {
1902         struct map_info *map = mtd->priv;
1903         struct cfi_private *cfi = map->fldrv_priv;
1904         int i;
1905         struct flchip *chip;
1906         int ret = 0;
1907
1908         for (i=0; !ret && i<cfi->numchips; i++) {
1909                 chip = &cfi->chips[i];
1910
1911                 spin_lock(chip->mutex);
1912                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1913
1914                 if (!ret) {
1915                         chip->oldstate = chip->state;
1916                         chip->state = FL_SYNCING;
1917                         /* No need to wake_up() on this state change -
1918                          * as the whole point is that nobody can do anything
1919                          * with the chip now anyway.
1920                          */
1921                 }
1922                 spin_unlock(chip->mutex);
1923         }
1924
1925         /* Unlock the chips again */
1926
1927         for (i--; i >= 0; i--) {
1928                 chip = &cfi->chips[i];
1929
1930                 spin_lock(chip->mutex);
1931
1932                 if (chip->state == FL_SYNCING) {
1933                         chip->state = chip->oldstate;
1934                         chip->oldstate = FL_READY;
1935                         wake_up(&chip->wq);
1936                 }
1937                 spin_unlock(chip->mutex);
1938         }
1939 }
1940
1941 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1942                                                 struct flchip *chip,
1943                                                 unsigned long adr,
1944                                                 int len, void *thunk)
1945 {
1946         struct cfi_private *cfi = map->fldrv_priv;
1947         int status, ofs_factor = cfi->interleave * cfi->device_type;
1948
1949         adr += chip->start;
1950         xip_disable(map, chip, adr+(2*ofs_factor));
1951         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1952         chip->state = FL_JEDEC_QUERY;
1953         status = cfi_read_query(map, adr+(2*ofs_factor));
1954         xip_enable(map, chip, 0);
1955         return status;
1956 }
1957
1958 #ifdef DEBUG_LOCK_BITS
1959 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1960                                                 struct flchip *chip,
1961                                                 unsigned long adr,
1962                                                 int len, void *thunk)
1963 {
1964         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1965                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1966         return 0;
1967 }
1968 #endif
1969
1970 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1971 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1972
1973 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1974                                        unsigned long adr, int len, void *thunk)
1975 {
1976         struct cfi_private *cfi = map->fldrv_priv;
1977         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1978         int udelay;
1979         int ret;
1980
1981         adr += chip->start;
1982
1983         spin_lock(chip->mutex);
1984         ret = get_chip(map, chip, adr, FL_LOCKING);
1985         if (ret) {
1986                 spin_unlock(chip->mutex);
1987                 return ret;
1988         }
1989
1990         ENABLE_VPP(map);
1991         xip_disable(map, chip, adr);
1992
1993         map_write(map, CMD(0x60), adr);
1994         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1995                 map_write(map, CMD(0x01), adr);
1996                 chip->state = FL_LOCKING;
1997         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1998                 map_write(map, CMD(0xD0), adr);
1999                 chip->state = FL_UNLOCKING;
2000         } else
2001                 BUG();
2002
2003         /*
2004          * If Instant Individual Block Locking is supported,
2005          * there is no need to delay.
2006          */
2007         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2008
2009         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
2010         if (ret) {
2011                 map_write(map, CMD(0x70), adr);
2012                 chip->state = FL_STATUS;
2013                 xip_enable(map, chip, adr);
2014                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2015                 goto out;
2016         }
2017
2018         xip_enable(map, chip, adr);
2019 out:    put_chip(map, chip, adr);
2020         spin_unlock(chip->mutex);
2021         return ret;
2022 }
2023
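/*
 * Command sketch for the block lock operations above (illustrative):
 * both share the 0x60 setup cycle and differ only in the confirm cycle:
 *
 *	map_write(map, CMD(0x60), adr);		setup
 *	map_write(map, CMD(0x01), adr);		confirm: set lock bit
 *
 *	map_write(map, CMD(0x60), adr);		setup
 *	map_write(map, CMD(0xD0), adr);		confirm: clear lock bit
 *
 * Chips advertising Instant Individual Block Locking (FeatureSupport
 * bit 5) complete either operation immediately, hence the zero delay.
 */
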
2024 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2025 {
2026         int ret;
2027
2028 #ifdef DEBUG_LOCK_BITS
2029         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2030                __func__, ofs, len);
2031         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2032                 ofs, len, NULL);
2033 #endif
2034
2035         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2036                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2037
2038 #ifdef DEBUG_LOCK_BITS
2039         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2040                __func__, ret);
2041         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2042                 ofs, len, NULL);
2043 #endif
2044
2045         return ret;
2046 }
2047
2048 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2049 {
2050         int ret;
2051
2052 #ifdef DEBUG_LOCK_BITS
2053         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2054                __func__, ofs, len);
2055         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2056                 ofs, len, NULL);
2057 #endif
2058
2059         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2060                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2061
2062 #ifdef DEBUG_LOCK_BITS
2063         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2064                __func__, ret);
2065         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2066                 ofs, len, NULL);
2067 #endif
2068
2069         return ret;
2070 }
2071
2072 #ifdef CONFIG_MTD_OTP
2073
2074 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2075                         u_long data_offset, u_char *buf, u_int size,
2076                         u_long prot_offset, u_int groupno, u_int groupsize);
2077
2078 static int __xipram
2079 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2080             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2081 {
2082         struct cfi_private *cfi = map->fldrv_priv;
2083         int ret;
2084
2085         spin_lock(chip->mutex);
2086         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2087         if (ret) {
2088                 spin_unlock(chip->mutex);
2089                 return ret;
2090         }
2091
2092         /* let's ensure we're not reading back cached data from array mode */
2093         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2094
2095         xip_disable(map, chip, chip->start);
2096         if (chip->state != FL_JEDEC_QUERY) {
2097                 map_write(map, CMD(0x90), chip->start);
2098                 chip->state = FL_JEDEC_QUERY;
2099         }
2100         map_copy_from(map, buf, chip->start + offset, size);
2101         xip_enable(map, chip, chip->start);
2102
2103         /* then ensure we don't keep OTP data in the cache */
2104         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2105
2106         put_chip(map, chip, chip->start);
2107         spin_unlock(chip->mutex);
2108         return 0;
2109 }
2110
2111 static int
2112 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2113              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2114 {
2115         int ret;
2116
2117         while (size) {
2118                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2119                 int gap = offset - bus_ofs;
2120                 int n = min_t(int, size, map_bankwidth(map)-gap);
2121                 map_word datum = map_word_ff(map);
2122
2123                 datum = map_word_load_partial(map, datum, buf, gap, n);
2124                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2125                 if (ret)
2126                         return ret;
2127
2128                 offset += n;
2129                 buf += n;
2130                 size -= n;
2131         }
2132
2133         return 0;
2134 }
2135
2136 static int
2137 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2138             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2139 {
2140         struct cfi_private *cfi = map->fldrv_priv;
2141         map_word datum;
2142
2143         /* make sure area matches group boundaries */
2144         if (size != grpsz)
2145                 return -EXDEV;
2146
2147         datum = map_word_ff(map);
2148         datum = map_word_clr(map, datum, CMD(1 << grpno));
2149         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2150 }
2151
2152 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2153                                  size_t *retlen, u_char *buf,
2154                                  otp_op_t action, int user_regs)
2155 {
2156         struct map_info *map = mtd->priv;
2157         struct cfi_private *cfi = map->fldrv_priv;
2158         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2159         struct flchip *chip;
2160         struct cfi_intelext_otpinfo *otp;
2161         u_long devsize, reg_prot_offset, data_offset;
2162         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2163         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2164         int ret;
2165
2166         *retlen = 0;
2167
2168         /* Check that we actually have some OTP registers */
2169         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2170                 return -ENODATA;
2171
2172         /* we need real chips here, not virtual ones */
2173         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2174         chip_step = devsize >> cfi->chipshift;
2175         chip_num = 0;
2176
2177         /* Some chips have OTP located in the _top_ partition only.
2178            For example: Intel 28F256L18T (T means top-parameter device) */
2179         if (cfi->mfr == MANUFACTURER_INTEL) {
2180                 switch (cfi->id) {
2181                 case 0x880b:
2182                 case 0x880c:
2183                 case 0x880d:
2184                         chip_num = chip_step - 1;
2185                 }
2186         }
2187
2188         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2189                 chip = &cfi->chips[chip_num];
2190                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2191
2192                 /* first OTP region */
2193                 field = 0;
2194                 reg_prot_offset = extp->ProtRegAddr;
2195                 reg_fact_groups = 1;
2196                 reg_fact_size = 1 << extp->FactProtRegSize;
2197                 reg_user_groups = 1;
2198                 reg_user_size = 1 << extp->UserProtRegSize;
2199
2200                 while (len > 0) {
2201                         /* flash geometry fixup */
2202                         data_offset = reg_prot_offset + 1;
2203                         data_offset *= cfi->interleave * cfi->device_type;
2204                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2205                         reg_fact_size *= cfi->interleave;
2206                         reg_user_size *= cfi->interleave;
2207
2208                         if (user_regs) {
2209                                 groups = reg_user_groups;
2210                                 groupsize = reg_user_size;
2211                                 /* skip over factory reg area */
2212                                 groupno = reg_fact_groups;
2213                                 data_offset += reg_fact_groups * reg_fact_size;
2214                         } else {
2215                                 groups = reg_fact_groups;
2216                                 groupsize = reg_fact_size;
2217                                 groupno = 0;
2218                         }
2219
2220                         while (len > 0 && groups > 0) {
2221                                 if (!action) {
2222                                         /*
2223                                          * Special case: if action is NULL
2224                                          * we fill buf with otp_info records.
2225                                          */
2226                                         struct otp_info *otpinfo;
2227                                         map_word lockword;
2228                                         if (len < sizeof(struct otp_info))
2229                                                 return -ENOSPC;
2230                                         len -= sizeof(struct otp_info);
2231                                         ret = do_otp_read(map, chip,
2232                                                           reg_prot_offset,
2233                                                           (u_char *)&lockword,
2234                                                           map_bankwidth(map),
2235                                                           0, 0, 0);
2236                                         if (ret)
2237                                                 return ret;
2238                                         otpinfo = (struct otp_info *)buf;
2239                                         otpinfo->start = from;
2240                                         otpinfo->length = groupsize;
2241                                         otpinfo->locked =
2242                                            !map_word_bitsset(map, lockword,
2243                                                              CMD(1 << groupno));
2244                                         from += groupsize;
2245                                         buf += sizeof(*otpinfo);
2246                                         *retlen += sizeof(*otpinfo);
2247                                 } else if (from >= groupsize) {
2248                                         from -= groupsize;
2249                                         data_offset += groupsize;
2250                                 } else {
2251                                         int size = groupsize;
2252                                         data_offset += from;
2253                                         size -= from;
2254                                         from = 0;
2255                                         if (size > len)
2256                                                 size = len;
2257                                         ret = action(map, chip, data_offset,
2258                                                      buf, size, reg_prot_offset,
2259                                                      groupno, groupsize);
2260                                         if (ret < 0)
2261                                                 return ret;
2262                                         buf += size;
2263                                         len -= size;
2264                                         *retlen += size;
2265                                         data_offset += size;
2266                                 }
2267                                 groupno++;
2268                                 groups--;
2269                         }
2270
2271                         /* next OTP region */
2272                         if (++field == extp->NumProtectionFields)
2273                                 break;
2274                         reg_prot_offset = otp->ProtRegAddr;
2275                         reg_fact_groups = otp->FactGroups;
2276                         reg_fact_size = 1 << otp->FactProtRegSize;
2277                         reg_user_groups = otp->UserGroups;
2278                         reg_user_size = 1 << otp->UserProtRegSize;
2279                         otp++;
2280                 }
2281         }
2282
2283         return 0;
2284 }
2285
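/*
 * Worked example (illustrative, register address assumed) of the
 * geometry fixup above: on a single x16 chip (interleave = 1,
 * device_type = 2) with ProtRegAddr = 0x80, the lock word sits at byte
 * offset
 *
 *	reg_prot_offset = 0x80 * (1 * 2)	= 0x100
 *
 * and the protection register data starts just after it at
 *
 *	data_offset = (0x80 + 1) * (1 * 2)	= 0x102
 *
 * With interleave = 2 the offsets and group sizes double again, since
 * each chip on the bus contributes its own copy of the register file.
 */
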
2286 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2287                                            size_t len, size_t *retlen,
2288                                            u_char *buf)
2289 {
2290         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2291                                      buf, do_otp_read, 0);
2292 }
2293
2294 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2295                                            size_t len, size_t *retlen,
2296                                            u_char *buf)
2297 {
2298         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2299                                      buf, do_otp_read, 1);
2300 }
2301
2302 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2303                                             size_t len, size_t *retlen,
2304                                             u_char *buf)
2305 {
2306         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2307                                      buf, do_otp_write, 1);
2308 }
2309
2310 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2311                                            loff_t from, size_t len)
2312 {
2313         size_t retlen;
2314         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2315                                      NULL, do_otp_lock, 1);
2316 }
2317
2318 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2319                                            struct otp_info *buf, size_t len)
2320 {
2321         size_t retlen;
2322         int ret;
2323
2324         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2325         return ret ? : retlen;
2326 }
2327
2328 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2329                                            struct otp_info *buf, size_t len)
2330 {
2331         size_t retlen;
2332         int ret;
2333
2334         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2335         return ret ? : retlen;
2336 }
2337
2338 #endif
2339
2340 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2341 {
2342         struct mtd_erase_region_info *region;
2343         int block, status, i;
2344         unsigned long adr;
2345         size_t len;
2346
2347         for (i = 0; i < mtd->numeraseregions; i++) {
2348                 region = &mtd->eraseregions[i];
2349                 if (!region->lockmap)
2350                         continue;
2351
2352                 for (block = 0; block < region->numblocks; block++) {
2353                         len = region->erasesize;
2354                         adr = region->offset + block * len;
2355
2356                         status = cfi_varsize_frob(mtd,
2357                                         do_getlockstatus_oneblock, adr, len, NULL);
2358                         if (status)
2359                                 set_bit(block, region->lockmap);
2360                         else
2361                                 clear_bit(block, region->lockmap);
2362                 }
2363         }
2364 }
2365
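/*
 * Round-trip sketch (illustrative): cfi_intelext_save_locks() records
 * one bit per erase block in region->lockmap via set_bit()/clear_bit()
 * before suspend; cfi_intelext_restore_locks() below walks the same
 * bitmap on resume and unlocks exactly those blocks that were unlocked
 * before. This matters for MTD_POWERUP_LOCK parts, which come out of a
 * power cycle with every block locked.
 */
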
2366 static int cfi_intelext_suspend(struct mtd_info *mtd)
2367 {
2368         struct map_info *map = mtd->priv;
2369         struct cfi_private *cfi = map->fldrv_priv;
2370         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2371         int i;
2372         struct flchip *chip;
2373         int ret = 0;
2374
2375         if ((mtd->flags & MTD_POWERUP_LOCK)
2376             && extp && (extp->FeatureSupport & (1 << 5)))
2377                 cfi_intelext_save_locks(mtd);
2378
2379         for (i=0; !ret && i<cfi->numchips; i++) {
2380                 chip = &cfi->chips[i];
2381
2382                 spin_lock(chip->mutex);
2383
2384                 switch (chip->state) {
2385                 case FL_READY:
2386                 case FL_STATUS:
2387                 case FL_CFI_QUERY:
2388                 case FL_JEDEC_QUERY:
2389                         if (chip->oldstate == FL_READY) {
2390                                 /* place the chip in a known state before suspend */
2391                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2392                                 chip->oldstate = chip->state;
2393                                 chip->state = FL_PM_SUSPENDED;
2394                                 /* No need to wake_up() on this state change -
2395                                  * as the whole point is that nobody can do anything
2396                                  * with the chip now anyway.
2397                                  */
2398                         } else {
2399                                 /* There seems to be an operation pending. We must wait for it. */
2400                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2401                                 ret = -EAGAIN;
2402                         }
2403                         break;
2404                 default:
2405                         /* Should we actually wait? Once upon a time these routines weren't
2406                            allowed to. Or should we return -EAGAIN, because the upper layers
2407                            ought to have already shut down anything which was using the device
2408                            anyway? The latter for now. */
2409                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2410                         ret = -EAGAIN;
2411                 case FL_PM_SUSPENDED:
2412                         break;
2413                 }
2414                 spin_unlock(chip->mutex);
2415         }
2416
2417         /* Unlock the chips again */
2418
2419         if (ret) {
2420                 for (i--; i >= 0; i--) {
2421                         chip = &cfi->chips[i];
2422
2423                         spin_lock(chip->mutex);
2424
2425                         if (chip->state == FL_PM_SUSPENDED) {
2426                                 /* No need to force it into a known state here,
2427                                    because we're returning failure, and it didn't
2428                                    get power cycled */
2429                                 chip->state = chip->oldstate;
2430                                 chip->oldstate = FL_READY;
2431                                 wake_up(&chip->wq);
2432                         }
2433                         spin_unlock(chip->mutex);
2434                 }
2435         }
2436
2437         return ret;
2438 }
2439
2440 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2441 {
2442         struct mtd_erase_region_info *region;
2443         int block, i;
2444         unsigned long adr;
2445         size_t len;
2446
2447         for (i = 0; i < mtd->numeraseregions; i++) {
2448                 region = &mtd->eraseregions[i];
2449                 if (!region->lockmap)
2450                         continue;
2451
2452                 for (block = 0; block < region->numblocks; block++) {
2453                         len = region->erasesize;
2454                         adr = region->offset + block * len;
2455
2456                         if (!test_bit(block, region->lockmap))
2457                                 cfi_intelext_unlock(mtd, adr, len);
2458                 }
2459         }
2460 }
2461
2462 static void cfi_intelext_resume(struct mtd_info *mtd)
2463 {
2464         struct map_info *map = mtd->priv;
2465         struct cfi_private *cfi = map->fldrv_priv;
2466         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2467         int i;
2468         struct flchip *chip;
2469
2470         for (i=0; i<cfi->numchips; i++) {
2471
2472                 chip = &cfi->chips[i];
2473
2474                 spin_lock(chip->mutex);
2475
2476                 /* Go to a known state. The chip may have been power cycled */
2477                 if (chip->state == FL_PM_SUSPENDED) {
2478                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2479                         chip->oldstate = chip->state = FL_READY;
2480                         wake_up(&chip->wq);
2481                 }
2482
2483                 spin_unlock(chip->mutex);
2484         }
2485
2486         if ((mtd->flags & MTD_POWERUP_LOCK)
2487             && extp && (extp->FeatureSupport & (1 << 5)))
2488                 cfi_intelext_restore_locks(mtd);
2489 }
2490
2491 static int cfi_intelext_reset(struct mtd_info *mtd)
2492 {
2493         struct map_info *map = mtd->priv;
2494         struct cfi_private *cfi = map->fldrv_priv;
2495         int i, ret;
2496
2497         for (i=0; i < cfi->numchips; i++) {
2498                 struct flchip *chip = &cfi->chips[i];
2499
2500                 /* force the completion of any ongoing operation
2501                    and switch to array mode so any bootloader in
2502                    flash is accessible for soft reboot. */
2503                 spin_lock(chip->mutex);
2504                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2505                 if (!ret) {
2506                         map_write(map, CMD(0xff), chip->start);
2507                         chip->state = FL_SHUTDOWN;
2508                 }
2509                 spin_unlock(chip->mutex);
2510         }
2511
2512         return 0;
2513 }
2514
2515 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2516                                void *v)
2517 {
2518         struct mtd_info *mtd;
2519
2520         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2521         cfi_intelext_reset(mtd);
2522         return NOTIFY_DONE;
2523 }
2524
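/*
 * Usage note (sketch): mtd->reboot_notifier is pointed at
 * cfi_intelext_reboot() when the chip is set up, and the matching
 * unregister_reboot_notifier() call is made in cfi_intelext_destroy()
 * below, so a soft reboot always finds the flash back in array mode
 * where a resident bootloader can execute from it.
 */
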
2525 static void cfi_intelext_destroy(struct mtd_info *mtd)
2526 {
2527         struct map_info *map = mtd->priv;
2528         struct cfi_private *cfi = map->fldrv_priv;
2529         struct mtd_erase_region_info *region;
2530         int i;
2531         cfi_intelext_reset(mtd);
2532         unregister_reboot_notifier(&mtd->reboot_notifier);
2533         kfree(cfi->cmdset_priv);
2534         kfree(cfi->cfiq);
2535         kfree(cfi->chips[0].priv);
2536         kfree(cfi);
2537         for (i = 0; i < mtd->numeraseregions; i++) {
2538                 region = &mtd->eraseregions[i];
2539                 /* kfree(NULL) is a no-op, so no check is needed */
2540                 kfree(region->lockmap);
2541         }
2542         kfree(mtd->eraseregions);
2543 }
2544
2545 MODULE_LICENSE("GPL");
2546 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2547 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2548 MODULE_ALIAS("cfi_cmdset_0003");
2549 MODULE_ALIAS("cfi_cmdset_0200");