/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
#define AT49BV640D      0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

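/*
 * How the tables below are consumed (a summary of the generic cfi_fixup()
 * helper in the CFI layer, noted here for orientation): each entry's fixup
 * method is invoked for every probed chip whose manufacturer and device IDs
 * match, with CFI_MFR_ANY and CFI_ID_ANY acting as wildcards.
 */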
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table picks all the cases where we
         * know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

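        /*
         * The extended query table is variable-length: read the fixed-size
         * header first, then, whenever the version >= 1.3 parsing below
         * finds that the data extends past the buffer, grow extp_size and
         * re-read the whole table (see the need_more/again labels).
         */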
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

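        /*
         * The CFI query encodes timeouts as powers of two: typical word
         * write time is 2^WordWriteTimeoutTyp microseconds and typical
         * block erase time is 2^BlockEraseTimeoutTyp milliseconds, while
         * the worst case is the typical value shifted left by the matching
         * Max field.  Hence the 1<<... and 1000<<... expressions below
         * (everything kept in microseconds); e.g. Typ=4, Max=4 gives a
         * 16 us typical and 256 us maximum word write time.
         */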
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
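
/*
 * Command set IDs 0x0003 and 0x0200 are handled identically to 0x0001 here,
 * hence the aliases below.
 */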
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        /* printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips); */

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
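                /*
                 * Worked example (illustrative): a 64 MiB chip has
                 * chipshift 26, so with four identical hardware partitions
                 * partshift = 26 - __ffs(4) = 24, i.e. four virtual chips
                 * of 16 MiB each.
                 */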

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. There is a possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
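                /*
                 * Illustrative scenario: if another partition of the same
                 * physical chip is mid-erase when we want to write, the
                 * contender logic below calls chip_ready() on that
                 * partition's flchip, suspending its erase in its own
                 * context before we are allowed to proceed.
                 */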
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to the chip we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
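/*
 * (In particular, this is why xip_wait_for_operation() below busy-polls
 * with interrupts masked and only unmasks them once the chip has been put
 * back into array mode.)
 */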

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time, reset_timeo;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        timeo = chip_op_time_max;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK. Still waiting. Drop the lock, wait a while and retry. */
1212                 spin_unlock(chip->mutex);
1213                 if (sleep_time >= 1000000/HZ) {
1214                         /*
1215                          * Half of the typical operation time still
1216                          * remaining can be spent in a sleeping delay
1217                          * instead of busy-waiting.
1218                          */
1219                         msleep(sleep_time/1000);
1220                         timeo -= sleep_time;
1221                         sleep_time = 1000000/HZ;
1222                 } else {
1223                         udelay(1);
1224                         cond_resched();
1225                         timeo--;
1226                 }
1227                 spin_lock(chip->mutex);
1228
1229                 while (chip->state != chip_state) {
1230                         /* Someone's suspended the operation: sleep */
1231                         DECLARE_WAITQUEUE(wait, current);
1232                         set_current_state(TASK_UNINTERRUPTIBLE);
1233                         add_wait_queue(&chip->wq, &wait);
1234                         spin_unlock(chip->mutex);
1235                         schedule();
1236                         remove_wait_queue(&chip->wq, &wait);
1237                         spin_lock(chip->mutex);
1238                 }
1239                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1240                         /* Erase suspend occurred while we slept: reset the timeout */
1241                         timeo = reset_timeo;
1242                         chip->erase_suspended = 0;
1243                 }
1244                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1245                         /* Write suspend occurred while we slept: reset the timeout */
1246                         timeo = reset_timeo;
1247                         chip->write_suspended = 0;
1248                 }
1249         }
1250
1251         /* Done and happy. */
1252         chip->state = FL_STATUS;
1253         return 0;
1254 }
1255
1256 #endif
1257
1258 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1259         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1260
1261
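/*
 * MTD point/unpoint support: hand out direct pointers into the mapped
 * flash for reads.  The chip must be idle and in read-array mode (hence
 * the 0xff command below), and ref_point_counter tracks nested point
 * calls so the chip only leaves FL_POINT when the last user unpoints.
 */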
1262 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1263 {
1264         unsigned long cmd_addr;
1265         struct cfi_private *cfi = map->fldrv_priv;
1266         int ret = 0;
1267
1268         adr += chip->start;
1269
1270         /* Ensure cmd read/writes are aligned. */
1271         cmd_addr = adr & ~(map_bankwidth(map)-1);
1272
1273         spin_lock(chip->mutex);
1274
1275         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1276
1277         if (!ret) {
1278                 if (chip->state != FL_POINT && chip->state != FL_READY)
1279                         map_write(map, CMD(0xff), cmd_addr);
1280
1281                 chip->state = FL_POINT;
1282                 chip->ref_point_counter++;
1283         }
1284         spin_unlock(chip->mutex);
1285
1286         return ret;
1287 }
1288
1289 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1290                 size_t *retlen, void **virt, resource_size_t *phys)
1291 {
1292         struct map_info *map = mtd->priv;
1293         struct cfi_private *cfi = map->fldrv_priv;
1294         unsigned long ofs, last_end = 0;
1295         int chipnum;
1296         int ret = 0;
1297
1298         if (!map->virt || (from + len > mtd->size))
1299                 return -EINVAL;
1300
1301         /* Now lock the chip(s) to POINT state */
1302
1303         /* ofs: offset within the first chip at which the first read should start */
1304         chipnum = (from >> cfi->chipshift);
1305         ofs = from - (chipnum << cfi->chipshift);
1306
1307         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1308         *retlen = 0;
1309         if (phys)
1310                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1311
1312         while (len) {
1313                 unsigned long thislen;
1314
1315                 if (chipnum >= cfi->numchips)
1316                         break;
1317
1318                 /* We cannot point across chips that are virtually disjoint */
1319                 if (!last_end)
1320                         last_end = cfi->chips[chipnum].start;
1321                 else if (cfi->chips[chipnum].start != last_end)
1322                         break;
1323
1324                 if ((len + ofs -1) >> cfi->chipshift)
1325                         thislen = (1<<cfi->chipshift) - ofs;
1326                 else
1327                         thislen = len;
1328
1329                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1330                 if (ret)
1331                         break;
1332
1333                 *retlen += thislen;
1334                 len -= thislen;
1335
1336                 ofs = 0;
1337                 last_end += 1 << cfi->chipshift;
1338                 chipnum++;
1339         }
1340         return ret;
1341 }
1342
1343 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1344 {
1345         struct map_info *map = mtd->priv;
1346         struct cfi_private *cfi = map->fldrv_priv;
1347         unsigned long ofs;
1348         int chipnum;
1349
1350         /* Now take the chip(s) out of POINT state */
1351
1352         /* ofs: offset within the first chip at which the first read should start */
1353         chipnum = (from >> cfi->chipshift);
1354         ofs = from - (chipnum <<  cfi->chipshift);
1355
1356         while (len) {
1357                 unsigned long thislen;
1358                 struct flchip *chip;
1359
1360                 if (chipnum >= cfi->numchips)
1361                         break;
1362                 chip = &cfi->chips[chipnum];
1363
1364                 if ((len + ofs -1) >> cfi->chipshift)
1365                         thislen = (1<<cfi->chipshift) - ofs;
1366                 else
1367                         thislen = len;
1368
1369                 spin_lock(chip->mutex);
1370                 if (chip->state == FL_POINT) {
1371                         chip->ref_point_counter--;
1372                         if (chip->ref_point_counter == 0)
1373                                 chip->state = FL_READY;
1374                 } else
1375                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1376
1377                 put_chip(map, chip, chip->start);
1378                 spin_unlock(chip->mutex);
1379
1380                 len -= thislen;
1381                 ofs = 0;
1382                 chipnum++;
1383         }
1384 }
1385
1386 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1387 {
1388         unsigned long cmd_addr;
1389         struct cfi_private *cfi = map->fldrv_priv;
1390         int ret;
1391
1392         adr += chip->start;
1393
1394         /* Ensure cmd read/writes are aligned. */
1395         cmd_addr = adr & ~(map_bankwidth(map)-1);
1396
1397         spin_lock(chip->mutex);
1398         ret = get_chip(map, chip, cmd_addr, FL_READY);
1399         if (ret) {
1400                 spin_unlock(chip->mutex);
1401                 return ret;
1402         }
1403
1404         if (chip->state != FL_POINT && chip->state != FL_READY) {
1405                 map_write(map, CMD(0xff), cmd_addr);
1406
1407                 chip->state = FL_READY;
1408         }
1409
1410         map_copy_from(map, buf, adr, len);
1411
1412         put_chip(map, chip, cmd_addr);
1413
1414         spin_unlock(chip->mutex);
1415         return 0;
1416 }
1417
1418 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1419 {
1420         struct map_info *map = mtd->priv;
1421         struct cfi_private *cfi = map->fldrv_priv;
1422         unsigned long ofs;
1423         int chipnum;
1424         int ret = 0;
1425
1426         /* ofs: offset within the first chip at which the first read should start */
1427         chipnum = (from >> cfi->chipshift);
1428         ofs = from - (chipnum <<  cfi->chipshift);
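        /*
         * Worked example (illustrative values only): with 8MiB chips,
         * cfi->chipshift is 23, so from = 0x900000 gives chipnum = 1
         * and ofs = 0x100000 within that chip.
         */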
1429
1430         *retlen = 0;
1431
1432         while (len) {
1433                 unsigned long thislen;
1434
1435                 if (chipnum >= cfi->numchips)
1436                         break;
1437
1438                 if ((len + ofs -1) >> cfi->chipshift)
1439                         thislen = (1<<cfi->chipshift) - ofs;
1440                 else
1441                         thislen = len;
1442
1443                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1444                 if (ret)
1445                         break;
1446
1447                 *retlen += thislen;
1448                 len -= thislen;
1449                 buf += thislen;
1450
1451                 ofs = 0;
1452                 chipnum++;
1453         }
1454         return ret;
1455 }
1456
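/*
 * Program a single bus word.  The command depends on the mode: 0x40
 * (or 0x41 for 0x0200-family command sets) for a normal word program,
 * 0xc0 for a protection register (OTP) program.  On completion the
 * status register is checked: SR.1 (0x02) means the block was locked,
 * SR.3 (0x08) means VPP was invalid, and anything else is reported as
 * a generic programming failure.
 */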
1457 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1458                                      unsigned long adr, map_word datum, int mode)
1459 {
1460         struct cfi_private *cfi = map->fldrv_priv;
1461         map_word status, write_cmd;
1462         int ret=0;
1463
1464         adr += chip->start;
1465
1466         switch (mode) {
1467         case FL_WRITING:
1468                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1469                 break;
1470         case FL_OTP_WRITE:
1471                 write_cmd = CMD(0xc0);
1472                 break;
1473         default:
1474                 return -EINVAL;
1475         }
1476
1477         spin_lock(chip->mutex);
1478         ret = get_chip(map, chip, adr, mode);
1479         if (ret) {
1480                 spin_unlock(chip->mutex);
1481                 return ret;
1482         }
1483
1484         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1485         ENABLE_VPP(map);
1486         xip_disable(map, chip, adr);
1487         map_write(map, write_cmd, adr);
1488         map_write(map, datum, adr);
1489         chip->state = mode;
1490
1491         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1492                                    adr, map_bankwidth(map),
1493                                    chip->word_write_time,
1494                                    chip->word_write_time_max);
1495         if (ret) {
1496                 xip_enable(map, chip, adr);
1497                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1498                 goto out;
1499         }
1500
1501         /* check for errors */
1502         status = map_read(map, adr);
1503         if (map_word_bitsset(map, status, CMD(0x1a))) {
1504                 unsigned long chipstatus = MERGESTATUS(status);
1505
1506                 /* reset status */
1507                 map_write(map, CMD(0x50), adr);
1508                 map_write(map, CMD(0x70), adr);
1509                 xip_enable(map, chip, adr);
1510
1511                 if (chipstatus & 0x02) {
1512                         ret = -EROFS;
1513                 } else if (chipstatus & 0x08) {
1514                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1515                         ret = -EIO;
1516                 } else {
1517                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1518                         ret = -EINVAL;
1519                 }
1520
1521                 goto out;
1522         }
1523
1524         xip_enable(map, chip, adr);
1525  out:   put_chip(map, chip, adr);
1526         spin_unlock(chip->mutex);
1527         return ret;
1528 }
1529
1530
1531 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1532 {
1533         struct map_info *map = mtd->priv;
1534         struct cfi_private *cfi = map->fldrv_priv;
1535         int ret = 0;
1536         int chipnum;
1537         unsigned long ofs;
1538
1539         *retlen = 0;
1540         if (!len)
1541                 return 0;
1542
1543         chipnum = to >> cfi->chipshift;
1544         ofs = to  - (chipnum << cfi->chipshift);
1545
1546         /* If it's not bus-aligned, do a partial word write first */
1547         if (ofs & (map_bankwidth(map)-1)) {
1548                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1549                 int gap = ofs - bus_ofs;
1550                 int n;
1551                 map_word datum;
1552
1553                 n = min_t(int, len, map_bankwidth(map)-gap);
1554                 datum = map_word_ff(map);
1555                 datum = map_word_load_partial(map, datum, buf, gap, n);
1556
1557                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1558                                                bus_ofs, datum, FL_WRITING);
1559                 if (ret)
1560                         return ret;
1561
1562                 len -= n;
1563                 ofs += n;
1564                 buf += n;
1565                 (*retlen) += n;
1566
1567                 if (ofs >> cfi->chipshift) {
1568                         chipnum ++;
1569                         ofs = 0;
1570                         if (chipnum == cfi->numchips)
1571                                 return 0;
1572                 }
1573         }
1574
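        /* Write as many full bus-width words as remain */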
1575         while (len >= map_bankwidth(map)) {
1576                 map_word datum = map_word_load(map, buf);
1577
1578                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1579                                        ofs, datum, FL_WRITING);
1580                 if (ret)
1581                         return ret;
1582
1583                 ofs += map_bankwidth(map);
1584                 buf += map_bankwidth(map);
1585                 (*retlen) += map_bankwidth(map);
1586                 len -= map_bankwidth(map);
1587
1588                 if (ofs >> cfi->chipshift) {
1589                         chipnum ++;
1590                         ofs = 0;
1591                         if (chipnum == cfi->numchips)
1592                                 return 0;
1593                 }
1594         }
1595
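        /* Trailing partial word: pad with 0xff so the padding bits stay
           in the erased state (programming can only clear bits to 0) */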
1596         if (len & (map_bankwidth(map)-1)) {
1597                 map_word datum;
1598
1599                 datum = map_word_ff(map);
1600                 datum = map_word_load_partial(map, datum, buf, 0, len);
1601
1602                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1603                                        ofs, datum, FL_WRITING);
1604                 if (ret)
1605                         return ret;
1606
1607                 (*retlen) += len;
1608         }
1609
1610         return 0;
1611 }
1612
1613
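/*
 * Buffered write: issue 0xe8 (0xe9 for 0x0200-family command sets) to
 * request the chip's write buffer, poll until the buffer is available,
 * write the word count and the data, then confirm with 0xd0 and wait
 * for completion.  The data is gathered from the caller's kvec array,
 * assembled one bus word at a time.
 */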
1614 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1615                                     unsigned long adr, const struct kvec **pvec,
1616                                     unsigned long *pvec_seek, int len)
1617 {
1618         struct cfi_private *cfi = map->fldrv_priv;
1619         map_word status, write_cmd, datum;
1620         unsigned long cmd_adr;
1621         int ret, wbufsize, word_gap, words;
1622         const struct kvec *vec;
1623         unsigned long vec_seek;
1624         unsigned long initial_adr;
1625         int initial_len = len;
1626
1627         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1628         adr += chip->start;
1629         initial_adr = adr;
1630         cmd_adr = adr & ~(wbufsize-1);
1631
1632         /* Determine the write command once; CMD() replicates it across the interleave */
1633         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1634
1635         spin_lock(chip->mutex);
1636         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1637         if (ret) {
1638                 spin_unlock(chip->mutex);
1639                 return ret;
1640         }
1641
1642         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1643         ENABLE_VPP(map);
1644         xip_disable(map, chip, cmd_adr);
1645
1646         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1647            [...], the device will not accept any more Write to Buffer commands".
1648            So we must check here and reset those bits if they're set. Otherwise
1649            we're just pissing in the wind */
1650         if (chip->state != FL_STATUS) {
1651                 map_write(map, CMD(0x70), cmd_adr);
1652                 chip->state = FL_STATUS;
1653         }
1654         status = map_read(map, cmd_adr);
1655         if (map_word_bitsset(map, status, CMD(0x30))) {
1656                 xip_enable(map, chip, cmd_adr);
1657                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1658                 xip_disable(map, chip, cmd_adr);
1659                 map_write(map, CMD(0x50), cmd_adr);
1660                 map_write(map, CMD(0x70), cmd_adr);
1661         }
1662
1663         chip->state = FL_WRITING_TO_BUFFER;
1664         map_write(map, write_cmd, cmd_adr);
1665         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1666         if (ret) {
1667                 /* Argh. Not ready for write to buffer */
1668                 map_word Xstatus = map_read(map, cmd_adr);
1669                 map_write(map, CMD(0x70), cmd_adr);
1670                 chip->state = FL_STATUS;
1671                 status = map_read(map, cmd_adr);
1672                 map_write(map, CMD(0x50), cmd_adr);
1673                 map_write(map, CMD(0x70), cmd_adr);
1674                 xip_enable(map, chip, cmd_adr);
1675                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1676                                 map->name, Xstatus.x[0], status.x[0]);
1677                 goto out;
1678         }
1679
1680         /* Figure out the number of words to write */
1681         word_gap = (-adr & (map_bankwidth(map)-1));
1682         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1683         if (!word_gap) {
1684                 words--;
1685         } else {
1686                 word_gap = map_bankwidth(map) - word_gap;
1687                 adr -= word_gap;
1688                 datum = map_word_ff(map);
1689         }
1690
1691         /* Write the word count minus one, as the chip expects */
1692         map_write(map, CMD(words), cmd_adr);
1693
1694         /* Write data */
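        /* word_gap counts the bytes already consumed in the bus word
           being assembled; padding for an unaligned start was preset to
           0xff above so those cells keep their erased value. */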
1695         vec = *pvec;
1696         vec_seek = *pvec_seek;
1697         do {
1698                 int n = map_bankwidth(map) - word_gap;
1699                 if (n > vec->iov_len - vec_seek)
1700                         n = vec->iov_len - vec_seek;
1701                 if (n > len)
1702                         n = len;
1703
1704                 if (!word_gap && len < map_bankwidth(map))
1705                         datum = map_word_ff(map);
1706
1707                 datum = map_word_load_partial(map, datum,
1708                                               vec->iov_base + vec_seek,
1709                                               word_gap, n);
1710
1711                 len -= n;
1712                 word_gap += n;
1713                 if (!len || word_gap == map_bankwidth(map)) {
1714                         map_write(map, datum, adr);
1715                         adr += map_bankwidth(map);
1716                         word_gap = 0;
1717                 }
1718
1719                 vec_seek += n;
1720                 if (vec_seek == vec->iov_len) {
1721                         vec++;
1722                         vec_seek = 0;
1723                 }
1724         } while (len);
1725         *pvec = vec;
1726         *pvec_seek = vec_seek;
1727
1728         /* GO GO GO */
1729         map_write(map, CMD(0xd0), cmd_adr);
1730         chip->state = FL_WRITING;
1731
1732         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1733                                    initial_adr, initial_len,
1734                                    chip->buffer_write_time,
1735                                    chip->buffer_write_time_max);
1736         if (ret) {
1737                 map_write(map, CMD(0x70), cmd_adr);
1738                 chip->state = FL_STATUS;
1739                 xip_enable(map, chip, cmd_adr);
1740                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1741                 goto out;
1742         }
1743
1744         /* check for errors */
1745         status = map_read(map, cmd_adr);
1746         if (map_word_bitsset(map, status, CMD(0x1a))) {
1747                 unsigned long chipstatus = MERGESTATUS(status);
1748
1749                 /* reset status */
1750                 map_write(map, CMD(0x50), cmd_adr);
1751                 map_write(map, CMD(0x70), cmd_adr);
1752                 xip_enable(map, chip, cmd_adr);
1753
1754                 if (chipstatus & 0x02) {
1755                         ret = -EROFS;
1756                 } else if (chipstatus & 0x08) {
1757                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1758                         ret = -EIO;
1759                 } else {
1760                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1761                         ret = -EINVAL;
1762                 }
1763
1764                 goto out;
1765         }
1766
1767         xip_enable(map, chip, cmd_adr);
1768  out:   put_chip(map, chip, cmd_adr);
1769         spin_unlock(chip->mutex);
1770         return ret;
1771 }
1772
1773 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1774                                 unsigned long count, loff_t to, size_t *retlen)
1775 {
1776         struct map_info *map = mtd->priv;
1777         struct cfi_private *cfi = map->fldrv_priv;
1778         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1779         int ret = 0;
1780         int chipnum;
1781         unsigned long ofs, vec_seek, i;
1782         size_t len = 0;
1783
1784         for (i = 0; i < count; i++)
1785                 len += vecs[i].iov_len;
1786
1787         *retlen = 0;
1788         if (!len)
1789                 return 0;
1790
1791         chipnum = to >> cfi->chipshift;
1792         ofs = to - (chipnum << cfi->chipshift);
1793         vec_seek = 0;
1794
1795         do {
1796                 /* We must not cross write block boundaries */
1797                 int size = wbufsize - (ofs & (wbufsize-1));
1798
1799                 if (size > len)
1800                         size = len;
1801                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1802                                       ofs, &vecs, &vec_seek, size);
1803                 if (ret)
1804                         return ret;
1805
1806                 ofs += size;
1807                 (*retlen) += size;
1808                 len -= size;
1809
1810                 if (ofs >> cfi->chipshift) {
1811                         chipnum ++;
1812                         ofs = 0;
1813                         if (chipnum == cfi->numchips)
1814                                 return 0;
1815                 }
1816
1817                 /* Be nice and reschedule with the chip in a usable state for other
1818                    processes. */
1819                 cond_resched();
1820
1821         } while (len);
1822
1823         return 0;
1824 }
1825
1826 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1827                                        size_t len, size_t *retlen, const u_char *buf)
1828 {
1829         struct kvec vec;
1830
1831         vec.iov_base = (void *) buf;
1832         vec.iov_len = len;
1833
1834         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1835 }
1836
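/*
 * Erase one block: clear the status register (0x50), issue the block
 * erase setup/confirm pair (0x20, 0xd0), then poll for completion.  An
 * erase failure flagged in SR.5 is retried up to three times before
 * giving up.
 */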
1837 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1838                                       unsigned long adr, int len, void *thunk)
1839 {
1840         struct cfi_private *cfi = map->fldrv_priv;
1841         map_word status;
1842         int retries = 3;
1843         int ret;
1844
1845         adr += chip->start;
1846
1847  retry:
1848         spin_lock(chip->mutex);
1849         ret = get_chip(map, chip, adr, FL_ERASING);
1850         if (ret) {
1851                 spin_unlock(chip->mutex);
1852                 return ret;
1853         }
1854
1855         XIP_INVAL_CACHED_RANGE(map, adr, len);
1856         ENABLE_VPP(map);
1857         xip_disable(map, chip, adr);
1858
1859         /* Clear the status register first */
1860         map_write(map, CMD(0x50), adr);
1861
1862         /* Now erase */
1863         map_write(map, CMD(0x20), adr);
1864         map_write(map, CMD(0xD0), adr);
1865         chip->state = FL_ERASING;
1866         chip->erase_suspended = 0;
1867
1868         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1869                                    adr, len,
1870                                    chip->erase_time,
1871                                    chip->erase_time_max);
1872         if (ret) {
1873                 map_write(map, CMD(0x70), adr);
1874                 chip->state = FL_STATUS;
1875                 xip_enable(map, chip, adr);
1876                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1877                 goto out;
1878         }
1879
1880         /* We've broken this before. It doesn't hurt to be safe */
1881         map_write(map, CMD(0x70), adr);
1882         chip->state = FL_STATUS;
1883         status = map_read(map, adr);
1884
1885         /* check for errors */
1886         if (map_word_bitsset(map, status, CMD(0x3a))) {
1887                 unsigned long chipstatus = MERGESTATUS(status);
1888
1889                 /* Reset the error bits */
1890                 map_write(map, CMD(0x50), adr);
1891                 map_write(map, CMD(0x70), adr);
1892                 xip_enable(map, chip, adr);
1893
1894                 if ((chipstatus & 0x30) == 0x30) {
1895                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1896                         ret = -EINVAL;
1897                 } else if (chipstatus & 0x02) {
1898                         /* Protection bit set */
1899                         ret = -EROFS;
1900                 } else if (chipstatus & 0x8) {
1901                         /* Voltage */
1902                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1903                         ret = -EIO;
1904                 } else if (chipstatus & 0x20 && retries--) {
1905                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1906                         put_chip(map, chip, adr);
1907                         spin_unlock(chip->mutex);
1908                         goto retry;
1909                 } else {
1910                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1911                         ret = -EIO;
1912                 }
1913
1914                 goto out;
1915         }
1916
1917         xip_enable(map, chip, adr);
1918  out:   put_chip(map, chip, adr);
1919         spin_unlock(chip->mutex);
1920         return ret;
1921 }
1922
1923 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1924 {
1925         unsigned long ofs, len;
1926         int ret;
1927
1928         ofs = instr->addr;
1929         len = instr->len;
1930
1931         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1932         if (ret)
1933                 return ret;
1934
1935         instr->state = MTD_ERASE_DONE;
1936         mtd_erase_callback(instr);
1937
1938         return 0;
1939 }
1940
1941 static void cfi_intelext_sync (struct mtd_info *mtd)
1942 {
1943         struct map_info *map = mtd->priv;
1944         struct cfi_private *cfi = map->fldrv_priv;
1945         int i;
1946         struct flchip *chip;
1947         int ret = 0;
1948
1949         for (i=0; !ret && i<cfi->numchips; i++) {
1950                 chip = &cfi->chips[i];
1951
1952                 spin_lock(chip->mutex);
1953                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1954
1955                 if (!ret) {
1956                         chip->oldstate = chip->state;
1957                         chip->state = FL_SYNCING;
1958                         /* No need to wake_up() on this state change -
1959                          * as the whole point is that nobody can do anything
1960                          * with the chip now anyway.
1961                          */
1962                 }
1963                 spin_unlock(chip->mutex);
1964         }
1965
1966         /* Unlock the chips again */
1967
1968         for (i--; i >=0; i--) {
1969                 chip = &cfi->chips[i];
1970
1971                 spin_lock(chip->mutex);
1972
1973                 if (chip->state == FL_SYNCING) {
1974                         chip->state = chip->oldstate;
1975                         chip->oldstate = FL_READY;
1976                         wake_up(&chip->wq);
1977                 }
1978                 spin_unlock(chip->mutex);
1979         }
1980 }
1981
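/*
 * Read a block's lock status: enter read-identifier mode (0x90) and
 * read the query word at offset 2 within the block, which holds the
 * block lock configuration on Intel-style chips.
 */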
1982 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1983                                                 struct flchip *chip,
1984                                                 unsigned long adr,
1985                                                 int len, void *thunk)
1986 {
1987         struct cfi_private *cfi = map->fldrv_priv;
1988         int status, ofs_factor = cfi->interleave * cfi->device_type;
1989
1990         adr += chip->start;
1991         xip_disable(map, chip, adr+(2*ofs_factor));
1992         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1993         chip->state = FL_JEDEC_QUERY;
1994         status = cfi_read_query(map, adr+(2*ofs_factor));
1995         xip_enable(map, chip, 0);
1996         return status;
1997 }
1998
1999 #ifdef DEBUG_LOCK_BITS
2000 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2001                                                 struct flchip *chip,
2002                                                 unsigned long adr,
2003                                                 int len, void *thunk)
2004 {
2005         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2006                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2007         return 0;
2008 }
2009 #endif
2010
2011 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2012 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2013
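/*
 * Lock or unlock a single block: 0x60 is the lock setup command,
 * followed by 0x01 to set the block's lock bit or 0xd0 to clear it.
 * Chips without instant individual block locking need roughly a timer
 * tick's delay before completion is polled.
 */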
2014 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2015                                        unsigned long adr, int len, void *thunk)
2016 {
2017         struct cfi_private *cfi = map->fldrv_priv;
2018         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2019         int udelay;
2020         int ret;
2021
2022         adr += chip->start;
2023
2024         spin_lock(chip->mutex);
2025         ret = get_chip(map, chip, adr, FL_LOCKING);
2026         if (ret) {
2027                 spin_unlock(chip->mutex);
2028                 return ret;
2029         }
2030
2031         ENABLE_VPP(map);
2032         xip_disable(map, chip, adr);
2033
2034         map_write(map, CMD(0x60), adr);
2035         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2036                 map_write(map, CMD(0x01), adr);
2037                 chip->state = FL_LOCKING;
2038         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2039                 map_write(map, CMD(0xD0), adr);
2040                 chip->state = FL_UNLOCKING;
2041         } else
2042                 BUG();
2043
2044         /*
2045          * If Instant Individual Block Locking is supported, there is
2046          * no need to delay.
2047          */
2048         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2049
2050         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2051         if (ret) {
2052                 map_write(map, CMD(0x70), adr);
2053                 chip->state = FL_STATUS;
2054                 xip_enable(map, chip, adr);
2055                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2056                 goto out;
2057         }
2058
2059         xip_enable(map, chip, adr);
2060 out:    put_chip(map, chip, adr);
2061         spin_unlock(chip->mutex);
2062         return ret;
2063 }
2064
2065 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2066 {
2067         int ret;
2068
2069 #ifdef DEBUG_LOCK_BITS
2070         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2071                __func__, ofs, len);
2072         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2073                 ofs, len, NULL);
2074 #endif
2075
2076         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2077                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2078
2079 #ifdef DEBUG_LOCK_BITS
2080         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2081                __func__, ret);
2082         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2083                 ofs, len, NULL);
2084 #endif
2085
2086         return ret;
2087 }
2088
2089 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2090 {
2091         int ret;
2092
2093 #ifdef DEBUG_LOCK_BITS
2094         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2095                __func__, ofs, len);
2096         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2097                 ofs, len, NULL);
2098 #endif
2099
2100         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2101                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2102
2103 #ifdef DEBUG_LOCK_BITS
2104         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2105                __func__, ret);
2106         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2107                 ofs, len, NULL);
2108 #endif
2109
2110         return ret;
2111 }
2112
2113 #ifdef CONFIG_MTD_OTP
2114
2115 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2116                         u_long data_offset, u_char *buf, u_int size,
2117                         u_long prot_offset, u_int groupno, u_int groupsize);
2118
2119 static int __xipram
2120 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2121             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2122 {
2123         struct cfi_private *cfi = map->fldrv_priv;
2124         int ret;
2125
2126         spin_lock(chip->mutex);
2127         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2128         if (ret) {
2129                 spin_unlock(chip->mutex);
2130                 return ret;
2131         }
2132
2133         /* let's ensure we're not reading back cached data from array mode */
2134         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2135
2136         xip_disable(map, chip, chip->start);
2137         if (chip->state != FL_JEDEC_QUERY) {
2138                 map_write(map, CMD(0x90), chip->start);
2139                 chip->state = FL_JEDEC_QUERY;
2140         }
2141         map_copy_from(map, buf, chip->start + offset, size);
2142         xip_enable(map, chip, chip->start);
2143
2144         /* then ensure we don't keep OTP data in the cache */
2145         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2146
2147         put_chip(map, chip, chip->start);
2148         spin_unlock(chip->mutex);
2149         return 0;
2150 }
2151
2152 static int
2153 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2154              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2155 {
2156         int ret;
2157
2158         while (size) {
2159                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2160                 int gap = offset - bus_ofs;
2161                 int n = min_t(int, size, map_bankwidth(map)-gap);
2162                 map_word datum = map_word_ff(map);
2163
2164                 datum = map_word_load_partial(map, datum, buf, gap, n);
2165                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2166                 if (ret)
2167                         return ret;
2168
2169                 offset += n;
2170                 buf += n;
2171                 size -= n;
2172         }
2173
2174         return 0;
2175 }
2176
2177 static int
2178 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2179             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2180 {
2181         struct cfi_private *cfi = map->fldrv_priv;
2182         map_word datum;
2183
2184         /* make sure area matches group boundaries */
2185         if (size != grpsz)
2186                 return -EXDEV;
2187
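        /* Locking a group means programming its bit in the protection
           lock word to 0; like any OTP bit it can never be restored to 1. */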
2188         datum = map_word_ff(map);
2189         datum = map_word_clr(map, datum, CMD(1 << grpno));
2190         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2191 }
2192
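/*
 * Walk the protection (OTP) regions described in the Intel extended
 * query, applying 'action' to the factory or user register groups.  A
 * NULL action instead fills 'buf' with otp_info records describing
 * each group and its lock state.  Offsets from the query are scaled by
 * the interleave and device type to obtain map-relative addresses.
 */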
2193 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2194                                  size_t *retlen, u_char *buf,
2195                                  otp_op_t action, int user_regs)
2196 {
2197         struct map_info *map = mtd->priv;
2198         struct cfi_private *cfi = map->fldrv_priv;
2199         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2200         struct flchip *chip;
2201         struct cfi_intelext_otpinfo *otp;
2202         u_long devsize, reg_prot_offset, data_offset;
2203         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2204         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2205         int ret;
2206
2207         *retlen = 0;
2208
2209         /* Check that we actually have some OTP registers */
2210         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2211                 return -ENODATA;
2212
2213         /* we need real chips here, not virtual ones */
2214         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2215         chip_step = devsize >> cfi->chipshift;
2216         chip_num = 0;
2217
2218         /* Some chips have OTP located in the _top_ partition only.
2219            For example: Intel 28F256L18T (T means top-parameter device) */
2220         if (cfi->mfr == MANUFACTURER_INTEL) {
2221                 switch (cfi->id) {
2222                 case 0x880b:
2223                 case 0x880c:
2224                 case 0x880d:
2225                         chip_num = chip_step - 1;
2226                 }
2227         }
2228
2229         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2230                 chip = &cfi->chips[chip_num];
2231                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2232
2233                 /* first OTP region */
2234                 field = 0;
2235                 reg_prot_offset = extp->ProtRegAddr;
2236                 reg_fact_groups = 1;
2237                 reg_fact_size = 1 << extp->FactProtRegSize;
2238                 reg_user_groups = 1;
2239                 reg_user_size = 1 << extp->UserProtRegSize;
2240
2241                 while (len > 0) {
2242                         /* flash geometry fixup */
2243                         data_offset = reg_prot_offset + 1;
2244                         data_offset *= cfi->interleave * cfi->device_type;
2245                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2246                         reg_fact_size *= cfi->interleave;
2247                         reg_user_size *= cfi->interleave;
2248
2249                         if (user_regs) {
2250                                 groups = reg_user_groups;
2251                                 groupsize = reg_user_size;
2252                                 /* skip over factory reg area */
2253                                 groupno = reg_fact_groups;
2254                                 data_offset += reg_fact_groups * reg_fact_size;
2255                         } else {
2256                                 groups = reg_fact_groups;
2257                                 groupsize = reg_fact_size;
2258                                 groupno = 0;
2259                         }
2260
2261                         while (len > 0 && groups > 0) {
2262                                 if (!action) {
2263                                         /*
2264                                          * Special case: if action is NULL
2265                                          * we fill buf with otp_info records.
2266                                          */
2267                                         struct otp_info *otpinfo;
2268                                         map_word lockword;
2269                                         if (len < sizeof(struct otp_info))
2270                                                 return -ENOSPC;
2271                                         len -= sizeof(struct otp_info);
2272                                         ret = do_otp_read(map, chip,
2273                                                           reg_prot_offset,
2274                                                           (u_char *)&lockword,
2275                                                           map_bankwidth(map),
2276                                                           0, 0,  0);
2277                                         if (ret)
2278                                                 return ret;
2279                                         otpinfo = (struct otp_info *)buf;
2280                                         otpinfo->start = from;
2281                                         otpinfo->length = groupsize;
2282                                         otpinfo->locked =
2283                                            !map_word_bitsset(map, lockword,
2284                                                              CMD(1 << groupno));
2285                                         from += groupsize;
2286                                         buf += sizeof(*otpinfo);
2287                                         *retlen += sizeof(*otpinfo);
2288                                 } else if (from >= groupsize) {
2289                                         from -= groupsize;
2290                                         data_offset += groupsize;
2291                                 } else {
2292                                         int size = groupsize;
2293                                         data_offset += from;
2294                                         size -= from;
2295                                         from = 0;
2296                                         if (size > len)
2297                                                 size = len;
2298                                         ret = action(map, chip, data_offset,
2299                                                      buf, size, reg_prot_offset,
2300                                                      groupno, groupsize);
2301                                         if (ret < 0)
2302                                                 return ret;
2303                                         buf += size;
2304                                         len -= size;
2305                                         *retlen += size;
2306                                         data_offset += size;
2307                                 }
2308                                 groupno++;
2309                                 groups--;
2310                         }
2311
2312                         /* next OTP region */
2313                         if (++field == extp->NumProtectionFields)
2314                                 break;
2315                         reg_prot_offset = otp->ProtRegAddr;
2316                         reg_fact_groups = otp->FactGroups;
2317                         reg_fact_size = 1 << otp->FactProtRegSize;
2318                         reg_user_groups = otp->UserGroups;
2319                         reg_user_size = 1 << otp->UserProtRegSize;
2320                         otp++;
2321                 }
2322         }
2323
2324         return 0;
2325 }
2326
2327 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2328                                            size_t len, size_t *retlen,
2329                                             u_char *buf)
2330 {
2331         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2332                                      buf, do_otp_read, 0);
2333 }
2334
2335 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2336                                            size_t len, size_t *retlen,
2337                                             u_char *buf)
2338 {
2339         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2340                                      buf, do_otp_read, 1);
2341 }
2342
2343 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2344                                             size_t len, size_t *retlen,
2345                                              u_char *buf)
2346 {
2347         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2348                                      buf, do_otp_write, 1);
2349 }
2350
2351 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2352                                            loff_t from, size_t len)
2353 {
2354         size_t retlen;
2355         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2356                                      NULL, do_otp_lock, 1);
2357 }
2358
2359 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2360                                            struct otp_info *buf, size_t len)
2361 {
2362         size_t retlen;
2363         int ret;
2364
2365         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2366         return ret ? : retlen;
2367 }
2368
2369 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2370                                            struct otp_info *buf, size_t len)
2371 {
2372         size_t retlen;
2373         int ret;
2374
2375         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2376         return ret ? : retlen;
2377 }
2378
2379 #endif
2380
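/*
 * Chips with power-up locking come back from a power cycle with every
 * block locked.  Before suspending, record each block's current lock
 * state in the per-region lockmap so that resume can unlock exactly
 * the blocks that were unlocked before.
 */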
2381 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2382 {
2383         struct mtd_erase_region_info *region;
2384         int block, status, i;
2385         unsigned long adr;
2386         size_t len;
2387
2388         for (i = 0; i < mtd->numeraseregions; i++) {
2389                 region = &mtd->eraseregions[i];
2390                 if (!region->lockmap)
2391                         continue;
2392
2393                 for (block = 0; block < region->numblocks; block++){
2394                         len = region->erasesize;
2395                         adr = region->offset + block * len;
2396
2397                         status = cfi_varsize_frob(mtd,
2398                                         do_getlockstatus_oneblock, adr, len, NULL);
2399                         if (status)
2400                                 set_bit(block, region->lockmap);
2401                         else
2402                                 clear_bit(block, region->lockmap);
2403                 }
2404         }
2405 }
2406
2407 static int cfi_intelext_suspend(struct mtd_info *mtd)
2408 {
2409         struct map_info *map = mtd->priv;
2410         struct cfi_private *cfi = map->fldrv_priv;
2411         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2412         int i;
2413         struct flchip *chip;
2414         int ret = 0;
2415
2416         if ((mtd->flags & MTD_POWERUP_LOCK)
2417             && extp && (extp->FeatureSupport & (1 << 5)))
2418                 cfi_intelext_save_locks(mtd);
2419
2420         for (i=0; !ret && i<cfi->numchips; i++) {
2421                 chip = &cfi->chips[i];
2422
2423                 spin_lock(chip->mutex);
2424
2425                 switch (chip->state) {
2426                 case FL_READY:
2427                 case FL_STATUS:
2428                 case FL_CFI_QUERY:
2429                 case FL_JEDEC_QUERY:
2430                         if (chip->oldstate == FL_READY) {
2431                                 /* place the chip in a known state before suspend */
2432                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2433                                 chip->oldstate = chip->state;
2434                                 chip->state = FL_PM_SUSPENDED;
2435                                 /* No need to wake_up() on this state change -
2436                                  * as the whole point is that nobody can do anything
2437                                  * with the chip now anyway.
2438                                  */
2439                         } else {
2440                                 /* There seems to be an operation pending. We must wait for it. */
2441                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2442                                 ret = -EAGAIN;
2443                         }
2444                         break;
2445                 default:
2446                         /* Should we actually wait? Once upon a time these routines weren't
2447                            allowed to. Or should we return -EAGAIN, because the upper layers
2448                            ought to have already shut down anything which was using the device
2449                            anyway? The latter for now. */
2450                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2451                         ret = -EAGAIN;
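                        /* fall through */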
2452                 case FL_PM_SUSPENDED:
2453                         break;
2454                 }
2455                 spin_unlock(chip->mutex);
2456         }
2457
2458         /* Unlock the chips again */
2459
2460         if (ret) {
2461                 for (i--; i >=0; i--) {
2462                         chip = &cfi->chips[i];
2463
2464                         spin_lock(chip->mutex);
2465
2466                         if (chip->state == FL_PM_SUSPENDED) {
2467                                 /* No need to force it into a known state here,
2468                                    because we're returning failure, and it didn't
2469                                    get power cycled */
2470                                 chip->state = chip->oldstate;
2471                                 chip->oldstate = FL_READY;
2472                                 wake_up(&chip->wq);
2473                         }
2474                         spin_unlock(chip->mutex);
2475                 }
2476         }
2477
2478         return ret;
2479 }
2480
2481 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2482 {
2483         struct mtd_erase_region_info *region;
2484         int block, i;
2485         unsigned long adr;
2486         size_t len;
2487
2488         for (i = 0; i < mtd->numeraseregions; i++) {
2489                 region = &mtd->eraseregions[i];
2490                 if (!region->lockmap)
2491                         continue;
2492
2493                 for (block = 0; block < region->numblocks; block++) {
2494                         len = region->erasesize;
2495                         adr = region->offset + block * len;
2496
2497                         if (!test_bit(block, region->lockmap))
2498                                 cfi_intelext_unlock(mtd, adr, len);
2499                 }
2500         }
2501 }
2502
2503 static void cfi_intelext_resume(struct mtd_info *mtd)
2504 {
2505         struct map_info *map = mtd->priv;
2506         struct cfi_private *cfi = map->fldrv_priv;
2507         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2508         int i;
2509         struct flchip *chip;
2510
2511         for (i=0; i<cfi->numchips; i++) {
2512
2513                 chip = &cfi->chips[i];
2514
2515                 spin_lock(chip->mutex);
2516
2517                 /* Go to a known state. The chip may have been power-cycled */
2518                 if (chip->state == FL_PM_SUSPENDED) {
2519                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2520                         chip->oldstate = chip->state = FL_READY;
2521                         wake_up(&chip->wq);
2522                 }
2523
2524                 spin_unlock(chip->mutex);
2525         }
2526
2527         if ((mtd->flags & MTD_POWERUP_LOCK)
2528             && extp && (extp->FeatureSupport & (1 << 5)))
2529                 cfi_intelext_restore_locks(mtd);
2530 }
2531
2532 static int cfi_intelext_reset(struct mtd_info *mtd)
2533 {
2534         struct map_info *map = mtd->priv;
2535         struct cfi_private *cfi = map->fldrv_priv;
2536         int i, ret;
2537
2538         for (i=0; i < cfi->numchips; i++) {
2539                 struct flchip *chip = &cfi->chips[i];
2540
2541                 /* force the completion of any ongoing operation
2542                    and switch to array mode so any bootloader in
2543                    flash is accessible for soft reboot. */
2544                 spin_lock(chip->mutex);
2545                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2546                 if (!ret) {
2547                         map_write(map, CMD(0xff), chip->start);
2548                         chip->state = FL_SHUTDOWN;
2549                 }
2550                 spin_unlock(chip->mutex);
2551         }
2552
2553         return 0;
2554 }
2555
2556 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2557                                void *v)
2558 {
2559         struct mtd_info *mtd;
2560
2561         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2562         cfi_intelext_reset(mtd);
2563         return NOTIFY_DONE;
2564 }
2565
2566 static void cfi_intelext_destroy(struct mtd_info *mtd)
2567 {
2568         struct map_info *map = mtd->priv;
2569         struct cfi_private *cfi = map->fldrv_priv;
2570         struct mtd_erase_region_info *region;
2571         int i;
2572         cfi_intelext_reset(mtd);
2573         unregister_reboot_notifier(&mtd->reboot_notifier);
2574         kfree(cfi->cmdset_priv);
2575         kfree(cfi->cfiq);
2576         kfree(cfi->chips[0].priv);
2577         kfree(cfi);
2578         for (i = 0; i < mtd->numeraseregions; i++) {
2579                 region = &mtd->eraseregions[i];
2580                 kfree(region->lockmap); /* kfree(NULL) is a no-op */
2582         }
2583         kfree(mtd->eraseregions);
2584 }
2585
2586 MODULE_LICENSE("GPL");
2587 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2588 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2589 MODULE_ALIAS("cfi_cmdset_0003");
2590 MODULE_ALIAS("cfi_cmdset_0200");