xref: /linux/drivers/mtd/chips/cfi_cmdset_0001.c (revision 98366c20a275e957416e9516db5dcb7195b4e101)
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
8  *
9  *
10  * 10/10/2000	Nicolas Pitre <nico@cam.org>
11  * 	- completely revamped method functions so they are aware and
12  * 	  independent of the flash geometry (buswidth, interleave, etc.)
13  * 	- scalability vs code size is completely set at compile-time
14  * 	  (see include/linux/mtd/cfi.h for selection)
15  *	- optimized write buffer method
16  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *	- reworked lock/unlock/erase support for var size flash
18  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
19  * 	- auto unlock sectors on resume for auto locking flash on power up
20  */
21 
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
29 
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/bitmap.h>
36 #include <linux/mtd/xip.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/compatmac.h>
40 #include <linux/mtd/cfi.h>
41 
42 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
43 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
44 
45 /* Debugging: set to 1 to turn off buffer write mode and force word writes */
46 #define FORCE_WORD_WRITE 0
47 
48 #define MANUFACTURER_INTEL	0x0089
49 #define I82802AB	0x00ad
50 #define I82802AC	0x00ac
51 #define MANUFACTURER_ST         0x0020
52 #define M50LPW080       0x002F
53 
54 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
55 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
56 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
58 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
59 static void cfi_intelext_sync (struct mtd_info *);
60 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
61 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
62 #ifdef CONFIG_MTD_OTP
63 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
67 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
68 					    struct otp_info *, size_t);
69 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
70 					    struct otp_info *, size_t);
71 #endif
72 static int cfi_intelext_suspend (struct mtd_info *);
73 static void cfi_intelext_resume (struct mtd_info *);
74 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
75 
76 static void cfi_intelext_destroy(struct mtd_info *);
77 
78 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
79 
80 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
81 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
82 
83 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
84 		     size_t *retlen, u_char **mtdbuf);
85 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
86 			size_t len);
87 
88 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
89 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
90 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
91 #include "fwh_lock.h"
92 
93 
94 
95 /*
96  *  *********** SETUP AND PROBE BITS  ***********
97  */
98 
99 static struct mtd_chip_driver cfi_intelext_chipdrv = {
100 	.probe		= NULL, /* Not usable directly */
101 	.destroy	= cfi_intelext_destroy,
102 	.name		= "cfi_cmdset_0001",
103 	.module		= THIS_MODULE
104 };
105 
106 /* #define DEBUG_LOCK_BITS */
107 /* #define DEBUG_CFI_FEATURES */
108 
109 #ifdef DEBUG_CFI_FEATURES
110 static void cfi_tell_features(struct cfi_pri_intelext *extp)
111 {
112 	int i;
113 	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
114 	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
115 	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
116 	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
117 	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
118 	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
119 	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
120 	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
121 	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
122 	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
123 	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
124 	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
125 	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
126 	for (i=11; i<32; i++) {
127 		if (extp->FeatureSupport & (1<<i))
128 			printk("     - Unknown Bit %X:      supported\n", i);
129 	}
130 
131 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
132 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
133 	for (i=1; i<8; i++) {
134 		if (extp->SuspendCmdSupport & (1<<i))
135 			printk("     - Unknown Bit %X:               supported\n", i);
136 	}
137 
138 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
139 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
140 	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
141 	for (i=2; i<4; i++) {
142 		if (extp->BlkStatusRegMask & (1<<i))
143 			printk("     - Unknown Bit %X Active: yes\n",i);
144 	}
145 	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
146 	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
147 	for (i=6; i<16; i++) {
148 		if (extp->BlkStatusRegMask & (1<<i))
149 			printk("     - Unknown Bit %X Active: yes\n",i);
150 	}
151 
152 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
153 	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
154 	if (extp->VppOptimal)
155 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
156 		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
157 }
158 #endif
159 
160 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
161 /* Some Intel Strata Flash parts prior to FPO revision C have bugs in this area */
162 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
163 {
164 	struct map_info *map = mtd->priv;
165 	struct cfi_private *cfi = map->fldrv_priv;
166 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
167 
168 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
169 	                    "erase on write disabled.\n");
170 	extp->SuspendCmdSupport &= ~1;
171 }
172 #endif
173 
174 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
175 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
176 {
177 	struct map_info *map = mtd->priv;
178 	struct cfi_private *cfi = map->fldrv_priv;
179 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
180 
181 	if (cfip && (cfip->FeatureSupport&4)) {
182 		cfip->FeatureSupport &= ~4;
183 		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
184 	}
185 }
186 #endif
187 
188 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
189 {
190 	struct map_info *map = mtd->priv;
191 	struct cfi_private *cfi = map->fldrv_priv;
192 
193 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
194 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
195 }
196 
197 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
198 {
199 	struct map_info *map = mtd->priv;
200 	struct cfi_private *cfi = map->fldrv_priv;
201 
202 	/* Note this is done after the region info is endian swapped */
203 	cfi->cfiq->EraseRegionInfo[1] =
204 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
205 }
206 
207 static void fixup_use_point(struct mtd_info *mtd, void *param)
208 {
209 	struct map_info *map = mtd->priv;
210 	if (!mtd->point && map_is_linear(map)) {
211 		mtd->point   = cfi_intelext_point;
212 		mtd->unpoint = cfi_intelext_unpoint;
213 	}
214 }
215 
216 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
217 {
218 	struct map_info *map = mtd->priv;
219 	struct cfi_private *cfi = map->fldrv_priv;
220 	if (cfi->cfiq->BufWriteTimeoutTyp) {
221 		printk(KERN_INFO "Using buffer write method\n");
222 		mtd->write = cfi_intelext_write_buffers;
223 		mtd->writev = cfi_intelext_writev;
224 	}
225 }
226 
227 /*
228  * Some chips power-up with all sectors locked by default.
229  */
230 static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
231 {
232 	printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
233 	mtd->flags |= MTD_STUPID_LOCK;
234 }
235 
236 static struct cfi_fixup cfi_fixup_table[] = {
237 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
238 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
239 #endif
240 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
241 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
242 #endif
243 #if !FORCE_WORD_WRITE
244 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
245 #endif
246 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
247 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
248 	{ MANUFACTURER_INTEL, 0x891c,	      fixup_use_powerup_lock, NULL, },
249 	{ 0, 0, NULL, NULL }
250 };
251 
252 static struct cfi_fixup jedec_fixup_table[] = {
253 	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
254 	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
255 	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
256 	{ 0, 0, NULL, NULL }
257 };
258 static struct cfi_fixup fixup_table[] = {
259 	/* The CFI vendor IDs and the JEDEC vendor IDs appear
260 	 * to be common.  It is likely that the device IDs are
261 	 * as well.  This table picks up all the cases where
262 	 * we know that is the case.
263 	 */
264 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
265 	{ 0, 0, NULL, NULL }
266 };
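
/*
 * For reference, a sketch of how the fixup tables above are consumed.
 * The real walker is cfi_fixup() in drivers/mtd/chips/cfi_util.c; this
 * is an illustrative rendering, not the code built here.  Every entry
 * whose mfr/id pair matches the probed chip -- with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards -- has its fixup method invoked:
 */
#if 0	/* illustrative sketch, not compiled */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id == CFI_ID_ANY || f->id == cfi->id))
			f->fixup(mtd, f->param);
	}
}
#endif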
267 
268 static inline struct cfi_pri_intelext *
269 read_pri_intelext(struct map_info *map, __u16 adr)
270 {
271 	struct cfi_pri_intelext *extp;
272 	unsigned int extp_size = sizeof(*extp);
273 
274  again:
275 	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
276 	if (!extp)
277 		return NULL;
278 
279 	if (extp->MajorVersion != '1' ||
280 	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
281 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
282 		       "version %c.%c.\n",  extp->MajorVersion,
283 		       extp->MinorVersion);
284 		kfree(extp);
285 		return NULL;
286 	}
287 
288 	/* Do some byteswapping if necessary */
289 	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
290 	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
291 	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
292 
293 	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
294 		unsigned int extra_size = 0;
295 		int nb_parts, i;
296 
297 		/* Protection Register info */
298 		extra_size += (extp->NumProtectionFields - 1) *
299 			      sizeof(struct cfi_intelext_otpinfo);
300 
301 		/* Burst Read info */
302 		extra_size += 2;
303 		if (extp_size < sizeof(*extp) + extra_size)
304 			goto need_more;
305 		extra_size += extp->extra[extra_size-1];
306 
307 		/* Number of hardware-partitions */
308 		extra_size += 1;
309 		if (extp_size < sizeof(*extp) + extra_size)
310 			goto need_more;
311 		nb_parts = extp->extra[extra_size - 1];
312 
313 		/* skip the sizeof(partregion) field in CFI 1.4 */
314 		if (extp->MinorVersion >= '4')
315 			extra_size += 2;
316 
317 		for (i = 0; i < nb_parts; i++) {
318 			struct cfi_intelext_regioninfo *rinfo;
319 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
320 			extra_size += sizeof(*rinfo);
321 			if (extp_size < sizeof(*extp) + extra_size)
322 				goto need_more;
323 			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
324 			extra_size += (rinfo->NumBlockTypes - 1)
325 				      * sizeof(struct cfi_intelext_blockinfo);
326 		}
327 
328 		if (extp->MinorVersion >= '4')
329 			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
330 
331 		if (extp_size < sizeof(*extp) + extra_size) {
332 			need_more:
333 			extp_size = sizeof(*extp) + extra_size;
334 			kfree(extp);
335 			if (extp_size > 4096) {
336 				printk(KERN_ERR
337 					"%s: cfi_pri_intelext is too fat\n",
338 					__func__);
339 				return NULL;
340 			}
341 			goto again;
342 		}
343 	}
344 
345 	return extp;
346 }
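
/*
 * Worked example of the grow-and-reread logic above (numbers are
 * illustrative): the first cfi_read_pri() fetches just sizeof(*extp)
 * bytes.  If the chip then reports, say, NumProtectionFields = 2 plus
 * a 4-byte burst read block and one partition region, extra_size grows
 * past what was read, so we jump to need_more, free the buffer and
 * re-read sizeof(*extp) + extra_size bytes.  This repeats until the
 * whole variable-length tail is in memory, bailing out if it would
 * exceed 4096 bytes.
 */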
347 
348 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
349 {
350 	struct cfi_private *cfi = map->fldrv_priv;
351 	struct mtd_info *mtd;
352 	int i;
353 
354 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
355 	if (!mtd) {
356 		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
357 		return NULL;
358 	}
359 	mtd->priv = map;
360 	mtd->type = MTD_NORFLASH;
361 
362 	/* Fill in the default mtd operations */
363 	mtd->erase   = cfi_intelext_erase_varsize;
364 	mtd->read    = cfi_intelext_read;
365 	mtd->write   = cfi_intelext_write_words;
366 	mtd->sync    = cfi_intelext_sync;
367 	mtd->lock    = cfi_intelext_lock;
368 	mtd->unlock  = cfi_intelext_unlock;
369 	mtd->suspend = cfi_intelext_suspend;
370 	mtd->resume  = cfi_intelext_resume;
371 	mtd->flags   = MTD_CAP_NORFLASH;
372 	mtd->name    = map->name;
373 	mtd->writesize = 1;
374 
375 	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
376 
377 	if (cfi->cfi_mode == CFI_MODE_CFI) {
378 		/*
379 		 * It's a real CFI chip, not one for which the probe
380 		 * routine faked a CFI structure. So we read the feature
381 		 * table from it.
382 		 */
383 		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
384 		struct cfi_pri_intelext *extp;
385 
386 		extp = read_pri_intelext(map, adr);
387 		if (!extp) {
388 			kfree(mtd);
389 			return NULL;
390 		}
391 
392 		/* Install our own private info structure */
393 		cfi->cmdset_priv = extp;
394 
395 		cfi_fixup(mtd, cfi_fixup_table);
396 
397 #ifdef DEBUG_CFI_FEATURES
398 		/* Tell the user about it in lots of lovely detail */
399 		cfi_tell_features(extp);
400 #endif
401 
402 		if (extp->SuspendCmdSupport & 1) {
403 			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
404 		}
405 	}
406 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
407 		/* Apply jedec specific fixups */
408 		cfi_fixup(mtd, jedec_fixup_table);
409 	}
410 	/* Apply generic fixups */
411 	cfi_fixup(mtd, fixup_table);
412 
413 	for (i=0; i< cfi->numchips; i++) {
414 		if (cfi->cfiq->WordWriteTimeoutTyp)
415 			cfi->chips[i].word_write_time =
416 				1<<cfi->cfiq->WordWriteTimeoutTyp;
417 		else
418 			cfi->chips[i].word_write_time = 50000;
419 
420 		if (cfi->cfiq->BufWriteTimeoutTyp)
421 			cfi->chips[i].buffer_write_time =
422 				1<<cfi->cfiq->BufWriteTimeoutTyp;
423 		/* No default; if it isn't specified, we won't use it */
424 
425 		if (cfi->cfiq->BlockEraseTimeoutTyp)
426 			cfi->chips[i].erase_time =
427 				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
428 		else
429 			cfi->chips[i].erase_time = 2000000;
430 
431 		cfi->chips[i].ref_point_counter = 0;
432 		init_waitqueue_head(&(cfi->chips[i].wq));
433 	}
434 
435 	map->fldrv = &cfi_intelext_chipdrv;
436 
437 	return cfi_intelext_setup(mtd);
438 }
439 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
440 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
441 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
442 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
443 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
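
/*
 * Typical entry path, as a hedged sketch: a map driver fills in a
 * struct map_info and lets the generic CFI probe dispatch to this
 * command set when the chip reports primary vendor ID 0x0001, 0x0003
 * or 0x0200.  All names and addresses below ("my_board_*", the 16 MiB
 * window at 0x80000000) are hypothetical; error unwinding is elided.
 */
#if 0	/* illustrative sketch, not compiled */
static struct map_info my_board_map = {
	.name      = "my-board-nor",
	.size      = 0x01000000,	/* 16 MiB window */
	.bankwidth = 2,			/* 16-bit bus */
	.phys      = 0x80000000,	/* hypothetical base address */
};

static int __init my_board_mtd_init(void)
{
	struct mtd_info *mtd;

	my_board_map.virt = ioremap(my_board_map.phys, my_board_map.size);
	if (!my_board_map.virt)
		return -EIO;
	simple_map_init(&my_board_map);
	mtd = do_map_probe("cfi_probe", &my_board_map); /* may end up here */
	if (!mtd)
		return -ENXIO;
	return add_mtd_device(mtd) ? -ENODEV : 0;
}
#endif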
444 
445 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
446 {
447 	struct map_info *map = mtd->priv;
448 	struct cfi_private *cfi = map->fldrv_priv;
449 	unsigned long offset = 0;
450 	int i,j;
451 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
452 
453 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
454 
455 	mtd->size = devsize * cfi->numchips;
456 
457 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
458 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
459 			* mtd->numeraseregions, GFP_KERNEL);
460 	if (!mtd->eraseregions) {
461 		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
462 		goto setup_err;
463 	}
464 
465 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
466 		unsigned long ernum, ersize;
467 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
468 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
469 
470 		if (mtd->erasesize < ersize) {
471 			mtd->erasesize = ersize;
472 		}
473 		for (j=0; j<cfi->numchips; j++) {
474 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
475 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
476 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
477 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
478 		}
479 		offset += (ersize * ernum);
480 	}
481 
482 	if (offset != devsize) {
483 		/* Argh */
484 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
485 		goto setup_err;
486 	}
487 
488 	for (i=0; i<mtd->numeraseregions; i++) {
489 		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
490 		       i,mtd->eraseregions[i].offset,
491 		       mtd->eraseregions[i].erasesize,
492 		       mtd->eraseregions[i].numblocks);
493 	}
494 
495 #ifdef CONFIG_MTD_OTP
496 	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
497 	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
498 	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
499 	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
500 	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
501 	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
502 #endif
503 
504 	/* This function has the potential to distort the reality
505 	   a bit and therefore should be called last. */
506 	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
507 		goto setup_err;
508 
509 	__module_get(THIS_MODULE);
510 	register_reboot_notifier(&mtd->reboot_notifier);
511 	return mtd;
512 
513  setup_err:
514 	if (mtd) {
515 		kfree(mtd->eraseregions);
516 		kfree(mtd);
517 	}
518 	kfree(cfi->cmdset_priv);
519 	return NULL;
520 }
521 
522 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
523 					struct cfi_private **pcfi)
524 {
525 	struct map_info *map = mtd->priv;
526 	struct cfi_private *cfi = *pcfi;
527 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
528 
529 	/*
530 	 * Probing of multi-partition flash chips.
531 	 *
532 	 * To support multiple partitions when available, we simply arrange
533 	 * for each of them to have their own flchip structure even if they
534 	 * are on the same physical chip.  This means completely recreating
535 	 * a new cfi_private structure right here which is a blatant code
536 	 * layering violation, but this is still the least intrusive
537 	 * arrangement at this point. This can be rearranged in the future
538 	 * if someone feels motivated enough.  --nico
539 	 */
540 	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
541 	    && extp->FeatureSupport & (1 << 9)) {
542 		struct cfi_private *newcfi;
543 		struct flchip *chip;
544 		struct flchip_shared *shared;
545 		int offs, numregions, numparts, partshift, numvirtchips, i, j;
546 
547 		/* Protection Register info */
548 		offs = (extp->NumProtectionFields - 1) *
549 		       sizeof(struct cfi_intelext_otpinfo);
550 
551 		/* Burst Read info */
552 		offs += extp->extra[offs+1]+2;
553 
554 		/* Number of partition regions */
555 		numregions = extp->extra[offs];
556 		offs += 1;
557 
558 		/* skip the sizeof(partregion) field in CFI 1.4 */
559 		if (extp->MinorVersion >= '4')
560 			offs += 2;
561 
562 		/* Number of hardware partitions */
563 		numparts = 0;
564 		for (i = 0; i < numregions; i++) {
565 			struct cfi_intelext_regioninfo *rinfo;
566 			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
567 			numparts += rinfo->NumIdentPartitions;
568 			offs += sizeof(*rinfo)
569 				+ (rinfo->NumBlockTypes - 1) *
570 				  sizeof(struct cfi_intelext_blockinfo);
571 		}
572 
573 		/* Programming Region info */
574 		if (extp->MinorVersion >= '4') {
575 			struct cfi_intelext_programming_regioninfo *prinfo;
576 			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
577 			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
578 			mtd->flags &= ~MTD_BIT_WRITEABLE;
579 			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
580 			       map->name, mtd->writesize,
581 			       cfi->interleave * prinfo->ControlValid,
582 			       cfi->interleave * prinfo->ControlInvalid);
583 		}
584 
585 		/*
586 		 * All functions below currently rely on all chips having
587 		 * the same geometry so we'll just assume that all hardware
588 		 * partitions are of the same size too.
589 		 */
590 		partshift = cfi->chipshift - __ffs(numparts);
591 
592 		if ((1 << partshift) < mtd->erasesize) {
593 			printk(KERN_ERR
594 				"%s: bad number of hw partitions (%d)\n",
595 				__func__, numparts);
596 			return -EINVAL;
597 		}
598 
599 		numvirtchips = cfi->numchips * numparts;
600 		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
601 		if (!newcfi)
602 			return -ENOMEM;
603 		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
604 		if (!shared) {
605 			kfree(newcfi);
606 			return -ENOMEM;
607 		}
608 		memcpy(newcfi, cfi, sizeof(struct cfi_private));
609 		newcfi->numchips = numvirtchips;
610 		newcfi->chipshift = partshift;
611 
612 		chip = &newcfi->chips[0];
613 		for (i = 0; i < cfi->numchips; i++) {
614 			shared[i].writing = shared[i].erasing = NULL;
615 			spin_lock_init(&shared[i].lock);
616 			for (j = 0; j < numparts; j++) {
617 				*chip = cfi->chips[i];
618 				chip->start += j << partshift;
619 				chip->priv = &shared[i];
620 				/* those should be reset too since
621 				   they create memory references. */
622 				init_waitqueue_head(&chip->wq);
623 				spin_lock_init(&chip->_spinlock);
624 				chip->mutex = &chip->_spinlock;
625 				chip++;
626 			}
627 		}
628 
629 		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
630 				  "--> %d partitions of %d KiB\n",
631 				  map->name, cfi->numchips, cfi->interleave,
632 				  newcfi->numchips, 1<<(newcfi->chipshift-10));
633 
634 		map->fldrv_priv = newcfi;
635 		*pcfi = newcfi;
636 		kfree(cfi);
637 	}
638 
639 	return 0;
640 }
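
/*
 * Worked example for the partition math above: a chip with
 * cfi->chipshift = 24 (16 MiB) reporting numparts = 4 identical
 * hardware partitions gives partshift = 24 - __ffs(4) = 22, i.e. four
 * virtual chips of 4 MiB each.  With cfi->numchips = 2 physical chips,
 * numvirtchips = 2 * 4 = 8 flchip structures, sharing two
 * flchip_shared arbitration structs (one per physical chip).
 */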
641 
642 /*
643  *  *********** CHIP ACCESS FUNCTIONS ***********
644  */
645 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
646 {
647 	DECLARE_WAITQUEUE(wait, current);
648 	struct cfi_private *cfi = map->fldrv_priv;
649 	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
650 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
651 	unsigned long timeo = jiffies + HZ;
652 
653 	switch (chip->state) {
654 
655 	case FL_STATUS:
656 		for (;;) {
657 			status = map_read(map, adr);
658 			if (map_word_andequal(map, status, status_OK, status_OK))
659 				break;
660 
661 			/* At this point we're fine with write operations
662 			   in other partitions as they don't conflict. */
663 			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
664 				break;
665 
666 			spin_unlock(chip->mutex);
667 			cfi_udelay(1);
668 			spin_lock(chip->mutex);
669 			/* Someone else might have been playing with it. */
670 			return -EAGAIN;
671 		}
672 		/* Fall through */
673 	case FL_READY:
674 	case FL_CFI_QUERY:
675 	case FL_JEDEC_QUERY:
676 		return 0;
677 
678 	case FL_ERASING:
679 		if (!cfip ||
680 		    !(cfip->FeatureSupport & 2) ||
681 		    !(mode == FL_READY || mode == FL_POINT ||
682 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
683 			goto sleep;
684 
685 
686 		/* Erase suspend */
687 		map_write(map, CMD(0xB0), adr);
688 
689 		/* If the flash has finished erasing, then 'erase suspend'
690 		 * appears to make some (28F320) flash devices switch to
691 		 * 'read' mode.  Make sure that we switch to 'read status'
692 		 * mode so we get the right data. --rmk
693 		 */
694 		map_write(map, CMD(0x70), adr);
695 		chip->oldstate = FL_ERASING;
696 		chip->state = FL_ERASE_SUSPENDING;
697 		chip->erase_suspended = 1;
698 		for (;;) {
699 			status = map_read(map, adr);
700 			if (map_word_andequal(map, status, status_OK, status_OK))
701 				break;
702 
703 			if (time_after(jiffies, timeo)) {
704 				/* Urgh. Resume and pretend we weren't here.  */
705 				map_write(map, CMD(0xd0), adr);
706 				/* Make sure we're in 'read status' mode if it had finished */
707 				map_write(map, CMD(0x70), adr);
708 				chip->state = FL_ERASING;
709 				chip->oldstate = FL_READY;
710 				printk(KERN_ERR "%s: Chip not ready after erase "
711 				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
712 				return -EIO;
713 			}
714 
715 			spin_unlock(chip->mutex);
716 			cfi_udelay(1);
717 			spin_lock(chip->mutex);
718 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
719 			   So we can just loop here. */
720 		}
721 		chip->state = FL_STATUS;
722 		return 0;
723 
724 	case FL_XIP_WHILE_ERASING:
725 		if (mode != FL_READY && mode != FL_POINT &&
726 		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
727 			goto sleep;
728 		chip->oldstate = chip->state;
729 		chip->state = FL_READY;
730 		return 0;
731 
732 	case FL_POINT:
733 		/* Only if there's no operation suspended... */
734 		if (mode == FL_READY && chip->oldstate == FL_READY)
735 			return 0;
736 		/* Fall through */
737 	case FL_SHUTDOWN:
738 		/* The machine is rebooting now, so no one can get the chip anymore */
739 		return -EIO;
740 	default:
741 	sleep:
742 		set_current_state(TASK_UNINTERRUPTIBLE);
743 		add_wait_queue(&chip->wq, &wait);
744 		spin_unlock(chip->mutex);
745 		schedule();
746 		remove_wait_queue(&chip->wq, &wait);
747 		spin_lock(chip->mutex);
748 		return -EAGAIN;
749 	}
750 }
751 
752 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
753 {
754 	int ret;
755 
756  retry:
757 	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
758 			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
759 		/*
760 		 * OK. We have possibility for contention on the write/erase
761 		 * operations which are global to the real chip and not per
762 		 * partition.  So let's fight it over in the partition which
763 		 * currently has authority on the operation.
764 		 *
765 		 * The rules are as follows:
766 		 *
767 		 * - any write operation must own shared->writing.
768 		 *
769 		 * - any erase operation must own _both_ shared->writing and
770 		 *   shared->erasing.
771 		 *
772 		 * - contention arbitration is handled in the owner's context.
773 		 *
774 		 * The 'shared' struct can be read and/or written only when
775 		 * its lock is taken.
776 		 */
777 		struct flchip_shared *shared = chip->priv;
778 		struct flchip *contender;
779 		spin_lock(&shared->lock);
780 		contender = shared->writing;
781 		if (contender && contender != chip) {
782 			/*
783 			 * The engine to perform desired operation on this
784 			 * partition is already in use by someone else.
785 			 * Let's fight over it in the context of the chip
786 			 * currently using it.  If it is possible to suspend,
787 			 * that other partition will do just that, otherwise
788 			 * it'll happily send us to sleep.  In any case, when
789 			 * get_chip returns success we're clear to go ahead.
790 			 */
791 			ret = spin_trylock(contender->mutex);
792 			spin_unlock(&shared->lock);
793 			if (!ret)
794 				goto retry;
795 			spin_unlock(chip->mutex);
796 			ret = chip_ready(map, contender, contender->start, mode);
797 			spin_lock(chip->mutex);
798 
799 			if (ret == -EAGAIN) {
800 				spin_unlock(contender->mutex);
801 				goto retry;
802 			}
803 			if (ret) {
804 				spin_unlock(contender->mutex);
805 				return ret;
806 			}
807 			spin_lock(&shared->lock);
808 			spin_unlock(contender->mutex);
809 		}
810 
811 		/* We now own it */
812 		shared->writing = chip;
813 		if (mode == FL_ERASING)
814 			shared->erasing = chip;
815 		spin_unlock(&shared->lock);
816 	}
817 	ret = chip_ready(map, chip, adr, mode);
818 	if (ret == -EAGAIN)
819 		goto retry;
820 
821 	return ret;
822 }
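
/*
 * The canonical calling pattern for get_chip()/put_chip(), as a sketch
 * mirroring do_read_onechip() below ('adr' must already include
 * chip->start):
 */
#if 0	/* illustrative sketch, not compiled */
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}
	/* ... issue commands and/or read the array here ... */
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
#endif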
823 
824 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
825 {
826 	struct cfi_private *cfi = map->fldrv_priv;
827 
828 	if (chip->priv) {
829 		struct flchip_shared *shared = chip->priv;
830 		spin_lock(&shared->lock);
831 		if (shared->writing == chip && chip->oldstate == FL_READY) {
832 			/* We own the ability to write, but we're done */
833 			shared->writing = shared->erasing;
834 			if (shared->writing && shared->writing != chip) {
835 				/* give back ownership to who we loaned it from */
836 				struct flchip *loaner = shared->writing;
837 				spin_lock(loaner->mutex);
838 				spin_unlock(&shared->lock);
839 				spin_unlock(chip->mutex);
840 				put_chip(map, loaner, loaner->start);
841 				spin_lock(chip->mutex);
842 				spin_unlock(loaner->mutex);
843 				wake_up(&chip->wq);
844 				return;
845 			}
846 			shared->erasing = NULL;
847 			shared->writing = NULL;
848 		} else if (shared->erasing == chip && shared->writing != chip) {
849 			/*
850 			 * We own the ability to erase without the ability
851 			 * to write, which means the erase was suspended
852 			 * and some other partition is currently writing.
853 			 * Don't let the switch below mess things up since
854 			 * we don't have ownership to resume anything.
855 			 */
856 			spin_unlock(&shared->lock);
857 			wake_up(&chip->wq);
858 			return;
859 		}
860 		spin_unlock(&shared->lock);
861 	}
862 
863 	switch(chip->oldstate) {
864 	case FL_ERASING:
865 		chip->state = chip->oldstate;
866 		/* What if one interleaved chip has finished and the
867 		   other hasn't? The old code would leave the finished
868 		   one in READY mode. That's bad, and caused -EROFS
869 		   errors to be returned from do_erase_oneblock because
870 		   that's the only bit it checked for at the time.
871 		   As the state machine appears to explicitly allow
872 		   sending the 0x70 (Read Status) command to an erasing
873 		   chip and expecting it to be ignored, that's what we
874 		   do. */
875 		map_write(map, CMD(0xd0), adr);
876 		map_write(map, CMD(0x70), adr);
877 		chip->oldstate = FL_READY;
878 		chip->state = FL_ERASING;
879 		break;
880 
881 	case FL_XIP_WHILE_ERASING:
882 		chip->state = chip->oldstate;
883 		chip->oldstate = FL_READY;
884 		break;
885 
886 	case FL_READY:
887 	case FL_STATUS:
888 	case FL_JEDEC_QUERY:
889 		/* We should really make set_vpp() count, rather than doing this */
890 		DISABLE_VPP(map);
891 		break;
892 	default:
893 		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
894 	}
895 	wake_up(&chip->wq);
896 }
897 
898 #ifdef CONFIG_MTD_XIP
899 
900 /*
901  * No interrupt whatsoever can be serviced while the flash isn't in array
902  * mode.  This is ensured by the xip_disable() and xip_enable() functions
903  * enclosing any code path where the flash is known not to be in array mode.
904  * And within a XIP disabled code path, only functions marked with __xipram
905  * may be called and nothing else (it's a good thing to inspect generated
906  * assembly to make sure inline functions were actually inlined and that gcc
907  * didn't emit calls to its own support functions). Configuring MTD CFI
908  * support for a single buswidth and a single interleave is also recommended.
909  */
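
/*
 * The resulting bracketing discipline, as a sketch (see
 * do_write_oneword() below for the real thing):
 */
#if 0	/* illustrative sketch, not compiled */
	xip_disable(map, chip, adr);	/* IRQs off before leaving array mode */
	map_write(map, CMD(0x40), adr);	/* word program command... */
	map_write(map, datum, adr);	/* ...and the data */
	ret = xip_wait_for_operation(map, chip, adr, chip->word_write_time);
	xip_enable(map, chip, adr);	/* back to array mode, IRQs on */
#endif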
910 
911 static void xip_disable(struct map_info *map, struct flchip *chip,
912 			unsigned long adr)
913 {
914 	/* TODO: chips with no XIP use should ignore and return */
915 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
916 	local_irq_disable();
917 }
918 
919 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
920 				unsigned long adr)
921 {
922 	struct cfi_private *cfi = map->fldrv_priv;
923 	if (chip->state != FL_POINT && chip->state != FL_READY) {
924 		map_write(map, CMD(0xff), adr);
925 		chip->state = FL_READY;
926 	}
927 	(void) map_read(map, adr);
928 	xip_iprefetch();
929 	local_irq_enable();
930 }
931 
932 /*
933  * When a delay is required for the flash operation to complete, the
934  * xip_wait_for_operation() function is polling for both the given timeout
935  * and pending (but still masked) hardware interrupts.  Whenever there is an
936  * interrupt pending then the flash erase or write operation is suspended,
937  * array mode restored and interrupts unmasked.  Task scheduling might also
938  * happen at that point.  The CPU eventually returns from the interrupt or
939  * the call to schedule() and the suspended flash operation is resumed for
940  * the remainder of the delay period.
941  *
942  * Warning: this function _will_ fool interrupt latency tracing tools.
943  */
944 
945 static int __xipram xip_wait_for_operation(
946 		struct map_info *map, struct flchip *chip,
947 		unsigned long adr, unsigned int chip_op_time)
948 {
949 	struct cfi_private *cfi = map->fldrv_priv;
950 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
951 	map_word status, OK = CMD(0x80);
952 	unsigned long usec, suspended, start, done;
953 	flstate_t oldstate, newstate;
954 
955 	start = xip_currtime();
956 	usec = chip_op_time * 8;
957 	if (usec == 0)
958 		usec = 500000;
959 	done = 0;
960 
961 	do {
962 		cpu_relax();
963 		if (xip_irqpending() && cfip &&
964 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
965 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
966 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
967 			/*
968 			 * Let's suspend the erase or write operation when
969 			 * supported.  Note that we currently don't try to
970 			 * suspend interleaved chips if there is already
971 			 * another operation suspended (imagine what happens
972 			 * when one chip was already done with the current
973 			 * operation while another chip suspended it, then
974 			 * we resume the whole thing at once).  Yes, it
975 			 * can happen!
976 			 */
977 			usec -= done;
978 			map_write(map, CMD(0xb0), adr);
979 			map_write(map, CMD(0x70), adr);
980 			suspended = xip_currtime();
981 			do {
982 				if (xip_elapsed_since(suspended) > 100000) {
983 					/*
984 					 * The chip doesn't want to suspend
985 					 * after waiting for 100 msecs.
986 					 * This is a critical error but there
987 					 * is not much we can do here.
988 					 */
989 					return -EIO;
990 				}
991 				status = map_read(map, adr);
992 			} while (!map_word_andequal(map, status, OK, OK));
993 
994 			/* Suspend succeeded */
995 			oldstate = chip->state;
996 			if (oldstate == FL_ERASING) {
997 				if (!map_word_bitsset(map, status, CMD(0x40)))
998 					break;
999 				newstate = FL_XIP_WHILE_ERASING;
1000 				chip->erase_suspended = 1;
1001 			} else {
1002 				if (!map_word_bitsset(map, status, CMD(0x04)))
1003 					break;
1004 				newstate = FL_XIP_WHILE_WRITING;
1005 				chip->write_suspended = 1;
1006 			}
1007 			chip->state = newstate;
1008 			map_write(map, CMD(0xff), adr);
1009 			(void) map_read(map, adr);
1010 			asm volatile (".rep 8; nop; .endr");
1011 			local_irq_enable();
1012 			spin_unlock(chip->mutex);
1013 			asm volatile (".rep 8; nop; .endr");
1014 			cond_resched();
1015 
1016 			/*
1017 			 * We're back.  However someone else might have
1018 			 * decided to go write to the chip if we are in
1019 			 * a suspended erase state.  If so let's wait
1020 			 * until it's done.
1021 			 */
1022 			spin_lock(chip->mutex);
1023 			while (chip->state != newstate) {
1024 				DECLARE_WAITQUEUE(wait, current);
1025 				set_current_state(TASK_UNINTERRUPTIBLE);
1026 				add_wait_queue(&chip->wq, &wait);
1027 				spin_unlock(chip->mutex);
1028 				schedule();
1029 				remove_wait_queue(&chip->wq, &wait);
1030 				spin_lock(chip->mutex);
1031 			}
1032 			/* Disallow XIP again */
1033 			local_irq_disable();
1034 
1035 			/* Resume the write or erase operation */
1036 			map_write(map, CMD(0xd0), adr);
1037 			map_write(map, CMD(0x70), adr);
1038 			chip->state = oldstate;
1039 			start = xip_currtime();
1040 		} else if (usec >= 1000000/HZ) {
1041 			/*
1042 			 * Try to save on CPU power when waiting delay
1043 			 * is at least a system timer tick period.
1044 			 * No need to be extremely accurate here.
1045 			 */
1046 			xip_cpu_idle();
1047 		}
1048 		status = map_read(map, adr);
1049 		done = xip_elapsed_since(start);
1050 	} while (!map_word_andequal(map, status, OK, OK)
1051 		 && done < usec);
1052 
1053 	return (done >= usec) ? -ETIME : 0;
1054 }
1055 
1056 /*
1057  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1058  * the flash is actively programming or erasing since we have to poll for
1059  * the operation to complete anyway.  We can't do that in a generic way with
1060  * a XIP setup so do it before the actual flash operation in this case
1061  * and stub it out from INVAL_CACHE_AND_WAIT.
1062  */
1063 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1064 	INVALIDATE_CACHED_RANGE(map, from, size)
1065 
1066 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
1067 	xip_wait_for_operation(map, chip, cmd_adr, usec)
1068 
1069 #else
1070 
1071 #define xip_disable(map, chip, adr)
1072 #define xip_enable(map, chip, adr)
1073 #define XIP_INVAL_CACHED_RANGE(x...)
1074 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1075 
1076 static int inval_cache_and_wait_for_operation(
1077 		struct map_info *map, struct flchip *chip,
1078 		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1079 		unsigned int chip_op_time)
1080 {
1081 	struct cfi_private *cfi = map->fldrv_priv;
1082 	map_word status, status_OK = CMD(0x80);
1083 	int chip_state = chip->state;
1084 	unsigned int timeo, sleep_time;
1085 
1086 	spin_unlock(chip->mutex);
1087 	if (inval_len)
1088 		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1089 	spin_lock(chip->mutex);
1090 
1091 	/* set our timeout to 8 times the expected delay */
1092 	timeo = chip_op_time * 8;
1093 	if (!timeo)
1094 		timeo = 500000;
1095 	sleep_time = chip_op_time / 2;
1096 
1097 	for (;;) {
1098 		status = map_read(map, cmd_adr);
1099 		if (map_word_andequal(map, status, status_OK, status_OK))
1100 			break;
1101 
1102 		if (!timeo) {
1103 			map_write(map, CMD(0x70), cmd_adr);
1104 			chip->state = FL_STATUS;
1105 			return -ETIME;
1106 		}
1107 
1108 		/* OK, still waiting. Drop the lock, wait a while and retry. */
1109 		spin_unlock(chip->mutex);
1110 		if (sleep_time >= 1000000/HZ) {
1111 			/*
1112 			 * Half of the normal delay still remaining
1113 			 * can be performed with a sleeping delay instead
1114 			 * of busy waiting.
1115 			 */
1116 			msleep(sleep_time/1000);
1117 			timeo -= sleep_time;
1118 			sleep_time = 1000000/HZ;
1119 		} else {
1120 			udelay(1);
1121 			cond_resched();
1122 			timeo--;
1123 		}
1124 		spin_lock(chip->mutex);
1125 
1126 		while (chip->state != chip_state) {
1127 			/* Someone's suspended the operation: sleep */
1128 			DECLARE_WAITQUEUE(wait, current);
1129 			set_current_state(TASK_UNINTERRUPTIBLE);
1130 			add_wait_queue(&chip->wq, &wait);
1131 			spin_unlock(chip->mutex);
1132 			schedule();
1133 			remove_wait_queue(&chip->wq, &wait);
1134 			spin_lock(chip->mutex);
1135 		}
1136 	}
1137 
1138 	/* Done and happy. */
1139 	chip->state = FL_STATUS;
1140 	return 0;
1141 }
1142 
1143 #endif
1144 
1145 #define WAIT_TIMEOUT(map, chip, adr, udelay) \
1146 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay)
1147 
1148 
1149 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1150 {
1151 	unsigned long cmd_addr;
1152 	struct cfi_private *cfi = map->fldrv_priv;
1153 	int ret = 0;
1154 
1155 	adr += chip->start;
1156 
1157 	/* Ensure cmd read/writes are aligned. */
1158 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1159 
1160 	spin_lock(chip->mutex);
1161 
1162 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1163 
1164 	if (!ret) {
1165 		if (chip->state != FL_POINT && chip->state != FL_READY)
1166 			map_write(map, CMD(0xff), cmd_addr);
1167 
1168 		chip->state = FL_POINT;
1169 		chip->ref_point_counter++;
1170 	}
1171 	spin_unlock(chip->mutex);
1172 
1173 	return ret;
1174 }
1175 
1176 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1177 {
1178 	struct map_info *map = mtd->priv;
1179 	struct cfi_private *cfi = map->fldrv_priv;
1180 	unsigned long ofs, last_end = 0;
1181 	int chipnum;
1182 	int ret = 0;
1183 
1184 	if (!map->virt || (from + len > mtd->size))
1185 		return -EINVAL;
1186 
1187 	/* Now lock the chip(s) to POINT state */
1188 
1189 	/* ofs: offset within the first chip that the first read should start */
1190 	chipnum = (from >> cfi->chipshift);
1191 	ofs = from - (chipnum << cfi->chipshift);
1192 
1193 	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
1194 	*retlen = 0;
1195 
1196 	while (len) {
1197 		unsigned long thislen;
1198 
1199 		if (chipnum >= cfi->numchips)
1200 			break;
1201 
1202 		/* We cannot point across chips that are virtually disjoint */
1203 		if (!last_end)
1204 			last_end = cfi->chips[chipnum].start;
1205 		else if (cfi->chips[chipnum].start != last_end)
1206 			break;
1207 
1208 		if ((len + ofs -1) >> cfi->chipshift)
1209 			thislen = (1<<cfi->chipshift) - ofs;
1210 		else
1211 			thislen = len;
1212 
1213 		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1214 		if (ret)
1215 			break;
1216 
1217 		*retlen += thislen;
1218 		len -= thislen;
1219 
1220 		ofs = 0;
1221 		last_end += 1 << cfi->chipshift;
1222 		chipnum++;
1223 	}
1224 	return 0;
1225 }
1226 
1227 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1228 {
1229 	struct map_info *map = mtd->priv;
1230 	struct cfi_private *cfi = map->fldrv_priv;
1231 	unsigned long ofs;
1232 	int chipnum;
1233 
1234 	/* Now unlock the chip(s) POINT state */
1235 
1236 	/* ofs: offset within the first chip that the first read should start */
1237 	chipnum = (from >> cfi->chipshift);
1238 	ofs = from - (chipnum <<  cfi->chipshift);
1239 
1240 	while (len) {
1241 		unsigned long thislen;
1242 		struct flchip *chip;
1243 
1244 		if (chipnum >= cfi->numchips)
1245 			break;
1246 		chip = &cfi->chips[chipnum];
1247 
1248 		if ((len + ofs -1) >> cfi->chipshift)
1249 			thislen = (1<<cfi->chipshift) - ofs;
1250 		else
1251 			thislen = len;
1252 
1253 		spin_lock(chip->mutex);
1254 		if (chip->state == FL_POINT) {
1255 			chip->ref_point_counter--;
1256 			if(chip->ref_point_counter == 0)
1257 				chip->state = FL_READY;
1258 		} else
1259 			printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1260 
1261 		put_chip(map, chip, chip->start);
1262 		spin_unlock(chip->mutex);
1263 
1264 		len -= thislen;
1265 		ofs = 0;
1266 		chipnum++;
1267 	}
1268 }
1269 
1270 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1271 {
1272 	unsigned long cmd_addr;
1273 	struct cfi_private *cfi = map->fldrv_priv;
1274 	int ret;
1275 
1276 	adr += chip->start;
1277 
1278 	/* Ensure cmd read/writes are aligned. */
1279 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1280 
1281 	spin_lock(chip->mutex);
1282 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1283 	if (ret) {
1284 		spin_unlock(chip->mutex);
1285 		return ret;
1286 	}
1287 
1288 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1289 		map_write(map, CMD(0xff), cmd_addr);
1290 
1291 		chip->state = FL_READY;
1292 	}
1293 
1294 	map_copy_from(map, buf, adr, len);
1295 
1296 	put_chip(map, chip, cmd_addr);
1297 
1298 	spin_unlock(chip->mutex);
1299 	return 0;
1300 }
1301 
1302 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1303 {
1304 	struct map_info *map = mtd->priv;
1305 	struct cfi_private *cfi = map->fldrv_priv;
1306 	unsigned long ofs;
1307 	int chipnum;
1308 	int ret = 0;
1309 
1310 	/* ofs: offset within the first chip that the first read should start */
1311 	chipnum = (from >> cfi->chipshift);
1312 	ofs = from - (chipnum <<  cfi->chipshift);
1313 
1314 	*retlen = 0;
1315 
1316 	while (len) {
1317 		unsigned long thislen;
1318 
1319 		if (chipnum >= cfi->numchips)
1320 			break;
1321 
1322 		if ((len + ofs -1) >> cfi->chipshift)
1323 			thislen = (1<<cfi->chipshift) - ofs;
1324 		else
1325 			thislen = len;
1326 
1327 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1328 		if (ret)
1329 			break;
1330 
1331 		*retlen += thislen;
1332 		len -= thislen;
1333 		buf += thislen;
1334 
1335 		ofs = 0;
1336 		chipnum++;
1337 	}
1338 	return ret;
1339 }
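
/*
 * Worked example of the chipnum/ofs split used above: with
 * cfi->chipshift = 23 (8 MiB per chip), a read at from = 0xA00000
 * gives chipnum = 0xA00000 >> 23 = 1 and ofs = 0xA00000 - 0x800000 =
 * 0x200000.  A request crossing the end of that chip is clamped to
 * thislen = (1 << 23) - ofs and the loop continues on the next chip
 * with ofs = 0.
 */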
1340 
1341 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1342 				     unsigned long adr, map_word datum, int mode)
1343 {
1344 	struct cfi_private *cfi = map->fldrv_priv;
1345 	map_word status, write_cmd;
1346 	int ret=0;
1347 
1348 	adr += chip->start;
1349 
1350 	switch (mode) {
1351 	case FL_WRITING:
1352 		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1353 		break;
1354 	case FL_OTP_WRITE:
1355 		write_cmd = CMD(0xc0);
1356 		break;
1357 	default:
1358 		return -EINVAL;
1359 	}
1360 
1361 	spin_lock(chip->mutex);
1362 	ret = get_chip(map, chip, adr, mode);
1363 	if (ret) {
1364 		spin_unlock(chip->mutex);
1365 		return ret;
1366 	}
1367 
1368 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1369 	ENABLE_VPP(map);
1370 	xip_disable(map, chip, adr);
1371 	map_write(map, write_cmd, adr);
1372 	map_write(map, datum, adr);
1373 	chip->state = mode;
1374 
1375 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1376 				   adr, map_bankwidth(map),
1377 				   chip->word_write_time);
1378 	if (ret) {
1379 		xip_enable(map, chip, adr);
1380 		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1381 		goto out;
1382 	}
1383 
1384 	/* check for errors */
1385 	status = map_read(map, adr);
1386 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1387 		unsigned long chipstatus = MERGESTATUS(status);
1388 
1389 		/* reset status */
1390 		map_write(map, CMD(0x50), adr);
1391 		map_write(map, CMD(0x70), adr);
1392 		xip_enable(map, chip, adr);
1393 
1394 		if (chipstatus & 0x02) {
1395 			ret = -EROFS;
1396 		} else if (chipstatus & 0x08) {
1397 			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1398 			ret = -EIO;
1399 		} else {
1400 			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1401 			ret = -EINVAL;
1402 		}
1403 
1404 		goto out;
1405 	}
1406 
1407 	xip_enable(map, chip, adr);
1408  out:	put_chip(map, chip, adr);
1409 	spin_unlock(chip->mutex);
1410 	return ret;
1411 }
1412 
1413 
1414 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1415 {
1416 	struct map_info *map = mtd->priv;
1417 	struct cfi_private *cfi = map->fldrv_priv;
1418 	int ret = 0;
1419 	int chipnum;
1420 	unsigned long ofs;
1421 
1422 	*retlen = 0;
1423 	if (!len)
1424 		return 0;
1425 
1426 	chipnum = to >> cfi->chipshift;
1427 	ofs = to  - (chipnum << cfi->chipshift);
1428 
1429 	/* If it's not bus-aligned, do the first byte write */
1430 	if (ofs & (map_bankwidth(map)-1)) {
1431 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1432 		int gap = ofs - bus_ofs;
1433 		int n;
1434 		map_word datum;
1435 
1436 		n = min_t(int, len, map_bankwidth(map)-gap);
1437 		datum = map_word_ff(map);
1438 		datum = map_word_load_partial(map, datum, buf, gap, n);
1439 
1440 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1441 					       bus_ofs, datum, FL_WRITING);
1442 		if (ret)
1443 			return ret;
1444 
1445 		len -= n;
1446 		ofs += n;
1447 		buf += n;
1448 		(*retlen) += n;
1449 
1450 		if (ofs >> cfi->chipshift) {
1451 			chipnum ++;
1452 			ofs = 0;
1453 			if (chipnum == cfi->numchips)
1454 				return 0;
1455 		}
1456 	}
1457 
1458 	while(len >= map_bankwidth(map)) {
1459 		map_word datum = map_word_load(map, buf);
1460 
1461 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1462 				       ofs, datum, FL_WRITING);
1463 		if (ret)
1464 			return ret;
1465 
1466 		ofs += map_bankwidth(map);
1467 		buf += map_bankwidth(map);
1468 		(*retlen) += map_bankwidth(map);
1469 		len -= map_bankwidth(map);
1470 
1471 		if (ofs >> cfi->chipshift) {
1472 			chipnum ++;
1473 			ofs = 0;
1474 			if (chipnum == cfi->numchips)
1475 				return 0;
1476 		}
1477 	}
1478 
1479 	if (len & (map_bankwidth(map)-1)) {
1480 		map_word datum;
1481 
1482 		datum = map_word_ff(map);
1483 		datum = map_word_load_partial(map, datum, buf, 0, len);
1484 
1485 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1486 				       ofs, datum, FL_WRITING);
1487 		if (ret)
1488 			return ret;
1489 
1490 		(*retlen) += len;
1491 	}
1492 
1493 	return 0;
1494 }
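
/*
 * Worked example for the alignment handling above: on a bankwidth-4
 * map, writing len = 2 bytes at to = 0x1001 yields bus_ofs = 0x1000,
 * gap = 1 and n = min(2, 4 - 1) = 2, so the map_word starts as all
 * 0xFF and only bytes 1-2 are loaded from the buffer.  Programming a
 * NOR cell can only clear bits, so the 0xFF padding leaves the
 * neighbouring bytes untouched.
 */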
1495 
1496 
1497 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1498 				    unsigned long adr, const struct kvec **pvec,
1499 				    unsigned long *pvec_seek, int len)
1500 {
1501 	struct cfi_private *cfi = map->fldrv_priv;
1502 	map_word status, write_cmd, datum;
1503 	unsigned long cmd_adr;
1504 	int ret, wbufsize, word_gap, words;
1505 	const struct kvec *vec;
1506 	unsigned long vec_seek;
1507 
1508 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1509 	adr += chip->start;
1510 	cmd_adr = adr & ~(wbufsize-1);
1511 
1512 	/* Let's determine this according to the interleave only once */
1513 	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1514 
1515 	spin_lock(chip->mutex);
1516 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1517 	if (ret) {
1518 		spin_unlock(chip->mutex);
1519 		return ret;
1520 	}
1521 
1522 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1523 	ENABLE_VPP(map);
1524 	xip_disable(map, chip, cmd_adr);
1525 
1526 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1527 	   [...], the device will not accept any more Write to Buffer commands".
1528 	   So we must check here and reset those bits if they're set. Otherwise
1529 	   we're just pissing in the wind */
1530 	if (chip->state != FL_STATUS) {
1531 		map_write(map, CMD(0x70), cmd_adr);
1532 		chip->state = FL_STATUS;
1533 	}
1534 	status = map_read(map, cmd_adr);
1535 	if (map_word_bitsset(map, status, CMD(0x30))) {
1536 		xip_enable(map, chip, cmd_adr);
1537 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1538 		xip_disable(map, chip, cmd_adr);
1539 		map_write(map, CMD(0x50), cmd_adr);
1540 		map_write(map, CMD(0x70), cmd_adr);
1541 	}
1542 
1543 	chip->state = FL_WRITING_TO_BUFFER;
1544 	map_write(map, write_cmd, cmd_adr);
1545 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1546 	if (ret) {
1547 		/* Argh. Not ready for write to buffer */
1548 		map_word Xstatus = map_read(map, cmd_adr);
1549 		map_write(map, CMD(0x70), cmd_adr);
1550 		chip->state = FL_STATUS;
1551 		status = map_read(map, cmd_adr);
1552 		map_write(map, CMD(0x50), cmd_adr);
1553 		map_write(map, CMD(0x70), cmd_adr);
1554 		xip_enable(map, chip, cmd_adr);
1555 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1556 				map->name, Xstatus.x[0], status.x[0]);
1557 		goto out;
1558 	}
1559 
1560 	/* Figure out the number of words to write */
1561 	word_gap = (-adr & (map_bankwidth(map)-1));
1562 	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1563 	if (!word_gap) {
1564 		words--;
1565 	} else {
1566 		word_gap = map_bankwidth(map) - word_gap;
1567 		adr -= word_gap;
1568 		datum = map_word_ff(map);
1569 	}
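	/*
	 * Worked example (illustrative): on a bankwidth-4 map with adr
	 * ending in ...01, -adr & 3 = 3, so word_gap becomes 4 - 3 = 1,
	 * adr is pulled back one byte to the word boundary and datum is
	 * preloaded with 0xFF padding; the loop below then fills bytes
	 * 1-3 of the first map_word from the first kvec.
	 */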
1570 
1571 	/* Write length of data to come */
1572 	map_write(map, CMD(words), cmd_adr);
1573 
1574 	/* Write data */
1575 	vec = *pvec;
1576 	vec_seek = *pvec_seek;
1577 	do {
1578 		int n = map_bankwidth(map) - word_gap;
1579 		if (n > vec->iov_len - vec_seek)
1580 			n = vec->iov_len - vec_seek;
1581 		if (n > len)
1582 			n = len;
1583 
1584 		if (!word_gap && len < map_bankwidth(map))
1585 			datum = map_word_ff(map);
1586 
1587 		datum = map_word_load_partial(map, datum,
1588 					      vec->iov_base + vec_seek,
1589 					      word_gap, n);
1590 
1591 		len -= n;
1592 		word_gap += n;
1593 		if (!len || word_gap == map_bankwidth(map)) {
1594 			map_write(map, datum, adr);
1595 			adr += map_bankwidth(map);
1596 			word_gap = 0;
1597 		}
1598 
1599 		vec_seek += n;
1600 		if (vec_seek == vec->iov_len) {
1601 			vec++;
1602 			vec_seek = 0;
1603 		}
1604 	} while (len);
1605 	*pvec = vec;
1606 	*pvec_seek = vec_seek;
1607 
1608 	/* GO GO GO */
1609 	map_write(map, CMD(0xd0), cmd_adr);
1610 	chip->state = FL_WRITING;
1611 
1612 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1613 				   adr, len,
1614 				   chip->buffer_write_time);
1615 	if (ret) {
1616 		map_write(map, CMD(0x70), cmd_adr);
1617 		chip->state = FL_STATUS;
1618 		xip_enable(map, chip, cmd_adr);
1619 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1620 		goto out;
1621 	}
1622 
1623 	/* check for errors */
1624 	status = map_read(map, cmd_adr);
1625 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1626 		unsigned long chipstatus = MERGESTATUS(status);
1627 
1628 		/* reset status */
1629 		map_write(map, CMD(0x50), cmd_adr);
1630 		map_write(map, CMD(0x70), cmd_adr);
1631 		xip_enable(map, chip, cmd_adr);
1632 
1633 		if (chipstatus & 0x02) {
1634 			ret = -EROFS;
1635 		} else if (chipstatus & 0x08) {
1636 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1637 			ret = -EIO;
1638 		} else {
1639 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1640 			ret = -EINVAL;
1641 		}
1642 
1643 		goto out;
1644 	}
1645 
1646 	xip_enable(map, chip, cmd_adr);
1647  out:	put_chip(map, chip, cmd_adr);
1648 	spin_unlock(chip->mutex);
1649 	return ret;
1650 }
1651 
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

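/*
 * Single-buffer front end: wrap the caller's buffer in a one-element
 * kvec and reuse the writev path above.
 */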
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase: block erase setup (0x20) followed by confirm (0xD0) */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

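	/* Wait for the erase to complete; INVAL_CACHE_AND_WAIT also drops
	   any cached copy of the range being erased along the way. */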
	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors: SR.1 = locked, SR.3 = low VPP, SR.4 = program
	   error, SR.5 = erase error (mask 0x3a); SR.4 and SR.5 together
	   mean a bad command sequence */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}


		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

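	/* In identifier/query mode (0x90) the word at offset 2 within a
	   block holds the block lock configuration; bit 0 set means the
	   block is locked. */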
	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

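/*
 * Sentinel thunk values passed through cfi_varsize_frob() telling
 * do_xxlock_oneblock() whether to lock or unlock each block.
 */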
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);	/* block lock setup */
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);	/* set block lock bit */
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);	/* clear block lock bit */
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported, there is no
	 * need to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block lock/unlock error (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

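	/* Program one bus-width word at a time: align down to the bus
	   width and pad the untouched byte lanes with 0xFF so that only
	   the requested bytes are actually programmed. */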
	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

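	/* Lock bits program to 0: clear only this group's bit in an
	   otherwise all-ones word and program the protection register. */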
	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

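/*
 * Walk the OTP protection regions of every physical chip.  With a NULL
 * action, buf is filled with otp_info records describing each group;
 * otherwise the given action (read/write/lock) is applied to the byte
 * range starting at 'from'.
 */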
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers
	   (FeatureSupport bit 6 = protection bits) */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here, not interleave-generated virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
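		/* Descriptors for any further OTP regions follow the fixed
		   fields of the extended query table in extra[]. */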

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

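/*
 * Chips flagged MTD_STUPID_LOCK power up with all blocks locked.  For
 * those, the lock state of every block is snapshotted into the
 * per-region lockmap on suspend and reasserted by
 * cfi_intelext_restore_locks() on resume.
 */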
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
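			/* fall through */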
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);	/* kfree(NULL) is a no-op */
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");