/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
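/* fwh_lock.h provides fixup_use_fwh_lock(), referenced by the JEDEC fixup
   table below, and relies on the get_chip()/put_chip() declarations above. */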



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
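	/* The low 16 bits of EraseRegionInfo hold (number of erase
	   blocks - 1), so this forces region 1 to 0x3e + 1 = 63 blocks. */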
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common.  It is likely that the device IDs are as well.
	 * This table picks all cases where we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

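	/* The extended query table has a variable-sized tail (OTP fields,
	   burst read info, partition info).  Read the fixed part first,
	   work out how much extra is needed, then re-read the whole table. */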
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

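	/* Per the CFI spec, the typical program/erase times are stored as
	   log2 values: microseconds for word and buffer writes,
	   milliseconds for block erase (hence the 1 << shifts below). */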
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
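		/* EraseRegionInfo packs the region geometry: bits 31..16
		   give the erase block size in units of 256 bytes and
		   bits 15..0 give (number of blocks - 1), which is what
		   the shifting and masking below unpack. */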
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort reality a bit
	   and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
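		/* numparts is assumed to be a power of two here:
		   __ffs(numparts) is then log2(numparts) and each hardware
		   partition spans 1 << partshift bytes. */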
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. There is a possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform the desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

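	/* Fall through: the chip is now ready for the requested mode */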
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
			        break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to the chip we borrowed it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both expiry of the given timeout and
 * pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no need to
 * worry about the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always happen
 * within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

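	/* point() hands back a pointer straight into the memory-mapped
	   flash, which is why fixup_use_point() only installs these
	   methods for linear maps. */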
	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip at which the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) from POINT state */

	/* ofs: offset within the first chip at which the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip at which the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
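	/* Adaptive timing: if the write completed before the first status
	   check, shorten the assumed typical write time; if we had to poll
	   more than once, lengthen it. */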
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
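	/* 0x1a = SR.4 (program failure) | SR.3 (VPP low) | SR.1 (block locked) */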
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, write_cmd, cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       map->name, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Figure out the number of words to write */
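	/* The buffer program command is given (bus word count - 1).  An
	   unaligned start address is padded back to a bus-word boundary
	   with 0xff, which leaves the existing flash contents untouched
	   since programming can only clear bits. */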
1524 	word_gap = (-adr & (map_bankwidth(map)-1));
1525 	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1526 	if (!word_gap) {
1527 		words--;
1528 	} else {
1529 		word_gap = map_bankwidth(map) - word_gap;
1530 		adr -= word_gap;
1531 		datum = map_word_ff(map);
1532 	}
1533 
1534 	/* Write length of data to come */
1535 	map_write(map, CMD(words), cmd_adr );
1536 
1537 	/* Write data */
1538 	vec = *pvec;
1539 	vec_seek = *pvec_seek;
1540 	do {
1541 		int n = map_bankwidth(map) - word_gap;
1542 		if (n > vec->iov_len - vec_seek)
1543 			n = vec->iov_len - vec_seek;
1544 		if (n > len)
1545 			n = len;
1546 
1547 		if (!word_gap && len < map_bankwidth(map))
1548 			datum = map_word_ff(map);
1549 
1550 		datum = map_word_load_partial(map, datum,
1551 					      vec->iov_base + vec_seek,
1552 					      word_gap, n);
1553 
1554 		len -= n;
1555 		word_gap += n;
1556 		if (!len || word_gap == map_bankwidth(map)) {
1557 			map_write(map, datum, adr);
1558 			adr += map_bankwidth(map);
1559 			word_gap = 0;
1560 		}
1561 
1562 		vec_seek += n;
1563 		if (vec_seek == vec->iov_len) {
1564 			vec++;
1565 			vec_seek = 0;
1566 		}
1567 	} while (len);
1568 	*pvec = vec;
1569 	*pvec_seek = vec_seek;
1570 
1571 	/* GO GO GO */
1572 	map_write(map, CMD(0xd0), cmd_adr);
1573 	chip->state = FL_WRITING;
1574 
1575 	INVALIDATE_CACHE_UDELAY(map, chip,
1576 				cmd_adr, len,
1577 				chip->buffer_write_time);
1578 
1579 	timeo = jiffies + (HZ/2);
1580 	z = 0;
1581 	for (;;) {
1582 		if (chip->state != FL_WRITING) {
1583 			/* Someone's suspended the write. Sleep */
1584 			DECLARE_WAITQUEUE(wait, current);
1585 			set_current_state(TASK_UNINTERRUPTIBLE);
1586 			add_wait_queue(&chip->wq, &wait);
1587 			spin_unlock(chip->mutex);
1588 			schedule();
1589 			remove_wait_queue(&chip->wq, &wait);
1590 			timeo = jiffies + (HZ / 2); /* FIXME */
1591 			spin_lock(chip->mutex);
1592 			continue;
1593 		}
1594 
1595 		status = map_read(map, cmd_adr);
1596 		if (map_word_andequal(map, status, status_OK, status_OK))
1597 			break;
1598 
1599 		/* OK Still waiting */
1600 		if (time_after(jiffies, timeo)) {
1601 			map_write(map, CMD(0x70), cmd_adr);
1602 			chip->state = FL_STATUS;
1603 			xip_enable(map, chip, cmd_adr);
1604 			printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1605 			ret = -EIO;
1606 			goto out;
1607 		}
1608 
1609 		/* Latency issues. Drop the lock, wait a while and retry */
1610 		z++;
1611 		UDELAY(map, chip, cmd_adr, 1);
1612 	}
1613 	if (!z) {
1614 		chip->buffer_write_time--;
1615 		if (!chip->buffer_write_time)
1616 			chip->buffer_write_time = 1;
1617 	}
1618 	if (z > 1)
1619 		chip->buffer_write_time++;
1620 
1621 	/* Done and happy. */
1622  	chip->state = FL_STATUS;
1623 
1624 	/* check for errors */
1625 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1626 		unsigned long chipstatus = MERGESTATUS(status);
1627 
1628 		/* reset status */
1629 		map_write(map, CMD(0x50), cmd_adr);
1630 		map_write(map, CMD(0x70), cmd_adr);
1631 		xip_enable(map, chip, cmd_adr);
1632 
1633 		if (chipstatus & 0x02) {
1634 			ret = -EROFS;
1635 		} else if (chipstatus & 0x08) {
1636 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1637 			ret = -EIO;
1638 		} else {
1639 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1640 			ret = -EINVAL;
1641 		}
1642 
1643 		goto out;
1644 	}
1645 
1646 	xip_enable(map, chip, cmd_adr);
1647  out:	put_chip(map, chip, cmd_adr);
1648 	spin_unlock(chip->mutex);
1649 	return ret;
1650 }
1651 
1652 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1653 				unsigned long count, loff_t to, size_t *retlen)
1654 {
1655 	struct map_info *map = mtd->priv;
1656 	struct cfi_private *cfi = map->fldrv_priv;
1657 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1658 	int ret = 0;
1659 	int chipnum;
1660 	unsigned long ofs, vec_seek, i;
1661 	size_t len = 0;
1662 
1663 	for (i = 0; i < count; i++)
1664 		len += vecs[i].iov_len;
1665 
1666 	*retlen = 0;
1667 	if (!len)
1668 		return 0;
1669 
1670 	chipnum = to >> cfi->chipshift;
1671 	ofs = to - (chipnum << cfi->chipshift);
1672 	vec_seek = 0;
1673 
1674 	do {
1675 		/* We must not cross write block boundaries */
1676 		int size = wbufsize - (ofs & (wbufsize-1));
1677 
1678 		if (size > len)
1679 			size = len;
1680 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1681 				      ofs, &vecs, &vec_seek, size);
1682 		if (ret)
1683 			return ret;
1684 
1685 		ofs += size;
1686 		(*retlen) += size;
1687 		len -= size;
1688 
1689 		if (ofs >> cfi->chipshift) {
1690 			chipnum ++;
1691 			ofs = 0;
1692 			if (chipnum == cfi->numchips)
1693 				return 0;
1694 		}
1695 	} while (len);
1696 
1697 	return 0;
1698 }
1699 
1700 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1701 				       size_t len, size_t *retlen, const u_char *buf)
1702 {
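	/* a plain buffer write is just a one-element writev */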
1703 	struct kvec vec;
1704 
1705 	vec.iov_base = (void *) buf;
1706 	vec.iov_len = len;
1707 
1708 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1709 }
1710 
1711 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1712 				      unsigned long adr, int len, void *thunk)
1713 {
1714 	struct cfi_private *cfi = map->fldrv_priv;
1715 	map_word status, status_OK;
1716 	unsigned long timeo;
1717 	int retries = 3;
1718 	DECLARE_WAITQUEUE(wait, current);
1719 	int ret = 0;
1720 
1721 	adr += chip->start;
1722 
1723 	/* Let's determine this according to the interleave only once */
1724 	status_OK = CMD(0x80);
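	/* CMD() replicates SR.7 (WSM ready) across every interleaved
	   device, so the andequal test below waits for all of them */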
1725 
1726  retry:
1727 	spin_lock(chip->mutex);
1728 	ret = get_chip(map, chip, adr, FL_ERASING);
1729 	if (ret) {
1730 		spin_unlock(chip->mutex);
1731 		return ret;
1732 	}
1733 
1734 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1735 	ENABLE_VPP(map);
1736 	xip_disable(map, chip, adr);
1737 
1738 	/* Clear the status register first */
1739 	map_write(map, CMD(0x50), adr);
1740 
1741 	/* Now erase */
1742 	map_write(map, CMD(0x20), adr);
1743 	map_write(map, CMD(0xD0), adr);
1744 	chip->state = FL_ERASING;
1745 	chip->erase_suspended = 0;
1746 
1747 	INVALIDATE_CACHE_UDELAY(map, chip,
1748 				adr, len,
1749 				chip->erase_time*1000/2);
1750 
1751 	/* FIXME. Use a timer to check this, and return immediately. */
1752 	/* Once the state machine's known to be working I'll do that */
1753 
1754 	timeo = jiffies + (HZ*20);
1755 	for (;;) {
1756 		if (chip->state != FL_ERASING) {
1757 			/* Someone's suspended the erase. Sleep */
1758 			set_current_state(TASK_UNINTERRUPTIBLE);
1759 			add_wait_queue(&chip->wq, &wait);
1760 			spin_unlock(chip->mutex);
1761 			schedule();
1762 			remove_wait_queue(&chip->wq, &wait);
1763 			spin_lock(chip->mutex);
1764 			continue;
1765 		}
1766 		if (chip->erase_suspended) {
1767 			/* This erase was suspended and resumed.
1768 			   Adjust the timeout */
1769 			timeo = jiffies + (HZ*20); /* FIXME */
1770 			chip->erase_suspended = 0;
1771 		}
1772 
1773 		status = map_read(map, adr);
1774 		if (map_word_andequal(map, status, status_OK, status_OK))
1775 			break;
1776 
1777 		/* OK Still waiting */
1778 		if (time_after(jiffies, timeo)) {
1779 			map_write(map, CMD(0x70), adr);
1780 			chip->state = FL_STATUS;
1781 			xip_enable(map, chip, adr);
1782 			printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1783 			ret = -EIO;
1784 			goto out;
1785 		}
1786 
1787 		/* Latency issues. Drop the lock, wait a while and retry */
1788 		UDELAY(map, chip, adr, 1000000/HZ);
1789 	}
1790 
1791 	/* We've broken this before. It doesn't hurt to be safe */
1792 	map_write(map, CMD(0x70), adr);
1793 	chip->state = FL_STATUS;
1794 	status = map_read(map, adr);
1795 
1796 	/* check for errors */
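	/* SR mask 0x3a adds bit 5 (erase failure) to the write-path
	   mask; bits 4 and 5 set together indicate a bad command
	   sequence, as decoded below */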
1797 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1798 		unsigned long chipstatus = MERGESTATUS(status);
1799 
1800 		/* Reset the error bits */
1801 		map_write(map, CMD(0x50), adr);
1802 		map_write(map, CMD(0x70), adr);
1803 		xip_enable(map, chip, adr);
1804 
1805 		if ((chipstatus & 0x30) == 0x30) {
1806 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1807 			ret = -EINVAL;
1808 		} else if (chipstatus & 0x02) {
1809 			/* Protection bit set */
1810 			ret = -EROFS;
1811 		} else if (chipstatus & 0x08) {
1812 			/* Voltage */
1813 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1814 			ret = -EIO;
1815 		} else if (chipstatus & 0x20 && retries--) {
1816 			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1817 			timeo = jiffies + HZ;
1818 			put_chip(map, chip, adr);
1819 			spin_unlock(chip->mutex);
1820 			goto retry;
1821 		} else {
1822 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1823 			ret = -EIO;
1824 		}
1825 
1826 		goto out;
1827 	}
1828 
1829 	xip_enable(map, chip, adr);
1830  out:	put_chip(map, chip, adr);
1831 	spin_unlock(chip->mutex);
1832 	return ret;
1833 }
1834 
1835 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1836 {
1837 	unsigned long ofs, len;
1838 	int ret;
1839 
1840 	ofs = instr->addr;
1841 	len = instr->len;
1842 
1843 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1844 	if (ret)
1845 		return ret;
1846 
1847 	instr->state = MTD_ERASE_DONE;
1848 	mtd_erase_callback(instr);
1849 
1850 	return 0;
1851 }
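/*
 * Illustrative only -- a minimal sketch (not part of this driver) of how
 * an MTD client of this vintage would drive the erase path above and
 * wait for mtd_erase_callback(); erase_callback() and wait_erase() are
 * hypothetical names.
 */
#if 0
static void erase_callback(struct erase_info *instr)
{
	/* mtd_erase_callback() above ends up here; wake the waiter */
	wake_up((wait_queue_head_t *)instr->priv);
}

static int wait_erase(struct mtd_info *mtd, u_int32_t ofs, u_int32_t len)
{
	DECLARE_WAIT_QUEUE_HEAD(waitq);
	DECLARE_WAITQUEUE(wait, current);
	struct erase_info instr;
	int ret;

	memset(&instr, 0, sizeof(instr));
	instr.mtd = mtd;
	instr.addr = ofs;	/* must be erase-block aligned */
	instr.len = len;	/* a whole number of blocks */
	instr.callback = erase_callback;
	instr.priv = (u_long)&waitq;

	/* queue ourselves before starting so the wake_up can't be missed */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&waitq, &wait);
	ret = mtd->erase(mtd, &instr);
	if (!ret)
		schedule();	/* woken from erase_callback() */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&waitq, &wait);

	return ret ? ret : (instr.state == MTD_ERASE_DONE ? 0 : -EIO);
}
#endif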
1852 
1853 static void cfi_intelext_sync (struct mtd_info *mtd)
1854 {
1855 	struct map_info *map = mtd->priv;
1856 	struct cfi_private *cfi = map->fldrv_priv;
1857 	int i;
1858 	struct flchip *chip;
1859 	int ret = 0;
1860 
1861 	for (i=0; !ret && i<cfi->numchips; i++) {
1862 		chip = &cfi->chips[i];
1863 
1864 		spin_lock(chip->mutex);
1865 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1866 
1867 		if (!ret) {
1868 			chip->oldstate = chip->state;
1869 			chip->state = FL_SYNCING;
1870 			/* No need to wake_up() on this state change -
1871 			 * as the whole point is that nobody can do anything
1872 			 * with the chip now anyway.
1873 			 */
1874 		}
1875 		spin_unlock(chip->mutex);
1876 	}
1877 
1878 	/* Unlock the chips again */
1879 
1880 	for (i--; i >= 0; i--) {
1881 		chip = &cfi->chips[i];
1882 
1883 		spin_lock(chip->mutex);
1884 
1885 		if (chip->state == FL_SYNCING) {
1886 			chip->state = chip->oldstate;
1887 			chip->oldstate = FL_READY;
1888 			wake_up(&chip->wq);
1889 		}
1890 		spin_unlock(chip->mutex);
1891 	}
1892 }
1893 
1894 #ifdef DEBUG_LOCK_BITS
1895 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1896 						struct flchip *chip,
1897 						unsigned long adr,
1898 						int len, void *thunk)
1899 {
1900 	struct cfi_private *cfi = map->fldrv_priv;
1901 	int status, ofs_factor = cfi->interleave * cfi->device_type;
1902 
1903 	adr += chip->start;
1904 	xip_disable(map, chip, adr+(2*ofs_factor));
1905 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
1906 	chip->state = FL_JEDEC_QUERY;
1907 	status = cfi_read_query(map, adr+(2*ofs_factor));
1908 	xip_enable(map, chip, 0);
1909 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1910 	       adr, status);
1911 	return 0;
1912 }
1913 #endif
1914 
1915 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
1916 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
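/* passed through cfi_varsize_frob() as the `thunk' argument to tell
   do_xxlock_oneblock() which operation to perform */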
1917 
1918 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1919 				       unsigned long adr, int len, void *thunk)
1920 {
1921 	struct cfi_private *cfi = map->fldrv_priv;
1922 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1923 	map_word status, status_OK;
1924 	unsigned long timeo = jiffies + HZ;
1925 	int ret;
1926 
1927 	adr += chip->start;
1928 
1929 	/* Let's determine this according to the interleave only once */
1930 	status_OK = CMD(0x80);
1931 
1932 	spin_lock(chip->mutex);
1933 	ret = get_chip(map, chip, adr, FL_LOCKING);
1934 	if (ret) {
1935 		spin_unlock(chip->mutex);
1936 		return ret;
1937 	}
1938 
1939 	ENABLE_VPP(map);
1940 	xip_disable(map, chip, adr);
1941 
1942 	map_write(map, CMD(0x60), adr);
1943 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1944 		map_write(map, CMD(0x01), adr);
1945 		chip->state = FL_LOCKING;
1946 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1947 		map_write(map, CMD(0xD0), adr);
1948 		chip->state = FL_UNLOCKING;
1949 	} else
1950 		BUG();
1951 
1952 	/*
1953 	 * If Instant Individual Block Locking supported then no need
1954 	 * to delay.
1955 	 */
1956 
1957 	if (!extp || !(extp->FeatureSupport & (1 << 5)))
1958 		UDELAY(map, chip, adr, 1000000/HZ);
1959 
1960 	/* FIXME. Use a timer to check this, and return immediately. */
1961 	/* Once the state machine's known to be working I'll do that */
1962 
1963 	timeo = jiffies + (HZ*20);
1964 	for (;;) {
1965 
1966 		status = map_read(map, adr);
1967 		if (map_word_andequal(map, status, status_OK, status_OK))
1968 			break;
1969 
1970 		/* OK Still waiting */
1971 		if (time_after(jiffies, timeo)) {
1972 			map_write(map, CMD(0x70), adr);
1973 			chip->state = FL_STATUS;
1974 			xip_enable(map, chip, adr);
1975 			printk(KERN_ERR "%s: block %s error: (status timeout)\n", map->name, thunk == DO_XXLOCK_ONEBLOCK_LOCK ? "lock" : "unlock");
1976 			put_chip(map, chip, adr);
1977 			spin_unlock(chip->mutex);
1978 			return -EIO;
1979 		}
1980 
1981 		/* Latency issues. Drop the lock, wait a while and retry */
1982 		UDELAY(map, chip, adr, 1);
1983 	}
1984 
1985 	/* Done and happy. */
1986 	chip->state = FL_STATUS;
1987 	xip_enable(map, chip, adr);
1988 	put_chip(map, chip, adr);
1989 	spin_unlock(chip->mutex);
1990 	return 0;
1991 }
1992 
1993 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1994 {
1995 	int ret;
1996 
1997 #ifdef DEBUG_LOCK_BITS
1998 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1999 	       __FUNCTION__, ofs, len);
2000 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2001 		ofs, len, 0);
2002 #endif
2003 
2004 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2005 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2006 
2007 #ifdef DEBUG_LOCK_BITS
2008 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2009 	       __FUNCTION__, ret);
2010 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2011 		ofs, len, 0);
2012 #endif
2013 
2014 	return ret;
2015 }
2016 
2017 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2018 {
2019 	int ret;
2020 
2021 #ifdef DEBUG_LOCK_BITS
2022 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2023 	       __FUNCTION__, ofs, len);
2024 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2025 		ofs, len, 0);
2026 #endif
2027 
2028 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2029 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2030 
2031 #ifdef DEBUG_LOCK_BITS
2032 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2033 	       __FUNCTION__, ret);
2034 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2035 		ofs, len, 0);
2036 #endif
2037 
2038 	return ret;
2039 }
2040 
2041 #ifdef CONFIG_MTD_OTP
2042 
2043 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2044 			u_long data_offset, u_char *buf, u_int size,
2045 			u_long prot_offset, u_int groupno, u_int groupsize);
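/* An otp_op_t acts on (part of) one OTP protection group: data_offset
   is the map offset of the group's data, prot_offset that of its
   protection lock word, and groupno/groupsize identify the group. */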
2046 
2047 static int __xipram
2048 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2049 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2050 {
2051 	struct cfi_private *cfi = map->fldrv_priv;
2052 	int ret;
2053 
2054 	spin_lock(chip->mutex);
2055 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2056 	if (ret) {
2057 		spin_unlock(chip->mutex);
2058 		return ret;
2059 	}
2060 
2061 	/* let's ensure we're not reading back cached data from array mode */
2062 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2063 
2064 	xip_disable(map, chip, chip->start);
2065 	if (chip->state != FL_JEDEC_QUERY) {
2066 		map_write(map, CMD(0x90), chip->start);
2067 		chip->state = FL_JEDEC_QUERY;
2068 	}
2069 	map_copy_from(map, buf, chip->start + offset, size);
2070 	xip_enable(map, chip, chip->start);
2071 
2072 	/* then ensure we don't keep OTP data in the cache */
2073 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2074 
2075 	put_chip(map, chip, chip->start);
2076 	spin_unlock(chip->mutex);
2077 	return 0;
2078 }
2079 
2080 static int
2081 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2082 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2083 {
2084 	int ret;
2085 
2086 	while (size) {
2087 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2088 		int gap = offset - bus_ofs;
2089 		int n = min_t(int, size, map_bankwidth(map)-gap);
2090 		map_word datum = map_word_ff(map);
2091 
2092 		datum = map_word_load_partial(map, datum, buf, gap, n);
2093 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2094 		if (ret)
2095 			return ret;
2096 
2097 		offset += n;
2098 		buf += n;
2099 		size -= n;
2100 	}
2101 
2102 	return 0;
2103 }
2104 
2105 static int
2106 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2107 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2108 {
2109 	struct cfi_private *cfi = map->fldrv_priv;
2110 	map_word datum;
2111 
2112 	/* make sure area matches group boundaries */
2113 	if (size != grpsz)
2114 		return -EXDEV;
2115 
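	/* locking is one-way: clear the group's bit in the protection
	   lock word (flash programming can only turn 1 bits into 0) */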
2116 	datum = map_word_ff(map);
2117 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2118 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2119 }
2120 
2121 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2122 				 size_t *retlen, u_char *buf,
2123 				 otp_op_t action, int user_regs)
2124 {
2125 	struct map_info *map = mtd->priv;
2126 	struct cfi_private *cfi = map->fldrv_priv;
2127 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2128 	struct flchip *chip;
2129 	struct cfi_intelext_otpinfo *otp;
2130 	u_long devsize, reg_prot_offset, data_offset;
2131 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2132 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2133 	int ret;
2134 
2135 	*retlen = 0;
2136 
2137 	/* Check that we actually have some OTP registers */
2138 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2139 		return -ENODATA;
2140 
2141 	/* we need real chips here not virtual ones */
2142 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2143 	chip_step = devsize >> cfi->chipshift;
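	/* partition_fixup() may have split each die into several virtual
	   chips; chip_step is the number of cfi->chips[] entries per die,
	   so the loop below visits each physical device exactly once */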
2144 	chip_num = 0;
2145 
2146 	/* Some chips have OTP located in the _top_ partition only.
2147 	   For example: Intel 28F256L18T (T means top-parameter device) */
2148 	if (cfi->mfr == MANUFACTURER_INTEL) {
2149 		switch (cfi->id) {
2150 		case 0x880b:
2151 		case 0x880c:
2152 		case 0x880d:
2153 			chip_num = chip_step - 1;
2154 		}
2155 	}
2156 
2157 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2158 		chip = &cfi->chips[chip_num];
2159 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2160 
2161 		/* first OTP region */
2162 		field = 0;
2163 		reg_prot_offset = extp->ProtRegAddr;
2164 		reg_fact_groups = 1;
2165 		reg_fact_size = 1 << extp->FactProtRegSize;
2166 		reg_user_groups = 1;
2167 		reg_user_size = 1 << extp->UserProtRegSize;
2168 
2169 		while (len > 0) {
2170 			/* flash geometry fixup */
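			/* (the CFI extended table gives addresses and sizes
			   in device words; scale by interleave and device
			   width to get map offsets and byte counts) */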
2171 			data_offset = reg_prot_offset + 1;
2172 			data_offset *= cfi->interleave * cfi->device_type;
2173 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2174 			reg_fact_size *= cfi->interleave;
2175 			reg_user_size *= cfi->interleave;
2176 
2177 			if (user_regs) {
2178 				groups = reg_user_groups;
2179 				groupsize = reg_user_size;
2180 				/* skip over factory reg area */
2181 				groupno = reg_fact_groups;
2182 				data_offset += reg_fact_groups * reg_fact_size;
2183 			} else {
2184 				groups = reg_fact_groups;
2185 				groupsize = reg_fact_size;
2186 				groupno = 0;
2187 			}
2188 
2189 			while (len > 0 && groups > 0) {
2190 				if (!action) {
2191 					/*
2192 					 * Special case: if action is NULL
2193 					 * we fill buf with otp_info records.
2194 					 */
2195 					struct otp_info *otpinfo;
2196 					map_word lockword;
2197 					if (len <= sizeof(struct otp_info))
2198 						return -ENOSPC;
2199 					len -= sizeof(struct otp_info);
2200 					ret = do_otp_read(map, chip,
2201 							  reg_prot_offset,
2202 							  (u_char *)&lockword,
2203 							  map_bankwidth(map),
2204 							  0, 0, 0);
2205 					if (ret)
2206 						return ret;
2207 					otpinfo = (struct otp_info *)buf;
2208 					otpinfo->start = from;
2209 					otpinfo->length = groupsize;
2210 					otpinfo->locked =
2211 					   !map_word_bitsset(map, lockword,
2212 							     CMD(1 << groupno));
2213 					from += groupsize;
2214 					buf += sizeof(*otpinfo);
2215 					*retlen += sizeof(*otpinfo);
2216 				} else if (from >= groupsize) {
2217 					from -= groupsize;
2218 					data_offset += groupsize;
2219 				} else {
2220 					int size = groupsize;
2221 					data_offset += from;
2222 					size -= from;
2223 					from = 0;
2224 					if (size > len)
2225 						size = len;
2226 					ret = action(map, chip, data_offset,
2227 						     buf, size, reg_prot_offset,
2228 						     groupno, groupsize);
2229 					if (ret < 0)
2230 						return ret;
2231 					buf += size;
2232 					len -= size;
2233 					*retlen += size;
2234 					data_offset += size;
2235 				}
2236 				groupno++;
2237 				groups--;
2238 			}
2239 
2240 			/* next OTP region */
2241 			if (++field == extp->NumProtectionFields)
2242 				break;
2243 			reg_prot_offset = otp->ProtRegAddr;
2244 			reg_fact_groups = otp->FactGroups;
2245 			reg_fact_size = 1 << otp->FactProtRegSize;
2246 			reg_user_groups = otp->UserGroups;
2247 			reg_user_size = 1 << otp->UserProtRegSize;
2248 			otp++;
2249 		}
2250 	}
2251 
2252 	return 0;
2253 }
2254 
2255 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2256 					   size_t len, size_t *retlen,
2257 					    u_char *buf)
2258 {
2259 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2260 				     buf, do_otp_read, 0);
2261 }
2262 
2263 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2264 					   size_t len, size_t *retlen,
2265 					    u_char *buf)
2266 {
2267 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2268 				     buf, do_otp_read, 1);
2269 }
2270 
2271 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2272 					    size_t len, size_t *retlen,
2273 					     u_char *buf)
2274 {
2275 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2276 				     buf, do_otp_write, 1);
2277 }
2278 
2279 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2280 					   loff_t from, size_t len)
2281 {
2282 	size_t retlen;
2283 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2284 				     NULL, do_otp_lock, 1);
2285 }
2286 
2287 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2288 					   struct otp_info *buf, size_t len)
2289 {
2290 	size_t retlen;
2291 	int ret;
2292 
2293 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2294 	return ret ? : retlen;
2295 }
2296 
2297 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2298 					   struct otp_info *buf, size_t len)
2299 {
2300 	size_t retlen;
2301 	int ret;
2302 
2303 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2304 	return ret ? : retlen;
2305 }
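
/*
 * Illustrative only -- a minimal sketch of reading the factory OTP
 * segment through the MTD hook wired up above (on Intel parts this
 * typically holds a 64-bit unique device number). read_flash_uid()
 * and the uid[] buffer are hypothetical.
 */
#if 0
static int read_flash_uid(struct mtd_info *mtd)
{
	u_char uid[8];
	size_t retlen;
	int ret;

	ret = mtd->read_fact_prot_reg(mtd, 0, sizeof(uid), &retlen, uid);
	if (ret < 0 || retlen != sizeof(uid))
		return ret < 0 ? ret : -EIO;

	printk(KERN_INFO "flash UID: %02x%02x%02x%02x%02x%02x%02x%02x\n",
	       uid[0], uid[1], uid[2], uid[3],
	       uid[4], uid[5], uid[6], uid[7]);
	return 0;
}
#endif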
2306 
2307 #endif
2308 
2309 static int cfi_intelext_suspend(struct mtd_info *mtd)
2310 {
2311 	struct map_info *map = mtd->priv;
2312 	struct cfi_private *cfi = map->fldrv_priv;
2313 	int i;
2314 	struct flchip *chip;
2315 	int ret = 0;
2316 
2317 	for (i=0; !ret && i<cfi->numchips; i++) {
2318 		chip = &cfi->chips[i];
2319 
2320 		spin_lock(chip->mutex);
2321 
2322 		switch (chip->state) {
2323 		case FL_READY:
2324 		case FL_STATUS:
2325 		case FL_CFI_QUERY:
2326 		case FL_JEDEC_QUERY:
2327 			if (chip->oldstate == FL_READY) {
2328 				chip->oldstate = chip->state;
2329 				chip->state = FL_PM_SUSPENDED;
2330 				/* No need to wake_up() on this state change -
2331 				 * as the whole point is that nobody can do anything
2332 				 * with the chip now anyway.
2333 				 */
2334 			} else {
2335 				/* There seems to be an operation pending. We must wait for it. */
2336 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2337 				ret = -EAGAIN;
2338 			}
2339 			break;
2340 		default:
2341 			/* Should we actually wait? Once upon a time these routines weren't
2342 			   allowed to. Or should we return -EAGAIN, because the upper layers
2343 			   ought to have already shut down anything which was using the device
2344 			   anyway? The latter for now. */
2345 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2346 			ret = -EAGAIN;
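			/* fall through */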
2347 		case FL_PM_SUSPENDED:
2348 			break;
2349 		}
2350 		spin_unlock(chip->mutex);
2351 	}
2352 
2353 	/* Unlock the chips again */
2354 
2355 	if (ret) {
2356 		for (i--; i >= 0; i--) {
2357 			chip = &cfi->chips[i];
2358 
2359 			spin_lock(chip->mutex);
2360 
2361 			if (chip->state == FL_PM_SUSPENDED) {
2362 				/* No need to force it into a known state here,
2363 				   because we're returning failure, and it didn't
2364 				   get power cycled */
2365 				chip->state = chip->oldstate;
2366 				chip->oldstate = FL_READY;
2367 				wake_up(&chip->wq);
2368 			}
2369 			spin_unlock(chip->mutex);
2370 		}
2371 	}
2372 
2373 	return ret;
2374 }
2375 
2376 static void cfi_intelext_resume(struct mtd_info *mtd)
2377 {
2378 	struct map_info *map = mtd->priv;
2379 	struct cfi_private *cfi = map->fldrv_priv;
2380 	int i;
2381 	struct flchip *chip;
2382 
2383 	for (i=0; i<cfi->numchips; i++) {
2384 
2385 		chip = &cfi->chips[i];
2386 
2387 		spin_lock(chip->mutex);
2388 
2389 		/* Go to known state. Chip may have been power cycled */
2390 		if (chip->state == FL_PM_SUSPENDED) {
2391 			map_write(map, CMD(0xFF), chip->start);
2392 			chip->oldstate = chip->state = FL_READY;
2393 			wake_up(&chip->wq);
2394 		}
2395 
2396 		spin_unlock(chip->mutex);
2397 	}
2398 }
2399 
2400 static int cfi_intelext_reset(struct mtd_info *mtd)
2401 {
2402 	struct map_info *map = mtd->priv;
2403 	struct cfi_private *cfi = map->fldrv_priv;
2404 	int i, ret;
2405 
2406 	for (i=0; i < cfi->numchips; i++) {
2407 		struct flchip *chip = &cfi->chips[i];
2408 
2409 		/* force the completion of any ongoing operation
2410 		   and switch to array mode so any bootloader in
2411 		   flash is accessible for soft reboot. */
2412 		spin_lock(chip->mutex);
2413 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2414 		if (!ret) {
2415 			map_write(map, CMD(0xff), chip->start);
2416 			chip->state = FL_READY;
2417 		}
2418 		spin_unlock(chip->mutex);
2419 	}
2420 
2421 	return 0;
2422 }
2423 
2424 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2425 			       void *v)
2426 {
2427 	struct mtd_info *mtd;
2428 
2429 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2430 	cfi_intelext_reset(mtd);
2431 	return NOTIFY_DONE;
2432 }
2433 
2434 static void cfi_intelext_destroy(struct mtd_info *mtd)
2435 {
2436 	struct map_info *map = mtd->priv;
2437 	struct cfi_private *cfi = map->fldrv_priv;
2438 	cfi_intelext_reset(mtd);
2439 	unregister_reboot_notifier(&mtd->reboot_notifier);
2440 	kfree(cfi->cmdset_priv);
2441 	kfree(cfi->cfiq);
2442 	kfree(cfi->chips[0].priv);
2443 	kfree(cfi);
2444 	kfree(mtd->eraseregions);
2445 }
2446 
2447 static char im_name_0001[] = "cfi_cmdset_0001";
2448 static char im_name_0003[] = "cfi_cmdset_0003";
2449 static char im_name_0200[] = "cfi_cmdset_0200";
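
/* Command sets 0x0003 (Intel Standard) and 0x0200 (Intel Performance)
   are close enough to the extended 0x0001 set that this one
   implementation serves all three, hence the aliased registrations. */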
2450 
2451 static int __init cfi_intelext_init(void)
2452 {
2453 	inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2454 	inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2455 	inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2456 	return 0;
2457 }
2458 
2459 static void __exit cfi_intelext_exit(void)
2460 {
2461 	inter_module_unregister(im_name_0001);
2462 	inter_module_unregister(im_name_0003);
2463 	inter_module_unregister(im_name_0200);
2464 }
2465 
2466 module_init(cfi_intelext_init);
2467 module_exit(cfi_intelext_exit);
2468 
2469 MODULE_LICENSE("GPL");
2470 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2471 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2472