/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


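/*
 * Note: the CMD() macro used throughout (from include/linux/mtd/cfi.h)
 * builds a map_word with the command value replicated for every
 * interleaved device on the bus, so a single map_write() addresses the
 * whole chip set at once.
 */
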
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<4; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n", i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n", i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

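/*
 * cfi_fixup() walks one of the tables below and applies the fixup hook of
 * every entry whose manufacturer/device ID matches the probed chip;
 * CFI_MFR_ANY and CFI_ID_ANY act as wildcards.
 */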
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table picks up all the cases where we
	 * know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

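/*
 * The Intel/Sharp extended query table is variable length: how much of it
 * we need depends on fields (OTP protection regions, burst read info,
 * number of partition regions) found in the data itself.  So we read it
 * once with a default size, re-parse it, and if the buffer turns out to
 * be too small we free it and retry with the size learned from the first
 * pass.
 */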
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
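
/*
 * A minimal sketch (not part of this driver) of how a map driver
 * typically ends up in cfi_cmdset_0001(): the CFI probe identifies the
 * command set from the query data and dispatches here, e.g.
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 *
 * where "my_map" is a hypothetical, already initialized struct map_info.
 */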

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
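		/* e.g. chipshift 24 (a 16 MiB chip) split into 4 hardware
		   partitions gives partshift 22, i.e. four 4 MiB virtual
		   chips */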

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

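/*
 * get_chip() is entered with chip->mutex held.  It may drop and re-take
 * that lock while waiting, but returns with it held again; on success the
 * chip (or hardware partition) is ready for an operation of the requested
 * mode.  put_chip() releases that claim and resumes whatever was
 * suspended on our behalf.
 */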
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
			        break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through */

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
	UDELAY(map, chip, cmd_adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret = 0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
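	/* 0x80 is SR.7, the WSM "ready" bit, replicated for every
	   interleaved device */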
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip, adr,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
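	/* 0x1a = SR.1 (block locked), SR.3 (VPP low) and SR.4 (program error) */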
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret = 0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, write_cmd, cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       map->name, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
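	/* Either way, "words" now holds the bus-word count minus one, which
	   is how the write-to-buffer command expects its count encoded;
	   pad bytes in a leading partial word are preloaded as 0xff so the
	   flash leaves them untouched. */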

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
				adr, len,
				chip->buffer_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, cmd_adr, 1);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time = 1;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
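	/* 0x1a = SR.1 (block locked), SR.3 (VPP low) and SR.4 (program error) */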
1626 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1627 		unsigned long chipstatus = MERGESTATUS(status);
1628 
1629 		/* reset status */
1630 		map_write(map, CMD(0x50), cmd_adr);
1631 		map_write(map, CMD(0x70), cmd_adr);
1632 		xip_enable(map, chip, cmd_adr);
1633 
1634 		if (chipstatus & 0x02) {
1635 			ret = -EROFS;
1636 		} else if (chipstatus & 0x08) {
1637 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1638 			ret = -EIO;
1639 		} else {
1640 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1641 			ret = -EINVAL;
1642 		}
1643 
1644 		goto out;
1645 	}
1646 
1647 	xip_enable(map, chip, cmd_adr);
1648  out:	put_chip(map, chip, cmd_adr);
1649 	spin_unlock(chip->mutex);
1650 	return ret;
1651 }
1652 
1653 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1654 				unsigned long count, loff_t to, size_t *retlen)
1655 {
1656 	struct map_info *map = mtd->priv;
1657 	struct cfi_private *cfi = map->fldrv_priv;
1658 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1659 	int ret = 0;
1660 	int chipnum;
1661 	unsigned long ofs, vec_seek, i;
1662 	size_t len = 0;
1663 
1664 	for (i = 0; i < count; i++)
1665 		len += vecs[i].iov_len;
1666 
1667 	*retlen = 0;
1668 	if (!len)
1669 		return 0;
1670 
1671 	chipnum = to >> cfi->chipshift;
1672 	ofs = to - (chipnum << cfi->chipshift);
1673 	vec_seek = 0;
1674 
1675 	do {
1676 		/* We must not cross write block boundaries */
1677 		int size = wbufsize - (ofs & (wbufsize-1));
1678 
1679 		if (size > len)
1680 			size = len;
1681 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1682 				      ofs, &vecs, &vec_seek, size);
1683 		if (ret)
1684 			return ret;
1685 
1686 		ofs += size;
1687 		(*retlen) += size;
1688 		len -= size;
1689 
1690 		if (ofs >> cfi->chipshift) {
1691 			chipnum ++;
1692 			ofs = 0;
1693 			if (chipnum == cfi->numchips)
1694 				return 0;
1695 		}
1696 	} while (len);
1697 
1698 	return 0;
1699 }
1700 
1701 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1702 				       size_t len, size_t *retlen, const u_char *buf)
1703 {
1704 	struct kvec vec;
1705 
1706 	vec.iov_base = (void *) buf;
1707 	vec.iov_len = len;
1708 
1709 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1710 }
1711 
1712 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1713 				      unsigned long adr, int len, void *thunk)
1714 {
1715 	struct cfi_private *cfi = map->fldrv_priv;
1716 	map_word status, status_OK;
1717 	unsigned long timeo;
1718 	int retries = 3;
1719 	DECLARE_WAITQUEUE(wait, current);
1720 	int ret = 0;
1721 
1722 	adr += chip->start;
1723 
1724 	/* Compute the ready-status pattern (0x80 replicated per interleave) only once */
1725 	status_OK = CMD(0x80);
1726 
1727  retry:
1728 	spin_lock(chip->mutex);
1729 	ret = get_chip(map, chip, adr, FL_ERASING);
1730 	if (ret) {
1731 		spin_unlock(chip->mutex);
1732 		return ret;
1733 	}
1734 
1735 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1736 	ENABLE_VPP(map);
1737 	xip_disable(map, chip, adr);
1738 
1739 	/* Clear the status register first */
1740 	map_write(map, CMD(0x50), adr);
1741 
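	/* Block erase is a two-cycle command: 0x20 (erase setup) arms
	 * the block, 0xD0 (confirm) starts the internal state machine.
	 * The same 0xD0 opcode also serves as suspend-resume, which is
	 * why a suspended erase can be restarted with it elsewhere. */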
1742 	/* Now erase */
1743 	map_write(map, CMD(0x20), adr);
1744 	map_write(map, CMD(0xD0), adr);
1745 	chip->state = FL_ERASING;
1746 	chip->erase_suspended = 0;
1747 
1748 	INVALIDATE_CACHE_UDELAY(map, chip, adr,
1749 				adr, len,
1750 				chip->erase_time*1000/2);
1751 
1752 	/* FIXME. Use a timer to check this, and return immediately. */
1753 	/* Once the state machine's known to be working I'll do that */
1754 
1755 	timeo = jiffies + (HZ*20);
1756 	for (;;) {
1757 		if (chip->state != FL_ERASING) {
1758 			/* Someone's suspended the erase. Sleep */
1759 			set_current_state(TASK_UNINTERRUPTIBLE);
1760 			add_wait_queue(&chip->wq, &wait);
1761 			spin_unlock(chip->mutex);
1762 			schedule();
1763 			remove_wait_queue(&chip->wq, &wait);
1764 			spin_lock(chip->mutex);
1765 			continue;
1766 		}
1767 		if (chip->erase_suspended) {
1768 			/* This erase was suspended and resumed.
1769 			   Adjust the timeout */
1770 			timeo = jiffies + (HZ*20); /* FIXME */
1771 			chip->erase_suspended = 0;
1772 		}
1773 
1774 		status = map_read(map, adr);
1775 		if (map_word_andequal(map, status, status_OK, status_OK))
1776 			break;
1777 
1778 		/* OK, still waiting */
1779 		if (time_after(jiffies, timeo)) {
1780 			map_write(map, CMD(0x70), adr);
1781 			chip->state = FL_STATUS;
1782 			xip_enable(map, chip, adr);
1783 			printk(KERN_ERR "%s: block erase error (status timeout)\n", map->name);
1784 			ret = -EIO;
1785 			goto out;
1786 		}
1787 
1788 		/* Latency issues. Drop the lock, wait a while and retry */
1789 		UDELAY(map, chip, adr, 1000000/HZ);
1790 	}
1791 
1792 	/* We've broken this before. It doesn't hurt to be safe */
1793 	map_write(map, CMD(0x70), adr);
1794 	chip->state = FL_STATUS;
1795 	status = map_read(map, adr);
1796 
1797 	/* check for errors */
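	/* CMD(0x3a) covers the erase-relevant error bits: SR.5 (0x20)
	 * erase failure, SR.4 (0x10) program failure, SR.3 (0x08) VPP
	 * low, SR.1 (0x02) block locked.  SR.5 and SR.4 set together
	 * (0x30) indicate a command-sequence error rather than a media
	 * failure; SR.5 alone is retried a few times below. */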
1798 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1799 		unsigned long chipstatus = MERGESTATUS(status);
1800 
1801 		/* Reset the error bits */
1802 		map_write(map, CMD(0x50), adr);
1803 		map_write(map, CMD(0x70), adr);
1804 		xip_enable(map, chip, adr);
1805 
1806 		if ((chipstatus & 0x30) == 0x30) {
1807 			printk(KERN_ERR "%s: block erase error (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1808 			ret = -EINVAL;
1809 		} else if (chipstatus & 0x02) {
1810 			/* Protection bit set */
1811 			ret = -EROFS;
1812 		} else if (chipstatus & 0x08) {
1813 			/* VPP low */
1814 			printk(KERN_ERR "%s: block erase error (bad VPP)\n", map->name);
1815 			ret = -EIO;
1816 		} else if (chipstatus & 0x20 && retries--) {
1817 			printk(KERN_DEBUG "%s: block erase failed at 0x%08lx (status 0x%lx). Retrying...\n", map->name, adr, chipstatus);
1818 			timeo = jiffies + HZ;
1819 			put_chip(map, chip, adr);
1820 			spin_unlock(chip->mutex);
1821 			goto retry;
1822 		} else {
1823 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1824 			ret = -EIO;
1825 		}
1826 
1827 		goto out;
1828 	}
1829 
1830 	xip_enable(map, chip, adr);
1831  out:	put_chip(map, chip, adr);
1832 	spin_unlock(chip->mutex);
1833 	return ret;
1834 }
1835 
1836 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1837 {
1838 	unsigned long ofs, len;
1839 	int ret;
1840 
1841 	ofs = instr->addr;
1842 	len = instr->len;
1843 
1844 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1845 	if (ret)
1846 		return ret;
1847 
1848 	instr->state = MTD_ERASE_DONE;
1849 	mtd_erase_callback(instr);
1850 
1851 	return 0;
1852 }
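
#if 0
/* Minimal sketch of a caller for the erase path above, assuming the
 * 2005-era MTD interface (an mtd->erase method and struct erase_info
 * with mtd/addr/len/state fields).  Not built into the driver; this is
 * illustration only, not part of the command-set implementation. */
static int erase_one_block_example(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info instr;
	int ret;

	memset(&instr, 0, sizeof(instr));
	instr.mtd  = mtd;
	instr.addr = ofs;
	instr.len  = mtd->erasesize;

	/* cfi_intelext_erase_varsize() completes synchronously and has
	 * already called mtd_erase_callback() by the time this returns */
	ret = mtd->erase(mtd, &instr);
	if (!ret && instr.state != MTD_ERASE_DONE)
		ret = -EIO;
	return ret;
}
#endif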
1853 
1854 static void cfi_intelext_sync (struct mtd_info *mtd)
1855 {
1856 	struct map_info *map = mtd->priv;
1857 	struct cfi_private *cfi = map->fldrv_priv;
1858 	int i;
1859 	struct flchip *chip;
1860 	int ret = 0;
1861 
1862 	for (i=0; !ret && i<cfi->numchips; i++) {
1863 		chip = &cfi->chips[i];
1864 
1865 		spin_lock(chip->mutex);
1866 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1867 
1868 		if (!ret) {
1869 			chip->oldstate = chip->state;
1870 			chip->state = FL_SYNCING;
1871 			/* No need to wake_up() on this state change -
1872 			 * as the whole point is that nobody can do anything
1873 			 * with the chip now anyway.
1874 			 */
1875 		}
1876 		spin_unlock(chip->mutex);
1877 	}
1878 
1879 	/* Unlock the chips again */
1880 
1881 	for (i--; i >= 0; i--) {
1882 		chip = &cfi->chips[i];
1883 
1884 		spin_lock(chip->mutex);
1885 
1886 		if (chip->state == FL_SYNCING) {
1887 			chip->state = chip->oldstate;
1888 			chip->oldstate = FL_READY;
1889 			wake_up(&chip->wq);
1890 		}
1891 		spin_unlock(chip->mutex);
1892 	}
1893 }
1894 
1895 #ifdef DEBUG_LOCK_BITS
1896 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1897 						struct flchip *chip,
1898 						unsigned long adr,
1899 						int len, void *thunk)
1900 {
1901 	struct cfi_private *cfi = map->fldrv_priv;
1902 	int status, ofs_factor = cfi->interleave * cfi->device_type;
1903 
1904 	adr += chip->start;
1905 	xip_disable(map, chip, adr+(2*ofs_factor));
1906 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
1907 	chip->state = FL_JEDEC_QUERY;
1908 	status = cfi_read_query(map, adr+(2*ofs_factor));
1909 	xip_enable(map, chip, 0);
1910 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1911 	       adr, status);
1912 	return 0;
1913 }
1914 #endif
1915 
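/* do_xxlock_oneblock() below is shared between lock and unlock; the
 * thunk argument passed through cfi_varsize_frob() selects which of
 * the two operations each block gets. */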
1916 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
1917 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
1918 
1919 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1920 				       unsigned long adr, int len, void *thunk)
1921 {
1922 	struct cfi_private *cfi = map->fldrv_priv;
1923 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1924 	map_word status, status_OK;
1925 	unsigned long timeo = jiffies + HZ;
1926 	int ret;
1927 
1928 	adr += chip->start;
1929 
1930 	/* Compute the ready-status pattern (0x80 replicated per interleave) only once */
1931 	status_OK = CMD(0x80);
1932 
1933 	spin_lock(chip->mutex);
1934 	ret = get_chip(map, chip, adr, FL_LOCKING);
1935 	if (ret) {
1936 		spin_unlock(chip->mutex);
1937 		return ret;
1938 	}
1939 
1940 	ENABLE_VPP(map);
1941 	xip_disable(map, chip, adr);
1942 
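	/* Locking is also a two-cycle command: 0x60 (lock setup)
	 * followed by 0x01 sets the block's lock bit, while 0x60
	 * followed by 0xD0 clears it. */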
1943 	map_write(map, CMD(0x60), adr);
1944 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1945 		map_write(map, CMD(0x01), adr);
1946 		chip->state = FL_LOCKING;
1947 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1948 		map_write(map, CMD(0xD0), adr);
1949 		chip->state = FL_UNLOCKING;
1950 	} else
1951 		BUG();
1952 
1953 	/*
1954 	 * If Instant Individual Block Locking supported then no need
1955 	 * to delay.
1956 	 */
1957 
1958 	if (!extp || !(extp->FeatureSupport & (1 << 5)))
1959 		UDELAY(map, chip, adr, 1000000/HZ);
1960 
1961 	/* FIXME. Use a timer to check this, and return immediately. */
1962 	/* Once the state machine's known to be working I'll do that */
1963 
1964 	timeo = jiffies + (HZ*20);
1965 	for (;;) {
1966 
1967 		status = map_read(map, adr);
1968 		if (map_word_andequal(map, status, status_OK, status_OK))
1969 			break;
1970 
1971 		/* OK, still waiting */
1972 		if (time_after(jiffies, timeo)) {
1973 			map_write(map, CMD(0x70), adr);
1974 			chip->state = FL_STATUS;
1975 			xip_enable(map, chip, adr);
1976 			printk(KERN_ERR "%s: block lock/unlock error (status timeout)\n", map->name);
1977 			put_chip(map, chip, adr);
1978 			spin_unlock(chip->mutex);
1979 			return -EIO;
1980 		}
1981 
1982 		/* Latency issues. Drop the lock, wait a while and retry */
1983 		UDELAY(map, chip, adr, 1);
1984 	}
1985 
1986 	/* Done and happy. */
1987 	chip->state = FL_STATUS;
1988 	xip_enable(map, chip, adr);
1989 	put_chip(map, chip, adr);
1990 	spin_unlock(chip->mutex);
1991 	return 0;
1992 }
1993 
1994 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1995 {
1996 	int ret;
1997 
1998 #ifdef DEBUG_LOCK_BITS
1999 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2000 	       __FUNCTION__, ofs, len);
2001 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2002 		ofs, len, 0);
2003 #endif
2004 
2005 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2006 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2007 
2008 #ifdef DEBUG_LOCK_BITS
2009 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2010 	       __FUNCTION__, ret);
2011 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2012 		ofs, len, 0);
2013 #endif
2014 
2015 	return ret;
2016 }
2017 
2018 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2019 {
2020 	int ret;
2021 
2022 #ifdef DEBUG_LOCK_BITS
2023 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2024 	       __FUNCTION__, ofs, len);
2025 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2026 		ofs, len, 0);
2027 #endif
2028 
2029 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2030 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2031 
2032 #ifdef DEBUG_LOCK_BITS
2033 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2034 	       __FUNCTION__, ret);
2035 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2036 		ofs, len, 0);
2037 #endif
2038 
2039 	return ret;
2040 }
2041 
2042 #ifdef CONFIG_MTD_OTP
2043 
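/* Common signature for the OTP primitives below, as driven by
 * cfi_intelext_otp_walk(): data_offset/size address the register data
 * itself, prot_offset is the location of the lock word, and
 * groupno/groupsize identify the protection group being visited. */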
2044 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2045 			u_long data_offset, u_char *buf, u_int size,
2046 			u_long prot_offset, u_int groupno, u_int groupsize);
2047 
2048 static int __xipram
2049 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2050 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2051 {
2052 	struct cfi_private *cfi = map->fldrv_priv;
2053 	int ret;
2054 
2055 	spin_lock(chip->mutex);
2056 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2057 	if (ret) {
2058 		spin_unlock(chip->mutex);
2059 		return ret;
2060 	}
2061 
2062 	/* let's ensure we're not reading back cached data from array mode */
2063 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2064 
2065 	xip_disable(map, chip, chip->start);
2066 	if (chip->state != FL_JEDEC_QUERY) {
2067 		map_write(map, CMD(0x90), chip->start);
2068 		chip->state = FL_JEDEC_QUERY;
2069 	}
2070 	map_copy_from(map, buf, chip->start + offset, size);
2071 	xip_enable(map, chip, chip->start);
2072 
2073 	/* then ensure we don't keep OTP data in the cache */
2074 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2075 
2076 	put_chip(map, chip, chip->start);
2077 	spin_unlock(chip->mutex);
2078 	return 0;
2079 }
2080 
2081 static int
2082 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2083 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2084 {
2085 	int ret;
2086 
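	/* Writes must be whole bus words, so each pass programs the
	 * aligned word containing `offset'.  Example with a 4-byte bus
	 * width and offset == 6: bus_ofs = 4, gap = 2, and n = 2 bytes
	 * are merged into a word preloaded with 0xff (programming can
	 * only clear bits, so 0xff filler leaves untouched bytes alone). */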
2087 	while (size) {
2088 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2089 		int gap = offset - bus_ofs;
2090 		int n = min_t(int, size, map_bankwidth(map)-gap);
2091 		map_word datum = map_word_ff(map);
2092 
2093 		datum = map_word_load_partial(map, datum, buf, gap, n);
2094 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2095 		if (ret)
2096 			return ret;
2097 
2098 		offset += n;
2099 		buf += n;
2100 		size -= n;
2101 	}
2102 
2103 	return 0;
2104 }
2105 
2106 static int
2107 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2108 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2109 {
2110 	struct cfi_private *cfi = map->fldrv_priv;
2111 	map_word datum;
2112 
2113 	/* make sure area matches group boundaries */
2114 	if (size != grpsz)
2115 		return -EXDEV;
2116 
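	/* The protection lock word follows flash programming rules:
	 * bits can only be cleared, never set back.  Clearing bit
	 * `grpno' permanently locks that OTP group, so we start from an
	 * all-ones word and clear just the one bit. */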
2117 	datum = map_word_ff(map);
2118 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2119 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2120 }
2121 
2122 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2123 				 size_t *retlen, u_char *buf,
2124 				 otp_op_t action, int user_regs)
2125 {
2126 	struct map_info *map = mtd->priv;
2127 	struct cfi_private *cfi = map->fldrv_priv;
2128 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2129 	struct flchip *chip;
2130 	struct cfi_intelext_otpinfo *otp;
2131 	u_long devsize, reg_prot_offset, data_offset;
2132 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2133 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2134 	int ret;
2135 
2136 	*retlen = 0;
2137 
2138 	/* Check that we actually have some OTP registers */
2139 	if (!extp || !(extp->FeatureSupport & (1 << 6)) || !extp->NumProtectionFields)
2140 		return -ENODATA;
2141 
2142 	/* we need real chips here, not virtual ones */
2143 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2144 	chip_step = devsize >> cfi->chipshift;
2145 	chip_num = 0;
2146 
2147 	/* Some chips have OTP located in the _top_ partition only.
2148 	   For example: Intel 28F256L18T (T means top-parameter device) */
2149 	if (cfi->mfr == MANUFACTURER_INTEL) {
2150 		switch (cfi->id) {
2151 		case 0x880b:
2152 		case 0x880c:
2153 		case 0x880d:
2154 			chip_num = chip_step - 1;
2155 		}
2156 	}
2157 
2158 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2159 		chip = &cfi->chips[chip_num];
2160 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2161 
2162 		/* first OTP region */
2163 		field = 0;
2164 		reg_prot_offset = extp->ProtRegAddr;
2165 		reg_fact_groups = 1;
2166 		reg_fact_size = 1 << extp->FactProtRegSize;
2167 		reg_user_groups = 1;
2168 		reg_user_size = 1 << extp->UserProtRegSize;
2169 
2170 		while (len > 0) {
2171 			/* flash geometry fixup */
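			/* The extended-query values are device word
			 * addresses; scaling by interleave * device_type
			 * converts them into map byte offsets, and the
			 * "+ 1" above skips the lock word that leads
			 * each protection region.  E.g. two interleaved
			 * x16 chips (interleave = 2, device_type = 2)
			 * place a device word every 4 map bytes. */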
2172 			data_offset = reg_prot_offset + 1;
2173 			data_offset *= cfi->interleave * cfi->device_type;
2174 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2175 			reg_fact_size *= cfi->interleave;
2176 			reg_user_size *= cfi->interleave;
2177 
2178 			if (user_regs) {
2179 				groups = reg_user_groups;
2180 				groupsize = reg_user_size;
2181 				/* skip over factory reg area */
2182 				groupno = reg_fact_groups;
2183 				data_offset += reg_fact_groups * reg_fact_size;
2184 			} else {
2185 				groups = reg_fact_groups;
2186 				groupsize = reg_fact_size;
2187 				groupno = 0;
2188 			}
2189 
2190 			while (len > 0 && groups > 0) {
2191 				if (!action) {
2192 					/*
2193 					 * Special case: if action is NULL
2194 					 * we fill buf with otp_info records.
2195 					 */
2196 					struct otp_info *otpinfo;
2197 					map_word lockword;
2198 					len -= sizeof(struct otp_info);
2199 					if (len <= 0)
2200 						return -ENOSPC;
2201 					ret = do_otp_read(map, chip,
2202 							  reg_prot_offset,
2203 							  (u_char *)&lockword,
2204 							  map_bankwidth(map),
2205 							  0, 0,  0);
2206 					if (ret)
2207 						return ret;
2208 					otpinfo = (struct otp_info *)buf;
2209 					otpinfo->start = from;
2210 					otpinfo->length = groupsize;
2211 					otpinfo->locked =
2212 					   !map_word_bitsset(map, lockword,
2213 							     CMD(1 << groupno));
2214 					from += groupsize;
2215 					buf += sizeof(*otpinfo);
2216 					*retlen += sizeof(*otpinfo);
2217 				} else if (from >= groupsize) {
2218 					from -= groupsize;
2219 					data_offset += groupsize;
2220 				} else {
2221 					int size = groupsize;
2222 					data_offset += from;
2223 					size -= from;
2224 					from = 0;
2225 					if (size > len)
2226 						size = len;
2227 					ret = action(map, chip, data_offset,
2228 						     buf, size, reg_prot_offset,
2229 						     groupno, groupsize);
2230 					if (ret < 0)
2231 						return ret;
2232 					buf += size;
2233 					len -= size;
2234 					*retlen += size;
2235 					data_offset += size;
2236 				}
2237 				groupno++;
2238 				groups--;
2239 			}
2240 
2241 			/* next OTP region */
2242 			if (++field == extp->NumProtectionFields)
2243 				break;
2244 			reg_prot_offset = otp->ProtRegAddr;
2245 			reg_fact_groups = otp->FactGroups;
2246 			reg_fact_size = 1 << otp->FactProtRegSize;
2247 			reg_user_groups = otp->UserGroups;
2248 			reg_user_size = 1 << otp->UserProtRegSize;
2249 			otp++;
2250 		}
2251 	}
2252 
2253 	return 0;
2254 }
2255 
2256 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2257 					   size_t len, size_t *retlen,
2258 					    u_char *buf)
2259 {
2260 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2261 				     buf, do_otp_read, 0);
2262 }
2263 
2264 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2265 					   size_t len, size_t *retlen,
2266 					    u_char *buf)
2267 {
2268 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2269 				     buf, do_otp_read, 1);
2270 }
2271 
2272 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2273 					    size_t len, size_t *retlen,
2274 					     u_char *buf)
2275 {
2276 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2277 				     buf, do_otp_write, 1);
2278 }
2279 
2280 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2281 					   loff_t from, size_t len)
2282 {
2283 	size_t retlen;
2284 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2285 				     NULL, do_otp_lock, 1);
2286 }
2287 
2288 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2289 					   struct otp_info *buf, size_t len)
2290 {
2291 	size_t retlen;
2292 	int ret;
2293 
2294 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
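	/* GCC's two-operand `?:': return ret when non-zero (an error),
	 * otherwise the number of bytes of otp_info records produced.
	 * Same idiom in the user variant below. */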
2295 	return ret ? : retlen;
2296 }
2297 
2298 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2299 					   struct otp_info *buf, size_t len)
2300 {
2301 	size_t retlen;
2302 	int ret;
2303 
2304 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2305 	return ret ? : retlen;
2306 }
2307 
2308 #endif
2309 
2310 static int cfi_intelext_suspend(struct mtd_info *mtd)
2311 {
2312 	struct map_info *map = mtd->priv;
2313 	struct cfi_private *cfi = map->fldrv_priv;
2314 	int i;
2315 	struct flchip *chip;
2316 	int ret = 0;
2317 
2318 	for (i=0; !ret && i<cfi->numchips; i++) {
2319 		chip = &cfi->chips[i];
2320 
2321 		spin_lock(chip->mutex);
2322 
2323 		switch (chip->state) {
2324 		case FL_READY:
2325 		case FL_STATUS:
2326 		case FL_CFI_QUERY:
2327 		case FL_JEDEC_QUERY:
2328 			if (chip->oldstate == FL_READY) {
2329 				chip->oldstate = chip->state;
2330 				chip->state = FL_PM_SUSPENDED;
2331 				/* No need to wake_up() on this state change -
2332 				 * as the whole point is that nobody can do anything
2333 				 * with the chip now anyway.
2334 				 */
2335 			} else {
2336 				/* An operation is pending; refuse the suspend rather than wait for it. */
2337 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2338 				ret = -EAGAIN;
2339 			}
2340 			break;
2341 		default:
2342 			/* Should we actually wait? Once upon a time these routines weren't
2343 			   allowed to. Or should we return -EAGAIN, because the upper layers
2344 			   ought to have already shut down anything which was using the device
2345 			   anyway? The latter for now. */
2346 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2347 			ret = -EAGAIN;	/* fall through to FL_PM_SUSPENDED */
2348 		case FL_PM_SUSPENDED:
2349 			break;
2350 		}
2351 		spin_unlock(chip->mutex);
2352 	}
2353 
2354 	/* Unlock the chips again */
2355 
2356 	if (ret) {
2357 		for (i--; i >= 0; i--) {
2358 			chip = &cfi->chips[i];
2359 
2360 			spin_lock(chip->mutex);
2361 
2362 			if (chip->state == FL_PM_SUSPENDED) {
2363 				/* No need to force it into a known state here,
2364 				   because we're returning failure, and it didn't
2365 				   get power cycled */
2366 				chip->state = chip->oldstate;
2367 				chip->oldstate = FL_READY;
2368 				wake_up(&chip->wq);
2369 			}
2370 			spin_unlock(chip->mutex);
2371 		}
2372 	}
2373 
2374 	return ret;
2375 }
2376 
2377 static void cfi_intelext_resume(struct mtd_info *mtd)
2378 {
2379 	struct map_info *map = mtd->priv;
2380 	struct cfi_private *cfi = map->fldrv_priv;
2381 	int i;
2382 	struct flchip *chip;
2383 
2384 	for (i=0; i<cfi->numchips; i++) {
2385 
2386 		chip = &cfi->chips[i];
2387 
2388 		spin_lock(chip->mutex);
2389 
2390 		/* Go to known state. Chip may have been power cycled */
2391 		if (chip->state == FL_PM_SUSPENDED) {
2392 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2393 			chip->oldstate = chip->state = FL_READY;
2394 			wake_up(&chip->wq);
2395 		}
2396 
2397 		spin_unlock(chip->mutex);
2398 	}
2399 }
2400 
2401 static int cfi_intelext_reset(struct mtd_info *mtd)
2402 {
2403 	struct map_info *map = mtd->priv;
2404 	struct cfi_private *cfi = map->fldrv_priv;
2405 	int i, ret;
2406 
2407 	for (i=0; i < cfi->numchips; i++) {
2408 		struct flchip *chip = &cfi->chips[i];
2409 
2410 		/* force the completion of any ongoing operation
2411 		   and switch to array mode so any bootloader in
2412 		   flash is accessible for soft reboot. */
2413 		spin_lock(chip->mutex);
2414 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2415 		if (!ret) {
2416 			map_write(map, CMD(0xff), chip->start);
2417 			chip->state = FL_READY;
2418 		}
2419 		spin_unlock(chip->mutex);
2420 	}
2421 
2422 	return 0;
2423 }
2424 
2425 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2426 			       void *v)
2427 {
2428 	struct mtd_info *mtd;
2429 
2430 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2431 	cfi_intelext_reset(mtd);
2432 	return NOTIFY_DONE;
2433 }
2434 
2435 static void cfi_intelext_destroy(struct mtd_info *mtd)
2436 {
2437 	struct map_info *map = mtd->priv;
2438 	struct cfi_private *cfi = map->fldrv_priv;
2439 	cfi_intelext_reset(mtd);
2440 	unregister_reboot_notifier(&mtd->reboot_notifier);
2441 	kfree(cfi->cmdset_priv);
2442 	kfree(cfi->cfiq);
2443 	kfree(cfi->chips[0].priv);
2444 	kfree(cfi);
2445 	kfree(mtd->eraseregions);
2446 }
2447 
2448 static char im_name_0001[] = "cfi_cmdset_0001";
2449 static char im_name_0003[] = "cfi_cmdset_0003";
2450 static char im_name_0200[] = "cfi_cmdset_0200";
2451 
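/* Chips advertising the Intel Standard (0x0003) and Intel Performance
 * (0x0200) command sets are close enough to Intel Extended that this
 * one implementation serves all three probe names below. */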
2452 static int __init cfi_intelext_init(void)
2453 {
2454 	inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2455 	inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2456 	inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2457 	return 0;
2458 }
2459 
2460 static void __exit cfi_intelext_exit(void)
2461 {
2462 	inter_module_unregister(im_name_0001);
2463 	inter_module_unregister(im_name_0003);
2464 	inter_module_unregister(im_name_0200);
2465 }
2466 
2467 module_init(cfi_intelext_init);
2468 module_exit(cfi_intelext_exit);
2469 
2470 MODULE_LICENSE("GPL");
2471 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2472 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2473