/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<4; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table picks all the cases where we
	 * know that is true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
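
/*
 * For reference (a sketch, not code from this file): cfi_fixup(),
 * implemented in cfi_util.c, walks one of the tables above and applies
 * every entry whose manufacturer and device IDs match the probed chip,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  Roughly:
 *
 *	for (f = fixups; f->fixup; f++) {
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 *	}
 */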

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
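
/*
 * Illustrative sketch (not code from this file): a map driver normally
 * does not call cfi_cmdset_0001() directly.  It runs the generic CFI
 * probe, which reads the primary vendor command set ID from the query
 * data and dispatches to cfi_cmdset_0001/0003/0200 as appropriate.
 * "my_map" below is a hypothetical, fully initialised struct map_info:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */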

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
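		/*
		 * CFI packs each EraseRegionInfo entry as: bits 31..16 =
		 * region block size in units of 256 bytes, bits 15..0 =
		 * number of blocks - 1.  E.g. a (hypothetical) entry of
		 * 0x0100007f yields ersize = 0x10000 * interleave and
		 * ernum = 128.
		 */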

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
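		/*
		 * Worked example (hypothetical numbers): a 64MiB chip
		 * (chipshift = 26) advertising numparts = 4 gives
		 * partshift = 26 - 2 = 24, i.e. four 16MiB virtual
		 * chips per physical chip.
		 */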

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have the possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;
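
	/*
	 * E.g. (hypothetical numbers): a typical operation time of 16us
	 * gives timeo = 128us and sleep_time = 8us; all three values are
	 * in microseconds.
	 */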

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay)


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
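	/*
	 * E.g. (hypothetical numbers): with chipshift = 25 (32MiB chips),
	 * from = 0x2100000 gives chipnum = 1 and ofs = 0x100000.
	 */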

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
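	/*
	 * 0x1a masks SR.1 (block locked), SR.3 (VPP low) and SR.4
	 * (program error); the decoding below checks SR.1 and SR.3
	 * specifically and treats anything else as a generic failure.
	 */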
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
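
	/*
	 * Worked example (hypothetical numbers): bankwidth 4, adr ending
	 * in 0x102, len = 5.  word_gap starts as (-adr & 3) = 2 and
	 * words = (5 - 2 + 3)/4 = 1; the else branch above then sets
	 * word_gap = 2 and aligns adr down to ...0x100.  Two bus words
	 * are touched, and CMD(words) below encodes that count minus one.
	 */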

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   adr, len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
1671 
1672 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1673 				      unsigned long adr, int len, void *thunk)
1674 {
1675 	struct cfi_private *cfi = map->fldrv_priv;
1676 	map_word status;
1677 	int retries = 3;
1678 	int ret;
1679 
1680 	adr += chip->start;
1681 
1682  retry:
1683 	spin_lock(chip->mutex);
1684 	ret = get_chip(map, chip, adr, FL_ERASING);
1685 	if (ret) {
1686 		spin_unlock(chip->mutex);
1687 		return ret;
1688 	}
1689 
1690 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1691 	ENABLE_VPP(map);
1692 	xip_disable(map, chip, adr);
1693 
1694 	/* Clear the status register first */
1695 	map_write(map, CMD(0x50), adr);
1696 
1697 	/* Now erase */
1698 	map_write(map, CMD(0x20), adr);
1699 	map_write(map, CMD(0xD0), adr);
1700 	chip->state = FL_ERASING;
1701 	chip->erase_suspended = 0;
1702 
1703 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1704 				   adr, len,
1705 				   chip->erase_time);
1706 	if (ret) {
1707 		map_write(map, CMD(0x70), adr);
1708 		chip->state = FL_STATUS;
1709 		xip_enable(map, chip, adr);
1710 		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1711 		goto out;
1712 	}
1713 
1714 	/* We've broken this before. It doesn't hurt to be safe */
1715 	map_write(map, CMD(0x70), adr);
1716 	chip->state = FL_STATUS;
1717 	status = map_read(map, adr);
1718 
1719 	/* check for errors: CMD(0x3a) tests SR.1 (locked), SR.3 (VPP low), SR.4 (program error) and SR.5 (erase error) */
1720 	if (map_word_bitsset(map, status, CMD(0x3a))) {
1721 		unsigned long chipstatus = MERGESTATUS(status);
1722 
1723 		/* Reset the error bits */
1724 		map_write(map, CMD(0x50), adr);
1725 		map_write(map, CMD(0x70), adr);
1726 		xip_enable(map, chip, adr);
1727 
1728 		if ((chipstatus & 0x30) == 0x30) {
1729 			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1730 			ret = -EINVAL;
1731 		} else if (chipstatus & 0x02) {
1732 			/* Protection bit set */
1733 			ret = -EROFS;
1734 		} else if (chipstatus & 0x8) {
1735 			/* Voltage */
1736 			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1737 			ret = -EIO;
1738 		} else if (chipstatus & 0x20 && retries--) {
1739 			printk(KERN_DEBUG "%s: block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", map->name, adr, chipstatus);
1740 			put_chip(map, chip, adr);
1741 			spin_unlock(chip->mutex);
1742 			goto retry;
1743 		} else {
1744 			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1745 			ret = -EIO;
1746 		}
1747 
1748 		goto out;
1749 	}
1750 
1751 	xip_enable(map, chip, adr);
1752  out:	put_chip(map, chip, adr);
1753 	spin_unlock(chip->mutex);
1754 	return ret;
1755 }
1756 
1757 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1758 {
1759 	unsigned long ofs, len;
1760 	int ret;
1761 
1762 	ofs = instr->addr;
1763 	len = instr->len;
1764 
1765 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1766 	if (ret)
1767 		return ret;
1768 
1769 	instr->state = MTD_ERASE_DONE;
1770 	mtd_erase_callback(instr);
1771 
1772 	return 0;
1773 }
1774 
1775 static void cfi_intelext_sync (struct mtd_info *mtd)
1776 {
1777 	struct map_info *map = mtd->priv;
1778 	struct cfi_private *cfi = map->fldrv_priv;
1779 	int i;
1780 	struct flchip *chip;
1781 	int ret = 0;
1782 
1783 	for (i=0; !ret && i<cfi->numchips; i++) {
1784 		chip = &cfi->chips[i];
1785 
1786 		spin_lock(chip->mutex);
1787 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1788 
1789 		if (!ret) {
1790 			chip->oldstate = chip->state;
1791 			chip->state = FL_SYNCING;
1792 			/* No need to wake_up() on this state change -
1793 			 * as the whole point is that nobody can do anything
1794 			 * with the chip now anyway.
1795 			 */
1796 		}
1797 		spin_unlock(chip->mutex);
1798 	}
1799 
1800 	/* Unlock the chips again */
1801 
1802 	for (i--; i >=0; i--) {
1803 		chip = &cfi->chips[i];
1804 
1805 		spin_lock(chip->mutex);
1806 
1807 		if (chip->state == FL_SYNCING) {
1808 			chip->state = chip->oldstate;
1809 			chip->oldstate = FL_READY;
1810 			wake_up(&chip->wq);
1811 		}
1812 		spin_unlock(chip->mutex);
1813 	}
1814 }
1815 
1816 #ifdef DEBUG_LOCK_BITS
1817 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1818 						struct flchip *chip,
1819 						unsigned long adr,
1820 						int len, void *thunk)
1821 {
1822 	struct cfi_private *cfi = map->fldrv_priv;
1823 	int status, ofs_factor = cfi->interleave * cfi->device_type;
1824 
1825 	adr += chip->start;
1826 	xip_disable(map, chip, adr+(2*ofs_factor));
1827 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
1828 	chip->state = FL_JEDEC_QUERY;
1829 	status = cfi_read_query(map, adr+(2*ofs_factor));
1830 	xip_enable(map, chip, 0);
1831 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1832 	       adr, status);
1833 	return 0;
1834 }
1835 #endif
1836 
1837 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
1838 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
1839 
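/*
 * One block-level worker handles both lock and unlock; cfi_varsize_frob()
 * passes one of the thunk values above to select the operation.
 */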
1840 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1841 				       unsigned long adr, int len, void *thunk)
1842 {
1843 	struct cfi_private *cfi = map->fldrv_priv;
1844 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1845 	int udelay;
1846 	int ret;
1847 
1848 	adr += chip->start;
1849 
1850 	spin_lock(chip->mutex);
1851 	ret = get_chip(map, chip, adr, FL_LOCKING);
1852 	if (ret) {
1853 		spin_unlock(chip->mutex);
1854 		return ret;
1855 	}
1856 
1857 	ENABLE_VPP(map);
1858 	xip_disable(map, chip, adr);
1859 
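	/* Intel lock command sequences: 0x60 (lock setup) followed by
	   0x01 sets the block lock bit; 0x60 followed by 0xD0 clears it. */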
1860 	map_write(map, CMD(0x60), adr);
1861 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1862 		map_write(map, CMD(0x01), adr);
1863 		chip->state = FL_LOCKING;
1864 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1865 		map_write(map, CMD(0xD0), adr);
1866 		chip->state = FL_UNLOCKING;
1867 	} else
1868 		BUG();
1869 
1870 	/*
1871 	 * If Instant Individual Block Locking is supported (FeatureSupport
1872 	 * bit 5), there is no need to delay; otherwise poll once per jiffy.
1873 	 */
1874 	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1875 
1876 	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1877 	if (ret) {
1878 		map_write(map, CMD(0x70), adr);
1879 		chip->state = FL_STATUS;
1880 		xip_enable(map, chip, adr);
1881 		printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1882 		goto out;
1883 	}
1884 
1885 	xip_enable(map, chip, adr);
1886 out:	put_chip(map, chip, adr);
1887 	spin_unlock(chip->mutex);
1888 	return ret;
1889 }
1890 
1891 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1892 {
1893 	int ret;
1894 
1895 #ifdef DEBUG_LOCK_BITS
1896 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1897 	       __FUNCTION__, (unsigned long long)ofs, len);
1898 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1899 		ofs, len, 0);
1900 #endif
1901 
1902 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1903 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1904 
1905 #ifdef DEBUG_LOCK_BITS
1906 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1907 	       __FUNCTION__, ret);
1908 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1909 		ofs, len, 0);
1910 #endif
1911 
1912 	return ret;
1913 }
1914 
1915 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1916 {
1917 	int ret;
1918 
1919 #ifdef DEBUG_LOCK_BITS
1920 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zX\n",
1921 	       __FUNCTION__, (unsigned long long)ofs, len);
1922 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1923 		ofs, len, 0);
1924 #endif
1925 
1926 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1927 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1928 
1929 #ifdef DEBUG_LOCK_BITS
1930 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1931 	       __FUNCTION__, ret);
1932 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1933 		ofs, len, 0);
1934 #endif
1935 
1936 	return ret;
1937 }
1938 
1939 #ifdef CONFIG_MTD_OTP
1940 
1941 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1942 			u_long data_offset, u_char *buf, u_int size,
1943 			u_long prot_offset, u_int groupno, u_int groupsize);
1944 
1945 static int __xipram
1946 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1947 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1948 {
1949 	struct cfi_private *cfi = map->fldrv_priv;
1950 	int ret;
1951 
1952 	spin_lock(chip->mutex);
1953 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1954 	if (ret) {
1955 		spin_unlock(chip->mutex);
1956 		return ret;
1957 	}
1958 
1959 	/* let's ensure we're not reading back cached data from array mode */
1960 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1961 
1962 	xip_disable(map, chip, chip->start);
1963 	if (chip->state != FL_JEDEC_QUERY) {
1964 		map_write(map, CMD(0x90), chip->start);
1965 		chip->state = FL_JEDEC_QUERY;
1966 	}
1967 	map_copy_from(map, buf, chip->start + offset, size);
1968 	xip_enable(map, chip, chip->start);
1969 
1970 	/* then ensure we don't keep OTP data in the cache */
1971 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1972 
1973 	put_chip(map, chip, chip->start);
1974 	spin_unlock(chip->mutex);
1975 	return 0;
1976 }
1977 
1978 static int
1979 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1980 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1981 {
1982 	int ret;
1983 
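	/*
	 * Program one bus word at a time, merging unaligned head/tail
	 * bytes into a 0xFF-filled word so the untouched bits stay in the
	 * erased state. Illustrative case (not from the code): with a
	 * 2-byte bankwidth, a 2-byte write at offset 1 is issued as two
	 * partial words, one at offset 0 with byte 0 left at 0xFF and one
	 * at offset 2 with byte 3 left at 0xFF.
	 */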
1984 	while (size) {
1985 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1986 		int gap = offset - bus_ofs;
1987 		int n = min_t(int, size, map_bankwidth(map)-gap);
1988 		map_word datum = map_word_ff(map);
1989 
1990 		datum = map_word_load_partial(map, datum, buf, gap, n);
1991 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1992 		if (ret)
1993 			return ret;
1994 
1995 		offset += n;
1996 		buf += n;
1997 		size -= n;
1998 	}
1999 
2000 	return 0;
2001 }
2002 
2003 static int
2004 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2005 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2006 {
2007 	struct cfi_private *cfi = map->fldrv_priv;
2008 	map_word datum;
2009 
2010 	/* make sure area matches group boundaries */
2011 	if (size != grpsz)
2012 		return -EXDEV;
2013 
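	/* Lock the group by programming its protection-lock word with bit
	   <grpno> cleared; flash programming can only turn 1s into 0s, so
	   once cleared the lock bit cannot be restored (true OTP). */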
2014 	datum = map_word_ff(map);
2015 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2016 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2017 }
2018 
2019 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2020 				 size_t *retlen, u_char *buf,
2021 				 otp_op_t action, int user_regs)
2022 {
2023 	struct map_info *map = mtd->priv;
2024 	struct cfi_private *cfi = map->fldrv_priv;
2025 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2026 	struct flchip *chip;
2027 	struct cfi_intelext_otpinfo *otp;
2028 	u_long devsize, reg_prot_offset, data_offset;
2029 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2030 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2031 	int ret;
2032 
2033 	*retlen = 0;
2034 
2035 	/* Check that we actually have some OTP registers (FeatureSupport bit 6 = protection bits) */
2036 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2037 		return -ENODATA;
2038 
2039 	/* we need real chips here, not the virtual ones the partition fixup may have created */
2040 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2041 	chip_step = devsize >> cfi->chipshift;
2042 	chip_num = 0;
2043 
2044 	/* Some chips have OTP located in the _top_ partition only.
2045 	   For example: Intel 28F256L18T (T means top-parameter device) */
2046 	if (cfi->mfr == MANUFACTURER_INTEL) {
2047 		switch (cfi->id) {
2048 		case 0x880b:
2049 		case 0x880c:
2050 		case 0x880d:
2051 			chip_num = chip_step - 1;
2052 		}
2053 	}
2054 
2055 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2056 		chip = &cfi->chips[chip_num];
2057 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2058 
2059 		/* first OTP region */
2060 		field = 0;
2061 		reg_prot_offset = extp->ProtRegAddr;
2062 		reg_fact_groups = 1;
2063 		reg_fact_size = 1 << extp->FactProtRegSize;
2064 		reg_user_groups = 1;
2065 		reg_user_size = 1 << extp->UserProtRegSize;
2066 
2067 		while (len > 0) {
2068 			/* flash geometry fixup */
2069 			data_offset = reg_prot_offset + 1;
2070 			data_offset *= cfi->interleave * cfi->device_type;
2071 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2072 			reg_fact_size *= cfi->interleave;
2073 			reg_user_size *= cfi->interleave;
2074 
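			/*
			 * Worked example (hypothetical geometry): on one x16
			 * chip (interleave 1, device_type 2) with ProtRegAddr
			 * 0x80, the lock word lands at byte address 0x100 and
			 * the protection data starts at (0x80 + 1) * 2 = 0x102.
			 */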
2075 			if (user_regs) {
2076 				groups = reg_user_groups;
2077 				groupsize = reg_user_size;
2078 				/* skip over factory reg area */
2079 				groupno = reg_fact_groups;
2080 				data_offset += reg_fact_groups * reg_fact_size;
2081 			} else {
2082 				groups = reg_fact_groups;
2083 				groupsize = reg_fact_size;
2084 				groupno = 0;
2085 			}
2086 
2087 			while (len > 0 && groups > 0) {
2088 				if (!action) {
2089 					/*
2090 					 * Special case: if action is NULL
2091 					 * we fill buf with otp_info records.
2092 					 */
2093 					struct otp_info *otpinfo;
2094 					map_word lockword;
2095 					if (len <= sizeof(struct otp_info))	/* len is unsigned; test before subtracting */
2096 						return -ENOSPC;
2097 					len -= sizeof(struct otp_info);
2098 					ret = do_otp_read(map, chip,
2099 							  reg_prot_offset,
2100 							  (u_char *)&lockword,
2101 							  map_bankwidth(map),
2102 							  0, 0, 0);
2103 					if (ret)
2104 						return ret;
2105 					otpinfo = (struct otp_info *)buf;
2106 					otpinfo->start = from;
2107 					otpinfo->length = groupsize;
2108 					otpinfo->locked =
2109 					   !map_word_bitsset(map, lockword,
2110 							     CMD(1 << groupno));
2111 					from += groupsize;
2112 					buf += sizeof(*otpinfo);
2113 					*retlen += sizeof(*otpinfo);
2114 				} else if (from >= groupsize) {
2115 					from -= groupsize;
2116 					data_offset += groupsize;
2117 				} else {
2118 					int size = groupsize;
2119 					data_offset += from;
2120 					size -= from;
2121 					from = 0;
2122 					if (size > len)
2123 						size = len;
2124 					ret = action(map, chip, data_offset,
2125 						     buf, size, reg_prot_offset,
2126 						     groupno, groupsize);
2127 					if (ret < 0)
2128 						return ret;
2129 					buf += size;
2130 					len -= size;
2131 					*retlen += size;
2132 					data_offset += size;
2133 				}
2134 				groupno++;
2135 				groups--;
2136 			}
2137 
2138 			/* next OTP region */
2139 			if (++field == extp->NumProtectionFields)
2140 				break;
2141 			reg_prot_offset = otp->ProtRegAddr;
2142 			reg_fact_groups = otp->FactGroups;
2143 			reg_fact_size = 1 << otp->FactProtRegSize;
2144 			reg_user_groups = otp->UserGroups;
2145 			reg_user_size = 1 << otp->UserProtRegSize;
2146 			otp++;
2147 		}
2148 	}
2149 
2150 	return 0;
2151 }
2152 
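/*
 * The MTD OTP entry points below are thin wrappers around
 * cfi_intelext_otp_walk(): the last argument selects the user (1) or
 * factory (0) register file and the action callback does the work; a
 * NULL action makes the walker emit otp_info records instead.
 */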
2153 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2154 					   size_t len, size_t *retlen,
2155 					    u_char *buf)
2156 {
2157 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2158 				     buf, do_otp_read, 0);
2159 }
2160 
2161 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2162 					   size_t len, size_t *retlen,
2163 					    u_char *buf)
2164 {
2165 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2166 				     buf, do_otp_read, 1);
2167 }
2168 
2169 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2170 					    size_t len, size_t *retlen,
2171 					     u_char *buf)
2172 {
2173 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2174 				     buf, do_otp_write, 1);
2175 }
2176 
2177 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2178 					   loff_t from, size_t len)
2179 {
2180 	size_t retlen;
2181 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2182 				     NULL, do_otp_lock, 1);
2183 }
2184 
2185 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2186 					   struct otp_info *buf, size_t len)
2187 {
2188 	size_t retlen;
2189 	int ret;
2190 
2191 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2192 	return ret ? : retlen;
2193 }
2194 
2195 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2196 					   struct otp_info *buf, size_t len)
2197 {
2198 	size_t retlen;
2199 	int ret;
2200 
2201 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2202 	return ret ? : retlen;
2203 }
2204 
2205 #endif
2206 
2207 static int cfi_intelext_suspend(struct mtd_info *mtd)
2208 {
2209 	struct map_info *map = mtd->priv;
2210 	struct cfi_private *cfi = map->fldrv_priv;
2211 	int i;
2212 	struct flchip *chip;
2213 	int ret = 0;
2214 
2215 	for (i=0; !ret && i<cfi->numchips; i++) {
2216 		chip = &cfi->chips[i];
2217 
2218 		spin_lock(chip->mutex);
2219 
2220 		switch (chip->state) {
2221 		case FL_READY:
2222 		case FL_STATUS:
2223 		case FL_CFI_QUERY:
2224 		case FL_JEDEC_QUERY:
2225 			if (chip->oldstate == FL_READY) {
2226 				/* place the chip in a known state before suspend */
2227 				map_write(map, CMD(0xFF), cfi->chips[i].start);
2228 				chip->oldstate = chip->state;
2229 				chip->state = FL_PM_SUSPENDED;
2230 				/* No need to wake_up() on this state change -
2231 				 * as the whole point is that nobody can do anything
2232 				 * with the chip now anyway.
2233 				 */
2234 			} else {
2235 				/* There seems to be an operation pending. We must wait for it. */
2236 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2237 				ret = -EAGAIN;
2238 			}
2239 			break;
2240 		default:
2241 			/* Should we actually wait? Once upon a time these routines weren't
2242 			   allowed to. Or should we return -EAGAIN, because the upper layers
2243 			   ought to have already shut down anything which was using the device
2244 			   anyway? The latter for now. */
2245 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2246 			ret = -EAGAIN;	/* fall through to the break below */
2247 		case FL_PM_SUSPENDED:
2248 			break;
2249 		}
2250 		spin_unlock(chip->mutex);
2251 	}
2252 
2253 	/* Unlock the chips again */
2254 
2255 	if (ret) {
2256 		for (i--; i >=0; i--) {
2257 			chip = &cfi->chips[i];
2258 
2259 			spin_lock(chip->mutex);
2260 
2261 			if (chip->state == FL_PM_SUSPENDED) {
2262 				/* No need to force it into a known state here,
2263 				   because we're returning failure, and it didn't
2264 				   get power cycled */
2265 				chip->state = chip->oldstate;
2266 				chip->oldstate = FL_READY;
2267 				wake_up(&chip->wq);
2268 			}
2269 			spin_unlock(chip->mutex);
2270 		}
2271 	}
2272 
2273 	return ret;
2274 }
2275 
2276 static void cfi_intelext_resume(struct mtd_info *mtd)
2277 {
2278 	struct map_info *map = mtd->priv;
2279 	struct cfi_private *cfi = map->fldrv_priv;
2280 	int i;
2281 	struct flchip *chip;
2282 
2283 	for (i=0; i<cfi->numchips; i++) {
2284 
2285 		chip = &cfi->chips[i];
2286 
2287 		spin_lock(chip->mutex);
2288 
2289 		/* Go to known state. Chip may have been power cycled */
2290 		if (chip->state == FL_PM_SUSPENDED) {
2291 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2292 			chip->oldstate = chip->state = FL_READY;
2293 			wake_up(&chip->wq);
2294 		}
2295 
2296 		spin_unlock(chip->mutex);
2297 	}
2298 }
2299 
2300 static int cfi_intelext_reset(struct mtd_info *mtd)
2301 {
2302 	struct map_info *map = mtd->priv;
2303 	struct cfi_private *cfi = map->fldrv_priv;
2304 	int i, ret;
2305 
2306 	for (i=0; i < cfi->numchips; i++) {
2307 		struct flchip *chip = &cfi->chips[i];
2308 
2309 		/* force the completion of any ongoing operation
2310 		   and switch to array mode so any bootloader in
2311 		   flash is accessible for soft reboot. */
2312 		spin_lock(chip->mutex);
2313 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2314 		if (!ret) {
2315 			map_write(map, CMD(0xff), chip->start);
2316 			chip->state = FL_READY;
2317 		}
2318 		spin_unlock(chip->mutex);
2319 	}
2320 
2321 	return 0;
2322 }
2323 
2324 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2325 			       void *v)
2326 {
2327 	struct mtd_info *mtd;
2328 
2329 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2330 	cfi_intelext_reset(mtd);
2331 	return NOTIFY_DONE;
2332 }
2333 
2334 static void cfi_intelext_destroy(struct mtd_info *mtd)
2335 {
2336 	struct map_info *map = mtd->priv;
2337 	struct cfi_private *cfi = map->fldrv_priv;
2338 	cfi_intelext_reset(mtd);
2339 	unregister_reboot_notifier(&mtd->reboot_notifier);
2340 	kfree(cfi->cmdset_priv);
2341 	kfree(cfi->cfiq);
2342 	kfree(cfi->chips[0].priv);
2343 	kfree(cfi);
2344 	kfree(mtd->eraseregions);
2345 }
2346 
2347 MODULE_LICENSE("GPL");
2348 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2349 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2350 MODULE_ALIAS("cfi_cmdset_0003");
2351 MODULE_ALIAS("cfi_cmdset_0200");
2352