/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

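/*
 * Intel/Sharp command opcodes used throughout this driver.  This summary
 * is added for reference only; the chip datasheets (e.g. 28FxxxJ3A)
 * remain authoritative:
 *   0xff  read array           0x70  read status register
 *   0x50  clear status         0x90  read identifier/lock status
 *   0x40  word program         0xc0  protection (OTP) program
 *   0xe8  write to buffer      0x20  block erase setup
 *   0xd0  confirm / resume     0xb0  program/erase suspend
 */
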
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are as well.  This
	 * table picks up all the cases where we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

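/*
 * Read the Intel extended query table.  Version 1.3 tables carry a
 * variable-size tail (extra protection register fields, burst read info
 * and per-region partition descriptors), so read the fixed-size part
 * first, work out how much extra space the tail needs, and re-read the
 * whole structure at the larger size if necessary.
 */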
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

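	/*
	 * Each CFI EraseRegionInfo word encodes a region as: bits 0-15 =
	 * (number of erase blocks - 1), bits 16-31 = block size in units
	 * of 256 bytes; hence the shift/mask arithmetic below.
	 */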
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		if (mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

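	/*
	 * Note: status_OK tests SR.7, the write state machine "ready"
	 * bit.  status_PWS tests SR.0 which, on partitioned devices,
	 * indicates that the busy state belongs to another partition
	 * (see the FL_STATUS polling loop below).
	 */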
 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* Fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
			        break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both expiry of the given timeout and
 * for pending (but still masked) hardware interrupts.  Whenever an
 * interrupt is pending, the flash erase or write operation is suspended,
 * array mode is restored and interrupts are unmasked.  Task scheduling
 * might also happen at that point.  The CPU eventually returns from the
 * interrupt or the call to schedule() and the suspended flash operation
 * is resumed for the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

866 				unsigned long adr, int usec)
867 {
868 	struct cfi_private *cfi = map->fldrv_priv;
869 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
870 	map_word status, OK = CMD(0x80);
871 	unsigned long suspended, start = xip_currtime();
872 	flstate_t oldstate, newstate;
873 
874 	do {
875 		cpu_relax();
876 		if (xip_irqpending() && cfip &&
877 		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
878 		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
879 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
880 			/*
881 			 * Let's suspend the erase or write operation when
882 			 * supported.  Note that we currently don't try to
883 			 * suspend interleaved chips if there is already
884 			 * another operation suspended (imagine what happens
885 			 * when one chip was already done with the current
886 			 * operation while another chip suspended it, then
887 			 * we resume the whole thing at once).  Yes, it
888 			 * can happen!
889 			 */
890 			map_write(map, CMD(0xb0), adr);
891 			map_write(map, CMD(0x70), adr);
892 			usec -= xip_elapsed_since(start);
893 			suspended = xip_currtime();
894 			do {
895 				if (xip_elapsed_since(suspended) > 100000) {
896 					/*
897 					 * The chip doesn't want to suspend
898 					 * after waiting for 100 msecs.
899 					 * This is a critical error but there
900 					 * is not much we can do here.
901 					 */
902 					return;
903 				}
904 				status = map_read(map, adr);
905 			} while (!map_word_andequal(map, status, OK, OK));
906 
907 			/* Suspend succeeded */
908 			oldstate = chip->state;
909 			if (oldstate == FL_ERASING) {
910 				if (!map_word_bitsset(map, status, CMD(0x40)))
911 					break;
912 				newstate = FL_XIP_WHILE_ERASING;
913 				chip->erase_suspended = 1;
914 			} else {
915 				if (!map_word_bitsset(map, status, CMD(0x04)))
916 					break;
917 				newstate = FL_XIP_WHILE_WRITING;
918 				chip->write_suspended = 1;
919 			}
920 			chip->state = newstate;
921 			map_write(map, CMD(0xff), adr);
922 			(void) map_read(map, adr);
923 			asm volatile (".rep 8; nop; .endr");
924 			local_irq_enable();
925 			spin_unlock(chip->mutex);
926 			asm volatile (".rep 8; nop; .endr");
927 			cond_resched();
928 
929 			/*
930 			 * We're back.  However someone else might have
931 			 * decided to go write to the chip if we are in
932 			 * a suspended erase state.  If so let's wait
933 			 * until it's done.
934 			 */
935 			spin_lock(chip->mutex);
936 			while (chip->state != newstate) {
937 				DECLARE_WAITQUEUE(wait, current);
938 				set_current_state(TASK_UNINTERRUPTIBLE);
939 				add_wait_queue(&chip->wq, &wait);
940 				spin_unlock(chip->mutex);
941 				schedule();
942 				remove_wait_queue(&chip->wq, &wait);
943 				spin_lock(chip->mutex);
944 			}
945 			/* Disallow XIP again */
946 			local_irq_disable();
947 
948 			/* Resume the write or erase operation */
949 			map_write(map, CMD(0xd0), adr);
950 			map_write(map, CMD(0x70), adr);
951 			chip->state = oldstate;
952 			start = xip_currtime();
953 		} else if (usec >= 1000000/HZ) {
954 			/*
955 			 * Try to save on CPU power when waiting delay
956 			 * is at least a system timer tick period.
957 			 * No need to be extremely accurate here.
958 			 */
959 			xip_cpu_idle();
960 		}
961 		status = map_read(map, adr);
962 	} while (!map_word_andequal(map, status, OK, OK)
963 		 && xip_elapsed_since(start) < usec);
964 }

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why no special care is
 * taken for the add_wait_queue() or schedule() calls that appear within a
 * couple of xip_disable()'d code paths, like in do_erase_oneblock for
 * example.  The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

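/*
 * point()/unpoint() hand back a pointer directly into the flash mapping
 * for zero-copy reads.  Each chip keeps a ref_point_counter so that
 * nested point() calls leave FL_POINT state only once the last user has
 * called unpoint().
 */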
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
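	/*
	 * Adapt the polling delay: word_write_time is the delay applied
	 * before the first status check.  If the write was already done
	 * on the first check, shorten it (never below 1); if we had to
	 * poll more than once, lengthen it.
	 */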
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


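/*
 * Word-write entry point.  A possibly unaligned head is handled with a
 * read-modify-write of one bus word padded with 0xff (programming 1 bits
 * leaves NOR cells unchanged), followed by full bus-width words and a
 * similarly padded tail.
 */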
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

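	/*
	 * Buffer programming sequence: issue 0xe8 until the chip reports
	 * ready in the status register, write the word count minus one,
	 * stream out the data words, then confirm with 0xd0.
	 */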
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr);

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				cmd_adr, len,
				chip->buffer_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, cmd_adr, 1);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), cmd_adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*1000/2);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

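	/*
	 * Status register bits checked below (mask 0x3a): SR.1 block
	 * locked, SR.3 VPP low, SR.4 program error, SR.5 erase error;
	 * SR.4 and SR.5 together indicate an improper command sequence.
	 */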
	/* check for errors reported in the status register */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = MERGESTATUS(status);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
		ret = 0;
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
1743 
1744 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1745 {
1746 	unsigned long ofs, len;
1747 	int ret;
1748 
1749 	ofs = instr->addr;
1750 	len = instr->len;
1751 
1752 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1753 	if (ret)
1754 		return ret;
1755 
1756 	instr->state = MTD_ERASE_DONE;
1757 	mtd_erase_callback(instr);
1758 
1759 	return 0;
1760 }
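
/*
 * Usage sketch (illustrative only, modelled on mtdchar-style callers;
 * the wait-queue completion scheme below is the caller's choice, not
 * something this driver mandates):
 */
#if 0
static void example_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

static int example_erase(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t waitq;
	struct erase_info instr;
	int ret;

	init_waitqueue_head(&waitq);
	memset(&instr, 0, sizeof(instr));
	instr.mtd = mtd;
	instr.addr = ofs;
	instr.len = len;
	instr.callback = example_erase_callback;
	instr.priv = (u_long)&waitq;

	ret = mtd->erase(mtd, &instr);
	if (!ret) {
		/* the driver signals completion via mtd_erase_callback() */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (instr.state != MTD_ERASE_DONE &&
		    instr.state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);
		ret = (instr.state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return ret;
}
#endif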
1761 
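/*
 * mtd->sync(): force every chip through get_chip() into the idle
 * FL_SYNCING state so all pending operations complete, then release
 * the chips again in reverse order.
 */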
1762 static void cfi_intelext_sync (struct mtd_info *mtd)
1763 {
1764 	struct map_info *map = mtd->priv;
1765 	struct cfi_private *cfi = map->fldrv_priv;
1766 	int i;
1767 	struct flchip *chip;
1768 	int ret = 0;
1769 
1770 	for (i=0; !ret && i<cfi->numchips; i++) {
1771 		chip = &cfi->chips[i];
1772 
1773 		spin_lock(chip->mutex);
1774 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
1775 
1776 		if (!ret) {
1777 			chip->oldstate = chip->state;
1778 			chip->state = FL_SYNCING;
1779 			/* No need to wake_up() on this state change -
1780 			 * as the whole point is that nobody can do anything
1781 			 * with the chip now anyway.
1782 			 */
1783 		}
1784 		spin_unlock(chip->mutex);
1785 	}
1786 
1787 	/* Unlock the chips again */
1788 
1789 	for (i--; i >= 0; i--) {
1790 		chip = &cfi->chips[i];
1791 
1792 		spin_lock(chip->mutex);
1793 
1794 		if (chip->state == FL_SYNCING) {
1795 			chip->state = chip->oldstate;
1796 			chip->oldstate = FL_READY;
1797 			wake_up(&chip->wq);
1798 		}
1799 		spin_unlock(chip->mutex);
1800 	}
1801 }
1802 
1803 #ifdef DEBUG_LOCK_BITS
1804 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1805 						struct flchip *chip,
1806 						unsigned long adr,
1807 						int len, void *thunk)
1808 {
1809 	struct cfi_private *cfi = map->fldrv_priv;
1810 	int status, ofs_factor = cfi->interleave * cfi->device_type;
1811 
1812 	adr += chip->start;
1813 	xip_disable(map, chip, adr+(2*ofs_factor));
1814 	map_write(map, CMD(0x90), adr+(2*ofs_factor));
1815 	chip->state = FL_JEDEC_QUERY;
1816 	status = cfi_read_query(map, adr+(2*ofs_factor));
1817 	xip_enable(map, chip, 0);
1818 	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1819 	       adr, status);
1820 	return 0;
1821 }
1822 #endif
1823 
1824 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
1825 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
1826 
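/*
 * Set or clear the lock bit on one block: Lock Setup (0x60) followed
 * by either Set Block Lock Bit (0x01) or Clear Block Lock Bits (0xD0).
 * On parts without Instant Individual Block Locking the confirm takes
 * a real programming cycle, hence the delay and status polling below.
 */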
1827 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1828 				       unsigned long adr, int len, void *thunk)
1829 {
1830 	struct cfi_private *cfi = map->fldrv_priv;
1831 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1832 	map_word status, status_OK;
1833 	unsigned long timeo = jiffies + HZ;
1834 	int ret;
1835 
1836 	adr += chip->start;
1837 
1838 	/* Let's determine this according to the interleave only once */
1839 	status_OK = CMD(0x80);
1840 
1841 	spin_lock(chip->mutex);
1842 	ret = get_chip(map, chip, adr, FL_LOCKING);
1843 	if (ret) {
1844 		spin_unlock(chip->mutex);
1845 		return ret;
1846 	}
1847 
1848 	ENABLE_VPP(map);
1849 	xip_disable(map, chip, adr);
1850 
1851 	map_write(map, CMD(0x60), adr);
1852 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1853 		map_write(map, CMD(0x01), adr);
1854 		chip->state = FL_LOCKING;
1855 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1856 		map_write(map, CMD(0xD0), adr);
1857 		chip->state = FL_UNLOCKING;
1858 	} else
1859 		BUG();
1860 
1861 	/*
1862 	 * If Instant Individual Block Locking is supported (extended
1863 	 * query FeatureSupport bit 5), there is no need to delay.
1864 	 */
1865 
1866 	if (!extp || !(extp->FeatureSupport & (1 << 5)))
1867 		UDELAY(map, chip, adr, 1000000/HZ);
1868 
1869 	/* FIXME. Use a timer to check this, and return immediately. */
1870 	/* Once the state machine's known to be working I'll do that */
1871 
1872 	timeo = jiffies + (HZ*20);
1873 	for (;;) {
1874 
1875 		status = map_read(map, adr);
1876 		if (map_word_andequal(map, status, status_OK, status_OK))
1877 			break;
1878 
1879 		/* OK Still waiting */
1880 		if (time_after(jiffies, timeo)) {
1881 			map_word Xstatus;
1882 			map_write(map, CMD(0x70), adr);
1883 			chip->state = FL_STATUS;
1884 			Xstatus = map_read(map, adr);
1885 			xip_enable(map, chip, adr);
1886 			printk(KERN_ERR "timed out waiting for lock/unlock to complete: status = %lx, Xstatus = %lx.\n",
1887 			       status.x[0], Xstatus.x[0]);
1888 			put_chip(map, chip, adr);
1889 			spin_unlock(chip->mutex);
1890 			return -EIO;
1891 		}
1892 
1893 		/* Latency issues. Drop the lock, wait a while and retry */
1894 		UDELAY(map, chip, adr, 1);
1895 	}
1896 
1897 	/* Done and happy. */
1898 	chip->state = FL_STATUS;
1899 	xip_enable(map, chip, adr);
1900 	put_chip(map, chip, adr);
1901 	spin_unlock(chip->mutex);
1902 	return 0;
1903 }
1904 
1905 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1906 {
1907 	int ret;
1908 
1909 #ifdef DEBUG_LOCK_BITS
1910 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%zx\n",
1911 	       __FUNCTION__, (unsigned long long)ofs, len);
1912 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1913 		ofs, len, NULL);
1914 #endif
1915 
1916 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1917 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1918 
1919 #ifdef DEBUG_LOCK_BITS
1920 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1921 	       __FUNCTION__, ret);
1922 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1923 		ofs, len, NULL);
1924 #endif
1925 
1926 	return ret;
1927 }
1928 
1929 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1930 {
1931 	int ret;
1932 
1933 #ifdef DEBUG_LOCK_BITS
1934 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%zx\n",
1935 	       __FUNCTION__, (unsigned long long)ofs, len);
1936 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1937 		ofs, len, NULL);
1938 #endif
1939 
1940 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1941 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1942 
1943 #ifdef DEBUG_LOCK_BITS
1944 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1945 	       __FUNCTION__, ret);
1946 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1947 		ofs, len, NULL);
1948 #endif
1949 
1950 	return ret;
1951 }
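
/*
 * Usage sketch (illustrative only): Intel parts typically power up
 * with their blocks locked, so callers unlock before modifying a
 * block and may re-lock it afterwards.
 */
#if 0
	if (mtd->unlock)
		mtd->unlock(mtd, ofs, mtd->erasesize);
	/* ... erase and/or program the block ... */
	if (mtd->lock)
		mtd->lock(mtd, ofs, mtd->erasesize);
#endif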
1952 
1953 #ifdef CONFIG_MTD_OTP
1954 
1955 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1956 			u_long data_offset, u_char *buf, u_int size,
1957 			u_long prot_offset, u_int groupno, u_int groupsize);
1958 
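/*
 * The protection ("OTP") registers live in a separate address space
 * reached through Read Identifier mode (0x90).  A single walker,
 * cfi_intelext_otp_walk(), iterates over the factory and user register
 * groups described in the extended query and applies one of the
 * otp_op_t actions below to each; a NULL action makes it emit
 * otp_info records describing the groups instead.
 */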
1959 static int __xipram
1960 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1961 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1962 {
1963 	struct cfi_private *cfi = map->fldrv_priv;
1964 	int ret;
1965 
1966 	spin_lock(chip->mutex);
1967 	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1968 	if (ret) {
1969 		spin_unlock(chip->mutex);
1970 		return ret;
1971 	}
1972 
1973 	/* let's ensure we're not reading back cached data from array mode */
1974 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1975 
1976 	xip_disable(map, chip, chip->start);
1977 	if (chip->state != FL_JEDEC_QUERY) {
1978 		map_write(map, CMD(0x90), chip->start);
1979 		chip->state = FL_JEDEC_QUERY;
1980 	}
1981 	map_copy_from(map, buf, chip->start + offset, size);
1982 	xip_enable(map, chip, chip->start);
1983 
1984 	/* then ensure we don't keep OTP data in the cache */
1985 	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1986 
1987 	put_chip(map, chip, chip->start);
1988 	spin_unlock(chip->mutex);
1989 	return 0;
1990 }
1991 
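/*
 * OTP writes reuse the normal word programming path (FL_OTP_WRITE)
 * but must honour bus alignment: a partial word is merged into an
 * all-ones pattern first so that untouched bytes remain 0xFF.
 */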
1992 static int
1993 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1994 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1995 {
1996 	int ret;
1997 
1998 	while (size) {
1999 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2000 		int gap = offset - bus_ofs;
2001 		int n = min_t(int, size, map_bankwidth(map)-gap);
2002 		map_word datum = map_word_ff(map);
2003 
2004 		datum = map_word_load_partial(map, datum, buf, gap, n);
2005 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2006 		if (ret)
2007 			return ret;
2008 
2009 		offset += n;
2010 		buf += n;
2011 		size -= n;
2012 	}
2013 
2014 	return 0;
2015 }
2016 
2017 static int
2018 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2019 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2020 {
2021 	struct cfi_private *cfi = map->fldrv_priv;
2022 	map_word datum;
2023 
2024 	/* make sure area matches group boundaries */
2025 	if (size != grpsz)
2026 		return -EXDEV;
2027 
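	/*
	 * The protection lock register holds one bit per group; a bit
	 * programmed to 0 locks its group for good.  Clear only the
	 * bit for this group and program the word back.
	 */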
2028 	datum = map_word_ff(map);
2029 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2030 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2031 }
2032 
2033 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2034 				 size_t *retlen, u_char *buf,
2035 				 otp_op_t action, int user_regs)
2036 {
2037 	struct map_info *map = mtd->priv;
2038 	struct cfi_private *cfi = map->fldrv_priv;
2039 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2040 	struct flchip *chip;
2041 	struct cfi_intelext_otpinfo *otp;
2042 	u_long devsize, reg_prot_offset, data_offset;
2043 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2044 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2045 	int ret;
2046 
2047 	*retlen = 0;
2048 
2049 	/* Check that we actually have some OTP registers */
2050 	if (!extp || !(extp->FeatureSupport & (1 << 6)) || !extp->NumProtectionFields)
2051 		return -ENODATA;
2052 
2053 	/* we need real chips here, not interleaved virtual ones */
2054 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2055 	chip_step = devsize >> cfi->chipshift;
2056 	chip_num = 0;
2057 
2058 	/* Some chips have OTP located in the _top_ partition only.
2059 	   For example: Intel 28F256L18T (T means top-parameter device) */
2060 	if (cfi->mfr == MANUFACTURER_INTEL) {
2061 		switch (cfi->id) {
2062 		case 0x880b:
2063 		case 0x880c:
2064 		case 0x880d:
2065 			chip_num = chip_step - 1;
2066 		}
2067 	}
2068 
2069 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2070 		chip = &cfi->chips[chip_num];
2071 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2072 
2073 		/* first OTP region */
2074 		field = 0;
2075 		reg_prot_offset = extp->ProtRegAddr;
2076 		reg_fact_groups = 1;
2077 		reg_fact_size = 1 << extp->FactProtRegSize;
2078 		reg_user_groups = 1;
2079 		reg_user_size = 1 << extp->UserProtRegSize;
2080 
2081 		while (len > 0) {
2082 			/* flash geometry fixup */
2083 			data_offset = reg_prot_offset + 1;
2084 			data_offset *= cfi->interleave * cfi->device_type;
2085 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2086 			reg_fact_size *= cfi->interleave;
2087 			reg_user_size *= cfi->interleave;
2088 
2089 			if (user_regs) {
2090 				groups = reg_user_groups;
2091 				groupsize = reg_user_size;
2092 				/* skip over factory reg area */
2093 				groupno = reg_fact_groups;
2094 				data_offset += reg_fact_groups * reg_fact_size;
2095 			} else {
2096 				groups = reg_fact_groups;
2097 				groupsize = reg_fact_size;
2098 				groupno = 0;
2099 			}
2100 
2101 			while (len > 0 && groups > 0) {
2102 				if (!action) {
2103 					/*
2104 					 * Special case: if action is NULL
2105 					 * we fill buf with otp_info records.
2106 					 */
2107 					struct otp_info *otpinfo;
2108 					map_word lockword;
2109 					if (len <= sizeof(struct otp_info))
2110 						return -ENOSPC;
2111 					len -= sizeof(struct otp_info);
2112 					ret = do_otp_read(map, chip,
2113 							  reg_prot_offset,
2114 							  (u_char *)&lockword,
2115 							  map_bankwidth(map),
2116 							  0, 0,  0);
2117 					if (ret)
2118 						return ret;
2119 					otpinfo = (struct otp_info *)buf;
2120 					otpinfo->start = from;
2121 					otpinfo->length = groupsize;
2122 					otpinfo->locked =
2123 					   !map_word_bitsset(map, lockword,
2124 							     CMD(1 << groupno));
2125 					from += groupsize;
2126 					buf += sizeof(*otpinfo);
2127 					*retlen += sizeof(*otpinfo);
2128 				} else if (from >= groupsize) {
2129 					from -= groupsize;
2130 					data_offset += groupsize;
2131 				} else {
2132 					int size = groupsize;
2133 					data_offset += from;
2134 					size -= from;
2135 					from = 0;
2136 					if (size > len)
2137 						size = len;
2138 					ret = action(map, chip, data_offset,
2139 						     buf, size, reg_prot_offset,
2140 						     groupno, groupsize);
2141 					if (ret < 0)
2142 						return ret;
2143 					buf += size;
2144 					len -= size;
2145 					*retlen += size;
2146 					data_offset += size;
2147 				}
2148 				groupno++;
2149 				groups--;
2150 			}
2151 
2152 			/* next OTP region */
2153 			if (++field == extp->NumProtectionFields)
2154 				break;
2155 			reg_prot_offset = otp->ProtRegAddr;
2156 			reg_fact_groups = otp->FactGroups;
2157 			reg_fact_size = 1 << otp->FactProtRegSize;
2158 			reg_user_groups = otp->UserGroups;
2159 			reg_user_size = 1 << otp->UserProtRegSize;
2160 			otp++;
2161 		}
2162 	}
2163 
2164 	return 0;
2165 }
2166 
2167 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2168 					   size_t len, size_t *retlen,
2169 					    u_char *buf)
2170 {
2171 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2172 				     buf, do_otp_read, 0);
2173 }
2174 
2175 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2176 					   size_t len, size_t *retlen,
2177 					    u_char *buf)
2178 {
2179 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2180 				     buf, do_otp_read, 1);
2181 }
2182 
2183 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2184 					    size_t len, size_t *retlen,
2185 					     u_char *buf)
2186 {
2187 	return cfi_intelext_otp_walk(mtd, from, len, retlen,
2188 				     buf, do_otp_write, 1);
2189 }
2190 
2191 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2192 					   loff_t from, size_t len)
2193 {
2194 	size_t retlen;
2195 	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2196 				     NULL, do_otp_lock, 1);
2197 }
2198 
2199 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2200 					   struct otp_info *buf, size_t len)
2201 {
2202 	size_t retlen;
2203 	int ret;
2204 
2205 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2206 	return ret ? : retlen;
2207 }
2208 
2209 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2210 					   struct otp_info *buf, size_t len)
2211 {
2212 	size_t retlen;
2213 	int ret;
2214 
2215 	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2216 	return ret ? : retlen;
2217 }
2218 
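/*
 * Usage sketch (illustrative only): enumerating and reading the user
 * protection registers through the mtd_info hooks this driver provides
 * when CONFIG_MTD_OTP is enabled.
 */
#if 0
	struct otp_info info[2];
	size_t retlen;
	u_char buf[16];
	int n;

	/* returns the number of otp_info bytes written, or -errno */
	n = mtd->get_user_prot_info(mtd, info, sizeof(info));
	if (n > 0)
		mtd->read_user_prot_reg(mtd, 0, sizeof(buf), &retlen, buf);
#endif
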
2219 #endif
2220 
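/*
 * Power management: refuse to suspend with -EAGAIN while any chip has
 * an operation pending or in flight; on failure the chips already
 * marked FL_PM_SUSPENDED are rolled back below.
 */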
2221 static int cfi_intelext_suspend(struct mtd_info *mtd)
2222 {
2223 	struct map_info *map = mtd->priv;
2224 	struct cfi_private *cfi = map->fldrv_priv;
2225 	int i;
2226 	struct flchip *chip;
2227 	int ret = 0;
2228 
2229 	for (i=0; !ret && i<cfi->numchips; i++) {
2230 		chip = &cfi->chips[i];
2231 
2232 		spin_lock(chip->mutex);
2233 
2234 		switch (chip->state) {
2235 		case FL_READY:
2236 		case FL_STATUS:
2237 		case FL_CFI_QUERY:
2238 		case FL_JEDEC_QUERY:
2239 			if (chip->oldstate == FL_READY) {
2240 				chip->oldstate = chip->state;
2241 				chip->state = FL_PM_SUSPENDED;
2242 				/* No need to wake_up() on this state change -
2243 				 * as the whole point is that nobody can do anything
2244 				 * with the chip now anyway.
2245 				 */
2246 			} else {
2247 				/* There seems to be an operation pending. We must wait for it. */
2248 				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2249 				ret = -EAGAIN;
2250 			}
2251 			break;
2252 		default:
2253 			/* Should we actually wait? Once upon a time these routines weren't
2254 			   allowed to. Or should we return -EAGAIN, because the upper layers
2255 			   ought to have already shut down anything which was using the device
2256 			   anyway? The latter for now. */
2257 			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2258 			ret = -EAGAIN;
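			/* fall through */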
2259 		case FL_PM_SUSPENDED:
2260 			break;
2261 		}
2262 		spin_unlock(chip->mutex);
2263 	}
2264 
2265 	/* Unlock the chips again */
2266 
2267 	if (ret) {
2268 		for (i--; i >= 0; i--) {
2269 			chip = &cfi->chips[i];
2270 
2271 			spin_lock(chip->mutex);
2272 
2273 			if (chip->state == FL_PM_SUSPENDED) {
2274 				/* No need to force it into a known state here,
2275 				   because we're returning failure, and it didn't
2276 				   get power cycled */
2277 				chip->state = chip->oldstate;
2278 				chip->oldstate = FL_READY;
2279 				wake_up(&chip->wq);
2280 			}
2281 			spin_unlock(chip->mutex);
2282 		}
2283 	}
2284 
2285 	return ret;
2286 }
2287 
2288 static void cfi_intelext_resume(struct mtd_info *mtd)
2289 {
2290 	struct map_info *map = mtd->priv;
2291 	struct cfi_private *cfi = map->fldrv_priv;
2292 	int i;
2293 	struct flchip *chip;
2294 
2295 	for (i=0; i<cfi->numchips; i++) {
2296 
2297 		chip = &cfi->chips[i];
2298 
2299 		spin_lock(chip->mutex);
2300 
2301 		/* Go to known state. Chip may have been power cycled */
2302 		if (chip->state == FL_PM_SUSPENDED) {
2303 			map_write(map, CMD(0xFF), cfi->chips[i].start);
2304 			chip->oldstate = chip->state = FL_READY;
2305 			wake_up(&chip->wq);
2306 		}
2307 
2308 		spin_unlock(chip->mutex);
2309 	}
2310 }
2311 
2312 static int cfi_intelext_reset(struct mtd_info *mtd)
2313 {
2314 	struct map_info *map = mtd->priv;
2315 	struct cfi_private *cfi = map->fldrv_priv;
2316 	int i, ret;
2317 
2318 	for (i=0; i < cfi->numchips; i++) {
2319 		struct flchip *chip = &cfi->chips[i];
2320 
2321 		/* force the completion of any ongoing operation
2322 		   and switch to array mode so any bootloader in
2323 		   flash is accessible for soft reboot. */
2324 		spin_lock(chip->mutex);
2325 		ret = get_chip(map, chip, chip->start, FL_SYNCING);
2326 		if (!ret) {
2327 			map_write(map, CMD(0xff), chip->start);
2328 			chip->state = FL_READY;
2329 		}
2330 		spin_unlock(chip->mutex);
2331 	}
2332 
2333 	return 0;
2334 }
2335 
2336 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2337 			       void *v)
2338 {
2339 	struct mtd_info *mtd;
2340 
2341 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2342 	cfi_intelext_reset(mtd);
2343 	return NOTIFY_DONE;
2344 }
2345 
2346 static void cfi_intelext_destroy(struct mtd_info *mtd)
2347 {
2348 	struct map_info *map = mtd->priv;
2349 	struct cfi_private *cfi = map->fldrv_priv;
2350 	cfi_intelext_reset(mtd);
2351 	unregister_reboot_notifier(&mtd->reboot_notifier);
2352 	kfree(cfi->cmdset_priv);
2353 	kfree(cfi->cfiq);
2354 	kfree(cfi->chips[0].priv);
2355 	kfree(cfi);
2356 	kfree(mtd->eraseregions);
2357 }
2358 
2359 static char im_name_1[]="cfi_cmdset_0001";
2360 static char im_name_3[]="cfi_cmdset_0003";
2361 
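/*
 * Command set 0x0003 is the Intel Standard command set, a subset of
 * the Extended set, so both inter_module names resolve to the same
 * probe function.
 */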
2362 static int __init cfi_intelext_init(void)
2363 {
2364 	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
2365 	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
2366 	return 0;
2367 }
2368 
2369 static void __exit cfi_intelext_exit(void)
2370 {
2371 	inter_module_unregister(im_name_1);
2372 	inter_module_unregister(im_name_3);
2373 }
2374 
2375 module_init(cfi_intelext_init);
2376 module_exit(cfi_intelext_exit);
2377 
2378 MODULE_LICENSE("GPL");
2379 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2380 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2381