/* xref: /linux/drivers/mtd/chips/cfi_cmdset_0002.c (revision 6feb348783767e3f38d7612e6551ee8b580ac4e9) */
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_MACRONIX	0x00C2
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B	        0x0060
#define SST49LF040B	        0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected because they have the 0x80 bit
		 * set, so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == MANUFACTURER_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It seems likely that the device IDs
	 * are as well.  This table picks all the cases where
	 * we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	/* debug */
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

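/*
 * Wait for the chip to become ready for an operation of type 'mode',
 * suspending an in-progress erase when the chip supports it.  Called
 * with chip->mutex held; the lock may be dropped and re-taken while
 * sleeping, and is still held when 0 or -EIO is returned (callers are
 * responsible for unlocking).
 */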
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* Fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
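/*
 * Undo the effect of get_chip(): resume a suspended erase (oldstate
 * FL_ERASING) with the Erase-Resume command, drop VPP if the chip was
 * only being read, and wake up any waiters.
 */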
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    (chip->state == FL_ERASING && (extp->EraseSuspend & 2)) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

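	/* Enter the Secured Silicon (SecSi) sector region: unlock cycles
	 * followed by the 0x88 command (SecSi entry in the AMD command set). */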
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

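	/* Exit the SecSi sector: unlock cycles, then 0x90 followed by 0x00
	 * to return the chip to normal read mode. */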
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
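	 * For example, with HZ=250 this evaluates to 0 + 1 = 1 jiffy (4 ms),
	 * while with HZ=1000 it gives 1 + 1 = 2 jiffies (2 ms).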
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
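	/* AMD standard word-program sequence: two unlock cycles (0xAA to
	 * addr_unlock1, 0x55 to addr_unlock2), the Program command (0xA0),
	 * then the datum written to the target address. */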
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

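	/* Buffered write sequence: unlock cycles, Write to Buffer (0x25) at
	 * the sector address, a word count of (words - 1), the data words,
	 * then Write Buffer Program Confirm (0x29). */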
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	/* cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); */

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
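	/* Maximum write-buffer size in bytes: 1 << MaxBufWriteSize per chip,
	 * scaled by the interleave. */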
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}

/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

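	/* AMD chip-erase sequence: two unlock cycles, Erase setup (0x80),
	 * two more unlock cycles, then Chip Erase (0x10). */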
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

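	/* AMD sector-erase sequence: as for chip erase, but the final cycle
	 * writes Sector Erase (0x30) to an address within the sector. */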
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");