xref: /linux/drivers/mtd/chips/cfi_cmdset_0002.c (revision 367b8112fe2ea5c39a7bb4d263dcdd9b612fae18)
1 /*
2  * Common Flash Interface support:
3  *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4  *
5  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7  * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8  *
9  * 2_by_8 routines added by Simon Munton
10  *
11  * 4_by_16 work by Carolyn J. Smith
12  *
13  * XIP support hooks by Vitaly Wool (based on code for Intel flash
14  * by Nicolas Pitre)
15  *
16  * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
17  *
18  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
19  *
20  * This code is GPL
21  */
22 
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/init.h>
28 #include <asm/io.h>
29 #include <asm/byteorder.h>
30 
31 #include <linux/errno.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/interrupt.h>
35 #include <linux/mtd/compatmac.h>
36 #include <linux/mtd/map.h>
37 #include <linux/mtd/mtd.h>
38 #include <linux/mtd/cfi.h>
39 #include <linux/mtd/xip.h>
40 
41 #define AMD_BOOTLOC_BUG
42 #define FORCE_WORD_WRITE 0
43 
44 #define MAX_WORD_RETRIES 3
45 
46 #define MANUFACTURER_AMD	0x0001
47 #define MANUFACTURER_ATMEL	0x001F
48 #define MANUFACTURER_MACRONIX	0x00C2
49 #define MANUFACTURER_SST	0x00BF
50 #define SST49LF004B	        0x0060
51 #define SST49LF040B	        0x0050
52 #define SST49LF008A		0x005a
53 #define AT49BV6416		0x00d6
54 
55 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
59 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
60 static void cfi_amdstd_sync (struct mtd_info *);
61 static int cfi_amdstd_suspend (struct mtd_info *);
62 static void cfi_amdstd_resume (struct mtd_info *);
63 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 
65 static void cfi_amdstd_destroy(struct mtd_info *);
66 
67 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
68 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
69 
70 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
71 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
72 #include "fwh_lock.h"
73 
74 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
75 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
76 
/* Chip-driver registration record.  Probing is performed by the generic
 * CFI probe code, so only the destroy hook is supplied here. */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
83 
84 
85 /* #define DEBUG_CFI_FEATURES */
86 
87 
88 #ifdef DEBUG_CFI_FEATURES
/* Dump the AMD/Fujitsu extended query (PRI) table fields in human-readable
 * form.  Debug aid only; compiled when DEBUG_CFI_FEATURES is defined. */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	/* Vpp values are packed as volts in the high nibble, tenths in the low. */
	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
136 #endif
137 
138 #ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
/*
 * Pre-1.1 extended query tables carry no trustworthy TopBottom field, so
 * deduce the boot-block location from the JEDEC device ID instead:
 * IDs with bit 0x80 set are treated as top boot, others as bottom boot,
 * with an explicit special case for Macronix MX29LV400C bottom-boot parts.
 */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	/* Versions are ASCII digits; 0x3131 == "1.1". */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == MANUFACTURER_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
190 #endif
191 
192 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
193 {
194 	struct map_info *map = mtd->priv;
195 	struct cfi_private *cfi = map->fldrv_priv;
196 	if (cfi->cfiq->BufWriteTimeoutTyp) {
197 		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
198 		mtd->write = cfi_amdstd_write_buffers;
199 	}
200 }
201 
/* Atmel chips don't use the same PRI format as AMD chips */
/*
 * Translate the Atmel-layout extended table into the AMD layout the
 * rest of this driver expects.
 */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	/* Save a copy in the Atmel layout, then zero everything past the
	 * first 5 bytes of the AMD view (presumably the part both layouts
	 * share — TODO confirm against struct cfi_pri_amdstd) and
	 * re-derive the fields below. */
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	/* Atmel feature bit 0x02 maps to EraseSuspend == 2 (read/write). */
	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
233 
234 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
235 {
236 	/* Setup for chips with a secsi area */
237 	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
238 	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
239 }
240 
241 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
242 {
243 	struct map_info *map = mtd->priv;
244 	struct cfi_private *cfi = map->fldrv_priv;
245 	if ((cfi->cfiq->NumEraseRegions == 1) &&
246 		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
247 		mtd->erase = cfi_amdstd_erase_chip;
248 	}
249 
250 }
251 
252 /*
253  * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
254  * locked by default.
255  */
256 static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
257 {
258 	mtd->lock = cfi_atmel_lock;
259 	mtd->unlock = cfi_atmel_unlock;
260 	mtd->flags |= MTD_POWERUP_LOCK;
261 }
262 
263 static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
264 {
265 	struct map_info *map = mtd->priv;
266 	struct cfi_private *cfi = map->fldrv_priv;
267 
268 	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
269 		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
270 		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
271 	}
272 }
273 
274 static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
275 {
276 	struct map_info *map = mtd->priv;
277 	struct cfi_private *cfi = map->fldrv_priv;
278 
279 	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
280 		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
281 		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
282 	}
283 }
284 
/* Fixups applied to chips probed via real CFI, matched on
 * (manufacturer, device) with CFI_*_ANY wildcards; the list is
 * terminated by the all-zero entry. */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	/* AMD parts with a SecSi (secured silicon) sector. */
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	/* S29GL064N / S29GL032N parts with bad erase-region counts. */
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
/* Fixups applied only to chips probed via JEDEC IDs: SST firmware-hub
 * parts need the FWH lock scheme (see fwh_lock.h). */
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
312 
/* Fixups applied in both CFI and JEDEC modes (see cfi_cmdset_0002()). */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
323 
324 
/*
 * Primary entry point for the AMD/Fujitsu command set: allocate and
 * populate the mtd_info for a probed map, read and fix up the extended
 * query table (CFI mode), apply the per-mode fixup tables, seed the
 * per-chip timing fields and hand off to cfi_amdstd_setup().
 * Returns NULL on failure.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Only extended-table versions '1'.'0' through '1'.'4'
		 * (ASCII digits) are understood. */
		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n",  extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			/* NOTE(review): the message says "top" but 2 means
			 * bottom boot in the TopBottom encoding used by
			 * fixup_amd_bootblock() — confirm which is intended. */
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		/* Top-boot devices (bootloc == 3): reverse the erase-region
		 * list in place so region 0 is at the lowest address. */
		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc ==
					CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc ==
					CFI_INTERFACE_X16_BY_X32_ASYNC)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Convert the CFI typical timeouts (log2-encoded in the query
	 * table) into per-chip timing fields and init the wait queues. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
444 
/*
 * Second-stage setup: compute the total device size, build the MTD
 * erase-region table from the CFI query data and sanity-check that the
 * regions exactly cover each chip.  On failure everything (mtd, the
 * erase-region table and the cfi private data) is freed and NULL is
 * returned.
 */
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* Bytes covered by one (interleaved) chip. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo packing: low 16 bits = block count - 1,
		 * bits 16..31 = block size / 256. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize ends up as the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region once per chip in the map. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	/* The regions must tile the chip exactly. */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		/* mtd was kzalloc'd, so eraseregions is NULL if unallocated
		 * and kfree(NULL) is a no-op. */
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
512 
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
524 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
525 {
526 	map_word d, t;
527 
528 	d = map_read(map, addr);
529 	t = map_read(map, addr);
530 
531 	return map_word_equal(map, d, t);
532 }
533 
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
549 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
550 {
551 	map_word oldd, curd;
552 
553 	oldd = map_read(map, addr);
554 	curd = map_read(map, addr);
555 
556 	return	map_word_equal(map, oldd, curd) &&
557 		map_word_equal(map, curd, expected);
558 }
559 
560 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
561 {
562 	DECLARE_WAITQUEUE(wait, current);
563 	struct cfi_private *cfi = map->fldrv_priv;
564 	unsigned long timeo;
565 	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
566 
567  resettime:
568 	timeo = jiffies + HZ;
569  retry:
570 	switch (chip->state) {
571 
572 	case FL_STATUS:
573 		for (;;) {
574 			if (chip_ready(map, adr))
575 				break;
576 
577 			if (time_after(jiffies, timeo)) {
578 				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
579 				spin_unlock(chip->mutex);
580 				return -EIO;
581 			}
582 			spin_unlock(chip->mutex);
583 			cfi_udelay(1);
584 			spin_lock(chip->mutex);
585 			/* Someone else might have been playing with it. */
586 			goto retry;
587 		}
588 
589 	case FL_READY:
590 	case FL_CFI_QUERY:
591 	case FL_JEDEC_QUERY:
592 		return 0;
593 
594 	case FL_ERASING:
595 		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
596 			goto sleep;
597 
598 		if (!(   mode == FL_READY
599 		      || mode == FL_POINT
600 		      || !cfip
601 		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
602 		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
603 		    )))
604 			goto sleep;
605 
606 		/* We could check to see if we're trying to access the sector
607 		 * that is currently being erased. However, no user will try
608 		 * anything like that so we just wait for the timeout. */
609 
610 		/* Erase suspend */
611 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
612 		 * commands when the erase algorithm isn't in progress. */
613 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
614 		chip->oldstate = FL_ERASING;
615 		chip->state = FL_ERASE_SUSPENDING;
616 		chip->erase_suspended = 1;
617 		for (;;) {
618 			if (chip_ready(map, adr))
619 				break;
620 
621 			if (time_after(jiffies, timeo)) {
622 				/* Should have suspended the erase by now.
623 				 * Send an Erase-Resume command as either
624 				 * there was an error (so leave the erase
625 				 * routine to recover from it) or we trying to
626 				 * use the erase-in-progress sector. */
627 				map_write(map, CMD(0x30), chip->in_progress_block_addr);
628 				chip->state = FL_ERASING;
629 				chip->oldstate = FL_READY;
630 				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
631 				return -EIO;
632 			}
633 
634 			spin_unlock(chip->mutex);
635 			cfi_udelay(1);
636 			spin_lock(chip->mutex);
637 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
638 			   So we can just loop here. */
639 		}
640 		chip->state = FL_READY;
641 		return 0;
642 
643 	case FL_XIP_WHILE_ERASING:
644 		if (mode != FL_READY && mode != FL_POINT &&
645 		    (!cfip || !(cfip->EraseSuspend&2)))
646 			goto sleep;
647 		chip->oldstate = chip->state;
648 		chip->state = FL_READY;
649 		return 0;
650 
651 	case FL_POINT:
652 		/* Only if there's no operation suspended... */
653 		if (mode == FL_READY && chip->oldstate == FL_READY)
654 			return 0;
655 
656 	default:
657 	sleep:
658 		set_current_state(TASK_UNINTERRUPTIBLE);
659 		add_wait_queue(&chip->wq, &wait);
660 		spin_unlock(chip->mutex);
661 		schedule();
662 		remove_wait_queue(&chip->wq, &wait);
663 		spin_lock(chip->mutex);
664 		goto resettime;
665 	}
666 }
667 
668 
/*
 * Release a chip claimed with get_chip(): resume any erase that
 * get_chip() suspended, restore the saved state and wake up anyone
 * sleeping on the chip's wait queue.  Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* 0x30 = Erase-Resume: restart the suspended erase. */
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
696 
697 #ifdef CONFIG_MTD_XIP
698 
699 /*
700  * No interrupt what so ever can be serviced while the flash isn't in array
701  * mode.  This is ensured by the xip_disable() and xip_enable() functions
702  * enclosing any code path where the flash is known not to be in array mode.
703  * And within a XIP disabled code path, only functions marked with __xipram
704  * may be called and nothing else (it's a good thing to inspect generated
705  * assembly to make sure inline functions were actually inlined and that gcc
706  * didn't emit calls to its own support functions). Also configuring MTD CFI
707  * support to a single buswidth and a single interleave is also recommended.
708  */
709 
/* Leave-array-mode prologue: touch the mapping so the MMU entry is
 * current, then mask interrupts — no code may be fetched from this
 * flash until the matching xip_enable(). */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
717 
/* Return the chip to array mode (0xF0 reset if it is not already
 * readable), refill the instruction prefetch and unmask interrupts. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);	/* 0xF0 = reset/read-array */
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
731 
732 /*
733  * When a delay is required for the flash operation to complete, the
734  * xip_udelay() function is polling for both the given timeout and pending
735  * (but still masked) hardware interrupts.  Whenever there is an interrupt
736  * pending then the flash erase operation is suspended, array mode restored
737  * and interrupts unmasked.  Task scheduling might also happen at that
738  * point.  The CPU eventually returns from the interrupt or the call to
739  * schedule() and the suspended flash operation is resumed for the remaining
740  * of the delay period.
741  *
742  * Warning: this function _will_ fool interrupt latency tracing tools.
743  */
744 
/*
 * Busy-wait for up to @usec microseconds for the operation at @adr to
 * complete, suspending the erase (and re-enabling XIP/interrupts) if a
 * hardware interrupt becomes pending meanwhile.  See the comment block
 * above for the overall scheme.  Called with chip->mutex held and
 * interrupts masked by xip_disable().
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	/* NOTE(review): OK = 0x80 looks like DQ7 data polling for
	 * "operation complete" — confirm against the datasheet. */
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		/* Suspend only when the chip advertises erase-suspend-read
		 * (EraseSuspend & 2) and no other operation is suspended. */
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);	/* 0xB0 = Erase-Suspend */
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);	/* back to array mode */
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);	/* 0x30 = Erase-Resume */
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
834 
/* In XIP builds, delays are performed by the irq-aware poller above. */
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)
849 
850 /*
851  * Extra notes:
852  *
853  * Activating this XIP support changes the way the code works a bit.  For
854  * example the code to suspend the current process when concurrent access
855  * happens is never executed because xip_udelay() will always return with the
856  * same chip state as it was entered with.  This is why there is no care for
857  * the presence of add_wait_queue() or schedule() calls from within a couple
858  * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
859  * The queueing and scheduling are always happening within xip_udelay().
860  *
861  * Similarly, get_chip() and put_chip() just happen to always be executed
862  * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
863  * is in array mode, therefore never executing many cases therein and not
864  * causing any problem with XIP.
865  */
866 
867 #else
868 
/* Non-XIP builds: the xip hooks collapse to nothing and delays simply
 * drop chip->mutex around cfi_udelay() (plus a cache invalidate for
 * the combined variant). */
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)
887 
888 #endif
889 
/*
 * Read @len bytes at chip-relative offset @adr from one chip into @buf:
 * claim the chip, force it into read-array mode if necessary and copy
 * straight from the mapping.  Returns 0 or the error from get_chip()
 * (whose error returns leave chip->mutex held — unlocked here).
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* 0xF0 = reset/read-array command, needed unless already readable. */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
920 
921 
922 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
923 {
924 	struct map_info *map = mtd->priv;
925 	struct cfi_private *cfi = map->fldrv_priv;
926 	unsigned long ofs;
927 	int chipnum;
928 	int ret = 0;
929 
930 	/* ofs: offset within the first chip that the first read should start */
931 
932 	chipnum = (from >> cfi->chipshift);
933 	ofs = from - (chipnum <<  cfi->chipshift);
934 
935 
936 	*retlen = 0;
937 
938 	while (len) {
939 		unsigned long thislen;
940 
941 		if (chipnum >= cfi->numchips)
942 			break;
943 
944 		if ((len + ofs -1) >> cfi->chipshift)
945 			thislen = (1<<cfi->chipshift) - ofs;
946 		else
947 			thislen = len;
948 
949 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
950 		if (ret)
951 			break;
952 
953 		*retlen += thislen;
954 		len -= thislen;
955 		buf += thislen;
956 
957 		ofs = 0;
958 		chipnum++;
959 	}
960 	return ret;
961 }
962 
963 
/*
 * Read from the SecSi (security) sector of a single chip.  The sector
 * is entered with the 0xAA/0x55/0x88 command sequence and left again
 * with 0xAA/0x55/0x90/0x00 once the data has been copied out.  Sleeps
 * on the chip's wait queue until the chip is idle before starting.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		/* Chip is busy: sleep until woken, then start over */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter SecSi sector mode */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit SecSi sector mode, back to the normal array */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}
1013 
1014 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1015 {
1016 	struct map_info *map = mtd->priv;
1017 	struct cfi_private *cfi = map->fldrv_priv;
1018 	unsigned long ofs;
1019 	int chipnum;
1020 	int ret = 0;
1021 
1022 
1023 	/* ofs: offset within the first chip that the first read should start */
1024 
1025 	/* 8 secsi bytes per chip */
1026 	chipnum=from>>3;
1027 	ofs=from & 7;
1028 
1029 
1030 	*retlen = 0;
1031 
1032 	while (len) {
1033 		unsigned long thislen;
1034 
1035 		if (chipnum >= cfi->numchips)
1036 			break;
1037 
1038 		if ((len + ofs -1) >> 3)
1039 			thislen = (1<<3) - ofs;
1040 		else
1041 			thislen = len;
1042 
1043 		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1044 		if (ret)
1045 			break;
1046 
1047 		*retlen += thislen;
1048 		len -= thislen;
1049 		buf += thislen;
1050 
1051 		ofs = 0;
1052 		chipnum++;
1053 	}
1054 	return ret;
1055 }
1056 
1057 
/*
 * Program a single bus-wide word at chip-relative address 'adr'.
 * Issues the 0xAA/0x55/0xA0 program sequence, writes the datum and
 * polls for completion, tolerating write-suspend while waiting.
 * A failed program is reset with 0xF0 and retried up to
 * MAX_WORD_RETRIES times before returning -EIO.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions.	 The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	/* Standard three-cycle unlock + program command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			/* xip_enable around the printk so XIP can execute it */
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
1164 
1165 
/*
 * MTD write entry point for word-at-a-time programming.  Splits the
 * request into an unaligned head, a run of aligned bus words and an
 * unaligned tail; head and tail are handled by read-modify-write of
 * the containing bus word.  Each word goes through do_write_oneword().
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			/* Chip busy: sleep on its wait queue, then retry */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		/* Merge the new bytes into the old word and program it */
		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Move to the next chip if we ran off the end of this one */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			/* Chip busy: sleep on its wait queue, then retry */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		/* Read-modify-write the final partial bus word */
		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
1306 
1307 
1308 /*
1309  * FIXME: interleaved mode not tested, and probably not supported!
1310  */
1311 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1312 				    unsigned long adr, const u_char *buf,
1313 				    int len)
1314 {
1315 	struct cfi_private *cfi = map->fldrv_priv;
1316 	unsigned long timeo = jiffies + HZ;
1317 	/* see comments in do_write_oneword() regarding uWriteTimeo. */
1318 	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1319 	int ret = -EIO;
1320 	unsigned long cmd_adr;
1321 	int z, words;
1322 	map_word datum;
1323 
1324 	adr += chip->start;
1325 	cmd_adr = adr;
1326 
1327 	spin_lock(chip->mutex);
1328 	ret = get_chip(map, chip, adr, FL_WRITING);
1329 	if (ret) {
1330 		spin_unlock(chip->mutex);
1331 		return ret;
1332 	}
1333 
1334 	datum = map_word_load(map, buf);
1335 
1336 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1337 	       __func__, adr, datum.x[0] );
1338 
1339 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1340 	ENABLE_VPP(map);
1341 	xip_disable(map, chip, cmd_adr);
1342 
1343 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1344 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1345 	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1346 
1347 	/* Write Buffer Load */
1348 	map_write(map, CMD(0x25), cmd_adr);
1349 
1350 	chip->state = FL_WRITING_TO_BUFFER;
1351 
1352 	/* Write length of data to come */
1353 	words = len / map_bankwidth(map);
1354 	map_write(map, CMD(words - 1), cmd_adr);
1355 	/* Write data */
1356 	z = 0;
1357 	while(z < words * map_bankwidth(map)) {
1358 		datum = map_word_load(map, buf);
1359 		map_write(map, datum, adr + z);
1360 
1361 		z += map_bankwidth(map);
1362 		buf += map_bankwidth(map);
1363 	}
1364 	z -= map_bankwidth(map);
1365 
1366 	adr += z;
1367 
1368 	/* Write Buffer Program Confirm: GO GO GO */
1369 	map_write(map, CMD(0x29), cmd_adr);
1370 	chip->state = FL_WRITING;
1371 
1372 	INVALIDATE_CACHE_UDELAY(map, chip,
1373 				adr, map_bankwidth(map),
1374 				chip->word_write_time);
1375 
1376 	timeo = jiffies + uWriteTimeout;
1377 
1378 	for (;;) {
1379 		if (chip->state != FL_WRITING) {
1380 			/* Someone's suspended the write. Sleep */
1381 			DECLARE_WAITQUEUE(wait, current);
1382 
1383 			set_current_state(TASK_UNINTERRUPTIBLE);
1384 			add_wait_queue(&chip->wq, &wait);
1385 			spin_unlock(chip->mutex);
1386 			schedule();
1387 			remove_wait_queue(&chip->wq, &wait);
1388 			timeo = jiffies + (HZ / 2); /* FIXME */
1389 			spin_lock(chip->mutex);
1390 			continue;
1391 		}
1392 
1393 		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1394 			break;
1395 
1396 		if (chip_ready(map, adr)) {
1397 			xip_enable(map, chip, adr);
1398 			goto op_done;
1399 		}
1400 
1401 		/* Latency issues. Drop the lock, wait a while and retry */
1402 		UDELAY(map, chip, adr, 1);
1403 	}
1404 
1405 	/* reset on all failures. */
1406 	map_write( map, CMD(0xF0), chip->start );
1407 	xip_enable(map, chip, adr);
1408 	/* FIXME - should have reset delay before continuing */
1409 
1410 	printk(KERN_WARNING "MTD %s(): software timeout\n",
1411 	       __func__ );
1412 
1413 	ret = -EIO;
1414  op_done:
1415 	chip->state = FL_READY;
1416 	put_chip(map, chip, adr);
1417 	spin_unlock(chip->mutex);
1418 
1419 	return ret;
1420 }
1421 
1422 
1423 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1424 				    size_t *retlen, const u_char *buf)
1425 {
1426 	struct map_info *map = mtd->priv;
1427 	struct cfi_private *cfi = map->fldrv_priv;
1428 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1429 	int ret = 0;
1430 	int chipnum;
1431 	unsigned long ofs;
1432 
1433 	*retlen = 0;
1434 	if (!len)
1435 		return 0;
1436 
1437 	chipnum = to >> cfi->chipshift;
1438 	ofs = to  - (chipnum << cfi->chipshift);
1439 
1440 	/* If it's not bus-aligned, do the first word write */
1441 	if (ofs & (map_bankwidth(map)-1)) {
1442 		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1443 		if (local_len > len)
1444 			local_len = len;
1445 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1446 					     local_len, retlen, buf);
1447 		if (ret)
1448 			return ret;
1449 		ofs += local_len;
1450 		buf += local_len;
1451 		len -= local_len;
1452 
1453 		if (ofs >> cfi->chipshift) {
1454 			chipnum ++;
1455 			ofs = 0;
1456 			if (chipnum == cfi->numchips)
1457 				return 0;
1458 		}
1459 	}
1460 
1461 	/* Write buffer is worth it only if more than one word to write... */
1462 	while (len >= map_bankwidth(map) * 2) {
1463 		/* We must not cross write block boundaries */
1464 		int size = wbufsize - (ofs & (wbufsize-1));
1465 
1466 		if (size > len)
1467 			size = len;
1468 		if (size % map_bankwidth(map))
1469 			size -= size % map_bankwidth(map);
1470 
1471 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1472 				      ofs, buf, size);
1473 		if (ret)
1474 			return ret;
1475 
1476 		ofs += size;
1477 		buf += size;
1478 		(*retlen) += size;
1479 		len -= size;
1480 
1481 		if (ofs >> cfi->chipshift) {
1482 			chipnum ++;
1483 			ofs = 0;
1484 			if (chipnum == cfi->numchips)
1485 				return 0;
1486 		}
1487 	}
1488 
1489 	if (len) {
1490 		size_t retlen_dregs = 0;
1491 
1492 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1493 					     len, &retlen_dregs, buf);
1494 
1495 		*retlen += retlen_dregs;
1496 		return ret;
1497 	}
1498 
1499 	return 0;
1500 }
1501 
1502 
1503 /*
1504  * Handle devices with one erase region, that only implement
1505  * the chip erase command.
1506  */
1507 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1508 {
1509 	struct cfi_private *cfi = map->fldrv_priv;
1510 	unsigned long timeo = jiffies + HZ;
1511 	unsigned long int adr;
1512 	DECLARE_WAITQUEUE(wait, current);
1513 	int ret = 0;
1514 
1515 	adr = cfi->addr_unlock1;
1516 
1517 	spin_lock(chip->mutex);
1518 	ret = get_chip(map, chip, adr, FL_WRITING);
1519 	if (ret) {
1520 		spin_unlock(chip->mutex);
1521 		return ret;
1522 	}
1523 
1524 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1525 	       __func__, chip->start );
1526 
1527 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1528 	ENABLE_VPP(map);
1529 	xip_disable(map, chip, adr);
1530 
1531 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1532 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1533 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1534 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1535 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1536 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1537 
1538 	chip->state = FL_ERASING;
1539 	chip->erase_suspended = 0;
1540 	chip->in_progress_block_addr = adr;
1541 
1542 	INVALIDATE_CACHE_UDELAY(map, chip,
1543 				adr, map->size,
1544 				chip->erase_time*500);
1545 
1546 	timeo = jiffies + (HZ*20);
1547 
1548 	for (;;) {
1549 		if (chip->state != FL_ERASING) {
1550 			/* Someone's suspended the erase. Sleep */
1551 			set_current_state(TASK_UNINTERRUPTIBLE);
1552 			add_wait_queue(&chip->wq, &wait);
1553 			spin_unlock(chip->mutex);
1554 			schedule();
1555 			remove_wait_queue(&chip->wq, &wait);
1556 			spin_lock(chip->mutex);
1557 			continue;
1558 		}
1559 		if (chip->erase_suspended) {
1560 			/* This erase was suspended and resumed.
1561 			   Adjust the timeout */
1562 			timeo = jiffies + (HZ*20); /* FIXME */
1563 			chip->erase_suspended = 0;
1564 		}
1565 
1566 		if (chip_ready(map, adr))
1567 			break;
1568 
1569 		if (time_after(jiffies, timeo)) {
1570 			printk(KERN_WARNING "MTD %s(): software timeout\n",
1571 				__func__ );
1572 			break;
1573 		}
1574 
1575 		/* Latency issues. Drop the lock, wait a while and retry */
1576 		UDELAY(map, chip, adr, 1000000/HZ);
1577 	}
1578 	/* Did we succeed? */
1579 	if (!chip_good(map, adr, map_word_ff(map))) {
1580 		/* reset on all failures. */
1581 		map_write( map, CMD(0xF0), chip->start );
1582 		/* FIXME - should have reset delay before continuing */
1583 
1584 		ret = -EIO;
1585 	}
1586 
1587 	chip->state = FL_READY;
1588 	xip_enable(map, chip, adr);
1589 	put_chip(map, chip, adr);
1590 	spin_unlock(chip->mutex);
1591 
1592 	return ret;
1593 }
1594 
1595 
/*
 * Erase one sector at chip-relative address 'adr'.  Issues the
 * six-cycle 0xAA/0x55/0x80/0xAA/0x55 unlock sequence followed by the
 * 0x30 sector-erase command written to the sector itself, then polls
 * for completion (erase-suspend aware) and verifies with chip_good()
 * that the sector reads back all-0xFF.  'thunk' is unused; the
 * signature matches what cfi_varsize_frob() expects.
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
1683 
1684 
1685 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1686 {
1687 	unsigned long ofs, len;
1688 	int ret;
1689 
1690 	ofs = instr->addr;
1691 	len = instr->len;
1692 
1693 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1694 	if (ret)
1695 		return ret;
1696 
1697 	instr->state = MTD_ERASE_DONE;
1698 	mtd_erase_callback(instr);
1699 
1700 	return 0;
1701 }
1702 
1703 
1704 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1705 {
1706 	struct map_info *map = mtd->priv;
1707 	struct cfi_private *cfi = map->fldrv_priv;
1708 	int ret = 0;
1709 
1710 	if (instr->addr != 0)
1711 		return -EINVAL;
1712 
1713 	if (instr->len != mtd->size)
1714 		return -EINVAL;
1715 
1716 	ret = do_erase_chip(map, &cfi->chips[0]);
1717 	if (ret)
1718 		return ret;
1719 
1720 	instr->state = MTD_ERASE_DONE;
1721 	mtd_erase_callback(instr);
1722 
1723 	return 0;
1724 }
1725 
/*
 * Lock (write-protect) one sector of an Atmel chip.  The 0x40
 * sector-lock command is preceded by the six-cycle unlock/0x80
 * sequence.  Signature matches what cfi_varsize_frob() expects;
 * 'thunk' is unused.
 */
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* Sector lock command, written to the sector itself */
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
1761 
/*
 * Unlock one sector of an Atmel chip via the 0x70 command.
 * Signature matches what cfi_varsize_frob() expects; 'thunk' is
 * unused.
 */
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/*
	 * NOTE(review): the unlock command is written to 'adr'
	 * (chip-relative) whereas do_atmel_lock() writes its command to
	 * chip->start + adr — confirm this asymmetry is intentional for
	 * multi-chip maps.
	 */
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
1789 
/* MTD lock entry point: apply do_atmel_lock() to each affected sector. */
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}
1794 
/* MTD unlock entry point: apply do_atmel_unlock() to each affected sector. */
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
1799 
1800 
/*
 * Wait until every chip is idle, holding each in FL_SYNCING so no new
 * operation can start, then release them all again.  Chips that are
 * busy are waited for on their wait queue.
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
1860 
1861 
/*
 * Power-management suspend: put every idle chip into FL_PM_SUSPENDED.
 * If any chip is busy, abort with -EAGAIN and roll back the chips
 * already suspended.
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			/* Chip is busy: refuse to suspend */
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
1914 
1915 
/*
 * Power-management resume: reset every suspended chip back to array
 * mode (0xF0), mark it FL_READY and wake anyone waiting on it.
 */
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}
1940 
/*
 * Free everything allocated at probe time: the command-set private
 * data, the CFI query structure, the cfi_private itself and the MTD
 * erase-region table.
 */
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
1951 
1952 MODULE_LICENSE("GPL");
1953 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1954 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
1955