/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF008A		0x005a

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common.  It looks like the device IDs are as well.  This
	 * table picks all the cases where we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ 0, 0, NULL, NULL }
};
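
/*
 * For reference: cfi_fixup() (drivers/mtd/chips/cfi_util.c) walks such a
 * table and runs every entry whose manufacturer and device IDs match, with
 * CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  A minimal sketch of that
 * matching loop, under those assumptions -- see cfi_util.c for the real one:
 */
#if 0
static void cfi_fixup_sketch(struct mtd_info *mtd, struct cfi_fixup *table)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = table; f->fixup; f++) {
		/* Wildcards match everything; otherwise require equality. */
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id == CFI_ID_ANY || f->id == cfi->id))
			f->fixup(mtd, f->param);
	}
}
#endif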


struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n",  extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions - 1) - i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
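
/*
 * To make the compatibility-mode adjustment above concrete: every command
 * in this vendor set begins with the same two unlock cycles, and
 * cfi_send_gen_cmd() scales the unlock word addresses onto the external
 * bus.  A rough sketch of the raw cycles for a single word program,
 * assuming interleave 1 and a device type equal to the bus width (the
 * helper name and the simplified address scaling are illustrative only):
 */
#if 0
static void amd_program_word_sketch(struct map_info *map, unsigned long base,
				    unsigned long adr, map_word datum)
{
	/* Unlock cycle 1: data 0xAA to word address 0x555. */
	map_write(map, CMD(0xAA), base + 0x555 * map_bankwidth(map));
	/* Unlock cycle 2: data 0x55 to word address 0x2AA. */
	map_write(map, CMD(0x55), base + 0x2aa * map_bankwidth(map));
	/* Program setup: data 0xA0 to word address 0x555. */
	map_write(map, CMD(0xA0), base + 0x555 * map_bankwidth(map));
	/* Fourth cycle: the datum itself, at the target address. */
	map_write(map, datum, adr);
}
#endif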

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i = 0; i < mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
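
/*
 * The EraseRegionInfo decoding in the loop above follows the CFI query
 * layout: the low 16 bits hold the number of blocks in the region minus
 * one, and the high 16 bits hold the block size in 256-byte units.  A
 * standalone sketch of the same arithmetic (the helper is illustrative,
 * not part of the driver):
 */
#if 0
static void decode_erase_region(__u32 info, int interleave,
				unsigned long *ersize, unsigned long *ernum)
{
	/* (info >> 8) & ~0xff == (info >> 16) * 256, i.e. block size in bytes. */
	*ersize = ((info >> 8) & ~0xff) * interleave;
	/* Low 16 bits: block count minus one. */
	*ernum = (info & 0xffff) + 1;
}
#endif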

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no bits toggling.
 *
 * Errors are indicated either by bits toggling or by bits held at the
 * wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
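
/*
 * The program and erase paths below all poll these helpers in a loop,
 * dropping the chip lock between samples so other users can make progress.
 * Stripped of the driver's state machinery, the pattern is roughly the
 * following (a sketch only; this hypothetical helper does not exist in the
 * driver):
 */
#if 0
static int poll_until_done_sketch(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word expected,
				  unsigned long timeo)
{
	for (;;) {
		/* Two identical reads matching the data => operation done. */
		if (chip_good(map, adr, expected))
			return 0;
		if (time_after(jiffies, timeo))
			return -EIO;	/* caller resets the chip with 0xF0 */
		/* Latency issues: drop the lock, wait a while, retry. */
		spin_unlock(chip->mutex);
		cfi_udelay(1);
		spin_lock(chip->mutex);
	}
}
#endif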

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever an interrupt is
 * pending, the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup, so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
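
/*
 * The chipnum/ofs/thislen arithmetic above recurs in every multi-chip loop
 * in this file.  As a worked example (purely illustrative, assuming
 * cfi->chipshift == 21, i.e. 2 MiB per chip):
 */
#if 0
static void chip_split_example(void)
{
	loff_t from = 0x1fff00;			/* 256 bytes below the 2 MiB mark */
	size_t len = 0x300;
	int chipnum = from >> 21;		/* = 0: starts on the first chip */
	unsigned long ofs = from - ((loff_t)chipnum << 21);	/* = 0x1fff00 */
	unsigned long thislen;

	if ((len + ofs - 1) >> 21)		/* 0x2001ff crosses the boundary */
		thislen = (1UL << 21) - ofs;	/* so clamp to 0x100 bytes */
	else
		thislen = len;
	/* The remaining 0x200 bytes are then read from chip 1 at ofs 0. */
}
#endif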


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1 << 3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		      __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
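
/*
 * The unaligned head and tail handling above is a classic read-modify-write:
 * the old bus word is fetched, the new bytes are merged in at the right
 * offset with map_word_load_partial(), and the merged word is programmed.
 * Schematically (a sketch only; this hypothetical helper omits the chip
 * state waiting and locking that the real path performs):
 */
#if 0
static int rmw_head_sketch(struct map_info *map, struct flchip *chip,
			   unsigned long ofs, const u_char *buf, size_t len)
{
	unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
	int i = ofs - bus_ofs;				/* byte offset in the word */
	int n = min_t(int, len, map_bankwidth(map) - i);	/* bytes to merge */
	map_word tmp;

	tmp = map_read(map, bus_ofs + chip->start);	/* old flash contents */
	tmp = map_word_load_partial(map, tmp, buf, i, n);	/* splice in new bytes */
	return do_write_oneword(map, chip, bus_ofs, tmp);
}
#endif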


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
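
/*
 * The chunking above guarantees that no single buffer program crosses a
 * write-buffer boundary: wbufsize is a power of two, so
 * wbufsize - (ofs & (wbufsize-1)) is exactly the room left in the current
 * buffer-aligned block.  A worked example, assuming wbufsize == 32 (purely
 * illustrative values):
 */
#if 0
static void chunk_example(void)
{
	int wbufsize = 32;
	unsigned long ofs = 0x105;	/* 5 bytes into a 32-byte block */
	int size = wbufsize - (ofs & (wbufsize - 1));	/* = 32 - 5 = 27 */

	/*
	 * The first do_write_buffer() call may therefore cover at most 27
	 * bytes; subsequent chunks start block-aligned and can use the
	 * full 32 bytes.
	 */
}
#endif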


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");