xref: /linux/drivers/mtd/chips/cfi_cmdset_0002.c (revision 20d0021394c1b070bf04b22c5bc8fdb437edd4c5)
1 /*
2  * Common Flash Interface support:
3  *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4  *
5  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7  * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8  *
9  * 2_by_8 routines added by Simon Munton
10  *
11  * 4_by_16 work by Carolyn J. Smith
12  *
13  * XIP support hooks by Vitaly Wool (based on code for Intel flash
14  * by Nicolas Pitre)
15  *
16  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17  *
18  * This code is GPL
19  *
20  * $Id: cfi_cmdset_0002.c,v 1.118 2005/07/04 22:34:29 gleixner Exp $
21  *
22  */
23 
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/types.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <asm/io.h>
31 #include <asm/byteorder.h>
32 
33 #include <linux/errno.h>
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/interrupt.h>
37 #include <linux/mtd/compatmac.h>
38 #include <linux/mtd/map.h>
39 #include <linux/mtd/mtd.h>
40 #include <linux/mtd/cfi.h>
41 #include <linux/mtd/xip.h>
42 
43 #define AMD_BOOTLOC_BUG
44 #define FORCE_WORD_WRITE 0
45 
46 #define MAX_WORD_RETRIES 3
47 
48 #define MANUFACTURER_AMD	0x0001
49 #define MANUFACTURER_SST	0x00BF
50 #define SST49LF004B	        0x0060
51 #define SST49LF008A		0x005a
52 
53 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
54 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
55 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
56 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
57 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
58 static void cfi_amdstd_sync (struct mtd_info *);
59 static int cfi_amdstd_suspend (struct mtd_info *);
60 static void cfi_amdstd_resume (struct mtd_info *);
61 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
62 
63 static void cfi_amdstd_destroy(struct mtd_info *);
64 
65 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
66 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
67 
68 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
69 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
70 #include "fwh_lock.h"
71 
72 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
73 	.probe		= NULL, /* Not usable directly */
74 	.destroy	= cfi_amdstd_destroy,
75 	.name		= "cfi_cmdset_0002",
76 	.module		= THIS_MODULE
77 };
78 
79 
80 /* #define DEBUG_CFI_FEATURES */
81 
82 
83 #ifdef DEBUG_CFI_FEATURES
84 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
85 {
86 	const char* erase_suspend[3] = {
87 		"Not supported", "Read only", "Read/write"
88 	};
89 	const char* top_bottom[6] = {
90 		"No WP", "8x8KiB sectors at top & bottom, no WP",
91 		"Bottom boot", "Top boot",
92 		"Uniform, Bottom WP", "Uniform, Top WP"
93 	};
94 
95 	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
96 	printk("  Address sensitive unlock: %s\n",
97 	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
98 
99 	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
100 		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
101 	else
102 		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
103 
104 	if (extp->BlkProt == 0)
105 		printk("  Block protection: Not supported\n");
106 	else
107 		printk("  Block protection: %d sectors per group\n", extp->BlkProt);
108 
109 
110 	printk("  Temporary block unprotect: %s\n",
111 	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
112 	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
113 	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
114 	printk("  Burst mode: %s\n",
115 	       extp->BurstMode ? "Supported" : "Not supported");
116 	if (extp->PageMode == 0)
117 		printk("  Page mode: Not supported\n");
118 	else
119 		printk("  Page mode: %d word page\n", extp->PageMode << 2);
120 
121 	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
122 	       extp->VppMin >> 4, extp->VppMin & 0xf);
123 	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
124 	       extp->VppMax >> 4, extp->VppMax & 0xf);
125 
126 	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
127 		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
128 	else
129 		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
130 }
131 #endif
132 
133 #ifdef AMD_BOOTLOC_BUG
134 /* Wheee. Bring me the head of someone at AMD. */
135 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
136 {
137 	struct map_info *map = mtd->priv;
138 	struct cfi_private *cfi = map->fldrv_priv;
139 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
140 	__u8 major = extp->MajorVersion;
141 	__u8 minor = extp->MinorVersion;
142 
143 	if (((major << 8) | minor) < 0x3131) {
144 		/* CFI version before 1.1 (the version fields are ASCII, so 0x3131 is "11"): don't trust bootloc */
145 		if (cfi->id & 0x80) {
146 			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
147 			extp->TopBottom = 3;	/* top boot */
148 		} else {
149 			extp->TopBottom = 2;	/* bottom boot */
150 		}
151 	}
152 }
153 #endif
154 
155 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
156 {
157 	struct map_info *map = mtd->priv;
158 	struct cfi_private *cfi = map->fldrv_priv;
159 	if (cfi->cfiq->BufWriteTimeoutTyp) {
160 		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
161 		mtd->write = cfi_amdstd_write_buffers;
162 	}
163 }
164 
165 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
166 {
167 	/* Setup for chips with a secsi area */
168 	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
169 	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
170 }
171 
172 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
173 {
174 	struct map_info *map = mtd->priv;
175 	struct cfi_private *cfi = map->fldrv_priv;
176 	if ((cfi->cfiq->NumEraseRegions == 1) &&
177 		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
178 		mtd->erase = cfi_amdstd_erase_chip;
179 	}
180 
181 }
182 
183 static struct cfi_fixup cfi_fixup_table[] = {
184 #ifdef AMD_BOOTLOC_BUG
185 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
186 #endif
187 	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
188 	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
189 	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
190 	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
191 	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
192 	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
193 #if !FORCE_WORD_WRITE
194 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
195 #endif
196 	{ 0, 0, NULL, NULL }
197 };
198 static struct cfi_fixup jedec_fixup_table[] = {
199 	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
200 	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
201 	{ 0, 0, NULL, NULL }
202 };
203 
204 static struct cfi_fixup fixup_table[] = {
205 	/* The CFI vendor IDs and the JEDEC vendor IDs appear
206 	 * to be common.  It is likely that the device IDs are as
207 	 * well.  This table covers all the cases where we
208 	 * know that to be true.
209 	 */
210 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
211 	{ 0, 0, NULL, NULL }
212 };
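/*
 * For reference: cfi_fixup() (in cfi_util.c) walks one of these tables and
 * runs every entry whose manufacturer and device IDs match the probed chip,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  A minimal sketch of
 * that matching loop, kept under #if 0 purely for illustration:
 */
#if 0
static void cfi_fixup_sketch(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {	/* stop at the NULL sentinel */
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
			f->fixup(mtd, f->param);
	}
}
#endif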
213 
214 
215 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
216 {
217 	struct cfi_private *cfi = map->fldrv_priv;
218 	struct mtd_info *mtd;
219 	int i;
220 
221 	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
222 	if (!mtd) {
223 		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
224 		return NULL;
225 	}
226 	memset(mtd, 0, sizeof(*mtd));
227 	mtd->priv = map;
228 	mtd->type = MTD_NORFLASH;
229 
230 	/* Fill in the default mtd operations */
231 	mtd->erase   = cfi_amdstd_erase_varsize;
232 	mtd->write   = cfi_amdstd_write_words;
233 	mtd->read    = cfi_amdstd_read;
234 	mtd->sync    = cfi_amdstd_sync;
235 	mtd->suspend = cfi_amdstd_suspend;
236 	mtd->resume  = cfi_amdstd_resume;
237 	mtd->flags   = MTD_CAP_NORFLASH;
238 	mtd->name    = map->name;
239 
240 	if (cfi->cfi_mode == CFI_MODE_CFI) {
241 		unsigned char bootloc;
242 		/*
243 		 * It's a real CFI chip, not one for which the probe
244 		 * routine faked a CFI structure. So we read the feature
245 		 * table from it.
246 		 */
247 		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
248 		struct cfi_pri_amdstd *extp;
249 
250 		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
251 		if (!extp) {
252 			kfree(mtd);
253 			return NULL;
254 		}
255 
256 		/* Install our own private info structure */
257 		cfi->cmdset_priv = extp;
258 
259 		/* Apply cfi device specific fixups */
260 		cfi_fixup(mtd, cfi_fixup_table);
261 
262 #ifdef DEBUG_CFI_FEATURES
263 		/* Tell the user about it in lots of lovely detail */
264 		cfi_tell_features(extp);
265 #endif
266 
267 		bootloc = extp->TopBottom;
268 		if ((bootloc != 2) && (bootloc != 3)) {
269 			printk(KERN_WARNING "%s: CFI does not contain boot "
270 			       "bank location. Assuming bottom.\n", map->name);
271 			bootloc = 2;
272 		}
273 
274 		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
275 			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
276 
277 			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
278 				int j = (cfi->cfiq->NumEraseRegions-1)-i;
279 				__u32 swap;
280 
281 				swap = cfi->cfiq->EraseRegionInfo[i];
282 				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
283 				cfi->cfiq->EraseRegionInfo[j] = swap;
284 			}
285 		}
286 		/* Set the default CFI lock/unlock addresses */
287 		cfi->addr_unlock1 = 0x555;
288 		cfi->addr_unlock2 = 0x2aa;
289 		/* Modify the unlock addresses in compatibility mode: the wider chip decodes byte addresses on the narrower bus, so the word addresses 0x555/0x2aa become byte addresses 0xaaa/0x555 */
290 		if (	/* x16 in x8 mode */
291 			((cfi->device_type == CFI_DEVICETYPE_X8) &&
292 				(cfi->cfiq->InterfaceDesc == 2)) ||
293 			/* x32 in x16 mode */
294 			((cfi->device_type == CFI_DEVICETYPE_X16) &&
295 				(cfi->cfiq->InterfaceDesc == 4)))
296 		{
297 			cfi->addr_unlock1 = 0xaaa;
298 			cfi->addr_unlock2 = 0x555;
299 		}
300 
301 	} /* CFI mode */
302 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
303 		/* Apply jedec specific fixups */
304 		cfi_fixup(mtd, jedec_fixup_table);
305 	}
306 	/* Apply generic fixups */
307 	cfi_fixup(mtd, fixup_table);
308 
309 	for (i=0; i< cfi->numchips; i++) {
310 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
311 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
312 		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
313 	}
314 
315 	map->fldrv = &cfi_amdstd_chipdrv;
316 
317 	return cfi_amdstd_setup(mtd);
318 }
319 
320 
321 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
322 {
323 	struct map_info *map = mtd->priv;
324 	struct cfi_private *cfi = map->fldrv_priv;
325 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
326 	unsigned long offset = 0;
327 	int i,j;
328 
329 	printk(KERN_NOTICE "number of %s chips: %d\n",
330 	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
331 	/* Select the correct geometry setup */
332 	mtd->size = devsize * cfi->numchips;
333 
334 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
335 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
336 				    * mtd->numeraseregions, GFP_KERNEL);
337 	if (!mtd->eraseregions) {
338 		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
339 		goto setup_err;
340 	}
341 
342 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
343 		unsigned long ernum, ersize;
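		/*
		 * EraseRegionInfo packs the CFI erase block region descriptor:
		 * bits 0-15 hold (number of blocks - 1) and bits 16-31 hold
		 * (block size / 256).  E.g. a descriptor of 0x0100003f means
		 * 64 blocks of 64KiB each, before interleave scaling.
		 */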
344 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
345 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
346 
347 		if (mtd->erasesize < ersize) {
348 			mtd->erasesize = ersize;
349 		}
350 		for (j=0; j<cfi->numchips; j++) {
351 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
352 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
353 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
354 		}
355 		offset += (ersize * ernum);
356 	}
357 	if (offset != devsize) {
358 		/* Argh */
359 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
360 		goto setup_err;
361 	}
362 #if 0
363 	// debug
364 	for (i=0; i<mtd->numeraseregions;i++){
365 		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
366 		       i,mtd->eraseregions[i].offset,
367 		       mtd->eraseregions[i].erasesize,
368 		       mtd->eraseregions[i].numblocks);
369 	}
370 #endif
371 
372 	/* FIXME: erase-suspend-program is broken.  See
373 	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
374 	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
375 
376 	__module_get(THIS_MODULE);
377 	return mtd;
378 
379  setup_err:
380 	if(mtd) {
381 		if(mtd->eraseregions)
382 			kfree(mtd->eraseregions);
383 		kfree(mtd);
384 	}
385 	kfree(cfi->cmdset_priv);
386 	kfree(cfi->cfiq);
387 	return NULL;
388 }
389 
390 /*
391  * Return true if the chip is ready.
392  *
393  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
394  * non-suspended sector) and is indicated by no toggle bits toggling.
395  *
396  * Note that anything more complicated than checking if no bits are toggling
397  * (including checking DQ5 for an error status) is tricky to get working
398  * correctly and is therefore not done (particularly with interleaved chips
399  * as each chip must be checked independently of the others).
400  */
401 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
402 {
403 	map_word d, t;
404 
405 	d = map_read(map, addr);
406 	t = map_read(map, addr);
407 
408 	return map_word_equal(map, d, t);
409 }
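/*
 * Background: while an embedded program or erase algorithm is running,
 * AMD-style chips toggle DQ6 on every read, so two back-to-back reads
 * returning identical data mean the algorithm has completed and the chip
 * is delivering stable array (or query) data again.
 */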
410 
411 /*
412  * Return true if the chip is ready and has the correct value.
413  *
414  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
415  * non-suspended sector) and it is indicated by no bits toggling.
416  *
417  * Errors are indicated by toggling bits, or by bits held at the wrong
418  * value.
419  *
420  * Note that anything more complicated than checking if no bits are toggling
421  * (including checking DQ5 for an error status) is tricky to get working
422  * correctly and is therefore not done (particularly with interleaved chips
423  * as each chip must be checked independently of the others).
424  *
425  */
426 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
427 {
428 	map_word oldd, curd;
429 
430 	oldd = map_read(map, addr);
431 	curd = map_read(map, addr);
432 
433 	return	map_word_equal(map, oldd, curd) &&
434 		map_word_equal(map, curd, expected);
435 }
436 
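/*
 * get_chip()/put_chip() bracket every flash access.  The caller takes
 * chip->mutex first; get_chip() then waits (dropping and retaking the lock)
 * or suspends an in-progress erase until the chip can service the requested
 * mode.  put_chip() undoes any suspension and wakes up any waiters.
 */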
437 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
438 {
439 	DECLARE_WAITQUEUE(wait, current);
440 	struct cfi_private *cfi = map->fldrv_priv;
441 	unsigned long timeo;
442 	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
443 
444  resettime:
445 	timeo = jiffies + HZ;
446  retry:
447 	switch (chip->state) {
448 
449 	case FL_STATUS:
450 		for (;;) {
451 			if (chip_ready(map, adr))
452 				break;
453 
454 			if (time_after(jiffies, timeo)) {
455 				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
456 				spin_unlock(chip->mutex);
457 				return -EIO;
458 			}
459 			spin_unlock(chip->mutex);
460 			cfi_udelay(1);
461 			spin_lock(chip->mutex);
462 			/* Someone else might have been playing with it. */
463 			goto retry;
464 		}
465 
466 	case FL_READY:
467 	case FL_CFI_QUERY:
468 	case FL_JEDEC_QUERY:
469 		return 0;
470 
471 	case FL_ERASING:
472 		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
473 			goto sleep;
474 
475 		if (!(mode == FL_READY || mode == FL_POINT
476 		      || !cfip
477 		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
478 		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
479 			goto sleep;
480 
481 		/* We could check to see if we're trying to access the sector
482 		 * that is currently being erased. However, no user will try
483 		 * anything like that so we just wait for the timeout. */
484 
485 		/* Erase suspend */
486 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
487 		 * commands when the erase algorithm isn't in progress. */
488 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
489 		chip->oldstate = FL_ERASING;
490 		chip->state = FL_ERASE_SUSPENDING;
491 		chip->erase_suspended = 1;
492 		for (;;) {
493 			if (chip_ready(map, adr))
494 				break;
495 
496 			if (time_after(jiffies, timeo)) {
497 				/* Should have suspended the erase by now.
498 				 * Send an Erase-Resume command as either
499 				 * there was an error (so leave the erase
500 				 * routine to recover from it) or we are trying to
501 				 * use the erase-in-progress sector. */
502 				map_write(map, CMD(0x30), chip->in_progress_block_addr);
503 				chip->state = FL_ERASING;
504 				chip->oldstate = FL_READY;
505 				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
506 				return -EIO;
507 			}
508 
509 			spin_unlock(chip->mutex);
510 			cfi_udelay(1);
511 			spin_lock(chip->mutex);
512 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
513 			   So we can just loop here. */
514 		}
515 		chip->state = FL_READY;
516 		return 0;
517 
518 	case FL_XIP_WHILE_ERASING:
519 		if (mode != FL_READY && mode != FL_POINT &&
520 		    (!cfip || !(cfip->EraseSuspend&2)))
521 			goto sleep;
522 		chip->oldstate = chip->state;
523 		chip->state = FL_READY;
524 		return 0;
525 
526 	case FL_POINT:
527 		/* Only if there's no operation suspended... */
528 		if (mode == FL_READY && chip->oldstate == FL_READY)
529 			return 0;
530 
531 	default:
532 	sleep:
533 		set_current_state(TASK_UNINTERRUPTIBLE);
534 		add_wait_queue(&chip->wq, &wait);
535 		spin_unlock(chip->mutex);
536 		schedule();
537 		remove_wait_queue(&chip->wq, &wait);
538 		spin_lock(chip->mutex);
539 		goto resettime;
540 	}
541 }
542 
543 
544 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
545 {
546 	struct cfi_private *cfi = map->fldrv_priv;
547 
548 	switch(chip->oldstate) {
549 	case FL_ERASING:
550 		chip->state = chip->oldstate;
551 		map_write(map, CMD(0x30), chip->in_progress_block_addr);
552 		chip->oldstate = FL_READY;
553 		chip->state = FL_ERASING;
554 		break;
555 
556 	case FL_XIP_WHILE_ERASING:
557 		chip->state = chip->oldstate;
558 		chip->oldstate = FL_READY;
559 		break;
560 
561 	case FL_READY:
562 	case FL_STATUS:
563 		/* We should really make set_vpp() count, rather than doing this */
564 		DISABLE_VPP(map);
565 		break;
566 	default:
567 		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
568 	}
569 	wake_up(&chip->wq);
570 }
571 
572 #ifdef CONFIG_MTD_XIP
573 
574 /*
575  * No interrupt whatsoever can be serviced while the flash isn't in array
576  * mode.  This is ensured by the xip_disable() and xip_enable() functions
577  * enclosing any code path where the flash is known not to be in array mode.
578  * And within a XIP disabled code path, only functions marked with __xipram
579  * may be called and nothing else (it's a good thing to inspect generated
580  * assembly to make sure inline functions were actually inlined and that gcc
581  * didn't emit calls to its own support functions). Configuring MTD CFI
582  * support for a single buswidth and a single interleave is also recommended.
583  */
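/*
 * An illustrative configuration (not the only valid one): build with
 * CONFIG_MTD_CFI_GEOMETRY=y and select exactly one bus width and one
 * interleave, e.g. CONFIG_MTD_MAP_BANK_WIDTH_2=y plus CONFIG_MTD_CFI_I1=y,
 * so that the map accesses compile down to fixed-width inline accessors
 * with no out-of-line helper calls.
 */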
584 
585 static void xip_disable(struct map_info *map, struct flchip *chip,
586 			unsigned long adr)
587 {
588 	/* TODO: chips with no XIP use should ignore and return */
589 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
590 	local_irq_disable();
591 }
592 
593 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
594 				unsigned long adr)
595 {
596 	struct cfi_private *cfi = map->fldrv_priv;
597 
598 	if (chip->state != FL_POINT && chip->state != FL_READY) {
599 		map_write(map, CMD(0xf0), adr);
600 		chip->state = FL_READY;
601 	}
602 	(void) map_read(map, adr);
603 	xip_iprefetch();
604 	local_irq_enable();
605 }
606 
607 /*
608  * When a delay is required for the flash operation to complete, the
609  * xip_udelay() function polls for both the given timeout and pending
610  * (but still masked) hardware interrupts.  Whenever there is an interrupt
611  * pending then the flash erase operation is suspended, array mode restored
612  * and interrupts unmasked.  Task scheduling might also happen at that
613  * point.  The CPU eventually returns from the interrupt or the call to
614  * schedule() and the suspended flash operation is resumed for the remainder
615  * of the delay period.
616  *
617  * Warning: this function _will_ fool interrupt latency tracing tools.
618  */
619 
620 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
621 				unsigned long adr, int usec)
622 {
623 	struct cfi_private *cfi = map->fldrv_priv;
624 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
625 	map_word status, OK = CMD(0x80);
626 	unsigned long suspended, start = xip_currtime();
627 	flstate_t oldstate;
628 
629 	do {
630 		cpu_relax();
631 		if (xip_irqpending() && extp &&
632 		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
633 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
634 			/*
635 			 * Let's suspend the erase operation when supported.
636 			 * Note that we currently don't try to suspend
637 			 * interleaved chips if there is already another
638 			 * operation suspended (imagine what happens
639 			 * when one chip was already done with the current
640 			 * operation while another chip suspended it, then
641 			 * we resume the whole thing at once).  Yes, it
642 			 * can happen!
643 			 */
644 			map_write(map, CMD(0xb0), adr);
645 			usec -= xip_elapsed_since(start);
646 			suspended = xip_currtime();
647 			do {
648 				if (xip_elapsed_since(suspended) > 100000) {
649 					/*
650 					 * The chip doesn't want to suspend
651 					 * after waiting for 100 msecs.
652 					 * This is a critical error but there
653 					 * is not much we can do here.
654 					 */
655 					return;
656 				}
657 				status = map_read(map, adr);
658 			} while (!map_word_andequal(map, status, OK, OK));
659 
660 			/* Suspend succeeded */
661 			oldstate = chip->state;
662 			if (!map_word_bitsset(map, status, CMD(0x40)))
663 				break;
664 			chip->state = FL_XIP_WHILE_ERASING;
665 			chip->erase_suspended = 1;
666 			map_write(map, CMD(0xf0), adr);
667 			(void) map_read(map, adr);
668 			asm volatile (".rep 8; nop; .endr");
669 			local_irq_enable();
670 			spin_unlock(chip->mutex);
671 			asm volatile (".rep 8; nop; .endr");
672 			cond_resched();
673 
674 			/*
675 			 * We're back.  However someone else might have
676 			 * decided to go write to the chip if we are in
677 			 * a suspended erase state.  If so let's wait
678 			 * until it's done.
679 			 */
680 			spin_lock(chip->mutex);
681 			while (chip->state != FL_XIP_WHILE_ERASING) {
682 				DECLARE_WAITQUEUE(wait, current);
683 				set_current_state(TASK_UNINTERRUPTIBLE);
684 				add_wait_queue(&chip->wq, &wait);
685 				spin_unlock(chip->mutex);
686 				schedule();
687 				remove_wait_queue(&chip->wq, &wait);
688 				spin_lock(chip->mutex);
689 			}
690 			/* Disallow XIP again */
691 			local_irq_disable();
692 
693 			/* Resume the write or erase operation */
694 			map_write(map, CMD(0x30), adr);
695 			chip->state = oldstate;
696 			start = xip_currtime();
697 		} else if (usec >= 1000000/HZ) {
698 			/*
699 			 * Try to save on CPU power when waiting delay
700 			 * is at least a system timer tick period.
701 			 * No need to be extremely accurate here.
702 			 */
703 			xip_cpu_idle();
704 		}
705 		status = map_read(map, adr);
706 	} while (!map_word_andequal(map, status, OK, OK)
707 		 && xip_elapsed_since(start) < usec);
708 }
709 
710 #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
711 
712 /*
713  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
714  * the flash is actively programming or erasing since we have to poll for
715  * the operation to complete anyway.  We can't do that in a generic way with
716  * an XIP setup, so do it before the actual flash operation in this case
717  * and stub it out from INVALIDATE_CACHE_UDELAY.
718  */
719 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
720 	INVALIDATE_CACHED_RANGE(map, from, size)
721 
722 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
723 	UDELAY(map, chip, adr, usec)
724 
725 /*
726  * Extra notes:
727  *
728  * Activating this XIP support changes the way the code works a bit.  For
729  * example the code to suspend the current process when concurrent access
730  * happens is never executed because xip_udelay() will always return with the
731  * same chip state as it was entered with.  This is why there is no care for
732  * the presence of add_wait_queue() or schedule() calls from within a couple
733  * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
734  * The queueing and scheduling are always happening within xip_udelay().
735  *
736  * Similarly, get_chip() and put_chip() just happen to always be executed
737  * with chip->state set to FL_READY (or FL_XIP_WHILE_*), where the flash
738  * is in array mode, therefore never executing many cases therein and not
739  * causing any problem with XIP.
740  */
741 
742 #else
743 
744 #define xip_disable(map, chip, adr)
745 #define xip_enable(map, chip, adr)
746 #define XIP_INVAL_CACHED_RANGE(x...)
747 
748 #define UDELAY(map, chip, adr, usec)  \
749 do {  \
750 	spin_unlock(chip->mutex);  \
751 	cfi_udelay(usec);  \
752 	spin_lock(chip->mutex);  \
753 } while (0)
754 
755 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
756 do {  \
757 	spin_unlock(chip->mutex);  \
758 	INVALIDATE_CACHED_RANGE(map, adr, len);  \
759 	cfi_udelay(usec);  \
760 	spin_lock(chip->mutex);  \
761 } while (0)
762 
763 #endif
764 
765 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
766 {
767 	unsigned long cmd_addr;
768 	struct cfi_private *cfi = map->fldrv_priv;
769 	int ret;
770 
771 	adr += chip->start;
772 
773 	/* Ensure cmd read/writes are aligned. */
774 	cmd_addr = adr & ~(map_bankwidth(map)-1);
775 
776 	spin_lock(chip->mutex);
777 	ret = get_chip(map, chip, cmd_addr, FL_READY);
778 	if (ret) {
779 		spin_unlock(chip->mutex);
780 		return ret;
781 	}
782 
783 	if (chip->state != FL_POINT && chip->state != FL_READY) {
784 		map_write(map, CMD(0xf0), cmd_addr);
785 		chip->state = FL_READY;
786 	}
787 
788 	map_copy_from(map, buf, adr, len);
789 
790 	put_chip(map, chip, cmd_addr);
791 
792 	spin_unlock(chip->mutex);
793 	return 0;
794 }
795 
796 
797 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
798 {
799 	struct map_info *map = mtd->priv;
800 	struct cfi_private *cfi = map->fldrv_priv;
801 	unsigned long ofs;
802 	int chipnum;
803 	int ret = 0;
804 
805 	/* ofs: offset within the first chip at which the first read should start */
806 
807 	chipnum = (from >> cfi->chipshift);
808 	ofs = from - (chipnum <<  cfi->chipshift);
809 
810 
811 	*retlen = 0;
812 
813 	while (len) {
814 		unsigned long thislen;
815 
816 		if (chipnum >= cfi->numchips)
817 			break;
818 
819 		if ((len + ofs -1) >> cfi->chipshift)
820 			thislen = (1<<cfi->chipshift) - ofs;
821 		else
822 			thislen = len;
823 
824 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
825 		if (ret)
826 			break;
827 
828 		*retlen += thislen;
829 		len -= thislen;
830 		buf += thislen;
831 
832 		ofs = 0;
833 		chipnum++;
834 	}
835 	return ret;
836 }
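/*
 * Worked example of the split above, with illustrative numbers: given two
 * chips of 8MiB each (cfi->chipshift == 23), a 16KiB read starting at
 * 0x7ff000 takes 4KiB from the end of chip 0 and the remaining 12KiB from
 * offset 0 of chip 1, calling do_read_onechip() once per chip.
 */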
837 
838 
839 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
840 {
841 	DECLARE_WAITQUEUE(wait, current);
842 	unsigned long timeo = jiffies + HZ;
843 	struct cfi_private *cfi = map->fldrv_priv;
844 
845  retry:
846 	spin_lock(chip->mutex);
847 
848 	if (chip->state != FL_READY){
849 #if 0
850 		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
851 #endif
852 		set_current_state(TASK_UNINTERRUPTIBLE);
853 		add_wait_queue(&chip->wq, &wait);
854 
855 		spin_unlock(chip->mutex);
856 
857 		schedule();
858 		remove_wait_queue(&chip->wq, &wait);
859 #if 0
860 		if(signal_pending(current))
861 			return -EINTR;
862 #endif
863 		timeo = jiffies + HZ;
864 
865 		goto retry;
866 	}
867 
868 	adr += chip->start;
869 
870 	chip->state = FL_READY;
871 
872 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
873 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
874 	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
875 
876 	map_copy_from(map, buf, adr, len);
877 
878 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
879 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
880 	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
881 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
882 
883 	wake_up(&chip->wq);
884 	spin_unlock(chip->mutex);
885 
886 	return 0;
887 }
888 
889 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
890 {
891 	struct map_info *map = mtd->priv;
892 	struct cfi_private *cfi = map->fldrv_priv;
893 	unsigned long ofs;
894 	int chipnum;
895 	int ret = 0;
896 
897 
898 	/* ofs: offset within the first chip at which the first read should start */
899 
900 	/* 8 secsi bytes per chip */
901 	chipnum = from >> 3;
902 	ofs = from & 7;
903 
904 
905 	*retlen = 0;
906 
907 	while (len) {
908 		unsigned long thislen;
909 
910 		if (chipnum >= cfi->numchips)
911 			break;
912 
913 		if ((len + ofs -1) >> 3)
914 			thislen = (1<<3) - ofs;
915 		else
916 			thislen = len;
917 
918 		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
919 		if (ret)
920 			break;
921 
922 		*retlen += thislen;
923 		len -= thislen;
924 		buf += thislen;
925 
926 		ofs = 0;
927 		chipnum++;
928 	}
929 	return ret;
930 }
931 
932 
933 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
934 {
935 	struct cfi_private *cfi = map->fldrv_priv;
936 	unsigned long timeo = jiffies + HZ;
937 	/*
938 	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
939 	 * have a max write time of a few hundred usecs). However, we should
940 	 * use the maximum timeout value given by the chip at probe time
941 	 * instead.  Unfortunately, struct flchip does not have a field for
942 	 * the maximum timeout, only for the typical one, which can be far
943 	 * too short depending on the conditions.  The ' + 1' is to avoid
944 	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
945 	 */
946 	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
947 	int ret = 0;
948 	map_word oldd;
949 	int retry_cnt = 0;
950 
951 	adr += chip->start;
952 
953 	spin_lock(chip->mutex);
954 	ret = get_chip(map, chip, adr, FL_WRITING);
955 	if (ret) {
956 		spin_unlock(chip->mutex);
957 		return ret;
958 	}
959 
960 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
961 	       __func__, adr, datum.x[0] );
962 
963 	/*
964 	 * Check for a NOP for the case when the datum to write is already
965 	 * present - it saves time and works around buggy chips that corrupt
966 	 * data at other locations when 0xff is written to a location that
967 	 * already contains 0xff.
968 	 */
969 	oldd = map_read(map, adr);
970 	if (map_word_equal(map, oldd, datum)) {
971 		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
972 		       __func__);
973 		goto op_done;
974 	}
975 
976 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
977 	ENABLE_VPP(map);
978 	xip_disable(map, chip, adr);
979  retry:
980 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
981 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
982 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
983 	map_write(map, datum, adr);
984 	chip->state = FL_WRITING;
985 
986 	INVALIDATE_CACHE_UDELAY(map, chip,
987 				adr, map_bankwidth(map),
988 				chip->word_write_time);
989 
990 	/* See comment above for timeout value. */
991 	timeo = jiffies + uWriteTimeout;
992 	for (;;) {
993 		if (chip->state != FL_WRITING) {
994 			/* Someone's suspended the write. Sleep */
995 			DECLARE_WAITQUEUE(wait, current);
996 
997 			set_current_state(TASK_UNINTERRUPTIBLE);
998 			add_wait_queue(&chip->wq, &wait);
999 			spin_unlock(chip->mutex);
1000 			schedule();
1001 			remove_wait_queue(&chip->wq, &wait);
1002 			timeo = jiffies + (HZ / 2); /* FIXME */
1003 			spin_lock(chip->mutex);
1004 			continue;
1005 		}
1006 
1007 		if (chip_ready(map, adr))
1008 			break;
1009 
1010 		if (time_after(jiffies, timeo)) {
1011 			xip_enable(map, chip, adr);
1012 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1013 			xip_disable(map, chip, adr);
1014 			break;
1015 		}
1016 
1017 		/* Latency issues. Drop the lock, wait a while and retry */
1018 		UDELAY(map, chip, adr, 1);
1019 	}
1020 	/* Did we succeed? */
1021 	if (!chip_good(map, adr, datum)) {
1022 		/* reset on all failures. */
1023 		map_write( map, CMD(0xF0), chip->start );
1024 		/* FIXME - should have reset delay before continuing */
1025 
1026 		if (++retry_cnt <= MAX_WORD_RETRIES)
1027 			goto retry;
1028 
1029 		ret = -EIO;
1030 	}
1031 	xip_enable(map, chip, adr);
1032  op_done:
1033 	chip->state = FL_READY;
1034 	put_chip(map, chip, adr);
1035 	spin_unlock(chip->mutex);
1036 
1037 	return ret;
1038 }
1039 
1040 
1041 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1042 				  size_t *retlen, const u_char *buf)
1043 {
1044 	struct map_info *map = mtd->priv;
1045 	struct cfi_private *cfi = map->fldrv_priv;
1046 	int ret = 0;
1047 	int chipnum;
1048 	unsigned long ofs, chipstart;
1049 	DECLARE_WAITQUEUE(wait, current);
1050 
1051 	*retlen = 0;
1052 	if (!len)
1053 		return 0;
1054 
1055 	chipnum = to >> cfi->chipshift;
1056 	ofs = to  - (chipnum << cfi->chipshift);
1057 	chipstart = cfi->chips[chipnum].start;
1058 
1059 	/* If it's not bus-aligned, do the first byte write */
1060 	if (ofs & (map_bankwidth(map)-1)) {
1061 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1062 		int i = ofs - bus_ofs;
1063 		int n = 0;
1064 		map_word tmp_buf;
1065 
1066  retry:
1067 		spin_lock(cfi->chips[chipnum].mutex);
1068 
1069 		if (cfi->chips[chipnum].state != FL_READY) {
1070 #if 0
1071 			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1072 #endif
1073 			set_current_state(TASK_UNINTERRUPTIBLE);
1074 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1075 
1076 			spin_unlock(cfi->chips[chipnum].mutex);
1077 
1078 			schedule();
1079 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1080 #if 0
1081 			if(signal_pending(current))
1082 				return -EINTR;
1083 #endif
1084 			goto retry;
1085 		}
1086 
1087 		/* Load 'tmp_buf' with old contents of flash */
1088 		tmp_buf = map_read(map, bus_ofs+chipstart);
1089 
1090 		spin_unlock(cfi->chips[chipnum].mutex);
1091 
1092 		/* Number of bytes to copy from buffer */
1093 		n = min_t(int, len, map_bankwidth(map)-i);
1094 
1095 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1096 
1097 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1098 				       bus_ofs, tmp_buf);
1099 		if (ret)
1100 			return ret;
1101 
1102 		ofs += n;
1103 		buf += n;
1104 		(*retlen) += n;
1105 		len -= n;
1106 
1107 		if (ofs >> cfi->chipshift) {
1108 			chipnum ++;
1109 			ofs = 0;
1110 			if (chipnum == cfi->numchips)
1111 				return 0;
1112 		}
1113 	}
1114 
1115 	/* We are now aligned, write as much as possible */
1116 	while(len >= map_bankwidth(map)) {
1117 		map_word datum;
1118 
1119 		datum = map_word_load(map, buf);
1120 
1121 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1122 				       ofs, datum);
1123 		if (ret)
1124 			return ret;
1125 
1126 		ofs += map_bankwidth(map);
1127 		buf += map_bankwidth(map);
1128 		(*retlen) += map_bankwidth(map);
1129 		len -= map_bankwidth(map);
1130 
1131 		if (ofs >> cfi->chipshift) {
1132 			chipnum ++;
1133 			ofs = 0;
1134 			if (chipnum == cfi->numchips)
1135 				return 0;
1136 			chipstart = cfi->chips[chipnum].start;
1137 		}
1138 	}
1139 
1140 	/* Write the trailing bytes if any */
1141 	if (len & (map_bankwidth(map)-1)) {
1142 		map_word tmp_buf;
1143 
1144  retry1:
1145 		spin_lock(cfi->chips[chipnum].mutex);
1146 
1147 		if (cfi->chips[chipnum].state != FL_READY) {
1148 #if 0
1149 			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1150 #endif
1151 			set_current_state(TASK_UNINTERRUPTIBLE);
1152 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1153 
1154 			spin_unlock(cfi->chips[chipnum].mutex);
1155 
1156 			schedule();
1157 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1158 #if 0
1159 			if(signal_pending(current))
1160 				return -EINTR;
1161 #endif
1162 			goto retry1;
1163 		}
1164 
1165 		tmp_buf = map_read(map, ofs + chipstart);
1166 
1167 		spin_unlock(cfi->chips[chipnum].mutex);
1168 
1169 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1170 
1171 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1172 				ofs, tmp_buf);
1173 		if (ret)
1174 			return ret;
1175 
1176 		(*retlen) += len;
1177 	}
1178 
1179 	return 0;
1180 }
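/*
 * Usage sketch (hypothetical caller, error handling trimmed): in this era
 * the write entry point is a function pointer on struct mtd_info, so a
 * user of this driver ends up here via something like:
 */
#if 0
static int example_write(struct mtd_info *mtd, loff_t to,
			 const u_char *buf, size_t len)
{
	size_t retlen = 0;
	int err;

	/* Dispatches to cfi_amdstd_write_words() or _write_buffers() */
	err = mtd->write(mtd, to, len, &retlen, buf);
	if (err)
		return err;
	return (retlen == len) ? 0 : -EIO;
}
#endif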
1181 
1182 
1183 /*
1184  * FIXME: interleaved mode not tested, and probably not supported!
1185  */
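/*
 * The buffered write below follows the AMD "Write Buffer Programming"
 * sequence: two unlock cycles (0xAA/0x55), Write Buffer Load (0x25) at
 * the sector address, a word count of (words - 1), the data words
 * themselves, and finally the Program Buffer to Flash confirm (0x29).
 */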
1186 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1187 				    unsigned long adr, const u_char *buf,
1188 				    int len)
1189 {
1190 	struct cfi_private *cfi = map->fldrv_priv;
1191 	unsigned long timeo = jiffies + HZ;
1192 	/* see comments in do_write_oneword() regarding uWriteTimeout. */
1193 	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1194 	int ret = -EIO;
1195 	unsigned long cmd_adr;
1196 	int z, words;
1197 	map_word datum;
1198 
1199 	adr += chip->start;
1200 	cmd_adr = adr;
1201 
1202 	spin_lock(chip->mutex);
1203 	ret = get_chip(map, chip, adr, FL_WRITING);
1204 	if (ret) {
1205 		spin_unlock(chip->mutex);
1206 		return ret;
1207 	}
1208 
1209 	datum = map_word_load(map, buf);
1210 
1211 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1212 	       __func__, adr, datum.x[0] );
1213 
1214 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1215 	ENABLE_VPP(map);
1216 	xip_disable(map, chip, cmd_adr);
1217 
1218 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1219 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1220 	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1221 
1222 	/* Write Buffer Load */
1223 	map_write(map, CMD(0x25), cmd_adr);
1224 
1225 	chip->state = FL_WRITING_TO_BUFFER;
1226 
1227 	/* Write length of data to come */
1228 	words = len / map_bankwidth(map);
1229 	map_write(map, CMD(words - 1), cmd_adr);
1230 	/* Write data */
1231 	z = 0;
1232 	while(z < words * map_bankwidth(map)) {
1233 		datum = map_word_load(map, buf);
1234 		map_write(map, datum, adr + z);
1235 
1236 		z += map_bankwidth(map);
1237 		buf += map_bankwidth(map);
1238 	}
1239 	z -= map_bankwidth(map);
1240 
1241 	adr += z;
1242 
1243 	/* Write Buffer Program Confirm: GO GO GO */
1244 	map_write(map, CMD(0x29), cmd_adr);
1245 	chip->state = FL_WRITING;
1246 
1247 	INVALIDATE_CACHE_UDELAY(map, chip,
1248 				adr, map_bankwidth(map),
1249 				chip->word_write_time);
1250 
1251 	timeo = jiffies + uWriteTimeout;
1252 
1253 	for (;;) {
1254 		if (chip->state != FL_WRITING) {
1255 			/* Someone's suspended the write. Sleep */
1256 			DECLARE_WAITQUEUE(wait, current);
1257 
1258 			set_current_state(TASK_UNINTERRUPTIBLE);
1259 			add_wait_queue(&chip->wq, &wait);
1260 			spin_unlock(chip->mutex);
1261 			schedule();
1262 			remove_wait_queue(&chip->wq, &wait);
1263 			timeo = jiffies + (HZ / 2); /* FIXME */
1264 			spin_lock(chip->mutex);
1265 			continue;
1266 		}
1267 
1268 		if (chip_ready(map, adr)) {
1269 			xip_enable(map, chip, adr);
1270 			goto op_done;
1271 		}
1272 
1273 		if( time_after(jiffies, timeo))
1274 			break;
1275 
1276 		/* Latency issues. Drop the lock, wait a while and retry */
1277 		UDELAY(map, chip, adr, 1);
1278 	}
1279 
1280 	/* reset on all failures. */
1281 	map_write( map, CMD(0xF0), chip->start );
1282 	xip_enable(map, chip, adr);
1283 	/* FIXME - should have reset delay before continuing */
1284 
1285 	printk(KERN_WARNING "MTD %s(): software timeout\n",
1286 	       __func__ );
1287 
1288 	ret = -EIO;
1289  op_done:
1290 	chip->state = FL_READY;
1291 	put_chip(map, chip, adr);
1292 	spin_unlock(chip->mutex);
1293 
1294 	return ret;
1295 }
1296 
1297 
1298 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1299 				    size_t *retlen, const u_char *buf)
1300 {
1301 	struct map_info *map = mtd->priv;
1302 	struct cfi_private *cfi = map->fldrv_priv;
1303 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
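	/* e.g. MaxBufWriteSize == 5 means a 2^5 == 32-byte buffer per chip,
	   so two-way interleave gives wbufsize == 64 (illustrative values) */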
1304 	int ret = 0;
1305 	int chipnum;
1306 	unsigned long ofs;
1307 
1308 	*retlen = 0;
1309 	if (!len)
1310 		return 0;
1311 
1312 	chipnum = to >> cfi->chipshift;
1313 	ofs = to  - (chipnum << cfi->chipshift);
1314 
1315 	/* If it's not bus-aligned, do the first word write */
1316 	if (ofs & (map_bankwidth(map)-1)) {
1317 		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1318 		if (local_len > len)
1319 			local_len = len;
1320 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1321 					     local_len, retlen, buf);
1322 		if (ret)
1323 			return ret;
1324 		ofs += local_len;
1325 		buf += local_len;
1326 		len -= local_len;
1327 
1328 		if (ofs >> cfi->chipshift) {
1329 			chipnum ++;
1330 			ofs = 0;
1331 			if (chipnum == cfi->numchips)
1332 				return 0;
1333 		}
1334 	}
1335 
1336 	/* The write buffer is worth it only if there is more than one word to write... */
1337 	while (len >= map_bankwidth(map) * 2) {
1338 		/* We must not cross write block boundaries */
1339 		int size = wbufsize - (ofs & (wbufsize-1));
1340 
1341 		if (size > len)
1342 			size = len;
1343 		if (size % map_bankwidth(map))
1344 			size -= size % map_bankwidth(map);
1345 
1346 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1347 				      ofs, buf, size);
1348 		if (ret)
1349 			return ret;
1350 
1351 		ofs += size;
1352 		buf += size;
1353 		(*retlen) += size;
1354 		len -= size;
1355 
1356 		if (ofs >> cfi->chipshift) {
1357 			chipnum ++;
1358 			ofs = 0;
1359 			if (chipnum == cfi->numchips)
1360 				return 0;
1361 		}
1362 	}
1363 
1364 	if (len) {
1365 		size_t retlen_dregs = 0;
1366 
1367 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1368 					     len, &retlen_dregs, buf);
1369 
1370 		*retlen += retlen_dregs;
1371 		return ret;
1372 	}
1373 
1374 	return 0;
1375 }
1376 
1377 
1378 /*
1379  * Handle devices with one erase region that only implement
1380  * the chip erase command.
1381  */
1382 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1383 {
1384 	struct cfi_private *cfi = map->fldrv_priv;
1385 	unsigned long timeo = jiffies + HZ;
1386 	unsigned long int adr;
1387 	DECLARE_WAITQUEUE(wait, current);
1388 	int ret = 0;
1389 
1390 	adr = cfi->addr_unlock1;
1391 
1392 	spin_lock(chip->mutex);
1393 	ret = get_chip(map, chip, adr, FL_WRITING);
1394 	if (ret) {
1395 		spin_unlock(chip->mutex);
1396 		return ret;
1397 	}
1398 
1399 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1400 	       __func__, chip->start );
1401 
1402 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1403 	ENABLE_VPP(map);
1404 	xip_disable(map, chip, adr);
1405 
1406 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1407 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1408 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1409 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1410 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1411 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1412 
1413 	chip->state = FL_ERASING;
1414 	chip->erase_suspended = 0;
1415 	chip->in_progress_block_addr = adr;
1416 
1417 	INVALIDATE_CACHE_UDELAY(map, chip,
1418 				adr, map->size,
1419 				chip->erase_time*500);
1420 
1421 	timeo = jiffies + (HZ*20);
1422 
1423 	for (;;) {
1424 		if (chip->state != FL_ERASING) {
1425 			/* Someone's suspended the erase. Sleep */
1426 			set_current_state(TASK_UNINTERRUPTIBLE);
1427 			add_wait_queue(&chip->wq, &wait);
1428 			spin_unlock(chip->mutex);
1429 			schedule();
1430 			remove_wait_queue(&chip->wq, &wait);
1431 			spin_lock(chip->mutex);
1432 			continue;
1433 		}
1434 		if (chip->erase_suspended) {
1435 			/* This erase was suspended and resumed.
1436 			   Adjust the timeout */
1437 			timeo = jiffies + (HZ*20); /* FIXME */
1438 			chip->erase_suspended = 0;
1439 		}
1440 
1441 		if (chip_ready(map, adr))
1442 			break;
1443 
1444 		if (time_after(jiffies, timeo)) {
1445 			printk(KERN_WARNING "MTD %s(): software timeout\n",
1446 				__func__ );
1447 			break;
1448 		}
1449 
1450 		/* Latency issues. Drop the lock, wait a while and retry */
1451 		UDELAY(map, chip, adr, 1000000/HZ);
1452 	}
1453 	/* Did we succeed? */
1454 	if (!chip_good(map, adr, map_word_ff(map))) {
1455 		/* reset on all failures. */
1456 		map_write( map, CMD(0xF0), chip->start );
1457 		/* FIXME - should have reset delay before continuing */
1458 
1459 		ret = -EIO;
1460 	}
1461 
1462 	chip->state = FL_READY;
1463 	xip_enable(map, chip, adr);
1464 	put_chip(map, chip, adr);
1465 	spin_unlock(chip->mutex);
1466 
1467 	return ret;
1468 }
1469 
1470 
1471 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1472 {
1473 	struct cfi_private *cfi = map->fldrv_priv;
1474 	unsigned long timeo = jiffies + HZ;
1475 	DECLARE_WAITQUEUE(wait, current);
1476 	int ret = 0;
1477 
1478 	adr += chip->start;
1479 
1480 	spin_lock(chip->mutex);
1481 	ret = get_chip(map, chip, adr, FL_ERASING);
1482 	if (ret) {
1483 		spin_unlock(chip->mutex);
1484 		return ret;
1485 	}
1486 
1487 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1488 	       __func__, adr );
1489 
1490 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1491 	ENABLE_VPP(map);
1492 	xip_disable(map, chip, adr);
1493 
1494 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1495 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1496 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1497 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1498 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1499 	map_write(map, CMD(0x30), adr);
1500 
1501 	chip->state = FL_ERASING;
1502 	chip->erase_suspended = 0;
1503 	chip->in_progress_block_addr = adr;
1504 
1505 	INVALIDATE_CACHE_UDELAY(map, chip,
1506 				adr, len,
1507 				chip->erase_time*500);
1508 
1509 	timeo = jiffies + (HZ*20);
1510 
1511 	for (;;) {
1512 		if (chip->state != FL_ERASING) {
1513 			/* Someone's suspended the erase. Sleep */
1514 			set_current_state(TASK_UNINTERRUPTIBLE);
1515 			add_wait_queue(&chip->wq, &wait);
1516 			spin_unlock(chip->mutex);
1517 			schedule();
1518 			remove_wait_queue(&chip->wq, &wait);
1519 			spin_lock(chip->mutex);
1520 			continue;
1521 		}
1522 		if (chip->erase_suspended) {
1523 			/* This erase was suspended and resumed.
1524 			   Adjust the timeout */
1525 			timeo = jiffies + (HZ*20); /* FIXME */
1526 			chip->erase_suspended = 0;
1527 		}
1528 
1529 		if (chip_ready(map, adr)) {
1530 			xip_enable(map, chip, adr);
1531 			break;
1532 		}
1533 
1534 		if (time_after(jiffies, timeo)) {
1535 			xip_enable(map, chip, adr);
1536 			printk(KERN_WARNING "MTD %s(): software timeout\n",
1537 				__func__ );
1538 			break;
1539 		}
1540 
1541 		/* Latency issues. Drop the lock, wait a while and retry */
1542 		UDELAY(map, chip, adr, 1000000/HZ);
1543 	}
1544 	/* Did we succeed? */
1545 	if (!chip_good(map, adr, map_word_ff(map))) {
1546 		/* reset on all failures. */
1547 		map_write( map, CMD(0xF0), chip->start );
1548 		/* FIXME - should have reset delay before continuing */
1549 
1550 		ret = -EIO;
1551 	}
1552 
1553 	chip->state = FL_READY;
1554 	put_chip(map, chip, adr);
1555 	spin_unlock(chip->mutex);
1556 	return ret;
1557 }
1558 
1559 
1560 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1561 {
1562 	unsigned long ofs, len;
1563 	int ret;
1564 
1565 	ofs = instr->addr;
1566 	len = instr->len;
1567 
1568 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1569 	if (ret)
1570 		return ret;
1571 
1572 	instr->state = MTD_ERASE_DONE;
1573 	mtd_erase_callback(instr);
1574 
1575 	return 0;
1576 }
1577 
1578 
1579 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1580 {
1581 	struct map_info *map = mtd->priv;
1582 	struct cfi_private *cfi = map->fldrv_priv;
1583 	int ret = 0;
1584 
1585 	if (instr->addr != 0)
1586 		return -EINVAL;
1587 
1588 	if (instr->len != mtd->size)
1589 		return -EINVAL;
1590 
1591 	ret = do_erase_chip(map, &cfi->chips[0]);
1592 	if (ret)
1593 		return ret;
1594 
1595 	instr->state = MTD_ERASE_DONE;
1596 	mtd_erase_callback(instr);
1597 
1598 	return 0;
1599 }
1600 
1601 
1602 static void cfi_amdstd_sync (struct mtd_info *mtd)
1603 {
1604 	struct map_info *map = mtd->priv;
1605 	struct cfi_private *cfi = map->fldrv_priv;
1606 	int i;
1607 	struct flchip *chip;
1608 	int ret = 0;
1609 	DECLARE_WAITQUEUE(wait, current);
1610 
1611 	for (i=0; !ret && i<cfi->numchips; i++) {
1612 		chip = &cfi->chips[i];
1613 
1614 	retry:
1615 		spin_lock(chip->mutex);
1616 
1617 		switch(chip->state) {
1618 		case FL_READY:
1619 		case FL_STATUS:
1620 		case FL_CFI_QUERY:
1621 		case FL_JEDEC_QUERY:
1622 			chip->oldstate = chip->state;
1623 			chip->state = FL_SYNCING;
1624 			/* No need to wake_up() on this state change -
1625 			 * as the whole point is that nobody can do anything
1626 			 * with the chip now anyway.
1627 			 */
1628 		case FL_SYNCING:
1629 			spin_unlock(chip->mutex);
1630 			break;
1631 
1632 		default:
1633 			/* Not an idle state */
1634 			add_wait_queue(&chip->wq, &wait);
1635 
1636 			spin_unlock(chip->mutex);
1637 
1638 			schedule();
1639 
1640 			remove_wait_queue(&chip->wq, &wait);
1641 
1642 			goto retry;
1643 		}
1644 	}
1645 
1646 	/* Unlock the chips again */
1647 
1648 	for (i--; i >=0; i--) {
1649 		chip = &cfi->chips[i];
1650 
1651 		spin_lock(chip->mutex);
1652 
1653 		if (chip->state == FL_SYNCING) {
1654 			chip->state = chip->oldstate;
1655 			wake_up(&chip->wq);
1656 		}
1657 		spin_unlock(chip->mutex);
1658 	}
1659 }
1660 
1661 
1662 static int cfi_amdstd_suspend(struct mtd_info *mtd)
1663 {
1664 	struct map_info *map = mtd->priv;
1665 	struct cfi_private *cfi = map->fldrv_priv;
1666 	int i;
1667 	struct flchip *chip;
1668 	int ret = 0;
1669 
1670 	for (i=0; !ret && i<cfi->numchips; i++) {
1671 		chip = &cfi->chips[i];
1672 
1673 		spin_lock(chip->mutex);
1674 
1675 		switch(chip->state) {
1676 		case FL_READY:
1677 		case FL_STATUS:
1678 		case FL_CFI_QUERY:
1679 		case FL_JEDEC_QUERY:
1680 			chip->oldstate = chip->state;
1681 			chip->state = FL_PM_SUSPENDED;
1682 			/* No need to wake_up() on this state change -
1683 			 * as the whole point is that nobody can do anything
1684 			 * with the chip now anyway.
1685 			 */
1686 		case FL_PM_SUSPENDED:
1687 			break;
1688 
1689 		default:
1690 			ret = -EAGAIN;
1691 			break;
1692 		}
1693 		spin_unlock(chip->mutex);
1694 	}
1695 
1696 	/* Unlock the chips again */
1697 
1698 	if (ret) {
1699 		for (i--; i >=0; i--) {
1700 			chip = &cfi->chips[i];
1701 
1702 			spin_lock(chip->mutex);
1703 
1704 			if (chip->state == FL_PM_SUSPENDED) {
1705 				chip->state = chip->oldstate;
1706 				wake_up(&chip->wq);
1707 			}
1708 			spin_unlock(chip->mutex);
1709 		}
1710 	}
1711 
1712 	return ret;
1713 }
1714 
1715 
1716 static void cfi_amdstd_resume(struct mtd_info *mtd)
1717 {
1718 	struct map_info *map = mtd->priv;
1719 	struct cfi_private *cfi = map->fldrv_priv;
1720 	int i;
1721 	struct flchip *chip;
1722 
1723 	for (i=0; i<cfi->numchips; i++) {
1724 
1725 		chip = &cfi->chips[i];
1726 
1727 		spin_lock(chip->mutex);
1728 
1729 		if (chip->state == FL_PM_SUSPENDED) {
1730 			chip->state = FL_READY;
1731 			map_write(map, CMD(0xF0), chip->start);
1732 			wake_up(&chip->wq);
1733 		}
1734 		else
1735 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1736 
1737 		spin_unlock(chip->mutex);
1738 	}
1739 }
1740 
1741 static void cfi_amdstd_destroy(struct mtd_info *mtd)
1742 {
1743 	struct map_info *map = mtd->priv;
1744 	struct cfi_private *cfi = map->fldrv_priv;
1745 	kfree(cfi->cmdset_priv);
1746 	kfree(cfi->cfiq);
1747 	kfree(cfi);
1748 	kfree(mtd->eraseregions);
1749 }
1750 
1751 static char im_name[] = "cfi_cmdset_0002";
1752 
1753 
1754 static int __init cfi_amdstd_init(void)
1755 {
1756 	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
1757 	return 0;
1758 }
1759 
1760 
1761 static void __exit cfi_amdstd_exit(void)
1762 {
1763 	inter_module_unregister(im_name);
1764 }
1765 
1766 
1767 module_init(cfi_amdstd_init);
1768 module_exit(cfi_amdstd_exit);
1769 
1770 MODULE_LICENSE("GPL");
1771 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1772 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
1773