/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF008A		0x005a

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table picks up all the cases where
	 * we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ 0, 0, NULL, NULL }
};
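
/*
 * The fixup tables above are consumed by cfi_fixup(): an entry applies when
 * its manufacturer and device IDs match those probed from the chip
 * (CFI_MFR_ANY and CFI_ID_ANY act as wildcards), in which case the entry's
 * fixup function is called with the entry's param.  The { 0, 0, NULL, NULL }
 * sentinel terminates the scan.
 */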


struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n",  extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc == 2)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc == 4)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}
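		/*
		 * Why the doubling: the chip latches its unlock cycles at
		 * word addresses 0x555/0x2aa on its own address pins.  In
		 * byte (or x16-on-x32) compatibility mode the host address
		 * bus is shifted one bit relative to those pins, so the
		 * cycles must be issued at the doubled host addresses;
		 * 0x2aa doubles to 0x554, and the lowest address bit is a
		 * don't-care for the unlock match on these parts, hence the
		 * conventional 0x555 used above.
		 */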

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}


static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
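
#if 0
/*
 * For reference only, never compiled: a sketch of the more elaborate
 * DQ6/DQ5 status poll that the comments above deliberately avoid.  It
 * assumes a non-interleaved map (a single chip), which is exactly the
 * restriction that makes the simple "no bits toggle" test preferable.
 */
static int chip_poll_dq6(struct map_info *map, unsigned long addr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word prev = map_read(map, addr);

	for (;;) {
		map_word cur = map_read(map, addr);

		/* DQ6 stopped toggling: the operation has completed */
		if (map_word_equal(map, prev, cur))
			return 0;

		/* DQ5 set while DQ6 still toggles: internal timing limit
		   exceeded.  Re-read once, since DQ6 may have stopped
		   toggling just as DQ5 rose. */
		if (map_word_bitsset(map, cur, CMD(0x20))) {
			prev = map_read(map, addr);
			cur = map_read(map, addr);
			return map_word_equal(map, prev, cur) ? 0 : -EIO;
		}
		prev = cur;
	}
}
#endif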

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no concern
 * about the presence of add_wait_queue() or schedule() calls from within a
 * couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip at which the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

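	/* Enter the SecSi (security silicon) region: standard two-cycle
	 * unlock followed by the 0x88 entry command.  The mirror sequence
	 * after the copy (0x90 then 0x00) exits back to the normal array. */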
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip at which the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical, which can be far too
	 * short depending on the conditions.  The ' + 1' is to avoid having
	 * a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
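	/*
	 * A sketch of what using the chip's own worst case could look like
	 * (not wired up here; it assumes the WordWriteTimeoutMax field of
	 * the CFI query structure is valid for the part):
	 *
	 *	max_us = 1 << (cfi->cfiq->WordWriteTimeoutTyp +
	 *		       cfi->cfiq->WordWriteTimeoutMax);
	 *	timeo  = jiffies + usecs_to_jiffies(max_us) + 1;
	 */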
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
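	/* Standard AMD three-cycle program sequence: two unlock cycles
	 * followed by the 0xA0 program command, then the datum written
	 * directly to the target address. */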
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

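	/* Six-cycle chip erase sequence: two unlock cycles, the 0x80 erase
	 * setup command, two more unlock cycles, then 0x10 (chip erase) at
	 * the unlock address. */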
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

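	/* Six-cycle sector erase sequence: two unlock cycles, the 0x80 erase
	 * setup command, two more unlock cycles, then 0x30 written directly
	 * to the sector address. */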
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
				__func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name[] = "cfi_cmdset_0002";


static int __init cfi_amdstd_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
	return 0;
}


static void __exit cfi_amdstd_exit(void)
{
	inter_module_unregister(im_name);
}


module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");