/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0020.c,v 1.19 2005/07/13 15:52:45 dwmw2 Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 *	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	/* Conservative initial timing guesses; buffer_write_time is used
	 * as a microsecond delay in do_write_buffer() and adapted there
	 * at run time.
	 */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
	}

	return cfi_staa_setup(map);
}

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
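		/*
		 * The two lines above decode the CFI erase-region word:
		 * (block size / 256) lives in the high bits, (number of
		 * blocks - 1) in the low 16 bits. E.g. an entry of
		 * 0x01000007 on a non-interleaved map gives ersize =
		 * 0x10000 (64KiB) and ernum = 8.
		 */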

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
	mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
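	/* Status register bit 7 (0x80) is the "write state machine ready"
	 * bit; CMD() replicates it for every chip on an interleaved bus,
	 * so the map_word_andequal() tests below only pass once all the
	 * interleaved chips report ready.
	 */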

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write (map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
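	/* For example, with 8MiB chips (chipshift == 23) a read from
	 * 0x900000 starts on chip 1 at offset 0x100000 within that chip.
	 */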

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);
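	/* Example (hypothetical geometry): with two interleaved x16 chips,
	 * cfi_interleave() == 2 and MaxBufWriteSize == 5, wbufsize is
	 * 2 << 5 == 64 bytes and cmd_adr rounds adr down to a 64-byte
	 * write-buffer boundary.
	 */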

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
#endif
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __FUNCTION__, map_read(map, cmd_adr).x[0]);
#endif
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
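	/* The chip is given the bus-word count minus one: e.g. a 64-byte
	 * transfer on a 4-byte-wide bus is programmed as CMD(64/4 - 1),
	 * i.e. CMD(15).
	 */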

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
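	/* Crude adaptive tuning: if the write finished before we first
	 * polled (z == 0), shorten the next delay; if we had to poll more
	 * than once, lengthen it. buffer_write_time never drops below 1.
	 */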
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* Check for errors: SR mask 0x3a covers bit 1 ('lock bit' hit),
	 * bit 3 ('VPP' low), bit 4 (program failed: 'dead cell'/'unerased
	 * cell') and bit 5 (erase failed); bits 4+5 together indicate an
	 * 'incorrect cmd' sequence -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				   size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __FUNCTION__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->eccsize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
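/* With the power-of-2 assumption, e.g. eccsize == 8: ECCBUF_DIV(13) == 8
 * (the largest ECC-aligned prefix) and ECCBUF_MOD(13) == 5 (the tail that
 * must be buffered until the next iovec or the final flush).
 */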
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

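	/* Each iovec is handled in three pieces: a head that tops up any
	 * partially filled ECC buffer left over from the previous element,
	 * a body of whole ECC-aligned blocks written straight from the
	 * iovec, and a tail that is saved in the buffer for later.
	 */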
	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			/* Skip the aligned part written above; don't rely on
			 * thislen, which is stale when there was no aligned
			 * data to write. */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase: 0x20 (block erase setup) then 0xD0 (erase confirm) */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

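		/* The modulo arithmetic below maps both sides to
		 * chip-relative addresses: when adr reaches the end of
		 * erase region i within the current chip, step on to the
		 * next region so the erasesize used above stays correct.
		 */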
		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
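	/* Block lock sequence: 0x60 (lock setup) followed by 0x01
	 * (set lock bit confirm).
	 */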
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		/* Fall through */
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
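	/* Clear-lock-bit sequence: 0x60 (lock setup) followed by 0xD0
	 * (confirm); note that on some Intel-style parts this clears the
	 * lock bits of more than the addressed block.
	 */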
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* Fall through */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock_bh(chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

static char im_name[] = "cfi_cmdset_0020";

static int __init cfi_staa_init(void)
{
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
	return 0;
}

static void __exit cfi_staa_exit(void)
{
	inter_module_unregister(im_name);
}

module_init(cfi_staa_init);
module_exit(cfi_staa_exit);

MODULE_LICENSE("GPL");