/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 * 	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};
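
/*
 * How we get here (a sketch only; the exact path depends on the board's
 * map driver and kernel configuration): the map driver calls
 * do_map_probe("cfi_probe", map), the generic CFI probe reads the
 * primary vendor command set ID from the query table, and an ID of
 * 0x0020 routes to cfi_cmdset_0020() below.
 */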

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are single BCD-coded bytes: volts in the
	   high nibble, tenths of a volt in the low one, so shift by 4. */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via EXPORT_SYMBOL_GPL
 * (historically via inter_module_register).  The addresses passed back
 * in cfi are valid as long as the use count of this module is non-zero,
 * i.e. while some caller holds a reference to the module.
 * Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i=0; i < cfi->numchips; i++) {
		/* Initial timing guesses; buffer_write_time is used as a
		   microsecond delay that self-adjusts in do_write_buffer. */
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i, j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions) {
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i < mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


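/*
 * Command-code cheat sheet for the ST/Intel-style sequences issued
 * throughout this file (consult the chip datasheet for authoritative
 * semantics):
 *   0x70 read status register        0x50 clear status register
 *   0xff return to read-array mode   0x20 + 0xd0 block erase + confirm
 *   0xe8 write to buffer             0xb0 program/erase suspend
 *   0xd0 also: erase resume/confirm  0x60 + 0x01 set block lock bit
 *   0x80 SR "WSM ready" bit mask     0x60 + 0xd0 clear block lock bit
 * CMD() replicates a value across all interleaved chips on the bus.
 */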
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write(map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
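		/* fall through */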

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif
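		/* fall through */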

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come (in bus words, minus one) */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
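/*
 * Example (a sketch, assuming the usual writesize of 8 set above):
 * ECCBUF_DIV(13) == 8 is the aligned prefix and ECCBUF_MOD(13) == 5 is
 * the tail that must be buffered until the next iovec or the final
 * flush. The bitmask trick is why ECCBUF_SIZE must be a power of 2.
 */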
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			/* The tail starts right after the aligned prefix;
			   don't rely on thislen here, which is stale (or
			   uninitialized) when no aligned write happened. */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
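		/* fall through */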

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

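	/* Status-register error bits checked below (Intel/ST SR layout):
	 *   SR.1 (0x02) block locked, SR.3 (0x08) VPP low,
	 *   SR.4 (0x10) program failure, SR.5 (0x20) erase failure;
	 *   SR.4 and SR.5 together indicate an improper command sequence.
	 * The 0x3a mask is simply the union of these error bits. */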
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Step to the next erase region once adr crosses its end
		   (both taken modulo the per-chip address space). */
		if (adr % (1 << cfi->chipshift) ==
		    ((unsigned long)regions[i].offset +
		     (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
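			/* fall through */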
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
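		/* fall through */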

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
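		/* fall through */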

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
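			/* fall through */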
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");
1399