// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat.
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 *	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>

static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are BCD: volts in the high nibble,
	   tenths of a volt in the low nibble */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via EXPORT_SYMBOL_GPL;
 * it was originally exported through the long-gone inter_module_register
 * interface.  The addresses passed back in cfi are valid as long as the
 * use count of this module is non-zero, i.e. while this module remains
 * loaded.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

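	/*
	 * Seed each chip with conservative default timings (microseconds;
	 * buffer_write_time is used as the initial cfi_udelay() delay when
	 * waiting for buffered writes) and initialise the wait queue that
	 * serialises access to the chip.
	 */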
	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i, j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions) {
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

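	/*
	 * Each CFI EraseRegionInfo word encodes the region geometry:
	 * bits 0-15 hold (number of erase blocks - 1) and bits 16-31
	 * hold (block size / 256 bytes).
	 */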
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Install the MTD operations for this command set */
	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
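	/* Status register bit 7 (WSM ready) must be set in every
	 * interleaved device before the chip will accept new commands */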
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write(map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

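		/* Erase is now suspended; put the chip into read-array
		 * mode for the copy below */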
		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

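		/* Clamp the transfer so it does not cross into the next
		 * chip in the set */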
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif
		fallthrough;
	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
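	/* Issue the write-to-buffer setup command (0xE8) and poll the
	 * status register until the chip reports the buffer available */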
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write the number of bus words to come, encoded as (count - 1) */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
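	/* Adaptively tune the initial poll delay: shorten it (never below
	 * one microsecond) if the write was already done on the first
	 * check, lengthen it if we had to poll more than once */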
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

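	/* For each segment: top up and flush any partially filled buffer
	 * first, then write the ECC-aligned middle directly, and keep the
	 * unaligned tail buffered for the next iteration (or for the
	 * final flush below) */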
	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}

static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* Check the error bits: lock (SR.1), VPP low (SR.3), program
	 * failure (SR.4) or erase failure (SR.5) -- mask 0x3a */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
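			/* The interleaved devices report different status
			 * values; merge the per-device status bytes so we
			 * act on the union of the reported error bits */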
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

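		/* If adr has reached the end of the current erase region,
		 * move on to the next one */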
		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			fallthrough;
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
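	/* Set the block lock bit: config setup (0x60) followed by
	 * set lock bit (0x01) */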
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		fallthrough;
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
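	/* Clear the block lock bit: config setup (0x60) followed by
	 * clear lock bit (0xD0) */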
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			break;

		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_DESCRIPTION("MTD chip driver for ST Advanced Architecture Command Set (ID 0x0020)");
MODULE_LICENSE("GPL");