// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
#define S29GL064N_MN12		0x0c01

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB		BIT(7)
#define CFI_SR_ESB		BIT(5)
#define CFI_SR_PSB		BIT(4)
#define CFI_SR_WBASB		BIT(3)
#define CFI_SR_SLSB		BIT(1)

enum cfi_quirks {
	CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
};

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, const u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use the status register to poll for erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of the SoftwareFeatures field in
 * the CFI Primary Vendor-Specific Extended Query table, version 1.5.
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp && extp->MinorVersion >= '5' &&
		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
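
/*
 * Editor's illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how the two SoftwareFeatures polling bits select a polling
 * method. Only the CFI_POLL_STATUS_REG-only combination switches the driver
 * to status-register polling; everything else falls back to DQ polling.
 */
#if 0
static const char *cfi_poll_method(const struct cfi_pri_amdstd *extp)
{
	u8 bits = extp->SoftwareFeatures & (CFI_POLL_STATUS_REG | CFI_POLL_DQ);

	if (bits == CFI_POLL_STATUS_REG)
		return "status-register";	/* e.g. HyperFlash */
	return "dq";				/* classic DQ6/DQ7 polling */
}
#endif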

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip's busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}

/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char *erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char *top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
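
/*
 * Editor's note on the version test above: MajorVersion and MinorVersion are
 * ASCII digits, so ((major << 8) | minor) < 0x3131 compares against "11",
 * i.e. version 1.1. A V1.0 table yields ('1' << 8) | '0' == 0x3130, which is
 * below 0x3131, so only V1.0 tables have their bootloc distrusted.
 */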

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
		return;

	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}
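
/*
 * Editor's worked example of the CFI EraseRegionInfo encoding used by the
 * fixups here and decoded in cfi_amdstd_setup(): the low 16 bits hold
 * (number of blocks - 1) and the high 16 bits hold (block size / 256).
 * So 0x002003ff means 0x03ff + 1 = 1024 blocks of 0x0020 * 256 = 8 KiB,
 * and 0x020001ff means 0x01ff + 1 = 512 blocks of 0x0200 * 256 = 128 KiB.
 */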

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8 bits to report the number of
	 * sectors, which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

static void fixup_quirks(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
		cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
	{ 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/*
	 * The CFI vendor IDs and the JEDEC vendor IDs appear to be common.
	 * It seems the device IDs are as well. This table picks all the
	 * cases where we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay.  The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive.  As a result, it is recommended
 * that a patch be applied.  Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.  The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500 µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue, see Micron TN-13-07.
	 * The worst-case delay must be 500 µs, but 30-50 µs should be OK as well.
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i = 0; i < cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions - 1) - i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}
	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1 << cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1 << cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1 << cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the maximum timeout from the timeout fields
		 * of struct cfi_ident, probed from the chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI
		 * data is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}

struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);

	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;

		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
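
/*
 * Editor's worked example of the geometry laid out above, assuming two
 * identical chips (numchips = 2) each reporting two erase regions:
 * eraseregions[] is indexed as (chip * NumEraseRegions) + region, so the
 * array holds chip0/region0, chip0/region1, chip1/region0, chip1/region1,
 * with each chip's regions shifted by chip * devsize in the offset field.
 */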

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by bits toggling, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;
	int ret;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);

		/*
		 * For chips that support the status register, check the
		 * device ready bit.
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	ret = map_word_equal(map, oldd, curd);

	if (!ret || !expected)
		return ret;

	return map_word_equal(map, curd, *expected);
}
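
/*
 * Editor's note on the double read above: while an embedded operation is
 * running, DQ6 toggles on every read, so two back-to-back reads of the same
 * address return different values. Once the operation completes, the chip is
 * back in array mode and both reads return the same (true) data, which is
 * what the map_word_equal() comparison detects.
 */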

/*
 * Like chip_ready(), but for chips with the CFI_QUIRK_DQ_TRUE_DATA quirk
 * the expected-data comparison is skipped and only the toggle check in
 * chip_ready() is used.
 */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word *datum = expected;

	if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
		datum = NULL;

	return chip_ready(map, chip, addr, datum);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		return 0;

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend if the read/write targets the erase-block address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command, as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending, the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when the waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup, so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why no special care
 * is taken for the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always
 * happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_amdstd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
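
/*
 * Editor's note: otp_enter()/otp_exit() above use the standard AMD unlock
 * cycle pair (0xAA to addr_unlock1, 0x55 to addr_unlock2; typically 0x555
 * and 0x2AA, as set in cfi_cmdset_0002()) followed by the SecSi entry (0x88)
 * or exit (0x90, 0x00) command.
 */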

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1 << 3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;

	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}
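
/*
 * Editor's worked example of the partial-word logic above, assuming a
 * 4-byte bankwidth: writing 3 bytes at adr 0x102 gives bus_ofs 0x100,
 * gap 2, n 2 (a read-modify-write of the first word), then a second pass
 * with adr 0x104, gap 0, n 1 for the remaining byte.
 */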

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect the extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr, NULL))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;

			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;

			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  const u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;

	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}
1644 
1645 static int __xipram do_write_oneword_once(struct map_info *map,
1646 					  struct flchip *chip,
1647 					  unsigned long adr, map_word datum,
1648 					  int mode, struct cfi_private *cfi)
1649 {
1650 	unsigned long timeo;
1651 	/*
1652 	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1653 	 * have a max write time of a few hundred usecs). However, we should
1654 	 * use the maximum timeout value given by the chip at probe time
1655 	 * instead.  Unfortunately, struct flchip does not have a field for
1656 	 * the maximum timeout, only for the typical one, which can be far
1657 	 * too short depending on the conditions.  The ' + 1' is to avoid
1658 	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
1659 	 */
1660 	unsigned long uWriteTimeout = (HZ / 1000) + 1;
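	/* e.g. with HZ == 100, HZ / 1000 == 0 and the '+ 1' yields 1 jiffy (10 ms) */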
1661 	int ret = 0;
1662 
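	/*
	 * AMD/Fujitsu single-word Program sequence: two unlock cycles
	 * (0xAA, 0x55) followed by the Program command (0xA0), then the
	 * datum written to the destination address.
	 */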
1663 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1664 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1665 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1666 	map_write(map, datum, adr);
1667 	chip->state = mode;
1668 
1669 	INVALIDATE_CACHE_UDELAY(map, chip,
1670 				adr, map_bankwidth(map),
1671 				chip->word_write_time);
1672 
1673 	/* See comment above for timeout value. */
1674 	timeo = jiffies + uWriteTimeout;
1675 	for (;;) {
1676 		if (chip->state != mode) {
1677 			/* Someone's suspended the write. Sleep */
1678 			DECLARE_WAITQUEUE(wait, current);
1679 
1680 			set_current_state(TASK_UNINTERRUPTIBLE);
1681 			add_wait_queue(&chip->wq, &wait);
1682 			mutex_unlock(&chip->mutex);
1683 			schedule();
1684 			remove_wait_queue(&chip->wq, &wait);
1685 			timeo = jiffies + (HZ / 2); /* FIXME */
1686 			mutex_lock(&chip->mutex);
1687 			continue;
1688 		}
1689 
1690 		/*
1691 		 * Check "time_after" and "!chip_good" first, so that a write which
1692 		 * completed while we were scheduled out is not misreported as a timeout.
1693 		 */
1694 		if (time_after(jiffies, timeo) &&
1695 		    !chip_good(map, chip, adr, &datum)) {
1696 			xip_enable(map, chip, adr);
1697 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1698 			xip_disable(map, chip, adr);
1699 			ret = -EIO;
1700 			break;
1701 		}
1702 
1703 		if (chip_good(map, chip, adr, &datum)) {
1704 			if (cfi_check_err_status(map, chip, adr))
1705 				ret = -EIO;
1706 			break;
1707 		}
1708 
1709 		/* Latency issues. Drop the lock, wait a while and retry */
1710 		UDELAY(map, chip, adr, 1);
1711 	}
1712 
1713 	return ret;
1714 }
1715 
1716 static int __xipram do_write_oneword_start(struct map_info *map,
1717 					   struct flchip *chip,
1718 					   unsigned long adr, int mode)
1719 {
1720 	int ret;
1721 
1722 	mutex_lock(&chip->mutex);
1723 
1724 	ret = get_chip(map, chip, adr, mode);
1725 	if (ret) {
1726 		mutex_unlock(&chip->mutex);
1727 		return ret;
1728 	}
1729 
1730 	if (mode == FL_OTP_WRITE)
1731 		otp_enter(map, chip, adr, map_bankwidth(map));
1732 
1733 	return ret;
1734 }
1735 
1736 static void __xipram do_write_oneword_done(struct map_info *map,
1737 					   struct flchip *chip,
1738 					   unsigned long adr, int mode)
1739 {
1740 	if (mode == FL_OTP_WRITE)
1741 		otp_exit(map, chip, adr, map_bankwidth(map));
1742 
1743 	chip->state = FL_READY;
1744 	DISABLE_VPP(map);
1745 	put_chip(map, chip, adr);
1746 
1747 	mutex_unlock(&chip->mutex);
1748 }
1749 
1750 static int __xipram do_write_oneword_retry(struct map_info *map,
1751 					   struct flchip *chip,
1752 					   unsigned long adr, map_word datum,
1753 					   int mode)
1754 {
1755 	struct cfi_private *cfi = map->fldrv_priv;
1756 	int ret = 0;
1757 	map_word oldd;
1758 	int retry_cnt = 0;
1759 
1760 	/*
1761 	 * Check for a NOP for the case when the datum to write is already
1762 	 * present - it saves time and works around buggy chips that corrupt
1763 	 * data at other locations when 0xff is written to a location that
1764 	 * already contains 0xff.
1765 	 */
1766 	oldd = map_read(map, adr);
1767 	if (map_word_equal(map, oldd, datum)) {
1768 		pr_debug("MTD %s(): NOP\n", __func__);
1769 		return ret;
1770 	}
1771 
1772 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1773 	ENABLE_VPP(map);
1774 	xip_disable(map, chip, adr);
1775 
1776  retry:
1777 	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1778 	if (ret) {
1779 		/* reset on all failures. */
1780 		map_write(map, CMD(0xF0), chip->start);
1781 		/* FIXME - should have reset delay before continuing */
1782 
1783 		if (++retry_cnt <= MAX_RETRIES) {
1784 			ret = 0;
1785 			goto retry;
1786 		}
1787 	}
1788 	xip_enable(map, chip, adr);
1789 
1790 	return ret;
1791 }
1792 
1793 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1794 				     unsigned long adr, map_word datum,
1795 				     int mode)
1796 {
1797 	int ret;
1798 
1799 	adr += chip->start;
1800 
1801 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1802 		 datum.x[0]);
1803 
1804 	ret = do_write_oneword_start(map, chip, adr, mode);
1805 	if (ret)
1806 		return ret;
1807 
1808 	ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1809 
1810 	do_write_oneword_done(map, chip, adr, mode);
1811 
1812 	return ret;
1813 }
1814 
1815 
1816 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1817 				  size_t *retlen, const u_char *buf)
1818 {
1819 	struct map_info *map = mtd->priv;
1820 	struct cfi_private *cfi = map->fldrv_priv;
1821 	int ret;
1822 	int chipnum;
1823 	unsigned long ofs, chipstart;
1824 	DECLARE_WAITQUEUE(wait, current);
1825 
1826 	chipnum = to >> cfi->chipshift;
1827 	ofs = to  - (chipnum << cfi->chipshift);
1828 	chipstart = cfi->chips[chipnum].start;
1829 
1830 	/* If it's not bus-aligned, read-modify-write the leading partial word */
1831 	if (ofs & (map_bankwidth(map)-1)) {
1832 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1833 		int i = ofs - bus_ofs;
1834 		int n = 0;
1835 		map_word tmp_buf;
1836 
1837  retry:
1838 		mutex_lock(&cfi->chips[chipnum].mutex);
1839 
1840 		if (cfi->chips[chipnum].state != FL_READY) {
1841 			set_current_state(TASK_UNINTERRUPTIBLE);
1842 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1843 
1844 			mutex_unlock(&cfi->chips[chipnum].mutex);
1845 
1846 			schedule();
1847 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1848 			goto retry;
1849 		}
1850 
1851 		/* Load 'tmp_buf' with old contents of flash */
1852 		tmp_buf = map_read(map, bus_ofs+chipstart);
1853 
1854 		mutex_unlock(&cfi->chips[chipnum].mutex);
1855 
1856 		/* Number of bytes to copy from buffer */
1857 		n = min_t(int, len, map_bankwidth(map)-i);
1858 
1859 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1860 
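		/*
		 * tmp_buf now holds the old flash word with the n new bytes
		 * merged in at byte offset i; the aligned word write below
		 * therefore preserves the neighbouring bytes.
		 */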
1861 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1862 				       bus_ofs, tmp_buf, FL_WRITING);
1863 		if (ret)
1864 			return ret;
1865 
1866 		ofs += n;
1867 		buf += n;
1868 		(*retlen) += n;
1869 		len -= n;
1870 
1871 		if (ofs >> cfi->chipshift) {
1872 			chipnum++;
1873 			ofs = 0;
1874 			if (chipnum == cfi->numchips)
1875 				return 0;
1876 		}
1877 	}
1878 
1879 	/* We are now aligned, write as much as possible */
1880 	while (len >= map_bankwidth(map)) {
1881 		map_word datum;
1882 
1883 		datum = map_word_load(map, buf);
1884 
1885 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1886 				       ofs, datum, FL_WRITING);
1887 		if (ret)
1888 			return ret;
1889 
1890 		ofs += map_bankwidth(map);
1891 		buf += map_bankwidth(map);
1892 		(*retlen) += map_bankwidth(map);
1893 		len -= map_bankwidth(map);
1894 
1895 		if (ofs >> cfi->chipshift) {
1896 			chipnum++;
1897 			ofs = 0;
1898 			if (chipnum == cfi->numchips)
1899 				return 0;
1900 			chipstart = cfi->chips[chipnum].start;
1901 		}
1902 	}
1903 
1904 	/* Write the trailing bytes if any */
1905 	if (len & (map_bankwidth(map)-1)) {
1906 		map_word tmp_buf;
1907 
1908  retry1:
1909 		mutex_lock(&cfi->chips[chipnum].mutex);
1910 
1911 		if (cfi->chips[chipnum].state != FL_READY) {
1912 			set_current_state(TASK_UNINTERRUPTIBLE);
1913 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1914 
1915 			mutex_unlock(&cfi->chips[chipnum].mutex);
1916 
1917 			schedule();
1918 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1919 			goto retry1;
1920 		}
1921 
1922 		tmp_buf = map_read(map, ofs + chipstart);
1923 
1924 		mutex_unlock(&cfi->chips[chipnum].mutex);
1925 
1926 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1927 
1928 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1929 				       ofs, tmp_buf, FL_WRITING);
1930 		if (ret)
1931 			return ret;
1932 
1933 		(*retlen) += len;
1934 	}
1935 
1936 	return 0;
1937 }
1938 
1939 #if !FORCE_WORD_WRITE
1940 static int __xipram do_write_buffer_wait(struct map_info *map,
1941 					 struct flchip *chip, unsigned long adr,
1942 					 map_word datum)
1943 {
1944 	unsigned long timeo;
1945 	unsigned long u_write_timeout;
1946 	int ret = 0;
1947 
1948 	/*
1949 	 * Timeout is calculated according to CFI data, if available.
1950 	 * See more comments in cfi_cmdset_0002().
1951 	 */
1952 	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1953 	timeo = jiffies + u_write_timeout;
1954 
1955 	for (;;) {
1956 		if (chip->state != FL_WRITING) {
1957 			/* Someone's suspended the write. Sleep */
1958 			DECLARE_WAITQUEUE(wait, current);
1959 
1960 			set_current_state(TASK_UNINTERRUPTIBLE);
1961 			add_wait_queue(&chip->wq, &wait);
1962 			mutex_unlock(&chip->mutex);
1963 			schedule();
1964 			remove_wait_queue(&chip->wq, &wait);
1965 			timeo = jiffies + (HZ / 2); /* FIXME */
1966 			mutex_lock(&chip->mutex);
1967 			continue;
1968 		}
1969 
1970 		/*
1971 		 * Check "time_after" and "!chip_good" first, so that a write which
1972 		 * completed while we were scheduled out is not misreported as a timeout.
1973 		 */
1974 		if (time_after(jiffies, timeo) &&
1975 		    !chip_good(map, chip, adr, &datum)) {
1976 			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
1977 			       __func__, adr);
1978 			ret = -EIO;
1979 			break;
1980 		}
1981 
1982 		if (chip_good(map, chip, adr, &datum)) {
1983 			if (cfi_check_err_status(map, chip, adr))
1984 				ret = -EIO;
1985 			break;
1986 		}
1987 
1988 		/* Latency issues. Drop the lock, wait a while and retry */
1989 		UDELAY(map, chip, adr, 1);
1990 	}
1991 
1992 	return ret;
1993 }
1994 
1995 static void __xipram do_write_buffer_reset(struct map_info *map,
1996 					   struct flchip *chip,
1997 					   struct cfi_private *cfi)
1998 {
1999 	/*
2000 	 * Recovery from write-buffer programming failures requires
2001 	 * the write-to-buffer-reset sequence.  Since the last part
2002 	 * of the sequence also works as a normal reset, we can run
2003 	 * the same commands regardless of why we are here.
2004 	 * See e.g.
2005 	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
2006 	 */
2007 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2008 			 cfi->device_type, NULL);
2009 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2010 			 cfi->device_type, NULL);
2011 	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2012 			 cfi->device_type, NULL);
2013 
2014 	/* FIXME - should have reset delay before continuing */
2015 }
2016 
2017 /*
2018  * FIXME: interleaved mode not tested, and probably not supported!
2019  */
2020 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2021 				    unsigned long adr, const u_char *buf,
2022 				    int len)
2023 {
2024 	struct cfi_private *cfi = map->fldrv_priv;
2025 	int ret;
2026 	unsigned long cmd_adr;
2027 	int z, words;
2028 	map_word datum;
2029 
2030 	adr += chip->start;
2031 	cmd_adr = adr;
2032 
2033 	mutex_lock(&chip->mutex);
2034 	ret = get_chip(map, chip, adr, FL_WRITING);
2035 	if (ret) {
2036 		mutex_unlock(&chip->mutex);
2037 		return ret;
2038 	}
2039 
2040 	datum = map_word_load(map, buf);
2041 
2042 	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2043 		 __func__, adr, datum.x[0]);
2044 
2045 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2046 	ENABLE_VPP(map);
2047 	xip_disable(map, chip, cmd_adr);
2048 
2049 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2050 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2051 
2052 	/* Write Buffer Load */
2053 	map_write(map, CMD(0x25), cmd_adr);
2054 
2055 	chip->state = FL_WRITING_TO_BUFFER;
2056 
2057 	/* Write length of data to come */
2058 	words = len / map_bankwidth(map);
2059 	map_write(map, CMD(words - 1), cmd_adr);
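	/* the device expects the count encoded as N-1, hence 'words - 1' */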
2060 	/* Write data */
2061 	z = 0;
2062 	while (z < words * map_bankwidth(map)) {
2063 		datum = map_word_load(map, buf);
2064 		map_write(map, datum, adr + z);
2065 
2066 		z += map_bankwidth(map);
2067 		buf += map_bankwidth(map);
2068 	}
2069 	z -= map_bankwidth(map);
2070 
2071 	adr += z;
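	/*
	 * 'adr' now points at the last word loaded into the write buffer;
	 * the completion poll below uses this address and that last datum.
	 */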
2072 
2073 	/* Write Buffer Program Confirm: GO GO GO */
2074 	map_write(map, CMD(0x29), cmd_adr);
2075 	chip->state = FL_WRITING;
2076 
2077 	INVALIDATE_CACHE_UDELAY(map, chip,
2078 				adr, map_bankwidth(map),
2079 				chip->word_write_time);
2080 
2081 	ret = do_write_buffer_wait(map, chip, adr, datum);
2082 	if (ret)
2083 		do_write_buffer_reset(map, chip, cfi);
2084 
2085 	xip_enable(map, chip, adr);
2086 
2087 	chip->state = FL_READY;
2088 	DISABLE_VPP(map);
2089 	put_chip(map, chip, adr);
2090 	mutex_unlock(&chip->mutex);
2091 
2092 	return ret;
2093 }
2094 
2095 
2096 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2097 				    size_t *retlen, const u_char *buf)
2098 {
2099 	struct map_info *map = mtd->priv;
2100 	struct cfi_private *cfi = map->fldrv_priv;
2101 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
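	/* e.g. one x16 chip reporting MaxBufWriteSize == 5 gives 2^5 == 32 byte chunks */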
2102 	int ret;
2103 	int chipnum;
2104 	unsigned long ofs;
2105 
2106 	chipnum = to >> cfi->chipshift;
2107 	ofs = to  - (chipnum << cfi->chipshift);
2108 
2109 	/* If it's not bus-aligned, do the first word write */
2110 	if (ofs & (map_bankwidth(map)-1)) {
2111 		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
2112 		if (local_len > len)
2113 			local_len = len;
2114 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2115 					     local_len, retlen, buf);
2116 		if (ret)
2117 			return ret;
2118 		ofs += local_len;
2119 		buf += local_len;
2120 		len -= local_len;
2121 
2122 		if (ofs >> cfi->chipshift) {
2123 			chipnum++;
2124 			ofs = 0;
2125 			if (chipnum == cfi->numchips)
2126 				return 0;
2127 		}
2128 	}
2129 
2130 	/* The write buffer is only worth using when there is more than one word to write... */
2131 	while (len >= map_bankwidth(map) * 2) {
2132 		/* We must not cross write block boundaries */
2133 		int size = wbufsize - (ofs & (wbufsize-1));
2134 
2135 		if (size > len)
2136 			size = len;
2137 		if (size % map_bankwidth(map))
2138 			size -= size % map_bankwidth(map);
2139 
2140 		ret = do_write_buffer(map, &cfi->chips[chipnum],
2141 				      ofs, buf, size);
2142 		if (ret)
2143 			return ret;
2144 
2145 		ofs += size;
2146 		buf += size;
2147 		(*retlen) += size;
2148 		len -= size;
2149 
2150 		if (ofs >> cfi->chipshift) {
2151 			chipnum++;
2152 			ofs = 0;
2153 			if (chipnum == cfi->numchips)
2154 				return 0;
2155 		}
2156 	}
2157 
2158 	if (len) {
2159 		size_t retlen_dregs = 0;
2160 
2161 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2162 					     len, &retlen_dregs, buf);
2163 
2164 		*retlen += retlen_dregs;
2165 		return ret;
2166 	}
2167 
2168 	return 0;
2169 }
2170 #endif /* !FORCE_WORD_WRITE */
2171 
2172 /*
2173  * Wait for the flash chip to become ready to write data
2174  *
2175  * This is only called during the panic_write() path. When panic_write()
2176  * is called, the kernel is in the process of a panic, and will soon be
2177  * dead. Therefore we don't take any locks, and attempt to get access
2178  * to the chip as soon as possible.
2179  */
2180 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2181 				 unsigned long adr)
2182 {
2183 	struct cfi_private *cfi = map->fldrv_priv;
2184 	int retries = 10;
2185 	int i;
2186 
2187 	/*
2188 	 * If the driver thinks the chip is idle, and no toggle bits
2189 	 * are changing, then the chip really is idle.
2190 	 */
2191 	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
2192 		return 0;
2193 
2194 	/*
2195 	 * Try several times to reset the chip and then wait for it
2196 	 * to become idle. The upper limit of a few milliseconds of
2197 	 * delay isn't a big problem: the kernel is dying anyway. It
2198 	 * is more important to save the messages.
2199 	 */
2200 	while (retries > 0) {
2201 		const unsigned long timeo = (HZ / 1000) + 1;
2202 
2203 		/* send the reset command */
2204 		map_write(map, CMD(0xF0), chip->start);
2205 
2206 		/* busy-wait for the chip to become ready; we must not sleep in panic context */
2207 		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2208 			if (chip_ready(map, chip, adr, NULL))
2209 				return 0;
2210 
2211 			udelay(1);
2212 		}
2213 
2214 		retries--;
2215 	}
2216 
2217 	/* the chip never became ready */
2218 	return -EBUSY;
2219 }
2220 
2221 /*
2222  * Write out one word of data to a single flash chip during a kernel panic
2223  *
2224  * This is only called during the panic_write() path. When panic_write()
2225  * is called, the kernel is in the process of a panic, and will soon be
2226  * dead. Therefore we don't take any locks, and attempt to get access
2227  * to the chip as soon as possible.
2228  *
2229  * The implementation of this routine is intentionally similar to
2230  * do_write_oneword(), in order to ease code maintenance.
2231  */
2232 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2233 				  unsigned long adr, map_word datum)
2234 {
2235 	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2236 	struct cfi_private *cfi = map->fldrv_priv;
2237 	int retry_cnt = 0;
2238 	map_word oldd;
2239 	int ret;
2240 	int i;
2241 
2242 	adr += chip->start;
2243 
2244 	ret = cfi_amdstd_panic_wait(map, chip, adr);
2245 	if (ret)
2246 		return ret;
2247 
2248 	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2249 			__func__, adr, datum.x[0]);
2250 
2251 	/*
2252 	 * Check for a NOP for the case when the datum to write is already
2253 	 * present - it saves time and works around buggy chips that corrupt
2254 	 * data at other locations when 0xff is written to a location that
2255 	 * already contains 0xff.
2256 	 */
2257 	oldd = map_read(map, adr);
2258 	if (map_word_equal(map, oldd, datum)) {
2259 		pr_debug("MTD %s(): NOP\n", __func__);
2260 		goto op_done;
2261 	}
2262 
2263 	ENABLE_VPP(map);
2264 
2265 retry:
2266 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2267 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2268 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2269 	map_write(map, datum, adr);
2270 
2271 	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2272 		if (chip_ready(map, chip, adr, NULL))
2273 			break;
2274 
2275 		udelay(1);
2276 	}
2277 
2278 	if (!chip_ready(map, chip, adr, &datum) ||
2279 	    cfi_check_err_status(map, chip, adr)) {
2280 		/* reset on all failures. */
2281 		map_write(map, CMD(0xF0), chip->start);
2282 		/* FIXME - should have reset delay before continuing */
2283 
2284 		if (++retry_cnt <= MAX_RETRIES)
2285 			goto retry;
2286 
2287 		ret = -EIO;
2288 	}
2289 
2290 op_done:
2291 	DISABLE_VPP(map);
2292 	return ret;
2293 }
2294 
2295 /*
2296  * Write out some data during a kernel panic
2297  *
2298  * This is used by the mtdoops driver to save the dying messages from a
2299  * kernel that has panicked.
2300  *
2301  * This routine ignores all of the locking used throughout the rest of the
2302  * driver, in order to ensure that the data gets written out no matter what
2303  * state this driver (and the flash chip itself) was in when the kernel crashed.
2304  *
2305  * The implementation of this routine is intentionally similar to
2306  * cfi_amdstd_write_words(), in order to ease code maintenance.
2307  */
2308 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2309 				  size_t *retlen, const u_char *buf)
2310 {
2311 	struct map_info *map = mtd->priv;
2312 	struct cfi_private *cfi = map->fldrv_priv;
2313 	unsigned long ofs, chipstart;
2314 	int ret;
2315 	int chipnum;
2316 
2317 	chipnum = to >> cfi->chipshift;
2318 	ofs = to - (chipnum << cfi->chipshift);
2319 	chipstart = cfi->chips[chipnum].start;
2320 
2321 	/* If it's not bus aligned, read-modify-write the leading partial word */
2322 	if (ofs & (map_bankwidth(map) - 1)) {
2323 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2324 		int i = ofs - bus_ofs;
2325 		int n = 0;
2326 		map_word tmp_buf;
2327 
2328 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2329 		if (ret)
2330 			return ret;
2331 
2332 		/* Load 'tmp_buf' with old contents of flash */
2333 		tmp_buf = map_read(map, bus_ofs + chipstart);
2334 
2335 		/* Number of bytes to copy from buffer */
2336 		n = min_t(int, len, map_bankwidth(map) - i);
2337 
2338 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2339 
2340 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2341 					     bus_ofs, tmp_buf);
2342 		if (ret)
2343 			return ret;
2344 
2345 		ofs += n;
2346 		buf += n;
2347 		(*retlen) += n;
2348 		len -= n;
2349 
2350 		if (ofs >> cfi->chipshift) {
2351 			chipnum++;
2352 			ofs = 0;
2353 			if (chipnum == cfi->numchips)
2354 				return 0;
2355 		}
2356 	}
2357 
2358 	/* We are now aligned, write as much as possible */
2359 	while (len >= map_bankwidth(map)) {
2360 		map_word datum;
2361 
2362 		datum = map_word_load(map, buf);
2363 
2364 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2365 					     ofs, datum);
2366 		if (ret)
2367 			return ret;
2368 
2369 		ofs += map_bankwidth(map);
2370 		buf += map_bankwidth(map);
2371 		(*retlen) += map_bankwidth(map);
2372 		len -= map_bankwidth(map);
2373 
2374 		if (ofs >> cfi->chipshift) {
2375 			chipnum++;
2376 			ofs = 0;
2377 			if (chipnum == cfi->numchips)
2378 				return 0;
2379 
2380 			chipstart = cfi->chips[chipnum].start;
2381 		}
2382 	}
2383 
2384 	/* Write the trailing bytes if any */
2385 	if (len & (map_bankwidth(map) - 1)) {
2386 		map_word tmp_buf;
2387 
2388 		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2389 		if (ret)
2390 			return ret;
2391 
2392 		tmp_buf = map_read(map, ofs + chipstart);
2393 
2394 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2395 
2396 		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2397 					     ofs, tmp_buf);
2398 		if (ret)
2399 			return ret;
2400 
2401 		(*retlen) += len;
2402 	}
2403 
2404 	return 0;
2405 }
2406 
2407 
2408 /*
2409  * Handle devices with one erase region that only implement
2410  * the chip erase command.
2411  */
2412 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2413 {
2414 	struct cfi_private *cfi = map->fldrv_priv;
2415 	unsigned long timeo = jiffies + HZ;
2416 	unsigned long adr;
2417 	DECLARE_WAITQUEUE(wait, current);
2418 	int ret;
2419 	int retry_cnt = 0;
2420 	map_word datum = map_word_ff(map);
2421 
2422 	adr = cfi->addr_unlock1;
2423 
2424 	mutex_lock(&chip->mutex);
2425 	ret = get_chip(map, chip, adr, FL_ERASING);
2426 	if (ret) {
2427 		mutex_unlock(&chip->mutex);
2428 		return ret;
2429 	}
2430 
2431 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2432 	       __func__, chip->start);
2433 
2434 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2435 	ENABLE_VPP(map);
2436 	xip_disable(map, chip, adr);
2437 
2438  retry:
2439 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2440 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2441 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2442 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2443 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2444 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
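	/* the six cycles above form the Chip Erase sequence: AA/55/80, AA/55/10 */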
2445 
2446 	chip->state = FL_ERASING;
2447 	chip->erase_suspended = 0;
2448 	chip->in_progress_block_addr = adr;
2449 	chip->in_progress_block_mask = ~(map->size - 1);
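	/* note: ~(size - 1) is only a valid block mask if the size is a power of two */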
2450 
2451 	INVALIDATE_CACHE_UDELAY(map, chip,
2452 				adr, map->size,
2453 				chip->erase_time*500);
2454 
2455 	timeo = jiffies + (HZ*20);
2456 
2457 	for (;;) {
2458 		if (chip->state != FL_ERASING) {
2459 			/* Someone's suspended the erase. Sleep */
2460 			set_current_state(TASK_UNINTERRUPTIBLE);
2461 			add_wait_queue(&chip->wq, &wait);
2462 			mutex_unlock(&chip->mutex);
2463 			schedule();
2464 			remove_wait_queue(&chip->wq, &wait);
2465 			mutex_lock(&chip->mutex);
2466 			continue;
2467 		}
2468 		if (chip->erase_suspended) {
2469 			/* This erase was suspended and resumed.
2470 			   Adjust the timeout */
2471 			timeo = jiffies + (HZ*20); /* FIXME */
2472 			chip->erase_suspended = 0;
2473 		}
2474 
2475 		if (chip_ready(map, chip, adr, &datum)) {
2476 			if (cfi_check_err_status(map, chip, adr))
2477 				ret = -EIO;
2478 			break;
2479 		}
2480 
2481 		if (time_after(jiffies, timeo)) {
2482 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2483 			       __func__);
2484 			ret = -EIO;
2485 			break;
2486 		}
2487 
2488 		/* Latency issues. Drop the lock, wait a while and retry */
2489 		UDELAY(map, chip, adr, 1000000/HZ);
2490 	}
2491 	/* Did we succeed? */
2492 	if (ret) {
2493 		/* reset on all failures. */
2494 		map_write(map, CMD(0xF0), chip->start);
2495 		/* FIXME - should have reset delay before continuing */
2496 
2497 		if (++retry_cnt <= MAX_RETRIES) {
2498 			ret = 0;
2499 			goto retry;
2500 		}
2501 	}
2502 
2503 	chip->state = FL_READY;
2504 	xip_enable(map, chip, adr);
2505 	DISABLE_VPP(map);
2506 	put_chip(map, chip, adr);
2507 	mutex_unlock(&chip->mutex);
2508 
2509 	return ret;
2510 }
2511 
2512 
2513 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2514 {
2515 	struct cfi_private *cfi = map->fldrv_priv;
2516 	unsigned long timeo = jiffies + HZ;
2517 	DECLARE_WAITQUEUE(wait, current);
2518 	int ret;
2519 	int retry_cnt = 0;
2520 	map_word datum = map_word_ff(map);
2521 
2522 	adr += chip->start;
2523 
2524 	mutex_lock(&chip->mutex);
2525 	ret = get_chip(map, chip, adr, FL_ERASING);
2526 	if (ret) {
2527 		mutex_unlock(&chip->mutex);
2528 		return ret;
2529 	}
2530 
2531 	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2532 		 __func__, adr);
2533 
2534 	XIP_INVAL_CACHED_RANGE(map, adr, len);
2535 	ENABLE_VPP(map);
2536 	xip_disable(map, chip, adr);
2537 
2538  retry:
2539 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2540 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2541 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2542 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2543 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2544 	map_write(map, cfi->sector_erase_cmd, adr);
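	/*
	 * Erase setup (AA/55/80, AA/55) followed by the Sector Erase
	 * command (typically 0x30) written to the sector address.
	 */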
2545 
2546 	chip->state = FL_ERASING;
2547 	chip->erase_suspended = 0;
2548 	chip->in_progress_block_addr = adr;
2549 	chip->in_progress_block_mask = ~(len - 1);
2550 
2551 	INVALIDATE_CACHE_UDELAY(map, chip,
2552 				adr, len,
2553 				chip->erase_time*500);
2554 
2555 	timeo = jiffies + (HZ*20);
2556 
2557 	for (;;) {
2558 		if (chip->state != FL_ERASING) {
2559 			/* Someone's suspended the erase. Sleep */
2560 			set_current_state(TASK_UNINTERRUPTIBLE);
2561 			add_wait_queue(&chip->wq, &wait);
2562 			mutex_unlock(&chip->mutex);
2563 			schedule();
2564 			remove_wait_queue(&chip->wq, &wait);
2565 			mutex_lock(&chip->mutex);
2566 			continue;
2567 		}
2568 		if (chip->erase_suspended) {
2569 			/* This erase was suspended and resumed.
2570 			   Adjust the timeout */
2571 			timeo = jiffies + (HZ*20); /* FIXME */
2572 			chip->erase_suspended = 0;
2573 		}
2574 
2575 		if (chip_ready(map, chip, adr, &datum)) {
2576 			if (cfi_check_err_status(map, chip, adr))
2577 				ret = -EIO;
2578 			break;
2579 		}
2580 
2581 		if (time_after(jiffies, timeo)) {
2582 			printk(KERN_WARNING "MTD %s(): software timeout\n",
2583 			       __func__);
2584 			ret = -EIO;
2585 			break;
2586 		}
2587 
2588 		/* Latency issues. Drop the lock, wait a while and retry */
2589 		UDELAY(map, chip, adr, 1000000/HZ);
2590 	}
2591 	/* Did we succeed? */
2592 	if (ret) {
2593 		/* reset on all failures. */
2594 		map_write(map, CMD(0xF0), chip->start);
2595 		/* FIXME - should have reset delay before continuing */
2596 
2597 		if (++retry_cnt <= MAX_RETRIES) {
2598 			ret = 0;
2599 			goto retry;
2600 		}
2601 	}
2602 
2603 	chip->state = FL_READY;
2604 	xip_enable(map, chip, adr);
2605 	DISABLE_VPP(map);
2606 	put_chip(map, chip, adr);
2607 	mutex_unlock(&chip->mutex);
2608 	return ret;
2609 }
2610 
2611 
2612 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2613 {
2614 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2615 				instr->len, NULL);
2616 }
2617 
2618 
2619 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2620 {
2621 	struct map_info *map = mtd->priv;
2622 	struct cfi_private *cfi = map->fldrv_priv;
2623 
2624 	if (instr->addr != 0)
2625 		return -EINVAL;
2626 
2627 	if (instr->len != mtd->size)
2628 		return -EINVAL;
2629 
2630 	return do_erase_chip(map, &cfi->chips[0]);
2631 }
2632 
2633 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2634 			 unsigned long adr, int len, void *thunk)
2635 {
2636 	struct cfi_private *cfi = map->fldrv_priv;
2637 	int ret;
2638 
2639 	mutex_lock(&chip->mutex);
2640 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2641 	if (ret)
2642 		goto out_unlock;
2643 	chip->state = FL_LOCKING;
2644 
2645 	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2646 
2647 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2648 			 cfi->device_type, NULL);
2649 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2650 			 cfi->device_type, NULL);
2651 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2652 			 cfi->device_type, NULL);
2653 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2654 			 cfi->device_type, NULL);
2655 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2656 			 cfi->device_type, NULL);
2657 	map_write(map, CMD(0x40), chip->start + adr);
2658 
2659 	chip->state = FL_READY;
2660 	put_chip(map, chip, adr + chip->start);
2661 	ret = 0;
2662 
2663 out_unlock:
2664 	mutex_unlock(&chip->mutex);
2665 	return ret;
2666 }
2667 
2668 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2669 			   unsigned long adr, int len, void *thunk)
2670 {
2671 	struct cfi_private *cfi = map->fldrv_priv;
2672 	int ret;
2673 
2674 	mutex_lock(&chip->mutex);
2675 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2676 	if (ret)
2677 		goto out_unlock;
2678 	chip->state = FL_UNLOCKING;
2679 
2680 	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
2681 
2682 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2683 			 cfi->device_type, NULL);
2684 	map_write(map, CMD(0x70), adr);
2685 
2686 	chip->state = FL_READY;
2687 	put_chip(map, chip, adr + chip->start);
2688 	ret = 0;
2689 
2690 out_unlock:
2691 	mutex_unlock(&chip->mutex);
2692 	return ret;
2693 }
2694 
2695 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2696 {
2697 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2698 }
2699 
2700 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2701 {
2702 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2703 }
2704 
2705 /*
2706  * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2707  */
2708 
2709 struct ppb_lock {
2710 	struct flchip *chip;
2711 	unsigned long adr;
2712 	int locked;
2713 };
2714 
2715 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
2716 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
2717 #define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
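/*
 * The thunk argument passed to do_ppb_xxlock() selects the operation:
 * lock one sector, unlock (which affects all sectors of the device),
 * or read back the lock status of one sector.
 */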
2718 
2719 static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2720 					struct flchip *chip,
2721 					unsigned long adr, int len, void *thunk)
2722 {
2723 	struct cfi_private *cfi = map->fldrv_priv;
2724 	unsigned long timeo;
2725 	int ret;
2726 
2727 	adr += chip->start;
2728 	mutex_lock(&chip->mutex);
2729 	ret = get_chip(map, chip, adr, FL_LOCKING);
2730 	if (ret) {
2731 		mutex_unlock(&chip->mutex);
2732 		return ret;
2733 	}
2734 
2735 	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2736 
2737 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2738 			 cfi->device_type, NULL);
2739 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2740 			 cfi->device_type, NULL);
2741 	/* PPB entry command */
2742 	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2743 			 cfi->device_type, NULL);
2744 
2745 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2746 		chip->state = FL_LOCKING;
2747 		map_write(map, CMD(0xA0), adr);
2748 		map_write(map, CMD(0x00), adr);
2749 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2750 		/*
2751 		 * Unlocking of one specific sector is not supported, so we
2752 		 * have to unlock all sectors of this device instead
2753 		 */
2754 		chip->state = FL_UNLOCKING;
2755 		map_write(map, CMD(0x80), chip->start);
2756 		map_write(map, CMD(0x30), chip->start);
2757 	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2758 		chip->state = FL_JEDEC_QUERY;
2759 		/* Return locked status: 0->locked, 1->unlocked */
2760 		ret = !cfi_read_query(map, adr);
2761 	} else
2762 		BUG();
2763 
2764 	/*
2765 	 * Wait for some time as unlocking of all sectors takes quite long
2766 	 */
2767 	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
2768 	for (;;) {
2769 		if (chip_ready(map, chip, adr, NULL))
2770 			break;
2771 
2772 		if (time_after(jiffies, timeo)) {
2773 			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2774 			ret = -EIO;
2775 			break;
2776 		}
2777 
2778 		UDELAY(map, chip, adr, 1);
2779 	}
2780 
2781 	/* Exit the PPB command set */
2782 	map_write(map, CMD(0x90), chip->start);
2783 	map_write(map, CMD(0x00), chip->start);
2784 
2785 	chip->state = FL_READY;
2786 	put_chip(map, chip, adr);
2787 	mutex_unlock(&chip->mutex);
2788 
2789 	return ret;
2790 }
2791 
2792 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2793 				       uint64_t len)
2794 {
2795 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2796 				DO_XXLOCK_ONEBLOCK_LOCK);
2797 }
2798 
2799 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2800 					 uint64_t len)
2801 {
2802 	struct mtd_erase_region_info *regions = mtd->eraseregions;
2803 	struct map_info *map = mtd->priv;
2804 	struct cfi_private *cfi = map->fldrv_priv;
2805 	struct ppb_lock *sect;
2806 	unsigned long adr;
2807 	loff_t offset;
2808 	uint64_t length;
2809 	int chipnum;
2810 	int i;
2811 	int sectors;
2812 	int ret;
2813 	int max_sectors;
2814 
2815 	/*
2816 	 * PPB unlocking always unlocks all sectors of the flash chip.
2817 	 * We need to re-lock all previously locked sectors. So let's
2818 	 * first check the locking status of all sectors and save
2819 	 * it for future use.
2820 	 */
2821 	max_sectors = 0;
2822 	for (i = 0; i < mtd->numeraseregions; i++)
2823 		max_sectors += regions[i].numblocks;
2824 
2825 	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2826 	if (!sect)
2827 		return -ENOMEM;
2828 
2829 	/*
2830 	 * This code to walk all sectors is a slightly modified version
2831 	 * of the cfi_varsize_frob() code.
2832 	 */
2833 	i = 0;
2834 	chipnum = 0;
2835 	adr = 0;
2836 	sectors = 0;
2837 	offset = 0;
2838 	length = mtd->size;
2839 
2840 	while (length) {
2841 		int size = regions[i].erasesize;
2842 
2843 		/*
2844 		 * Only test sectors that shall not be unlocked. The other
2845 		 * sectors shall be unlocked, so let's keep their locking
2846 		 * status at "unlocked" (locked=0) for the final re-locking.
2847 		 */
2848 		if ((offset < ofs) || (offset >= (ofs + len))) {
2849 			sect[sectors].chip = &cfi->chips[chipnum];
2850 			sect[sectors].adr = adr;
2851 			sect[sectors].locked = do_ppb_xxlock(
2852 				map, &cfi->chips[chipnum], adr, 0,
2853 				DO_XXLOCK_ONEBLOCK_GETLOCK);
2854 		}
2855 
2856 		adr += size;
2857 		offset += size;
2858 		length -= size;
2859 
2860 		if (offset == regions[i].offset + size * regions[i].numblocks)
2861 			i++;
2862 
2863 		if (adr >> cfi->chipshift) {
2864 			if (offset >= (ofs + len))
2865 				break;
2866 			adr = 0;
2867 			chipnum++;
2868 
2869 			if (chipnum >= cfi->numchips)
2870 				break;
2871 		}
2872 
2873 		sectors++;
2874 		if (sectors >= max_sectors) {
2875 			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2876 			       max_sectors);
2877 			kfree(sect);
2878 			return -EINVAL;
2879 		}
2880 	}
2881 
2882 	/* Now unlock the whole chip */
2883 	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2884 			       DO_XXLOCK_ONEBLOCK_UNLOCK);
2885 	if (ret) {
2886 		kfree(sect);
2887 		return ret;
2888 	}
2889 
2890 	/*
2891 	 * PPB unlocking always unlocks all sectors of the flash chip.
2892 	 * We need to re-lock all previously locked sectors.
2893 	 */
2894 	for (i = 0; i < sectors; i++) {
2895 		if (sect[i].locked)
2896 			do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2897 				      DO_XXLOCK_ONEBLOCK_LOCK);
2898 	}
2899 
2900 	kfree(sect);
2901 	return ret;
2902 }
2903 
2904 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2905 					    uint64_t len)
2906 {
2907 	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2908 				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2909 }
2910 
2911 static void cfi_amdstd_sync (struct mtd_info *mtd)
2912 {
2913 	struct map_info *map = mtd->priv;
2914 	struct cfi_private *cfi = map->fldrv_priv;
2915 	int i;
2916 	struct flchip *chip;
2917 	int ret = 0;
2918 	DECLARE_WAITQUEUE(wait, current);
2919 
2920 	for (i = 0; !ret && i < cfi->numchips; i++) {
2921 		chip = &cfi->chips[i];
2922 
2923 	retry:
2924 		mutex_lock(&chip->mutex);
2925 
2926 		switch (chip->state) {
2927 		case FL_READY:
2928 		case FL_STATUS:
2929 		case FL_CFI_QUERY:
2930 		case FL_JEDEC_QUERY:
2931 			chip->oldstate = chip->state;
2932 			chip->state = FL_SYNCING;
2933 			/* No need to wake_up() on this state change -
2934 			 * as the whole point is that nobody can do anything
2935 			 * with the chip now anyway.
2936 			 */
2937 			fallthrough;
2938 		case FL_SYNCING:
2939 			mutex_unlock(&chip->mutex);
2940 			break;
2941 
2942 		default:
2943 			/* Not an idle state */
2944 			set_current_state(TASK_UNINTERRUPTIBLE);
2945 			add_wait_queue(&chip->wq, &wait);
2946 
2947 			mutex_unlock(&chip->mutex);
2948 
2949 			schedule();
2950 
2951 			remove_wait_queue(&chip->wq, &wait);
2952 
2953 			goto retry;
2954 		}
2955 	}
2956 
2957 	/* Unlock the chips again */
2958 
2959 	for (i--; i >= 0; i--) {
2960 		chip = &cfi->chips[i];
2961 
2962 		mutex_lock(&chip->mutex);
2963 
2964 		if (chip->state == FL_SYNCING) {
2965 			chip->state = chip->oldstate;
2966 			wake_up(&chip->wq);
2967 		}
2968 		mutex_unlock(&chip->mutex);
2969 	}
2970 }
2971 
2972 
2973 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2974 {
2975 	struct map_info *map = mtd->priv;
2976 	struct cfi_private *cfi = map->fldrv_priv;
2977 	int i;
2978 	struct flchip *chip;
2979 	int ret = 0;
2980 
2981 	for (i = 0; !ret && i < cfi->numchips; i++) {
2982 		chip = &cfi->chips[i];
2983 
2984 		mutex_lock(&chip->mutex);
2985 
2986 		switch (chip->state) {
2987 		case FL_READY:
2988 		case FL_STATUS:
2989 		case FL_CFI_QUERY:
2990 		case FL_JEDEC_QUERY:
2991 			chip->oldstate = chip->state;
2992 			chip->state = FL_PM_SUSPENDED;
2993 			/* No need to wake_up() on this state change -
2994 			 * as the whole point is that nobody can do anything
2995 			 * with the chip now anyway.
2996 			 */
2997 			break;
2998 		case FL_PM_SUSPENDED:
2999 			break;
3000 
3001 		default:
3002 			ret = -EAGAIN;
3003 			break;
3004 		}
3005 		mutex_unlock(&chip->mutex);
3006 	}
3007 
3008 	/* Unlock the chips again */
3009 
3010 	if (ret) {
3011 		for (i--; i >= 0; i--) {
3012 			chip = &cfi->chips[i];
3013 
3014 			mutex_lock(&chip->mutex);
3015 
3016 			if (chip->state == FL_PM_SUSPENDED) {
3017 				chip->state = chip->oldstate;
3018 				wake_up(&chip->wq);
3019 			}
3020 			mutex_unlock(&chip->mutex);
3021 		}
3022 	}
3023 
3024 	return ret;
3025 }
3026 
3027 
3028 static void cfi_amdstd_resume(struct mtd_info *mtd)
3029 {
3030 	struct map_info *map = mtd->priv;
3031 	struct cfi_private *cfi = map->fldrv_priv;
3032 	int i;
3033 	struct flchip *chip;
3034 
3035 	for (i = 0; i < cfi->numchips; i++) {
3036 
3037 		chip = &cfi->chips[i];
3038 
3039 		mutex_lock(&chip->mutex);
3040 
3041 		if (chip->state == FL_PM_SUSPENDED) {
3042 			chip->state = FL_READY;
3043 			map_write(map, CMD(0xF0), chip->start);
3044 			wake_up(&chip->wq);
3045 		} else {
3046 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3047 		}
3048 
3049 		mutex_unlock(&chip->mutex);
3050 	}
3051 }
3052 
3053 
3054 /*
3055  * Ensure that the flash device is put back into read array mode before
3056  * unloading the driver or rebooting.  On some systems, rebooting while
3057  * the flash is in query/program/erase mode will prevent the CPU from
3058  * fetching the bootloader code, requiring a hard reset or power cycle.
3059  */
3060 static int cfi_amdstd_reset(struct mtd_info *mtd)
3061 {
3062 	struct map_info *map = mtd->priv;
3063 	struct cfi_private *cfi = map->fldrv_priv;
3064 	int i, ret;
3065 	struct flchip *chip;
3066 
3067 	for (i = 0; i < cfi->numchips; i++) {
3068 
3069 		chip = &cfi->chips[i];
3070 
3071 		mutex_lock(&chip->mutex);
3072 
3073 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3074 		if (!ret) {
3075 			map_write(map, CMD(0xF0), chip->start);
3076 			chip->state = FL_SHUTDOWN;
3077 			put_chip(map, chip, chip->start);
3078 		}
3079 
3080 		mutex_unlock(&chip->mutex);
3081 	}
3082 
3083 	return 0;
3084 }
3085 
3086 
3087 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3088 			       void *v)
3089 {
3090 	struct mtd_info *mtd;
3091 
3092 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
3093 	cfi_amdstd_reset(mtd);
3094 	return NOTIFY_DONE;
3095 }
3096 
3097 
3098 static void cfi_amdstd_destroy(struct mtd_info *mtd)
3099 {
3100 	struct map_info *map = mtd->priv;
3101 	struct cfi_private *cfi = map->fldrv_priv;
3102 
3103 	cfi_amdstd_reset(mtd);
3104 	unregister_reboot_notifier(&mtd->reboot_notifier);
3105 	kfree(cfi->cmdset_priv);
3106 	kfree(cfi->cfiq);
3107 	kfree(cfi);
3108 	kfree(mtd->eraseregions);
3109 }
3110 
3111 MODULE_LICENSE("GPL");
3112 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3113 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3114 MODULE_ALIAS("cfi_cmdset_0006");
3115 MODULE_ALIAS("cfi_cmdset_0701");
3116