xref: /linux/drivers/mtd/chips/cfi_cmdset_0001.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common Flash Interface support:
4  *   Intel Extended Vendor Command Set (ID 0x0001)
5  *
6  * (C) 2000 Red Hat.
7  *
8  *
9  * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
10  * 	- completely revamped method functions so they are aware and
11  * 	  independent of the flash geometry (buswidth, interleave, etc.)
12  * 	- scalability vs code size is completely set at compile-time
13  * 	  (see include/linux/mtd/cfi.h for selection)
14  *	- optimized write buffer method
15  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
16  *	- reworked lock/unlock/erase support for var size flash
17  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
18  * 	- auto unlock sectors on resume for auto locking flash on power up
19  */
20 
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27 
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
34 #include <linux/mtd/xip.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/cfi.h>
38 
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
44 
45 /* Intel chips */
46 #define I82802AB	0x00ad
47 #define I82802AC	0x00ac
48 #define PF38F4476	0x881c
49 #define M28F00AP30	0x8963
50 /* STMicroelectronics chips */
51 #define M50LPW080       0x002F
52 #define M50FLW080A	0x0080
53 #define M50FLW080B	0x0081
54 /* Atmel chips */
55 #define AT49BV640D	0x02de
56 #define AT49BV640DT	0x02db
57 /* Sharp chips */
58 #define LH28F640BFHE_PTTL90	0x00b0
59 #define LH28F640BFHE_PBTL90	0x00b1
60 #define LH28F640BFHE_PTTL70A	0x00b2
61 #define LH28F640BFHE_PBTL70A	0x00b3
62 
63 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
66 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
67 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
68 static void cfi_intelext_sync (struct mtd_info *);
69 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
71 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
72 				  uint64_t len);
73 #ifdef CONFIG_MTD_OTP
74 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76 static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
77 					    size_t *, const u_char *);
78 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
79 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
80 					   size_t *, struct otp_info *);
81 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
82 					   size_t *, struct otp_info *);
83 #endif
84 static int cfi_intelext_suspend (struct mtd_info *);
85 static void cfi_intelext_resume (struct mtd_info *);
86 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
87 
88 static void cfi_intelext_destroy(struct mtd_info *);
89 
90 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
91 
92 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
93 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
94 
95 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
96 		     size_t *retlen, void **virt, resource_size_t *phys);
97 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
98 
99 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
100 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
101 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
102 #include "fwh_lock.h"
103 
104 
105 
106 /*
107  *  *********** SETUP AND PROBE BITS  ***********
108  */
109 
/*
 * Chip driver registered with the MTD map layer.  .probe is NULL
 * because this command set is only reached through the generic
 * CFI/JEDEC probe, never instantiated directly; .destroy is used
 * for teardown when the map is released.
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
116 
117 /* #define DEBUG_LOCK_BITS */
118 /* #define DEBUG_CFI_FEATURES */
119 
#ifdef DEBUG_CFI_FEATURES
/*
 * Dump the parsed Intel/Sharp extended query table to the console.
 * Purely informational; only compiled in when DEBUG_CFI_FEATURES
 * is defined.
 */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	/* Bits 0-10 are decoded above; report any remaining set bits. */
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	/* Bits 2-3 sit between the lock-down bit (bit 1) and the EFA
	 * lock bit (bit 4, &16 below); the previous "i<3" bound
	 * silently skipped bit 3. */
	for (i=2; i<4; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* Voltages are packed BCD-style: volts in the high nibble,
	 * tenths in the low nibble. */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
170 
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	/* read_pri_intelext() already le*_to_cpu'd these fields; undo
	 * that so the raw table bytes can be reinterpreted with the
	 * Atmel PRI layout below. */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	/* Snapshot the Atmel-format table, then wipe everything past
	 * the first 5 bytes so stale Intel-format fields cannot be
	 * misread later. */
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	/* Translate Atmel feature bits into the Intel FeatureSupport
	 * bit positions used throughout this driver. */
	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
211 
212 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
213 {
214 	struct map_info *map = mtd->priv;
215 	struct cfi_private *cfi = map->fldrv_priv;
216 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
217 
218 	cfip->FeatureSupport |= (1 << 5);
219 	mtd->flags |= MTD_POWERUP_LOCK;
220 }
221 
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend erase on write disabled.\n");

	/* Clear bit 0 of SuspendCmdSupport ("program allowed during
	 * erase suspend") so the driver never tries it. */
	extp->SuspendCmdSupport &= ~1;
}
#endif
235 
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* Optionally mask out the "suspend program" capability entirely. */
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Nothing to do when there's no extended table or the feature
	 * bit (bit 2) isn't advertised. */
	if (!extp || !(extp->FeatureSupport & 4))
		return;

	extp->FeatureSupport &= ~4;
	printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
}
#endif
249 
250 static void fixup_st_m28w320ct(struct mtd_info *mtd)
251 {
252 	struct map_info *map = mtd->priv;
253 	struct cfi_private *cfi = map->fldrv_priv;
254 
255 	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
256 	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
257 }
258 
259 static void fixup_st_m28w320cb(struct mtd_info *mtd)
260 {
261 	struct map_info *map = mtd->priv;
262 	struct cfi_private *cfi = map->fldrv_priv;
263 
264 	/* Note this is done after the region info is endian swapped */
265 	cfi->cfiq->EraseRegionInfo[1] =
266 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
267 };
268 
269 static int is_LH28F640BF(struct cfi_private *cfi)
270 {
271 	/* Sharp LH28F640BF Family */
272 	if (cfi->mfr == CFI_MFR_SHARP && (
273 	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
274 	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
275 		return 1;
276 	return 0;
277 }
278 
static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Reset the Partition Configuration Register on LH28F640BF
	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		/* 0x60/0x04 command pair written at address 0: per the
		 * comment above, the address lines carry the PCR value,
		 * so address 0 selects PCR = 0x000. */
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set one single partition thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;	/* clear feature bit 9 */
	}
}
298 
299 static void fixup_use_point(struct mtd_info *mtd)
300 {
301 	struct map_info *map = mtd->priv;
302 	if (!mtd->_point && map_is_linear(map)) {
303 		mtd->_point   = cfi_intelext_point;
304 		mtd->_unpoint = cfi_intelext_unpoint;
305 	}
306 }
307 
308 static void fixup_use_write_buffers(struct mtd_info *mtd)
309 {
310 	struct map_info *map = mtd->priv;
311 	struct cfi_private *cfi = map->fldrv_priv;
312 	if (cfi->cfiq->BufWriteTimeoutTyp) {
313 		printk(KERN_INFO "Using buffer write method\n" );
314 		mtd->_write = cfi_intelext_write_buffers;
315 		mtd->_writev = cfi_intelext_writev;
316 	}
317 }
318 
319 /*
320  * Some chips power-up with all sectors locked by default.
321  */
322 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
323 {
324 	struct map_info *map = mtd->priv;
325 	struct cfi_private *cfi = map->fldrv_priv;
326 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
327 
328 	if (cfip->FeatureSupport&32) {
329 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
330 		mtd->flags |= MTD_POWERUP_LOCK;
331 	}
332 }
333 
/*
 * Fixups applied to chips probed in true CFI mode.  Entries are
 * matched by (manufacturer, device id); CFI_MFR_ANY/CFI_ID_ANY act
 * as wildcards and { 0, 0, NULL } terminates the table.  Entries are
 * processed in table order, so the Atmel PRI conversion runs before
 * the AT49BV640Dx lock fixup that modifies the converted table.
 */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }
};
354 
/*
 * Fixups applied to chips probed in JEDEC mode: these firmware-hub
 * style parts all get the FWH locking scheme (see fwh_lock.h).
 */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
/* Fixups applied regardless of probe mode (CFI or JEDEC). */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device ids are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};
372 
373 static void cfi_fixup_major_minor(struct cfi_private *cfi,
374 						struct cfi_pri_intelext *extp)
375 {
376 	if (cfi->mfr == CFI_MFR_INTEL &&
377 			cfi->id == PF38F4476 && extp->MinorVersion == '3')
378 		extp->MinorVersion = '1';
379 }
380 
381 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
382 {
383 	/*
384 	 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
385 	 * Erase Supend for their small Erase Blocks(0x8000)
386 	 */
387 	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
388 		return 1;
389 	return 0;
390 }
391 
/*
 * Read and sanity-check the Intel/Sharp primary extended query table
 * at @adr.  The table carries a variable-length tail (OTP fields,
 * burst read info, partition regions) whose total size is only known
 * once parsed, so we start with sizeof(*extp) and re-read the whole
 * table with a larger buffer (goto again) whenever the parsed fields
 * indicate more data follows.  Returns a heap-allocated table the
 * caller must kfree, or NULL on failure.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;	/* bytes of tail beyond sizeof(*extp) */
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	/* Only extended query versions 1.0 through 1.5 are understood. */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		if (extp->NumProtectionFields) {
			struct cfi_intelext_otpinfo *otp =
				(struct cfi_intelext_otpinfo *)&extp->extra[0];

			/* Only NumProtectionFields - 1 entries live in
			 * extra[] (the first one presumably sits in the
			 * fixed struct — matches the loop below). */
			extra_size += (extp->NumProtectionFields - 1) *
				sizeof(struct cfi_intelext_otpinfo);

			if (extp_size >= sizeof(*extp) + extra_size) {
				int i;

				/* Do some byteswapping if necessary */
				for (i = 0; i < extp->NumProtectionFields - 1; i++) {
					otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
					otp->FactGroups = le16_to_cpu(otp->FactGroups);
					otp->UserGroups = le16_to_cpu(otp->UserGroups);
					otp++;
				}
			}
		}
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		/* Two fixed bytes; the second holds the length of the
		 * variable burst-read data that follows it. */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		/* Each region: a fixed header followed by one blockinfo
		 * per additional block type (the first blockinfo is part
		 * of the header, hence the "- 1"). */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			/* Buffer was too small for the tail just sized:
			 * retry with the larger size, bailing out at a
			 * 4KiB sanity limit. */
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
497 
/*
 * Entry point for the Intel/Sharp extended command set (0x0001).
 * Allocates and populates an mtd_info for the chip(s) behind @map,
 * reads the extended query table (in true CFI mode), applies fixups,
 * derives per-chip timings, and hands off to cfi_intelext_setup().
 * @primary selects the primary (P_ADR) vs alternate (A_ADR) extended
 * query table address.  Returns NULL on failure.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc_obj(*mtd);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	/* Buffer size scales with interleave: 2^MaxBufWriteSize per chip. */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/*
	 * Derive per-chip timing from the CFI query fields.  Typical
	 * times are encoded as powers of two (1 << Typ); "max" times
	 * shift further by the Max exponent.  Erase times use an extra
	 * factor of 1000 relative to write times.  Hard fallbacks
	 * (50000, 2000000, and *8 for max) apply when a field is zero.
	 */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
/* Command set ids 0x0003 and 0x0200 are handled by the same code,
 * so alias their entry points to cfi_cmdset_0001(). */
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
616 
/*
 * Build the erase-region geometry: one mtd_erase_region_info (plus a
 * per-block lock bitmap) for each (chip, CFI erase region) pair, then
 * run the partition fixup and register the reboot notifier.  On any
 * failure everything allocated so far — including @mtd itself and
 * cfi->cmdset_priv — is freed and NULL is returned.
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kzalloc_objs(struct mtd_erase_region_info,
					 mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo encoding: high 16 bits = block size in
		 * 256-byte units, low 16 bits = number of blocks - 1. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize ends up as the largest region's size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			/* The region layout repeats per chip, shifted by
			 * j*devsize; regions are indexed chip-major. */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			/* One lock bit per block, rounded up to bytes. */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	/* Sanity check: the regions must exactly tile one chip. */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	/* Free partially-built lockmaps before the region array itself;
	 * kfree(NULL) entries are harmless. */
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
695 
/*
 * Split each physical chip into one virtual flchip per hardware
 * partition when the chip advertises simultaneous operations
 * (FeatureSupport bit 9, extended query >= 1.3).  On success *pcfi
 * (and map->fldrv_priv) is replaced by a freshly allocated
 * cfi_private and the old one is freed.  Returns 0 or -errno.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		int offs = 0;
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numregions, numparts, partshift, numvirtchips, i, j;

		/* Walk extp->extra[] with the same layout rules used by
		 * read_pri_intelext() to reach the partition regions. */

		/* Protection Register info */
		if (extp->NumProtectionFields)
			offs = (extp->NumProtectionFields - 1) *
			       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		/* 2 fixed bytes plus the variable part whose length is
		 * stored at extra[offs+1]. */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			/* writesize may now exceed 1, so drop the
			 * bit-writeable capability flag. */
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		/* A partition smaller than the erase block size cannot work. */
		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc_flex(*newcfi, chips, numvirtchips, GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		/* One flchip_shared per physical chip, referenced by all
		 * of its partition flchips through chip->priv. */
		shared = kmalloc_objs(struct flchip_shared, cfi->numchips,
				      GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
820 
821 /*
822  *  *********** CHIP ACCESS FUNCTIONS ***********
823  */
/*
 * chip_ready() - wait until @chip can accept operation @mode at @adr.
 *
 * Must be called with chip->mutex held.  Returns 0 when the chip is ready
 * (possibly after suspending an in-progress erase), -EAGAIN when the mutex
 * was dropped and re-taken so the caller must re-examine the state and
 * retry, or -EIO on a fatal condition.
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* SR.7 = WSM ready; SR.0 = partition write status (multi-partition chips) */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		fallthrough;
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Erase suspend is only attempted when the chip advertises it
		   (FeatureSupport bit 1) and the requested mode allows it. */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend if the access targets the erasing block */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* do not suspend small EBs, buggy Micron Chips */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		/* Poll (for up to ~1s) until the WSM reports ready. */
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
			        break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		/* Sleep until whoever owns the chip wakes us, then retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
940 
/*
 * get_chip() - acquire @chip for operation @mode at @adr.
 *
 * Called with chip->mutex held.  On multi-partition devices (chip->priv
 * points to a struct flchip_shared) write/erase operations are global to
 * the real chip, so ownership of shared->writing (and shared->erasing for
 * erases) is arbitrated here, possibly suspending the operation running
 * in another partition.  Returns 0 once the chip is ours, or a negative
 * error from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	/* Ordinary (or now-owned) case: wait for the chip itself. */
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
1036 
/*
 * put_chip() - release @chip after an operation and resume whatever was
 * suspended on our behalf in get_chip()/chip_ready().
 *
 * Called with chip->mutex held.  On multi-partition devices this hands
 * write/erase ownership back to the partition it was borrowed from; a
 * suspended erase is resumed with 0xd0 followed by 0x70 (read status).
 * Always wakes up waiters on chip->wq.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* recurse to resume the loaner's suspended op */
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
1107 
1108 #ifdef CONFIG_MTD_XIP
1109 
1110 /*
1111  * No interrupt what so ever can be serviced while the flash isn't in array
1112  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1113  * enclosing any code path where the flash is known not to be in array mode.
1114  * And within a XIP disabled code path, only functions marked with __xipram
1115  * may be called and nothing else (it's a good thing to inspect generated
1116  * assembly to make sure inline functions were actually inlined and that gcc
1117  * didn't emit calls to its own support functions). Also configuring MTD CFI
1118  * support to a single buswidth and a single interleave is also recommended.
1119  */
1120 
/*
 * Enter a non-XIP section: touch the flash first so the MMU mapping is
 * resident, then mask interrupts so nothing tries to execute from the
 * flash while it is out of array mode.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
1128 
/*
 * Leave a non-XIP section: put the flash back in read-array mode if
 * needed, prime the instruction prefetch and unmask interrupts.
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);	/* back to read-array mode */
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
1141 
1142 /*
1143  * When a delay is required for the flash operation to complete, the
1144  * xip_wait_for_operation() function is polling for both the given timeout
1145  * and pending (but still masked) hardware interrupts.  Whenever there is an
1146  * interrupt pending then the flash erase or write operation is suspended,
1147  * array mode restored and interrupts unmasked.  Task scheduling might also
1148  * happen at that point.  The CPU eventually returns from the interrupt or
1149  * the call to schedule() and the suspended flash operation is resumed for
1150  * the remaining of the delay period.
1151  *
1152  * Warning: this function _will_ fool interrupt latency tracing tools.
1153  */
1154 
/*
 * Busy-wait (irqs masked) for the current program/erase at @adr to
 * complete, suspending it whenever an interrupt is pending so XIP code
 * can run.  Returns 0 on completion, -ETIME on timeout, -EIO if the
 * chip refuses to suspend.  Called with chip->mutex held.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;	/* default cap: 500ms */
	done = 0;

	do {
		cpu_relax();
		/* Suspend only if the chip advertises it (FeatureSupport
		 * bit 1 for erase, bit 2 for program). */
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			/* suspend, then switch to read-status mode */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				/* SR.6 clear means the erase had completed */
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				/* SR.2 clear means the program had completed */
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
1265 
1266 /*
1267  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1268  * the flash is actively programming or erasing since we have to poll for
1269  * the operation to complete anyway.  We can't do that in a generic way with
1270  * a XIP setup so do it before the actual flash operation in this case
1271  * and stub it out from INVAL_CACHE_AND_WAIT.
1272  */
1273 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1274 	INVALIDATE_CACHED_RANGE(map, from, size)
1275 
1276 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1277 	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1278 
1279 #else
1280 
1281 #define xip_disable(map, chip, adr)
1282 #define xip_enable(map, chip, adr)
1283 #define XIP_INVAL_CACHED_RANGE(x...)
1284 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1285 
/*
 * Wait for the program/erase at @cmd_adr to complete while invalidating
 * the cached range [@inval_adr, @inval_adr + @inval_len).  @chip_op_time
 * is the typical operation time in usecs (half of it becomes the sleep
 * quantum); @chip_op_time_max is the timeout, 0 meaning a 500ms default.
 * The timeout is restarted if the operation was suspended meanwhile.
 * Called with chip->mutex held; returns 0 on completion or -ETIME on
 * timeout, leaving the chip in read-status mode either way.
 */
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	/* Drop the lock during cache invalidation: it may be slow. */
	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING)  {
			/* Erase suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING)  {
			/* Write suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}
1363 
1364 #endif
1365 
1366 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1367 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1368 
1369 
1370 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1371 {
1372 	unsigned long cmd_addr;
1373 	struct cfi_private *cfi = map->fldrv_priv;
1374 	int ret;
1375 
1376 	adr += chip->start;
1377 
1378 	/* Ensure cmd read/writes are aligned. */
1379 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1380 
1381 	mutex_lock(&chip->mutex);
1382 
1383 	ret = get_chip(map, chip, cmd_addr, FL_POINT);
1384 
1385 	if (!ret) {
1386 		if (chip->state != FL_POINT && chip->state != FL_READY)
1387 			map_write(map, CMD(0xff), cmd_addr);
1388 
1389 		chip->state = FL_POINT;
1390 		chip->ref_point_counter++;
1391 	}
1392 	mutex_unlock(&chip->mutex);
1393 
1394 	return ret;
1395 }
1396 
/*
 * Set up a direct-mapped window at @from for @len bytes.  Puts each
 * covered chip into FL_POINT state and returns the virtual (and
 * optionally physical) address of the mapping.  Stops early — still
 * returning 0, with *retlen reflecting the span actually mapped — when
 * the chips are not virtually contiguous or a chip cannot be pointed.
 */
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		/* Clamp this chunk to the end of the current chip. */
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}
1449 
1450 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1451 {
1452 	struct map_info *map = mtd->priv;
1453 	struct cfi_private *cfi = map->fldrv_priv;
1454 	unsigned long ofs;
1455 	int chipnum, err = 0;
1456 
1457 	/* Now unlock the chip(s) POINT state */
1458 
1459 	/* ofs: offset within the first chip that the first read should start */
1460 	chipnum = (from >> cfi->chipshift);
1461 	ofs = from - (chipnum <<  cfi->chipshift);
1462 
1463 	while (len && !err) {
1464 		unsigned long thislen;
1465 		struct flchip *chip;
1466 
1467 		chip = &cfi->chips[chipnum];
1468 		if (chipnum >= cfi->numchips)
1469 			break;
1470 
1471 		if ((len + ofs -1) >> cfi->chipshift)
1472 			thislen = (1<<cfi->chipshift) - ofs;
1473 		else
1474 			thislen = len;
1475 
1476 		mutex_lock(&chip->mutex);
1477 		if (chip->state == FL_POINT) {
1478 			chip->ref_point_counter--;
1479 			if(chip->ref_point_counter == 0)
1480 				chip->state = FL_READY;
1481 		} else {
1482 			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1483 			err = -EINVAL;
1484 		}
1485 
1486 		put_chip(map, chip, chip->start);
1487 		mutex_unlock(&chip->mutex);
1488 
1489 		len -= thislen;
1490 		ofs = 0;
1491 		chipnum++;
1492 	}
1493 
1494 	return err;
1495 }
1496 
1497 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1498 {
1499 	unsigned long cmd_addr;
1500 	struct cfi_private *cfi = map->fldrv_priv;
1501 	int ret;
1502 
1503 	adr += chip->start;
1504 
1505 	/* Ensure cmd read/writes are aligned. */
1506 	cmd_addr = adr & ~(map_bankwidth(map)-1);
1507 
1508 	mutex_lock(&chip->mutex);
1509 	ret = get_chip(map, chip, cmd_addr, FL_READY);
1510 	if (ret) {
1511 		mutex_unlock(&chip->mutex);
1512 		return ret;
1513 	}
1514 
1515 	if (chip->state != FL_POINT && chip->state != FL_READY) {
1516 		map_write(map, CMD(0xff), cmd_addr);
1517 
1518 		chip->state = FL_READY;
1519 	}
1520 
1521 	map_copy_from(map, buf, adr, len);
1522 
1523 	put_chip(map, chip, cmd_addr);
1524 
1525 	mutex_unlock(&chip->mutex);
1526 	return 0;
1527 }
1528 
1529 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1530 {
1531 	struct map_info *map = mtd->priv;
1532 	struct cfi_private *cfi = map->fldrv_priv;
1533 	unsigned long ofs;
1534 	int chipnum;
1535 	int ret = 0;
1536 
1537 	/* ofs: offset within the first chip that the first read should start */
1538 	chipnum = (from >> cfi->chipshift);
1539 	ofs = from - (chipnum <<  cfi->chipshift);
1540 
1541 	while (len) {
1542 		unsigned long thislen;
1543 
1544 		if (chipnum >= cfi->numchips)
1545 			break;
1546 
1547 		if ((len + ofs -1) >> cfi->chipshift)
1548 			thislen = (1<<cfi->chipshift) - ofs;
1549 		else
1550 			thislen = len;
1551 
1552 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1553 		if (ret)
1554 			break;
1555 
1556 		*retlen += thislen;
1557 		len -= thislen;
1558 		buf += thislen;
1559 
1560 		ofs = 0;
1561 		chipnum++;
1562 	}
1563 	return ret;
1564 }
1565 
/*
 * Program one bus-width word of @datum at @adr (relative to @chip->start).
 * @mode selects the command set: FL_WRITING issues 0x40 (0x41 on Intel
 * Performance chips), FL_OTP_WRITE issues 0xc0 for the protection
 * register.  Returns 0 on success, -EROFS when the status register
 * reports a locked block, -EIO on a VPP error, -ETIME on timeout, or
 * -EINVAL for an unknown mode/status.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	/* program setup command followed by the data word */
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
1639 
1640 
/*
 * MTD write path using single-word programming: split the request into
 * an unaligned head, bus-width-aligned middle words and an unaligned
 * tail, padding partial words with 0xff (programming a 1 bit leaves the
 * cell unchanged).  Returns 0 or the first error from do_write_oneword().
 */
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		/* pad the untouched bytes of the word with 0xff */
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		/* advance to the next chip if we crossed its boundary */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* aligned middle: one full word at a time */
	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* trailing partial word, again padded with 0xff */
	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
1718 
1719 
/*
 * Write up to one write-buffer's worth of data (@len bytes gathered
 * from the kvec array at *pvec/*pvec_seek, which are advanced past the
 * consumed data) at @adr.  Issues the Write-to-Buffer command (0xe8, or
 * 0xe9 on Intel Performance chips), fills the buffer word by word with
 * 0xff padding at the edges, confirms with 0xd0 and waits for
 * completion.  Returns 0, -EROFS for a locked block, -EIO for a VPP
 * error, -ETIME on timeout, or -EINVAL on other status errors.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	/* commands go to the start of the write buffer's block */
	cmd_adr = adr & ~(wbufsize-1);

	/* Sharp LH28F640BF chips need the first address for the
	 * Page Buffer Program command. See Table 5 of
	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
	if (is_LH28F640BF(cfi))
		cmd_adr = adr;

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		/* 0x50 = clear status register, 0x70 = read status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write; word_gap is the number
	   of pad bytes before @adr within its bus word. */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;	/* the word count command is 0-based */
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* take at most one bus word, clipped to the current kvec
		   and the remaining length */
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			/* a full (or final partial) word is assembled */
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
1885 
1886 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1887 				unsigned long count, loff_t to, size_t *retlen)
1888 {
1889 	struct map_info *map = mtd->priv;
1890 	struct cfi_private *cfi = map->fldrv_priv;
1891 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1892 	int ret;
1893 	int chipnum;
1894 	unsigned long ofs, vec_seek, i;
1895 	size_t len = 0;
1896 
1897 	for (i = 0; i < count; i++)
1898 		len += vecs[i].iov_len;
1899 
1900 	if (!len)
1901 		return 0;
1902 
1903 	chipnum = to >> cfi->chipshift;
1904 	ofs = to - (chipnum << cfi->chipshift);
1905 	vec_seek = 0;
1906 
1907 	do {
1908 		/* We must not cross write block boundaries */
1909 		int size = wbufsize - (ofs & (wbufsize-1));
1910 
1911 		if (size > len)
1912 			size = len;
1913 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1914 				      ofs, &vecs, &vec_seek, size);
1915 		if (ret)
1916 			return ret;
1917 
1918 		ofs += size;
1919 		(*retlen) += size;
1920 		len -= size;
1921 
1922 		if (ofs >> cfi->chipshift) {
1923 			chipnum ++;
1924 			ofs = 0;
1925 			if (chipnum == cfi->numchips)
1926 				return 0;
1927 		}
1928 
1929 		/* Be nice and reschedule with the chip in a usable state for other
1930 		   processes. */
1931 		cond_resched();
1932 
1933 	} while (len);
1934 
1935 	return 0;
1936 }
1937 
1938 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1939 				       size_t len, size_t *retlen, const u_char *buf)
1940 {
1941 	struct kvec vec;
1942 
1943 	vec.iov_base = (void *) buf;
1944 	vec.iov_len = len;
1945 
1946 	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1947 }
1948 
/*
 * cfi_varsize_frob() callback: erase one block of @len bytes at @adr
 * (chip-relative on entry).  Issues the Intel block-erase sequence
 * (0x20/0xD0), waits for completion and decodes the status register.
 * Transient erase failures (SR bit 5) are retried up to three times.
 * @thunk is unused.  Returns 0 or a negative errno.
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	/* Remembered so a suspended erase can later be resumed on the
	 * correct block (see get_chip()/put_chip() elsewhere in the file). */
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		/* Timed out: leave the chip in read-status mode. */
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	/* 0x3a = SR.5 (erase), SR.4 (program), SR.3 (VPP), SR.1 (protect) */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			/* SR.5 and SR.4 both set: invalid command sequence */
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			/* Plain erase failure: release the chip and retry. */
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2038 
/* mtd->_erase(): apply do_erase_oneblock() to every block in the range. */
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
}
2044 
/*
 * mtd->_sync(): wait for every chip to become idle and park it in
 * FL_SYNCING so no new operation can start, then release all the
 * chips that were successfully claimed.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* Phase 1: claim each chip in turn; stop at the first failure. */
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	/* Phase 2: walk back over the chips we claimed (i was left one past
	 * the last chip visited) and restore their previous state. */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
2085 
/*
 * cfi_varsize_frob() callback: query the lock status of the block at
 * @adr.  Enters read-identifier mode (0x90) and reads the per-block
 * status location two device words past the block base.  Callers
 * treat a non-zero return as "locked".  @len and @thunk are unused.
 */
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}
2102 
#ifdef DEBUG_LOCK_BITS
/* Debug-only frob callback: dump each block's raw lock status register. */
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
2114 
2115 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2116 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2117 
/*
 * cfi_varsize_frob() callback: set or clear the lock bit of the block
 * at @adr, selected by @thunk (DO_XXLOCK_ONEBLOCK_LOCK / _UNLOCK).
 * Issues 0x60 followed by 0x01 (lock) or 0xD0 (unlock) and waits for
 * completion.  @len is unused.  Returns 0 or a negative errno.
 */
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	/*
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * lets use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	/* Extended-table FeatureSupport bit 5 = instant block locking. */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		/* Timed out: leave the chip in read-status mode. */
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2177 
2178 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2179 {
2180 	int ret;
2181 
2182 #ifdef DEBUG_LOCK_BITS
2183 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2184 	       __func__, ofs, len);
2185 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2186 		ofs, len, NULL);
2187 #endif
2188 
2189 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2190 		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2191 
2192 #ifdef DEBUG_LOCK_BITS
2193 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2194 	       __func__, ret);
2195 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2196 		ofs, len, NULL);
2197 #endif
2198 
2199 	return ret;
2200 }
2201 
2202 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2203 {
2204 	int ret;
2205 
2206 #ifdef DEBUG_LOCK_BITS
2207 	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2208 	       __func__, ofs, len);
2209 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2210 		ofs, len, NULL);
2211 #endif
2212 
2213 	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2214 					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2215 
2216 #ifdef DEBUG_LOCK_BITS
2217 	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2218 	       __func__, ret);
2219 	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2220 		ofs, len, NULL);
2221 #endif
2222 
2223 	return ret;
2224 }
2225 
2226 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2227 				  uint64_t len)
2228 {
2229 	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2230 				ofs, len, NULL) ? 1 : 0;
2231 }
2232 
2233 #ifdef CONFIG_MTD_OTP
2234 
2235 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2236 			u_long data_offset, u_char *buf, u_int size,
2237 			u_long prot_offset, u_int groupno, u_int groupsize);
2238 
/*
 * otp_op_t: read @size bytes of OTP protection-register space at
 * chip offset @offset into @buf.  The chip is switched to
 * read-identifier mode (0x90) so the map window exposes the
 * protection registers instead of the array; the cached range is
 * invalidated both before (stale array data) and after (don't leave
 * OTP bytes in the cache) the copy.  @prot, @grpno and @grpsz are
 * unused here - they exist to match the otp_op_t signature.
 */
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}
2271 
2272 static int
2273 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2274 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2275 {
2276 	int ret;
2277 
2278 	while (size) {
2279 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2280 		int gap = offset - bus_ofs;
2281 		int n = min_t(int, size, map_bankwidth(map)-gap);
2282 		map_word datum = map_word_ff(map);
2283 
2284 		datum = map_word_load_partial(map, datum, buf, gap, n);
2285 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2286 		if (ret)
2287 			return ret;
2288 
2289 		offset += n;
2290 		buf += n;
2291 		size -= n;
2292 	}
2293 
2294 	return 0;
2295 }
2296 
2297 static int
2298 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2299 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2300 {
2301 	struct cfi_private *cfi = map->fldrv_priv;
2302 	map_word datum;
2303 
2304 	/* make sure area matches group boundaries */
2305 	if (size != grpsz)
2306 		return -EXDEV;
2307 
2308 	datum = map_word_ff(map);
2309 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2310 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2311 }
2312 
2313 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2314 				 size_t *retlen, u_char *buf,
2315 				 otp_op_t action, int user_regs)
2316 {
2317 	struct map_info *map = mtd->priv;
2318 	struct cfi_private *cfi = map->fldrv_priv;
2319 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2320 	struct flchip *chip;
2321 	struct cfi_intelext_otpinfo *otp;
2322 	u_long devsize, reg_prot_offset, data_offset;
2323 	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2324 	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2325 	int ret;
2326 
2327 	*retlen = 0;
2328 
2329 	/* Check that we actually have some OTP registers */
2330 	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2331 		return -ENODATA;
2332 
2333 	/* we need real chips here not virtual ones */
2334 	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2335 	chip_step = devsize >> cfi->chipshift;
2336 	chip_num = 0;
2337 
2338 	/* Some chips have OTP located in the _top_ partition only.
2339 	   For example: Intel 28F256L18T (T means top-parameter device) */
2340 	if (cfi->mfr == CFI_MFR_INTEL) {
2341 		switch (cfi->id) {
2342 		case 0x880b:
2343 		case 0x880c:
2344 		case 0x880d:
2345 			chip_num = chip_step - 1;
2346 		}
2347 	}
2348 
2349 	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2350 		chip = &cfi->chips[chip_num];
2351 		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2352 
2353 		/* first OTP region */
2354 		field = 0;
2355 		reg_prot_offset = extp->ProtRegAddr;
2356 		reg_fact_groups = 1;
2357 		reg_fact_size = 1 << extp->FactProtRegSize;
2358 		reg_user_groups = 1;
2359 		reg_user_size = 1 << extp->UserProtRegSize;
2360 
2361 		while (len > 0) {
2362 			/* flash geometry fixup */
2363 			data_offset = reg_prot_offset + 1;
2364 			data_offset *= cfi->interleave * cfi->device_type;
2365 			reg_prot_offset *= cfi->interleave * cfi->device_type;
2366 			reg_fact_size *= cfi->interleave;
2367 			reg_user_size *= cfi->interleave;
2368 
2369 			if (user_regs) {
2370 				groups = reg_user_groups;
2371 				groupsize = reg_user_size;
2372 				/* skip over factory reg area */
2373 				groupno = reg_fact_groups;
2374 				data_offset += reg_fact_groups * reg_fact_size;
2375 			} else {
2376 				groups = reg_fact_groups;
2377 				groupsize = reg_fact_size;
2378 				groupno = 0;
2379 			}
2380 
2381 			while (len > 0 && groups > 0) {
2382 				if (!action) {
2383 					/*
2384 					 * Special case: if action is NULL
2385 					 * we fill buf with otp_info records.
2386 					 */
2387 					struct otp_info *otpinfo;
2388 					map_word lockword;
2389 					len -= sizeof(struct otp_info);
2390 					if (len <= 0)
2391 						return -ENOSPC;
2392 					ret = do_otp_read(map, chip,
2393 							  reg_prot_offset,
2394 							  (u_char *)&lockword,
2395 							  map_bankwidth(map),
2396 							  0, 0,  0);
2397 					if (ret)
2398 						return ret;
2399 					otpinfo = (struct otp_info *)buf;
2400 					otpinfo->start = from;
2401 					otpinfo->length = groupsize;
2402 					otpinfo->locked =
2403 					   !map_word_bitsset(map, lockword,
2404 							     CMD(1 << groupno));
2405 					from += groupsize;
2406 					buf += sizeof(*otpinfo);
2407 					*retlen += sizeof(*otpinfo);
2408 				} else if (from >= groupsize) {
2409 					from -= groupsize;
2410 					data_offset += groupsize;
2411 				} else {
2412 					int size = groupsize;
2413 					data_offset += from;
2414 					size -= from;
2415 					from = 0;
2416 					if (size > len)
2417 						size = len;
2418 					ret = action(map, chip, data_offset,
2419 						     buf, size, reg_prot_offset,
2420 						     groupno, groupsize);
2421 					if (ret < 0)
2422 						return ret;
2423 					buf += size;
2424 					len -= size;
2425 					*retlen += size;
2426 					data_offset += size;
2427 				}
2428 				groupno++;
2429 				groups--;
2430 			}
2431 
2432 			/* next OTP region */
2433 			if (++field == extp->NumProtectionFields)
2434 				break;
2435 			reg_prot_offset = otp->ProtRegAddr;
2436 			reg_fact_groups = otp->FactGroups;
2437 			reg_fact_size = 1 << otp->FactProtRegSize;
2438 			reg_user_groups = otp->UserGroups;
2439 			reg_user_size = 1 << otp->UserProtRegSize;
2440 			otp++;
2441 		}
2442 	}
2443 
2444 	return 0;
2445 }
2446 
/* mtd->_read_fact_prot_reg(): read from the factory OTP registers. */
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}
2454 
/* mtd->_read_user_prot_reg(): read from the user OTP registers. */
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}
2462 
/* mtd->_write_user_prot_reg(): program the user OTP registers. */
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    const u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     (u_char *)buf, do_otp_write, 1);
}
2470 
/* mtd->_lock_user_prot_reg(): permanently lock user OTP groups. */
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}
2478 
/* mtd->_get_fact_prot_info(): enumerate factory OTP groups into @buf. */
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)

{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}
2486 
/* mtd->_get_user_prot_info(): enumerate user OTP groups into @buf. */
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}
2493 
2494 #endif
2495 
2496 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2497 {
2498 	struct mtd_erase_region_info *region;
2499 	int block, status, i;
2500 	unsigned long adr;
2501 	size_t len;
2502 
2503 	for (i = 0; i < mtd->numeraseregions; i++) {
2504 		region = &mtd->eraseregions[i];
2505 		if (!region->lockmap)
2506 			continue;
2507 
2508 		for (block = 0; block < region->numblocks; block++){
2509 			len = region->erasesize;
2510 			adr = region->offset + block * len;
2511 
2512 			status = cfi_varsize_frob(mtd,
2513 					do_getlockstatus_oneblock, adr, len, NULL);
2514 			if (status)
2515 				set_bit(block, region->lockmap);
2516 			else
2517 				clear_bit(block, region->lockmap);
2518 		}
2519 	}
2520 }
2521 
/*
 * mtd->_suspend(): park every idle chip in FL_PM_SUSPENDED after a
 * read-array reset (0xFF), so the part is in a known state across the
 * power transition.  Fails with -EAGAIN if any chip has a pending or
 * active operation; chips already suspended are then rolled back.  On
 * power-up-locking parts the per-block lock bits are saved first so
 * cfi_intelext_resume() can restore them.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* FeatureSupport bit 5: chip locks all blocks at power-up. */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* oldstate != FL_READY means a suspended erase/write
			 * is still outstanding behind the current state. */
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	/* On failure, roll back every chip we managed to suspend. */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
2596 
2597 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2598 {
2599 	struct mtd_erase_region_info *region;
2600 	int block, i;
2601 	unsigned long adr;
2602 	size_t len;
2603 
2604 	for (i = 0; i < mtd->numeraseregions; i++) {
2605 		region = &mtd->eraseregions[i];
2606 		if (!region->lockmap)
2607 			continue;
2608 
2609 		for_each_clear_bit(block, region->lockmap, region->numblocks) {
2610 			len = region->erasesize;
2611 			adr = region->offset + block * len;
2612 			cfi_intelext_unlock(mtd, adr, len);
2613 		}
2614 	}
2615 }
2616 
/*
 * mtd->_resume(): bring every suspended chip back to FL_READY.  The
 * part may have been power cycled while suspended, so it is
 * unconditionally reset to read-array mode (0xFF) and - for Sharp
 * LH28F640BF parts - the partition configuration register is
 * reprogrammed.  On power-up-locking parts, blocks that were unlocked
 * before suspend are re-unlocked afterwards.
 */
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	/* FeatureSupport bit 5: chip locks all blocks at power-up. */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
2647 
2648 static int cfi_intelext_reset(struct mtd_info *mtd)
2649 {
2650 	struct map_info *map = mtd->priv;
2651 	struct cfi_private *cfi = map->fldrv_priv;
2652 	int i, ret;
2653 
2654 	for (i=0; i < cfi->numchips; i++) {
2655 		struct flchip *chip = &cfi->chips[i];
2656 
2657 		/* force the completion of any ongoing operation
2658 		   and switch to array mode so any bootloader in
2659 		   flash is accessible for soft reboot. */
2660 		mutex_lock(&chip->mutex);
2661 		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2662 		if (!ret) {
2663 			map_write(map, CMD(0xff), chip->start);
2664 			chip->state = FL_SHUTDOWN;
2665 			put_chip(map, chip, chip->start);
2666 		}
2667 		mutex_unlock(&chip->mutex);
2668 	}
2669 
2670 	return 0;
2671 }
2672 
2673 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2674 			       void *v)
2675 {
2676 	struct mtd_info *mtd;
2677 
2678 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2679 	cfi_intelext_reset(mtd);
2680 	return NOTIFY_DONE;
2681 }
2682 
2683 static void cfi_intelext_destroy(struct mtd_info *mtd)
2684 {
2685 	struct map_info *map = mtd->priv;
2686 	struct cfi_private *cfi = map->fldrv_priv;
2687 	struct mtd_erase_region_info *region;
2688 	int i;
2689 	cfi_intelext_reset(mtd);
2690 	unregister_reboot_notifier(&mtd->reboot_notifier);
2691 	kfree(cfi->cmdset_priv);
2692 	kfree(cfi->cfiq);
2693 	kfree(cfi->chips[0].priv);
2694 	kfree(cfi);
2695 	for (i = 0; i < mtd->numeraseregions; i++) {
2696 		region = &mtd->eraseregions[i];
2697 		kfree(region->lockmap);
2698 	}
2699 	kfree(mtd->eraseregions);
2700 }
2701 
2702 MODULE_LICENSE("GPL");
2703 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2704 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2705 MODULE_ALIAS("cfi_cmdset_0003");
2706 MODULE_ALIAS("cfi_cmdset_0200");
2707