xref: /linux/drivers/mtd/chips/cfi_cmdset_0001.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common Flash Interface support:
4  *   Intel Extended Vendor Command Set (ID 0x0001)
5  *
6  * (C) 2000 Red Hat.
7  *
8  *
9  * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
10  * 	- completely revamped method functions so they are aware and
11  * 	  independent of the flash geometry (buswidth, interleave, etc.)
12  * 	- scalability vs code size is completely set at compile-time
13  * 	  (see include/linux/mtd/cfi.h for selection)
14  *	- optimized write buffer method
15  * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
16  *	- reworked lock/unlock/erase support for var size flash
17  * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
18  * 	- auto unlock sectors on resume for auto locking flash on power up
19  */
20 
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27 
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
34 #include <linux/mtd/xip.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/cfi.h>
38 
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
44 
45 /* Intel chips */
46 #define I82802AB	0x00ad
47 #define I82802AC	0x00ac
48 #define PF38F4476	0x881c
49 #define M28F00AP30	0x8963
50 /* STMicroelectronics chips */
51 #define M50LPW080       0x002F
52 #define M50FLW080A	0x0080
53 #define M50FLW080B	0x0081
54 /* Atmel chips */
55 #define AT49BV640D	0x02de
56 #define AT49BV640DT	0x02db
57 /* Sharp chips */
58 #define LH28F640BFHE_PTTL90	0x00b0
59 #define LH28F640BFHE_PBTL90	0x00b1
60 #define LH28F640BFHE_PTTL70A	0x00b2
61 #define LH28F640BFHE_PBTL70A	0x00b3
62 
63 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
66 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
67 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
68 static void cfi_intelext_sync (struct mtd_info *);
69 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
71 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
72 				  uint64_t len);
73 #ifdef CONFIG_MTD_OTP
74 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76 static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
77 					    size_t *, const u_char *);
78 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
79 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
80 					   size_t *, struct otp_info *);
81 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
82 					   size_t *, struct otp_info *);
83 #endif
84 static int cfi_intelext_suspend (struct mtd_info *);
85 static void cfi_intelext_resume (struct mtd_info *);
86 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
87 
88 static void cfi_intelext_destroy(struct mtd_info *);
89 
90 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
91 
92 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
93 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
94 
95 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
96 		     size_t *retlen, void **virt, resource_size_t *phys);
97 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
98 
99 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
100 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
101 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
102 #include "fwh_lock.h"
103 
104 
105 
106 /*
107  *  *********** SETUP AND PROBE BITS  ***********
108  */
109 
/* Chip-driver registration record for the Intel/Sharp (0x0001) command set;
 * installed into map->fldrv by cfi_cmdset_0001(). */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
116 
117 /* #define DEBUG_LOCK_BITS */
118 /* #define DEBUG_CFI_FEATURES */
119 
120 #ifdef DEBUG_CFI_FEATURES
/* Dump the parsed Intel/Sharp extended query table in human-readable form.
 * Debug-only (compiled under DEBUG_CFI_FEATURES); decodes each advertised
 * feature bit of FeatureSupport, SuspendCmdSupport and BlkStatusRegMask. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	/* Bits 11..31 have no known meaning here; report them raw */
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	/* NOTE(review): this loop only probes bit 2; bit 3 is never reported
	 * and bits 4/5 are decoded as EFA bits below — looks intentional but
	 * worth confirming against the CFI 1.4 spec. */
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* BCD-style encoding: high nibble = volts, low nibble = tenths */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
169 #endif
170 
171 /* Atmel chips don't use the same PRI format as Intel chips */
fixup_convert_atmel_pri(struct mtd_info * mtd)172 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
173 {
174 	struct map_info *map = mtd->priv;
175 	struct cfi_private *cfi = map->fldrv_priv;
176 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
177 	struct cfi_pri_atmel atmel_pri;
178 	uint32_t features = 0;
179 
180 	/* Reverse byteswapping */
181 	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
182 	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
183 	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
184 
185 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
186 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
187 
188 	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
189 
190 	if (atmel_pri.Features & 0x01) /* chip erase supported */
191 		features |= (1<<0);
192 	if (atmel_pri.Features & 0x02) /* erase suspend supported */
193 		features |= (1<<1);
194 	if (atmel_pri.Features & 0x04) /* program suspend supported */
195 		features |= (1<<2);
196 	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
197 		features |= (1<<9);
198 	if (atmel_pri.Features & 0x20) /* page mode read supported */
199 		features |= (1<<7);
200 	if (atmel_pri.Features & 0x40) /* queued erase supported */
201 		features |= (1<<4);
202 	if (atmel_pri.Features & 0x80) /* Protection bits supported */
203 		features |= (1<<6);
204 
205 	extp->FeatureSupport = features;
206 
207 	/* burst write mode not supported */
208 	cfi->cfiq->BufWriteTimeoutTyp = 0;
209 	cfi->cfiq->BufWriteTimeoutMax = 0;
210 }
211 
/* AT49BV640D/DT: advertise instant individual block locking (bit 5)
 * and tell the MTD core that blocks power up in the locked state. */
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	/* bit 5 = "Instant block lock" in FeatureSupport */
	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}
221 
222 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
223 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
fixup_intel_strataflash(struct mtd_info * mtd)224 static void fixup_intel_strataflash(struct mtd_info *mtd)
225 {
226 	struct map_info *map = mtd->priv;
227 	struct cfi_private *cfi = map->fldrv_priv;
228 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
229 
230 	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
231 	                    "erase on write disabled.\n");
232 	extp->SuspendCmdSupport &= ~1;
233 }
234 #endif
235 
236 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* Compile-time opt-out: mask the "Suspend Program" capability (bit 2 of
 * FeatureSupport) so writes are never suspended. */
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	/* cfip may be NULL when no extended query table was read */
	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
248 #endif
249 
/* ST M28W320CT: zero the buffer-write timeouts so the buffer write path
 * is never selected (see fixup_use_write_buffers). */
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
258 
fixup_st_m28w320cb(struct mtd_info * mtd)259 static void fixup_st_m28w320cb(struct mtd_info *mtd)
260 {
261 	struct map_info *map = mtd->priv;
262 	struct cfi_private *cfi = map->fldrv_priv;
263 
264 	/* Note this is done after the region info is endian swapped */
265 	cfi->cfiq->EraseRegionInfo[1] =
266 		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
267 };
268 
is_LH28F640BF(struct cfi_private * cfi)269 static int is_LH28F640BF(struct cfi_private *cfi)
270 {
271 	/* Sharp LH28F640BF Family */
272 	if (cfi->mfr == CFI_MFR_SHARP && (
273 	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
274 	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
275 		return 1;
276 	return 0;
277 }
278 
/* Sharp LH28F640BF family: force a single hardware partition and drop the
 * "Simultaneous operations" feature bit accordingly. */
static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Reset the Partition Configuration Register on LH28F640BF
	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		/* 0x60/0x04 command pair written at address 0 — presumably the
		 * device's "set configuration register" sequence; confirm
		 * against the LH28F640BF datasheet. */
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set one single partition thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;
	}
}
298 
fixup_use_point(struct mtd_info * mtd)299 static void fixup_use_point(struct mtd_info *mtd)
300 {
301 	struct map_info *map = mtd->priv;
302 	if (!mtd->_point && map_is_linear(map)) {
303 		mtd->_point   = cfi_intelext_point;
304 		mtd->_unpoint = cfi_intelext_unpoint;
305 	}
306 }
307 
fixup_use_write_buffers(struct mtd_info * mtd)308 static void fixup_use_write_buffers(struct mtd_info *mtd)
309 {
310 	struct map_info *map = mtd->priv;
311 	struct cfi_private *cfi = map->fldrv_priv;
312 	if (cfi->cfiq->BufWriteTimeoutTyp) {
313 		printk(KERN_INFO "Using buffer write method\n" );
314 		mtd->_write = cfi_intelext_write_buffers;
315 		mtd->_writev = cfi_intelext_writev;
316 	}
317 }
318 
319 /*
320  * Some chips power-up with all sectors locked by default.
321  */
fixup_unlock_powerup_lock(struct mtd_info * mtd)322 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
323 {
324 	struct map_info *map = mtd->priv;
325 	struct cfi_private *cfi = map->fldrv_priv;
326 	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
327 
328 	if (cfip->FeatureSupport&32) {
329 		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
330 		mtd->flags |= MTD_POWERUP_LOCK;
331 	}
332 }
333 
/* Per-chip fixups applied (in table order) to devices probed in CFI mode. */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }	/* sentinel */
};
354 
/* Fixups for devices probed in JEDEC mode: all are FWH (firmware hub)
 * parts that need the fwh_lock locking scheme (see fwh_lock.h). */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }	/* sentinel */
};
/* Generic fixups applied regardless of probe mode (CFI or JEDEC). */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }	/* sentinel */
};
372 
cfi_fixup_major_minor(struct cfi_private * cfi,struct cfi_pri_intelext * extp)373 static void cfi_fixup_major_minor(struct cfi_private *cfi,
374 						struct cfi_pri_intelext *extp)
375 {
376 	if (cfi->mfr == CFI_MFR_INTEL &&
377 			cfi->id == PF38F4476 && extp->MinorVersion == '3')
378 		extp->MinorVersion = '1';
379 }
380 
cfi_is_micron_28F00AP30(struct cfi_private * cfi,struct flchip * chip)381 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
382 {
383 	/*
384 	 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
385 	 * Erase Supend for their small Erase Blocks(0x8000)
386 	 */
387 	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
388 		return 1;
389 	return 0;
390 }
391 
/*
 * Read and validate the Intel/Sharp extended query (PRI) table at @adr.
 *
 * The table has a variable-length "extra" tail whose size depends on the
 * minor version and on counts stored inside the tail itself, so we read
 * optimistically with the base size and, whenever parsing runs past the
 * buffer, free it and re-read with a larger size (the "need_more"/"again"
 * loop).  Returns a kmalloc'd table (caller frees) or NULL on failure.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Correct known-bogus version fields before validating them */
	cfi_fixup_major_minor(cfi, extp);

	/* Only versions 1.0 .. 1.5 are understood by this parser */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		if (extp->NumProtectionFields) {
			struct cfi_intelext_otpinfo *otp =
				(struct cfi_intelext_otpinfo *)&extp->extra[0];

			/* The first field lives in the fixed part of the
			 * struct; only the remaining N-1 are in "extra" */
			extra_size += (extp->NumProtectionFields - 1) *
				sizeof(struct cfi_intelext_otpinfo);

			if (extp_size >= sizeof(*extp) + extra_size) {
				int i;

				/* Do some byteswapping if necessary */
				for (i = 0; i < extp->NumProtectionFields - 1; i++) {
					otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
					otp->FactGroups = le16_to_cpu(otp->FactGroups);
					otp->UserGroups = le16_to_cpu(otp->UserGroups);
					otp++;
				}
			}
		}
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info: 2 fixed bytes, the second of which is
		 * the length of the data that follows */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		/* Each partition region record carries a variable number of
		 * block-type descriptors (one is inside rinfo itself) */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			/* Buffer too small for the tail we just sized up;
			 * re-read the whole table with the larger size */
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
497 
/*
 * Entry point for the Intel/Sharp extended command set: allocate and fill
 * an mtd_info for the probed map, read the extended query table (CFI mode),
 * apply fixups, derive per-chip timeouts from the CFI query, and hand off
 * to cfi_intelext_setup().  @primary selects the primary vs alternate
 * query-table address.  Returns NULL on failure.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc_obj(*mtd);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	/* MaxBufWriteSize is log2 of the per-chip buffer size */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Derive per-chip timing values from the CFI query; the Typ fields
	 * are log2 encodings, Max fields are additional log2 multipliers.
	 * Fallback constants apply when the query reports zero. */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
611 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
612 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
613 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
614 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
615 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
616 
/*
 * Build the erase-region table for the (possibly interleaved, multi-chip)
 * device, install the OTP methods, run the partition fixup and register
 * the reboot notifier.  On any failure all partial allocations (including
 * the mtd itself and the cmdset_priv) are freed and NULL is returned.
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kzalloc_objs(struct mtd_erase_region_info,
					 mtd->numeraseregions);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo encoding: high 16 bits = block size / 256,
		 * low 16 bits = number of blocks - 1 */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			/* one bit per block, rounded up */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	/* Sanity check: regions must exactly tile the device */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	/* kfree(NULL) is a no-op, so unallocated lockmaps are safe here */
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
695 
/*
 * Split each physical chip into per-hardware-partition virtual chips when
 * the extended query (>= 1.3) advertises simultaneous operations (bit 9).
 * On success *pcfi (and map->fldrv_priv) may be replaced by a new, larger
 * cfi_private and the old one freed.  Returns 0 or a negative errno.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		int offs = 0;
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numregions, numparts, partshift, numvirtchips, i, j;

		/* Walk the "extra" tail using the same layout that
		 * read_pri_intelext() validated. */

		/* Protection Register info */
		if (extp->NumProtectionFields)
			offs = (extp->NumProtectionFields - 1) *
			       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		/* A partition smaller than an erase block is nonsense */
		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc_flex(*newcfi, chips, numvirtchips);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc_objs(struct flchip_shared, cfi->numchips);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		/* Clone each physical chip into numparts virtual chips; all
		 * partitions of one physical chip share a flchip_shared so
		 * conflicting operations can be serialized. */
		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
819 
820 /*
821  *  *********** CHIP ACCESS FUNCTIONS ***********
822  */
/*
 * Wait until @chip (or the partition containing @adr, on partitioned
 * devices) is ready for the operation @mode.  Called with chip->mutex
 * held; the lock is still held on return, but may have been dropped
 * and re-acquired in between.
 *
 * Returns 0 when the caller may proceed, -EAGAIN when the lock was
 * dropped so the caller must re-check the chip state and retry, or
 * -EIO on shutdown or when an erase suspend does not take effect.
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* 0x80: status register "ready" bit; 0x01: partition write status,
	   only consulted on partitioned chips (chip->priv != NULL) */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;	/* 1s erase-suspend timeout */

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			/* Not ready yet: drop the lock briefly, then make
			   the caller revalidate the whole chip state. */
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		fallthrough;
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Suspending the erase is only attempted when the chip
		   advertises it and @mode may run while suspended. */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend iff read/write to EB address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* do not suspend small EBs, buggy Micron Chips */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		/* Poll until the suspend takes effect or we time out. */
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now,so no one can get chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		/* Chip is busy and cannot be suspended: sleep on the chip's
		   wait queue until woken (put_chip() does the wake_up),
		   then ask the caller to retry. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
939 
/*
 * Acquire the right to perform operation @mode on @chip at @adr.
 * Called with chip->mutex held and returns with it held.  On
 * partitioned chips (chip->priv set) write/erase operations are global
 * to the real chip, so ownership of the shared write/erase engine is
 * arbitrated first through the flchip_shared structure.  Returns 0 on
 * success or a negative error code propagated from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	/* Only write-class operations contend for the shared engine, and
	   never while this partition is being synced. */
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				/* Locks were dropped: start over. */
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
1035 
/*
 * Release a chip acquired with get_chip(): hand shared write/erase
 * ownership back (on partitioned chips), resume any operation that was
 * suspended on our behalf, and wake up waiters.  Called with
 * chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;	/* needed by CMD() */

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* Recurse: let the loaner resume its own
				   suspended operation. */
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);	/* resume erase */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);	/* read status */
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* Nothing was suspended; nothing to resume. */
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
1106 
1107 #ifdef CONFIG_MTD_XIP
1108 
1109 /*
1110  * No interrupt what so ever can be serviced while the flash isn't in array
1111  * mode.  This is ensured by the xip_disable() and xip_enable() functions
1112  * enclosing any code path where the flash is known not to be in array mode.
1113  * And within a XIP disabled code path, only functions marked with __xipram
1114  * may be called and nothing else (it's a good thing to inspect generated
1115  * assembly to make sure inline functions were actually inlined and that gcc
1116  * didn't emit calls to its own support functions). Also configuring MTD CFI
1117  * support to a single buswidth and a single interleave is also recommended.
1118  */
1119 
/*
 * Prepare for taking the flash out of array mode: touch @adr so the
 * MMU mapping is established first, then mask local interrupts so no
 * code tries to execute from the flash meanwhile.  Paired with
 * xip_enable().
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
1127 
/*
 * Counterpart of xip_disable(): put the chip back into array (read)
 * mode if it is not already readable, prime the instruction prefetch,
 * and unmask local interrupts again.
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;	/* needed by CMD() */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);	/* 0xff = read array mode */
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
1140 
1141 /*
1142  * When a delay is required for the flash operation to complete, the
1143  * xip_wait_for_operation() function is polling for both the given timeout
1144  * and pending (but still masked) hardware interrupts.  Whenever there is an
1145  * interrupt pending then the flash erase or write operation is suspended,
1146  * array mode restored and interrupts unmasked.  Task scheduling might also
1147  * happen at that point.  The CPU eventually returns from the interrupt or
1148  * the call to schedule() and the suspended flash operation is resumed for
1149  * the remaining of the delay period.
1150  *
1151  * Warning: this function _will_ fool interrupt latency tracing tools.
1152  */
1153 
/*
 * Wait for the flash operation at @adr (write or erase, per
 * chip->state) to complete while keeping the system live: whenever a
 * (masked) interrupt becomes pending, the operation is suspended, the
 * chip is returned to array mode and interrupts are unmasked so the
 * interrupt can be serviced; the operation is resumed for the
 * remainder of the delay afterwards.  @chip_op_time_max is the timeout
 * in usecs (0 means 500ms).  Returns 0 on completion, -ETIME on
 * timeout, or -EIO if the chip refuses to suspend within 100ms.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);	/* status "ready" bit */
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;	/* default timeout: 500ms */
	done = 0;

	do {
		cpu_relax();
		/* Suspend only when an IRQ is pending, the chip advertises
		   suspend support for the current operation, and no other
		   interleaved operation is already suspended. */
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);	/* suspend */
			map_write(map, CMD(0x70), adr);	/* read status */
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				/* Suspend-status bit clear: the erase had
				   already completed, nothing was suspended
				   — we're done. */
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				/* Likewise for a write that finished before
				   the suspend took effect. */
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);	/* back to array mode */
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
1264 
1265 /*
1266  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1267  * the flash is actively programming or erasing since we have to poll for
1268  * the operation to complete anyway.  We can't do that in a generic way with
1269  * a XIP setup so do it before the actual flash operation in this case
1270  * and stub it out from INVAL_CACHE_AND_WAIT.
1271  */
1272 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1273 	INVALIDATE_CACHED_RANGE(map, from, size)
1274 
1275 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1276 	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1277 
1278 #else
1279 
1280 #define xip_disable(map, chip, adr)
1281 #define xip_enable(map, chip, adr)
1282 #define XIP_INVAL_CACHED_RANGE(x...)
1283 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1284 
/*
 * Invalidate cached data over [inval_adr, inval_adr + inval_len) and
 * poll the status register at @cmd_adr until the operation in progress
 * (chip->state on entry) completes.  @chip_op_time is the typical
 * operation duration in usecs and determines how long to sleep between
 * polls; @chip_op_time_max is the timeout in usecs (0 means 500ms).
 * Called with chip->mutex held; the lock is dropped while waiting.
 * Returns 0 with chip->state == FL_STATUS on completion, or -ETIME on
 * timeout.
 */
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);	/* status "ready" bit */
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	/* Drop the lock around the (possibly slow) cache invalidation. */
	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;	/* default timeout: 500ms */
	reset_timeo = timeo;
	/* Sleep in slices of half the typical operation time. */
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING)  {
			/* Erase suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING)  {
			/* Write suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			/* Timed out: leave the chip in read-status mode. */
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}
1362 
1363 #endif
1364 
1365 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1366 	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1367 
1368 
/*
 * Put one chip into FL_POINT state for a direct-mapped read of @len
 * bytes starting at chip-relative offset @adr.  Takes an extra
 * reference on chip->ref_point_counter so nested points stack.
 * Returns 0 on success or the error from get_chip().
 */
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long aligned;
	int err;

	/* Absolute address, rounded down to the bus width. */
	adr += chip->start;
	aligned = adr & ~(map_bankwidth(map) - 1);

	mutex_lock(&chip->mutex);

	err = get_chip(map, chip, aligned, FL_POINT);
	if (err) {
		mutex_unlock(&chip->mutex);
		return err;
	}

	/* Switch to array mode unless the chip is already readable. */
	if (chip->state != FL_POINT && chip->state != FL_READY)
		map_write(map, CMD(0xff), aligned);

	chip->state = FL_POINT;
	chip->ref_point_counter++;

	mutex_unlock(&chip->mutex);
	return 0;
}
1395 
/*
 * MTD point() method: map [from, from + len) for direct access and
 * return the virtual (and optionally physical) address in *virt/*phys.
 * *retlen reports how much of the range could actually be pointed;
 * the walk stops early at the array end or at virtually disjoint
 * chips.  Returns 0 (partial success included) or -EINVAL when the
 * map has no virtual mapping.
 */
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum, ret;

	if (!map->virt)
		return -EINVAL;

	/* Locate the first chip and the offset within it. */
	chipnum = from >> cfi->chipshift;
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len && chipnum < cfi->numchips) {
		unsigned long chunk;

		/* We cannot point across chips that are virtually disjoint. */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		/* Clip this piece at the end of the current chip. */
		if ((len + ofs - 1) >> cfi->chipshift)
			chunk = (1 << cfi->chipshift) - ofs;
		else
			chunk = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, chunk);
		if (ret)
			break;

		*retlen += chunk;
		len -= chunk;

		/* Subsequent chips start at their beginning. */
		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	/* Partial success is still success; *retlen tells the caller. */
	return 0;
}
1448 
/*
 * MTD unpoint() method: undo cfi_intelext_point() over [from, from+len)
 * by dropping one FL_POINT reference on each chip in the range; a chip
 * leaves FL_POINT state once its reference count reaches zero.
 * Returns 0 on success or -EINVAL if a chip in the range was not in
 * FL_POINT state (unbalanced unpoint).
 */
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		/* Bounds-check before forming the chip pointer: indexing
		   past the end of chips[] is undefined behaviour even if
		   the pointer is never dereferenced. */
		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			/* Last reference gone: leave FL_POINT state. */
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
1495 
/*
 * Read @len bytes from one chip at chip-relative offset @adr into
 * @buf.  Switches the chip to array mode first if necessary.  Returns
 * 0 on success or the error from get_chip().
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long aligned;
	int err;

	/* Absolute address, rounded down to the bus width for commands. */
	adr += chip->start;
	aligned = adr & ~(map_bankwidth(map) - 1);

	mutex_lock(&chip->mutex);

	err = get_chip(map, chip, aligned, FL_READY);
	if (err) {
		mutex_unlock(&chip->mutex);
		return err;
	}

	/* Make sure the chip is in array (read) mode. */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), aligned);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, aligned);

	mutex_unlock(&chip->mutex);
	return 0;
}
1527 
/*
 * MTD read() method: copy [from, from + len) into @buf, walking the
 * chip array and reading as much as possible from each chip in turn.
 * *retlen accumulates the bytes actually read.  Returns 0 or the
 * first error from do_read_onechip().
 */
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int chipnum = from >> cfi->chipshift;
	unsigned long ofs = from - (chipnum << cfi->chipshift);
	int ret = 0;

	while (len && chipnum < cfi->numchips) {
		unsigned long chunk = len;

		/* Clip the transfer at the end of the current chip. */
		if ((len + ofs - 1) >> cfi->chipshift)
			chunk = (1 << cfi->chipshift) - ofs;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, chunk, buf);
		if (ret)
			break;

		*retlen += chunk;
		len -= chunk;
		buf += chunk;

		/* Subsequent chips are read from their beginning. */
		ofs = 0;
		chipnum++;
	}
	return ret;
}
1564 
/*
 * Program a single bus-width word @datum at chip-relative address @adr.
 * @mode selects the command: FL_WRITING issues a word program (0x40, or
 * 0x41 on Performance chips), FL_OTP_WRITE a protection-register
 * program (0xc0).  Returns 0 on success, -EINVAL for an unsupported
 * @mode or unrecognized chip error, -EROFS if the block is locked,
 * -EIO on a VPP problem, or the error from the status wait.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret;

	adr += chip->start;	/* make the address chip-absolute */

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	/* Program command followed immediately by the data word. */
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			/* block locked */
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
1638 
1639 
/*
 * MTD write() method (word-at-a-time variant): program [to, to + len)
 * from @buf one bus word per operation.  Unaligned head and tail
 * fragments are padded with 0xff before programming.  *retlen
 * accumulates the bytes written.  Returns 0 or the first error from
 * do_write_oneword().
 */
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, ret;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* Leading unaligned fragment: merge into one 0xff-padded word. */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long word_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - word_ofs;
		int count = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, count);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       word_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += count;
		buf += count;
		len -= count;
		(*retlen) += count;

		/* Crossed into the next chip? */
		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Aligned middle: one full bus word per iteration. */
	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Trailing fragment, again padded with 0xff. */
	if (len & (map_bankwidth(map)-1)) {
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
1717 
1718 
do_write_buffer(struct map_info * map,struct flchip * chip,unsigned long adr,const struct kvec ** pvec,unsigned long * pvec_seek,int len)1719 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1720 				    unsigned long adr, const struct kvec **pvec,
1721 				    unsigned long *pvec_seek, int len)
1722 {
1723 	struct cfi_private *cfi = map->fldrv_priv;
1724 	map_word status, write_cmd, datum;
1725 	unsigned long cmd_adr;
1726 	int ret, wbufsize, word_gap, words;
1727 	const struct kvec *vec;
1728 	unsigned long vec_seek;
1729 	unsigned long initial_adr;
1730 	int initial_len = len;
1731 
1732 	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1733 	adr += chip->start;
1734 	initial_adr = adr;
1735 	cmd_adr = adr & ~(wbufsize-1);
1736 
1737 	/* Sharp LH28F640BF chips need the first address for the
1738 	 * Page Buffer Program command. See Table 5 of
1739 	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1740 	if (is_LH28F640BF(cfi))
1741 		cmd_adr = adr;
1742 
1743 	/* Let's determine this according to the interleave only once */
1744 	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1745 
1746 	mutex_lock(&chip->mutex);
1747 	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1748 	if (ret) {
1749 		mutex_unlock(&chip->mutex);
1750 		return ret;
1751 	}
1752 
1753 	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1754 	ENABLE_VPP(map);
1755 	xip_disable(map, chip, cmd_adr);
1756 
1757 	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1758 	   [...], the device will not accept any more Write to Buffer commands".
1759 	   So we must check here and reset those bits if they're set. Otherwise
1760 	   we're just pissing in the wind */
1761 	if (chip->state != FL_STATUS) {
1762 		map_write(map, CMD(0x70), cmd_adr);
1763 		chip->state = FL_STATUS;
1764 	}
1765 	status = map_read(map, cmd_adr);
1766 	if (map_word_bitsset(map, status, CMD(0x30))) {
1767 		xip_enable(map, chip, cmd_adr);
1768 		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1769 		xip_disable(map, chip, cmd_adr);
1770 		map_write(map, CMD(0x50), cmd_adr);
1771 		map_write(map, CMD(0x70), cmd_adr);
1772 	}
1773 
1774 	chip->state = FL_WRITING_TO_BUFFER;
1775 	map_write(map, write_cmd, cmd_adr);
1776 	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1777 	if (ret) {
1778 		/* Argh. Not ready for write to buffer */
1779 		map_word Xstatus = map_read(map, cmd_adr);
1780 		map_write(map, CMD(0x70), cmd_adr);
1781 		chip->state = FL_STATUS;
1782 		status = map_read(map, cmd_adr);
1783 		map_write(map, CMD(0x50), cmd_adr);
1784 		map_write(map, CMD(0x70), cmd_adr);
1785 		xip_enable(map, chip, cmd_adr);
1786 		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1787 				map->name, Xstatus.x[0], status.x[0]);
1788 		goto out;
1789 	}
1790 
1791 	/* Figure out the number of words to write */
1792 	word_gap = (-adr & (map_bankwidth(map)-1));
1793 	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1794 	if (!word_gap) {
1795 		words--;
1796 	} else {
1797 		word_gap = map_bankwidth(map) - word_gap;
1798 		adr -= word_gap;
1799 		datum = map_word_ff(map);
1800 	}
1801 
1802 	/* Write length of data to come */
1803 	map_write(map, CMD(words), cmd_adr );
1804 
1805 	/* Write data */
1806 	vec = *pvec;
1807 	vec_seek = *pvec_seek;
1808 	do {
1809 		int n = map_bankwidth(map) - word_gap;
1810 		if (n > vec->iov_len - vec_seek)
1811 			n = vec->iov_len - vec_seek;
1812 		if (n > len)
1813 			n = len;
1814 
1815 		if (!word_gap && len < map_bankwidth(map))
1816 			datum = map_word_ff(map);
1817 
1818 		datum = map_word_load_partial(map, datum,
1819 					      vec->iov_base + vec_seek,
1820 					      word_gap, n);
1821 
1822 		len -= n;
1823 		word_gap += n;
1824 		if (!len || word_gap == map_bankwidth(map)) {
1825 			map_write(map, datum, adr);
1826 			adr += map_bankwidth(map);
1827 			word_gap = 0;
1828 		}
1829 
1830 		vec_seek += n;
1831 		if (vec_seek == vec->iov_len) {
1832 			vec++;
1833 			vec_seek = 0;
1834 		}
1835 	} while (len);
1836 	*pvec = vec;
1837 	*pvec_seek = vec_seek;
1838 
1839 	/* GO GO GO */
1840 	map_write(map, CMD(0xd0), cmd_adr);
1841 	chip->state = FL_WRITING;
1842 
1843 	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1844 				   initial_adr, initial_len,
1845 				   chip->buffer_write_time,
1846 				   chip->buffer_write_time_max);
1847 	if (ret) {
1848 		map_write(map, CMD(0x70), cmd_adr);
1849 		chip->state = FL_STATUS;
1850 		xip_enable(map, chip, cmd_adr);
1851 		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1852 		goto out;
1853 	}
1854 
1855 	/* check for errors */
1856 	status = map_read(map, cmd_adr);
1857 	if (map_word_bitsset(map, status, CMD(0x1a))) {
1858 		unsigned long chipstatus = MERGESTATUS(status);
1859 
1860 		/* reset status */
1861 		map_write(map, CMD(0x50), cmd_adr);
1862 		map_write(map, CMD(0x70), cmd_adr);
1863 		xip_enable(map, chip, cmd_adr);
1864 
1865 		if (chipstatus & 0x02) {
1866 			ret = -EROFS;
1867 		} else if (chipstatus & 0x08) {
1868 			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1869 			ret = -EIO;
1870 		} else {
1871 			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1872 			ret = -EINVAL;
1873 		}
1874 
1875 		goto out;
1876 	}
1877 
1878 	xip_enable(map, chip, cmd_adr);
1879  out:	DISABLE_VPP(map);
1880 	put_chip(map, chip, cmd_adr);
1881 	mutex_unlock(&chip->mutex);
1882 	return ret;
1883 }
1884 
/*
 * Write data described by a scatter/gather list (kvec array), splitting
 * the transfer so that no single programming operation crosses a write
 * buffer boundary or a chip boundary.
 *
 * Returns 0 on success or a negative errno from do_write_buffer().
 * NOTE(review): *retlen is only incremented here — this relies on the
 * MTD core having zeroed it before the call; confirm against callers.
 */
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* Size of the chip's internal write buffer, scaled by interleave */
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	/* Total number of bytes described by the kvec array */
	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	/* Locate the starting chip and the offset within it */
	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		/* do_write_buffer() advances vecs/vec_seek as it consumes data */
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Advance to the next chip when we run off the end of this one */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}
1936 
/*
 * Buffered write of a single flat buffer: wrap it in a one-element
 * kvec and hand it to the scatter/gather path.
 */
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec = {
		.iov_base = (void *) buf,
		.iov_len = len,
	};

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
1947 
/*
 * Erase a single flash block of @len bytes at @adr (chip-relative until
 * chip->start is added).  Issues the Block Erase command pair
 * (0x20/0xD0), waits for completion, decodes the status register error
 * bits, and retries up to 3 times on a plain erase failure (SR.5).
 * Called via cfi_varsize_frob(); @thunk is unused.
 *
 * Returns 0 on success or a negative errno.
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase: Block Erase setup (0x20) then confirm (0xD0) */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	/* Remember the erase target so a suspended erase can be resumed */
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors (SR.1, SR.3, SR.4 or SR.5 set) */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		/* SR.4 and SR.5 both set: invalid command sequence */
		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			/* Plain erase failure (SR.5): release and retry */
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2037 
cfi_intelext_erase_varsize(struct mtd_info * mtd,struct erase_info * instr)2038 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2039 {
2040 	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2041 				instr->len, NULL);
2042 }
2043 
/*
 * Quiesce the device: wait for every chip to become idle and park it in
 * FL_SYNCING so no new operation can start, then release them all
 * again.  If acquiring a chip fails, the first loop stops and only the
 * chips already acquired are released.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* First pass: acquire each chip in turn; stop at the first failure */
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	/* Second pass: walk back over the chips we managed to sync */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
2084 
/*
 * Read one block's lock status via the Read Device Identifier (0x90)
 * command.  The lock bits live at word offset 2 of the block's
 * identifier data, hence the adr+(2*ofs_factor) addressing.  Returns
 * the raw status byte (nonzero = locked).  Called through
 * cfi_varsize_frob(), so @len and @thunk are part of the callback
 * signature but unused here.
 */
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}
2101 
2102 #ifdef DEBUG_LOCK_BITS
/* Debug helper (DEBUG_LOCK_BITS only): dump one block's lock status. */
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
2112 #endif
2113 
2114 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
2115 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2116 
do_xxlock_oneblock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2117 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2118 				       unsigned long adr, int len, void *thunk)
2119 {
2120 	struct cfi_private *cfi = map->fldrv_priv;
2121 	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2122 	int mdelay;
2123 	int ret;
2124 
2125 	adr += chip->start;
2126 
2127 	mutex_lock(&chip->mutex);
2128 	ret = get_chip(map, chip, adr, FL_LOCKING);
2129 	if (ret) {
2130 		mutex_unlock(&chip->mutex);
2131 		return ret;
2132 	}
2133 
2134 	ENABLE_VPP(map);
2135 	xip_disable(map, chip, adr);
2136 
2137 	map_write(map, CMD(0x60), adr);
2138 	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2139 		map_write(map, CMD(0x01), adr);
2140 		chip->state = FL_LOCKING;
2141 	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2142 		map_write(map, CMD(0xD0), adr);
2143 		chip->state = FL_UNLOCKING;
2144 	} else
2145 		BUG();
2146 
2147 	/*
2148 	 * If Instant Individual Block Locking supported then no need
2149 	 * to delay.
2150 	 */
2151 	/*
2152 	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2153 	 * lets use a max of 1.5 seconds (1500ms) as timeout.
2154 	 *
2155 	 * See "Clear Block Lock-Bits Time" on page 40 in
2156 	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2157 	 * from February 2003
2158 	 */
2159 	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2160 
2161 	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2162 	if (ret) {
2163 		map_write(map, CMD(0x70), adr);
2164 		chip->state = FL_STATUS;
2165 		xip_enable(map, chip, adr);
2166 		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2167 		goto out;
2168 	}
2169 
2170 	xip_enable(map, chip, adr);
2171  out:	DISABLE_VPP(map);
2172 	put_chip(map, chip, adr);
2173 	mutex_unlock(&chip->mutex);
2174 	return ret;
2175 }
2176 
/* MTD lock entry point: set the lock bit on every block in the range. */
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	/* len is a uint64_t: print it with %llx — the previous %08X
	 * conversion mismatched the 64-bit vararg. */
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
	       __func__, ofs, (unsigned long long)len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
2200 
/* MTD unlock entry point: clear the lock bit on every block in the range. */
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	/* len is a uint64_t: print it with %llx — the previous %08X
	 * conversion mismatched the 64-bit vararg. */
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
	       __func__, ofs, (unsigned long long)len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
2224 
/* MTD is_locked entry point: 1 if any block in the range is locked. */
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	int status;

	status = cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				  ofs, len, NULL);
	return status ? 1 : 0;
}
2231 
2232 #ifdef CONFIG_MTD_OTP
2233 
2234 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2235 			u_long data_offset, u_char *buf, u_int size,
2236 			u_long prot_offset, u_int groupno, u_int groupsize);
2237 
/*
 * Read @size bytes of OTP protection register data at @offset
 * (relative to chip->start) into @buf.  The chip is placed in Read
 * Device Identifier (0x90) mode so the map window exposes the
 * protection registers instead of the array.  @prot/@grpno/@grpsz are
 * unused; they are part of the common otp_op_t callback signature.
 */
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;	/* used by the CMD() macro */
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}
2270 
2271 static int
do_otp_write(struct map_info * map,struct flchip * chip,u_long offset,u_char * buf,u_int size,u_long prot,u_int grpno,u_int grpsz)2272 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2273 	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2274 {
2275 	int ret;
2276 
2277 	while (size) {
2278 		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2279 		int gap = offset - bus_ofs;
2280 		int n = min_t(int, size, map_bankwidth(map)-gap);
2281 		map_word datum = map_word_ff(map);
2282 
2283 		datum = map_word_load_partial(map, datum, buf, gap, n);
2284 		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2285 		if (ret)
2286 			return ret;
2287 
2288 		offset += n;
2289 		buf += n;
2290 		size -= n;
2291 	}
2292 
2293 	return 0;
2294 }
2295 
2296 static int
do_otp_lock(struct map_info * map,struct flchip * chip,u_long offset,u_char * buf,u_int size,u_long prot,u_int grpno,u_int grpsz)2297 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2298 	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2299 {
2300 	struct cfi_private *cfi = map->fldrv_priv;
2301 	map_word datum;
2302 
2303 	/* make sure area matches group boundaries */
2304 	if (size != grpsz)
2305 		return -EXDEV;
2306 
2307 	datum = map_word_ff(map);
2308 	datum = map_word_clr(map, datum, CMD(1 << grpno));
2309 	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2310 }
2311 
/*
 * Common walker for all OTP (protection register) operations.
 *
 * Iterates over every real chip (interleave-adjusted) and each chip's
 * protection register regions, applying @action (read, write or lock)
 * to the factory (@user_regs == 0) or user (@user_regs == 1) register
 * groups.  When @action is NULL, @buf is instead filled with
 * struct otp_info records describing each group and its lock state.
 *
 * Returns 0 on success or a negative errno.
 */
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/*
					 * len is a size_t (unsigned): the
					 * old "len -= ...; if (len <= 0)"
					 * test wrapped around when fewer
					 * than sizeof(struct otp_info)
					 * bytes remained, overrunning buf.
					 * Check before subtracting instead;
					 * len == sizeof() is still rejected
					 * as before.
					 */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0,  0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
2445 
/* MTD op: read from the factory-programmed protection registers. */
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}
2453 
/* MTD op: read from the user-programmable protection registers. */
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}
2461 
/* MTD op: program the user-programmable protection registers. */
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    const u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     (u_char *)buf, do_otp_write, 1);
}
2469 
/* MTD op: permanently lock user protection register groups. */
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;	/* byte count is not reported to the caller */
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}
2477 
/* MTD op: enumerate factory protection regions (NULL action fills
 * buf with struct otp_info records). */
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)

{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}
2485 
/* MTD op: enumerate user protection regions (NULL action fills
 * buf with struct otp_info records). */
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}
2492 
2493 #endif
2494 
cfi_intelext_save_locks(struct mtd_info * mtd)2495 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2496 {
2497 	struct mtd_erase_region_info *region;
2498 	int block, status, i;
2499 	unsigned long adr;
2500 	size_t len;
2501 
2502 	for (i = 0; i < mtd->numeraseregions; i++) {
2503 		region = &mtd->eraseregions[i];
2504 		if (!region->lockmap)
2505 			continue;
2506 
2507 		for (block = 0; block < region->numblocks; block++){
2508 			len = region->erasesize;
2509 			adr = region->offset + block * len;
2510 
2511 			status = cfi_varsize_frob(mtd,
2512 					do_getlockstatus_oneblock, adr, len, NULL);
2513 			if (status)
2514 				set_bit(block, region->lockmap);
2515 			else
2516 				clear_bit(block, region->lockmap);
2517 		}
2518 	}
2519 }
2520 
/*
 * Power-management suspend hook.  Saves block lock state first on
 * power-up auto-locking parts, then parks every idle chip in
 * FL_PM_SUSPENDED after forcing it back to array mode (0xFF).
 * Returns -EAGAIN (after rolling back any chips already suspended)
 * if any chip has an operation in flight.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* Instant-lock parts (FeatureSupport bit 5) lose soft lock state
	 * over a power cycle; snapshot it so resume can restore it */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		/* Roll back: wake every chip already moved to FL_PM_SUSPENDED */
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
2595 
cfi_intelext_restore_locks(struct mtd_info * mtd)2596 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2597 {
2598 	struct mtd_erase_region_info *region;
2599 	int block, i;
2600 	unsigned long adr;
2601 	size_t len;
2602 
2603 	for (i = 0; i < mtd->numeraseregions; i++) {
2604 		region = &mtd->eraseregions[i];
2605 		if (!region->lockmap)
2606 			continue;
2607 
2608 		for_each_clear_bit(block, region->lockmap, region->numblocks) {
2609 			len = region->erasesize;
2610 			adr = region->offset + block * len;
2611 			cfi_intelext_unlock(mtd, adr, len);
2612 		}
2613 	}
2614 }
2615 
/*
 * Power-management resume hook: bring every suspended chip back to
 * array mode (the part may have been power cycled) and re-apply the
 * saved block lock state on power-up auto-locking parts.
 */
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	/* Same feature test as in suspend: only instant-lock parts
	 * need their lock state restored */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
2646 
/*
 * Force every chip into array (read) mode so a bootloader stored in
 * flash remains readable across a soft reboot.  Chips that cannot be
 * acquired are simply skipped; always returns 0.
 */
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}
2671 
cfi_intelext_reboot(struct notifier_block * nb,unsigned long val,void * v)2672 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2673 			       void *v)
2674 {
2675 	struct mtd_info *mtd;
2676 
2677 	mtd = container_of(nb, struct mtd_info, reboot_notifier);
2678 	cfi_intelext_reset(mtd);
2679 	return NOTIFY_DONE;
2680 }
2681 
/*
 * Teardown for the MTD device: put the chips back in array mode, drop
 * the reboot notifier and release all driver-private memory.
 */
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	/* lockmaps/eraseregions hang off mtd, not cfi, so freeing cfi
	 * first is safe */
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
2700 
2701 MODULE_LICENSE("GPL");
2702 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2703 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2704 MODULE_ALIAS("cfi_cmdset_0003");
2705 MODULE_ALIAS("cfi_cmdset_0200");
2706