1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Common Flash Interface support:
4 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 *
6 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
7 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
8 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
9 *
10 * 2_by_8 routines added by Simon Munton
11 *
12 * 4_by_16 work by Carolyn J. Smith
13 *
14 * XIP support hooks by Vitaly Wool (based on code for Intel flash
15 * by Nicolas Pitre)
16 *
17 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
18 *
19 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
20 */
21
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <asm/io.h>
27 #include <asm/byteorder.h>
28
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/reboot.h>
34 #include <linux/of.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/cfi.h>
38 #include <linux/mtd/xip.h>
39
40 #define AMD_BOOTLOC_BUG
41 #define FORCE_WORD_WRITE 0
42
43 #define MAX_RETRIES 3
44
45 #define SST49LF004B 0x0060
46 #define SST49LF040B 0x0050
47 #define SST49LF008A 0x005a
48 #define AT49BV6416 0x00d6
49 #define S29GL064N_MN12 0x0c01
50
51 /*
52 * Status Register bit description. Used by flash devices that don't
53 * support DQ polling (e.g. HyperFlash)
54 */
55 #define CFI_SR_DRB BIT(7)
56 #define CFI_SR_ESB BIT(5)
57 #define CFI_SR_PSB BIT(4)
58 #define CFI_SR_WBASB BIT(3)
59 #define CFI_SR_SLSB BIT(1)
60
61 enum cfi_quirks {
62 CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
63 };
64
65 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
67 #if !FORCE_WORD_WRITE
68 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
69 #endif
70 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
71 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
72 static void cfi_amdstd_sync (struct mtd_info *);
73 static int cfi_amdstd_suspend (struct mtd_info *);
74 static void cfi_amdstd_resume (struct mtd_info *);
75 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
76 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
77 size_t *, struct otp_info *);
78 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
79 size_t *, struct otp_info *);
80 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
81 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
82 size_t *, u_char *);
83 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
84 size_t *, u_char *);
85 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
86 size_t *, const u_char *);
87 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
88
89 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
90 size_t *retlen, const u_char *buf);
91
92 static void cfi_amdstd_destroy(struct mtd_info *);
93
94 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
95 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
96
97 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
98 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
99 #include "fwh_lock.h"
100
101 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
102 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
103
104 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
105 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
106 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
107
/*
 * Chip-driver registration record.  Probing is never done through this
 * structure directly (.probe is NULL); callers enter via cfi_cmdset_0002().
 */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
114
115 /*
116 * Use status register to poll for Erase/write completion when DQ is not
117 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
118 * CFI Primary Vendor-Specific Extended Query table 1.5
119 */
cfi_use_status_reg(struct cfi_private * cfi)120 static int cfi_use_status_reg(struct cfi_private *cfi)
121 {
122 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
123 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
124
125 return extp && extp->MinorVersion >= '5' &&
126 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
127 }
128
/*
 * Read the status register and log any error bits that are set.
 *
 * Only meaningful for chips polled via the status register; for DQ-polling
 * devices this returns 0 (no error) immediately.  Returns 1 when an erase
 * or program failure is flagged, 0 otherwise.
 */
static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	/* 0x70: Status Register Read command */
	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip's busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	/* 0x3a == CFI_SR_ESB | CFI_SR_PSB | CFI_SR_WBASB | CFI_SR_SLSB */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on the operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}
168
169 /* #define DEBUG_CFI_FEATURES */
170
171
#ifdef DEBUG_CFI_FEATURES
/*
 * Dump the fields of the Amd/Fujitsu primary extended query table in
 * human-readable form.  Debug aid only; compiled out by default.
 */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	/* Guard table lookups against out-of-range CFI values */
	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	/* Vpp fields are BCD-like: high nibble volts, low nibble tenths */
	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
221
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
/*
 * CFI 1.0 chips have no trustworthy boot-block location field, so deduce
 * top/bottom boot from the JEDEC device ID and patch extp->TopBottom.
 */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	/* '1','1' packs to 0x3131: anything older is CFI version 1.0 */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
272
273 #if !FORCE_WORD_WRITE
fixup_use_write_buffers(struct mtd_info * mtd)274 static void fixup_use_write_buffers(struct mtd_info *mtd)
275 {
276 struct map_info *map = mtd->priv;
277 struct cfi_private *cfi = map->fldrv_priv;
278
279 if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
280 return;
281
282 if (cfi->cfiq->BufWriteTimeoutTyp) {
283 pr_debug("Using buffer write method\n");
284 mtd->_write = cfi_amdstd_write_buffers;
285 }
286 }
287 #endif /* !FORCE_WORD_WRITE */
288
289 /* Atmel chips don't use the same PRI format as AMD chips */
fixup_convert_atmel_pri(struct mtd_info * mtd)290 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
291 {
292 struct map_info *map = mtd->priv;
293 struct cfi_private *cfi = map->fldrv_priv;
294 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
295 struct cfi_pri_atmel atmel_pri;
296
297 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
298 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
299
300 if (atmel_pri.Features & 0x02)
301 extp->EraseSuspend = 2;
302
303 /* Some chips got it backwards... */
304 if (cfi->id == AT49BV6416) {
305 if (atmel_pri.BottomBoot)
306 extp->TopBottom = 3;
307 else
308 extp->TopBottom = 2;
309 } else {
310 if (atmel_pri.BottomBoot)
311 extp->TopBottom = 2;
312 else
313 extp->TopBottom = 3;
314 }
315
316 /* burst write mode not supported */
317 cfi->cfiq->BufWriteTimeoutTyp = 0;
318 cfi->cfiq->BufWriteTimeoutMax = 0;
319 }
320
fixup_use_secsi(struct mtd_info * mtd)321 static void fixup_use_secsi(struct mtd_info *mtd)
322 {
323 /* Setup for chips with a secsi area */
324 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
325 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
326 }
327
fixup_use_erase_chip(struct mtd_info * mtd)328 static void fixup_use_erase_chip(struct mtd_info *mtd)
329 {
330 struct map_info *map = mtd->priv;
331 struct cfi_private *cfi = map->fldrv_priv;
332 if ((cfi->cfiq->NumEraseRegions == 1) &&
333 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
334 mtd->_erase = cfi_amdstd_erase_chip;
335 }
336
337 }
338
339 /*
340 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
341 * locked by default.
342 */
fixup_use_atmel_lock(struct mtd_info * mtd)343 static void fixup_use_atmel_lock(struct mtd_info *mtd)
344 {
345 mtd->_lock = cfi_atmel_lock;
346 mtd->_unlock = cfi_atmel_unlock;
347 mtd->flags |= MTD_POWERUP_LOCK;
348 }
349
/* Collapse old SST chips' duplicated erase regions to the sector region. */
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}
363
fixup_sst39vf(struct mtd_info * mtd)364 static void fixup_sst39vf(struct mtd_info *mtd)
365 {
366 struct map_info *map = mtd->priv;
367 struct cfi_private *cfi = map->fldrv_priv;
368
369 fixup_old_sst_eraseregion(mtd);
370
371 cfi->addr_unlock1 = 0x5555;
372 cfi->addr_unlock2 = 0x2AAA;
373 }
374
fixup_sst39vf_rev_b(struct mtd_info * mtd)375 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
376 {
377 struct map_info *map = mtd->priv;
378 struct cfi_private *cfi = map->fldrv_priv;
379
380 fixup_old_sst_eraseregion(mtd);
381
382 cfi->addr_unlock1 = 0x555;
383 cfi->addr_unlock2 = 0x2AA;
384
385 cfi->sector_erase_cmd = CMD(0x50);
386 }
387
/* SST38VF640x: apply the rev-B fixups, then correct the sector size. */
static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}
403
/*
 * S29GL064N: some variants under-report the sector count.  Bump the
 * low 16-bit "blocks - 1" field from 0x3f (64) to 0x7f (128).
 */
static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}
415
/*
 * S29GL032N: some variants over-report the second region's sector count.
 * Clear bit 6 to drop the "blocks - 1" field from 0x7e (127) to 0x3e (63).
 */
static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}
427
/* Rewrite S29NS512P's out-of-spec region info as 512 sectors of 128KiB. */
static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}
441
fixup_quirks(struct mtd_info * mtd)442 static void fixup_quirks(struct mtd_info *mtd)
443 {
444 struct map_info *map = mtd->priv;
445 struct cfi_private *cfi = map->fldrv_priv;
446
447 if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12)
448 cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
449 }
450
451 /* Used to fix CFI-Tables of chips without Extended Query Tables */
/* Used to fix CFI-Tables of chips without Extended Query Tables */
/* Applied in CFI mode after the (optional) extended table was handled. */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};
463
/* Fixups applied when a real CFI extended query table was read. */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
	{ 0, 0, NULL }
};
/* Fixups applied only when the chip was identified via JEDEC probing. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
498
/* Generic fixups run for every chip regardless of probe mode. */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
509
510
/*
 * Repair bogus extended-query version numbers before they are validated,
 * so known-good chips with broken tables are not rejected.
 */
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}
537
is_m29ew(struct cfi_private * cfi)538 static int is_m29ew(struct cfi_private *cfi)
539 {
540 if (cfi->mfr == CFI_MFR_INTEL &&
541 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
542 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
543 return 1;
544 return 0;
545 }
546
547 /*
548 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
549 * Some revisions of the M29EW suffer from erase suspend hang ups. In
550 * particular, it can occur when the sequence
551 * Erase Confirm -> Suspend -> Program -> Resume
552 * causes a lockup due to internal timing issues. The consequence is that the
553 * erase cannot be resumed without inserting a dummy command after programming
554 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
555 * that writes an F0 command code before the RESUME command.
556 */
/*
 * Work around the M29EW erase-suspend lockup (see comment above): issue a
 * dummy 0xF0 write cycle before the Erase-Resume command.  No-op on other
 * chips.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}
565
566 /*
567 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
568 *
569 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
570 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
571 * command is issued after an ERASE RESUME operation without waiting for a
572 * minimum delay. The result is that once the ERASE seems to be completed
573 * (no bits are toggling), the contents of the Flash memory block on which
574 * the erase was ongoing could be inconsistent with the expected values
575 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
576 * values), causing a consequent failure of the ERASE operation.
577 * The occurrence of this issue could be high, especially when file system
578 * operations on the Flash are intensive. As a result, it is recommended
579 * that a patch be applied. Intensive file system operations can cause many
580 * calls to the garbage routine to free Flash space (also by erasing physical
581 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
582 * commands can occur. The problem disappears when a delay is inserted after
583 * the RESUME command by using the udelay() function available in Linux.
584 * The DELAY value must be tuned based on the customer's platform.
585 * The maximum value that fixes the problem in all cases is 500us.
586 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
587 * in most cases.
588 * We have chosen 500µs because this latency is acceptable.
589 */
/*
 * Delay after an Erase-Resume on M29EW parts to avoid the suspend/resume
 * hang described above (Micron TN-13-07).  No-op on other chips.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}
599
/*
 * cfi_cmdset_0002 - probe entry point for the AMD/Fujitsu command set
 * @map:	the flash mapping this chip lives in
 * @primary:	non-zero to use the primary (P_ADR) extended query table,
 *		zero for the alternate (A_ADR) one
 *
 * Allocates the mtd_info, installs default operations, reads and
 * validates the extended query table in CFI mode, applies vendor fixups
 * and per-chip timeouts, and hands off to cfi_amdstd_setup() for the
 * geometry setup.  Returns NULL on any failure.
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc_obj(*mtd);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	/* buffer size scales with interleave and the per-chip buffer size */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			/* DT opt-in to PPB sector locking when the chip supports it */
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			/* Top-boot chips list regions backwards: mirror the array */
			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		/* Without unlock addresses we cannot talk to the chip at all */
		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		/* CFI timeout fields are log2 values in microseconds */
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to timeout field
		 * of struct cfi_ident that probed from chip's CFI aera, if
		 * available. Specify a minimum of 2000us, in case the CFI data
		 * is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
/* Command sets 0x0006 and 0x0701 are handled identically to 0x0002 */
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
764
/*
 * Build the erase-region geometry for the probed chip set and register the
 * reboot notifier.  On failure everything allocated so far (including the
 * mtd and the cmdset_priv installed by the caller) is freed and NULL is
 * returned.
 */
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* DevSize is a log2 field; scale by interleave for the mapped size */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_objs(struct mtd_erase_region_info,
					 mtd->numeraseregions);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits = size/256, low 16 = blocks-1 */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest region's block size */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's layout across every chip in the set */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	/* Regions must tile the device exactly; anything else is bad CFI data */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
815
816 /*
817 * Return true if the chip is ready and has the correct value.
818 *
819 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
820 * non-suspended sector) and is indicated by no toggle bits toggling.
821 *
822 * Error are indicated by toggling bits or bits held with the wrong value,
823 * or with bits toggling.
824 *
825 * Note that anything more complicated than checking if no bits are toggling
826 * (including checking DQ5 for an error status) is tricky to get working
827 * correctly and is therefore not done (particularly with interleaved chips
828 * as each chip must be checked independently of the others).
829 */
/*
 * Test whether the chip has finished its current operation.  See the big
 * comment above for the DQ-toggle semantics.  When @expected is non-NULL
 * the array data must additionally read back as *expected.
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr, map_word *expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;
	int ret;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	/* Two back-to-back reads: equal values mean no bits are toggling */
	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	ret = map_word_equal(map, oldd, curd);

	if (!ret || !expected)
		return ret;

	/* Caller also wants the array to read back the expected datum */
	return map_word_equal(map, curd, *expected);
}
860
chip_good(struct map_info * map,struct flchip * chip,unsigned long addr,map_word * expected)861 static int __xipram chip_good(struct map_info *map, struct flchip *chip,
862 unsigned long addr, map_word *expected)
863 {
864 struct cfi_private *cfi = map->fldrv_priv;
865 map_word *datum = expected;
866
867 if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
868 datum = NULL;
869
870 return chip_ready(map, chip, addr, datum);
871 }
872
/*
 * Acquire the chip for an operation of type @mode (FL_READY, FL_POINT,
 * FL_WRITING, ...), suspending an in-progress erase when possible.
 *
 * Called with chip->mutex held; may drop and re-take it while waiting.
 * Returns 0 (mutex held) on success, -EIO on timeout or shutdown.  If an
 * erase was suspended on the caller's behalf, chip->oldstate is set to
 * FL_ERASING so put_chip() knows to resume it.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		/* Poll (dropping the mutex each iteration) until ready */
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		return 0;

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/*
		 * Suspend only if the chip advertises erase-suspend
		 * capability and the requested mode may run while
		 * suspended (reads always; writes only with
		 * EraseSuspend bit 1 set).
		 */
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend iff read/write to EB address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr, NULL))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		/* Chip is busy with something we cannot interrupt: sleep
		 * until woken by put_chip(), then start over. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
977
978
/*
 * Release the chip after get_chip(): resume a suspended erase if one was
 * suspended on our behalf, restore the previous state and wake up anyone
 * sleeping in get_chip().  Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		/* M29EW quirk handling must bracket the Erase-Resume */
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		/* Resume the suspended erase */
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* Nothing was suspended: nothing to resume */
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
1006
1007 #ifdef CONFIG_MTD_XIP
1008
1009 /*
1010 * No interrupt what so ever can be serviced while the flash isn't in array
1011 * mode. This is ensured by the xip_disable() and xip_enable() functions
1012 * enclosing any code path where the flash is known not to be in array mode.
1013 * And within a XIP disabled code path, only functions marked with __xipram
1014 * may be called and nothing else (it's a good thing to inspect generated
1015 * assembly to make sure inline functions were actually inlined and that gcc
1016 * didn't emit calls to its own support functions). Also configuring MTD CFI
1017 * support to a single buswidth and a single interleave is also recommended.
1018 */
1019
/*
 * Mask interrupts before the flash leaves array mode.  The dummy read
 * touches the page first so the MMU mapping is valid while IRQs are off.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
1027
/*
 * Put the flash back into array (read) mode if necessary, prime the
 * instruction prefetch and re-enable interrupts.  Pairs with a
 * preceding xip_disable().
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = AMD reset command: return the chip to array mode */
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
1041
1042 /*
1043 * When a delay is required for the flash operation to complete, the
1044 * xip_udelay() function is polling for both the given timeout and pending
1045 * (but still masked) hardware interrupts. Whenever there is an interrupt
1046 * pending then the flash erase operation is suspended, array mode restored
1047 * and interrupts unmasked. Task scheduling might also happen at that
1048 * point. The CPU eventually returns from the interrupt or the call to
1049 * schedule() and the suspended flash operation is resumed for the remaining
1050 * of the delay period.
1051 *
1052 * Warning: this function _will_ fool interrupt latency tracing tools.
1053 */
1054
/*
 * Busy-wait up to @usec microseconds for the operation at @adr to
 * complete.  If a (masked) interrupt becomes pending and the chip
 * supports erase suspend, the erase is suspended, array mode restored
 * and interrupts unmasked (possibly scheduling) before the erase is
 * resumed for the remainder of the delay.  See the comment block above.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			/*
			 * NOTE(review): if bit 0x40 (DQ6) is clear the
			 * operation appears already finished, so the
			 * suspend/resume dance is skipped entirely —
			 * confirm against the chip's data-polling spec.
			 */
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			/* Back to array mode, prefetch and unmask IRQs */
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
1146
1147 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1148
1149 /*
1150 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1151 * the flash is actively programming or erasing since we have to poll for
1152 * the operation to complete anyway. We can't do that in a generic way with
1153 * a XIP setup so do it before the actual flash operation in this case
1154 * and stub it out from INVALIDATE_CACHE_UDELAY.
1155 */
1156 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1157 INVALIDATE_CACHED_RANGE(map, from, size)
1158
1159 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1160 UDELAY(map, chip, adr, usec)
1161
1162 /*
1163 * Extra notes:
1164 *
1165 * Activating this XIP support changes the way the code works a bit. For
1166 * example the code to suspend the current process when concurrent access
1167 * happens is never executed because xip_udelay() will always return with the
1168 * same chip state as it was entered with. This is why there is no care for
1169 * the presence of add_wait_queue() or schedule() calls from within a couple
1170 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1171 * The queueing and scheduling are always happening within xip_udelay().
1172 *
1173 * Similarly, get_chip() and put_chip() just happen to always be executed
1174 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1175 * is in array mode, therefore never executing many cases therein and not
1176 * causing any problem with XIP.
1177 */
1178
1179 #else
1180
1181 #define xip_disable(map, chip, adr)
1182 #define xip_enable(map, chip, adr)
1183 #define XIP_INVAL_CACHED_RANGE(x...)
1184
/*
 * Non-XIP variants: drop the chip mutex around the busy-wait so other
 * users can access the chip meanwhile; INVALIDATE_CACHE_UDELAY also
 * invalidates the cached range, since the flash contents are changing
 * underneath it.
 */
#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)
1199
1200 #endif
1201
/*
 * Read @len bytes at chip-relative offset @adr from one chip into @buf.
 * Acquires the chip, forces it back to array mode if needed (0xF0
 * reset), copies the data and releases the chip again.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = AMD reset: force the chip into array (read) mode */
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
1232
1233
/*
 * mtd->_read implementation: split the request at chip boundaries and
 * hand each piece to do_read_onechip().  Stops at the first error or
 * when the request is satisfied; *retlen reflects the bytes read.
 */
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* Locate the chip containing 'from' and the offset within it */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	for (; len && chipnum < cfi->numchips; chipnum++, ofs = 0) {
		unsigned long thislen = len;

		/* Clamp this transfer to the end of the current chip */
		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;
	}
	return ret;
}
1270
1271 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1272 loff_t adr, size_t len, u_char *buf, size_t grouplen);
1273
/*
 * Map the SecSi (OTP) region into the address space: standard AA/55
 * unlock sequence followed by the 0x88 Enter-SecSi command.  Cached
 * data for the region becomes stale, hence the invalidation.
 */
static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* The OTP data now shadows the array: drop any cached copy */
	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
1288
/*
 * Leave the SecSi (OTP) region: unlock sequence plus the 0x90/0x00
 * Exit-SecSi command pair, then invalidate the cached range since the
 * array data is visible again.
 */
static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
1305
/*
 * Read @len bytes of SecSi (OTP) data from one chip.  Unlike
 * do_read_onechip() this does not go through get_chip(): it simply
 * sleeps until the chip is idle (FL_READY), since OTP access cannot be
 * mixed with suspended operations.  @grouplen is unused (otp_op_t
 * signature).
 */
static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		/* Chip busy: sleep until woken, then re-check from the top */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Map the OTP area in, copy it out, map it back out again */
	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
1341
/*
 * Read from the SecSi area, treating 'from' as a linear offset across
 * all chips with 8 SecSi bytes exposed per chip; spans chip boundaries
 * as needed.
 */
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* Clamp the transfer to this chip's 8-byte SecSi window */
		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
1380
1381 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1382 unsigned long adr, map_word datum,
1383 int mode);
1384
/*
 * Write @len OTP bytes at chip-relative offset @adr, one bus word at a
 * time.  For a partial word the current OTP contents are read first so
 * the untouched bytes are re-programmed with their existing value.
 * @grouplen is unused (otp_op_t signature).
 */
static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		/* Align down to bus width; 'gap' = offset within the word */
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}
1414
/*
 * Lock the chip's user OTP region by clearing bit 0 of the lock
 * register (M29EW-style; 0 = locked, matching the user_locked test in
 * cfi_amdstd_otp_walk()).  Only whole-group locking is supported, so
 * @adr must be 0 and @len must equal @grouplen.  @buf is unused
 * (otp_op_t signature).
 */
static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect the extended memory block (0 = locked) */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr, NULL))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}
1478
/*
 * Walk every chip's OTP (SecSi) region and either report the layout
 * (@action == NULL: fill @buf with struct otp_info records) or apply
 * @action (read/write/lock) to the region's data.
 *
 * @from is a linear offset across the concatenated OTP regions of all
 * chips; @user_regs selects the user-lockable region (1) or the
 * factory-locked region (0).  *retlen is set to the number of bytes
 * produced or consumed.  Only the Micron/ST M29EW family is currently
 * recognised; other chips report no OTP regions.
 *
 * Returns 0 on success or a negative error from chip access / @action;
 * -ENOSPC if the otp_info output buffer is too small.
 */
static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				/* bit 0 cleared means the block is locked */
				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;

			/*
			 * 'len' is unsigned, so the previous
			 * "subtract, then test <= 0" pattern could never
			 * detect a buffer smaller than one record (the
			 * subtraction wrapped around and the record was
			 * written past the end of 'buf').  Test before
			 * subtracting instead; an exact fit is still
			 * rejected, as before.
			 */
			if (len <= sizeof(*otpinfo))
				return -ENOSPC;
			len -= sizeof(*otpinfo);

			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;

			/* Clamp the request to what this region still holds */
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			/* Region lies entirely before 'from': skip it */
			from -= otpsize;
		}
	}
	return 0;
}
1596
/* mtd->_get_fact_prot_info: enumerate factory OTP regions (no action) */
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}
1603
/* mtd->_get_user_prot_info: enumerate user OTP regions (no action) */
static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}
1610
/* mtd->_read_fact_prot_reg: read from the factory OTP region */
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}
1618
/* mtd->_read_user_prot_reg: read from the user OTP region */
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}
1626
/* mtd->_write_user_prot_reg: program the user OTP region */
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  const u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
				   do_otp_write, 1);
}
1634
/* mtd->_lock_user_prot_reg: permanently lock the user OTP region */
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}
1642
/*
 * Issue a single word-program command sequence for @datum at @adr and
 * poll for completion.  Called with chip->mutex held, the chip obtained
 * via get_chip() and XIP disabled.  Returns 0 on success, -EIO on
 * timeout or error status.
 */
static int __xipram do_write_oneword_once(struct map_info *map,
					  struct flchip *chip,
					  unsigned long adr, map_word datum,
					  int mode, struct cfi_private *cfi)
{
	unsigned long timeo;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;

	/* AA/55 unlock, 0xA0 Program command, then write the datum itself */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, &datum)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, &datum)) {
			/* Data matches; also verify the error status bits */
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}
1713
do_write_oneword_start(struct map_info * map,struct flchip * chip,unsigned long adr,int mode)1714 static int __xipram do_write_oneword_start(struct map_info *map,
1715 struct flchip *chip,
1716 unsigned long adr, int mode)
1717 {
1718 int ret;
1719
1720 mutex_lock(&chip->mutex);
1721
1722 ret = get_chip(map, chip, adr, mode);
1723 if (ret) {
1724 mutex_unlock(&chip->mutex);
1725 return ret;
1726 }
1727
1728 if (mode == FL_OTP_WRITE)
1729 otp_enter(map, chip, adr, map_bankwidth(map));
1730
1731 return ret;
1732 }
1733
/*
 * Epilogue for do_write_oneword(): leave OTP mode if it was entered,
 * mark the chip ready, drop VPP, let put_chip() resume any suspended
 * erase, and release the mutex.
 */
static void __xipram do_write_oneword_done(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);

	mutex_unlock(&chip->mutex);
}
1747
do_write_oneword_retry(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum,int mode)1748 static int __xipram do_write_oneword_retry(struct map_info *map,
1749 struct flchip *chip,
1750 unsigned long adr, map_word datum,
1751 int mode)
1752 {
1753 struct cfi_private *cfi = map->fldrv_priv;
1754 int ret = 0;
1755 map_word oldd;
1756 int retry_cnt = 0;
1757
1758 /*
1759 * Check for a NOP for the case when the datum to write is already
1760 * present - it saves time and works around buggy chips that corrupt
1761 * data at other locations when 0xff is written to a location that
1762 * already contains 0xff.
1763 */
1764 oldd = map_read(map, adr);
1765 if (map_word_equal(map, oldd, datum)) {
1766 pr_debug("MTD %s(): NOP\n", __func__);
1767 return ret;
1768 }
1769
1770 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1771 ENABLE_VPP(map);
1772 xip_disable(map, chip, adr);
1773
1774 retry:
1775 ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1776 if (ret) {
1777 /* reset on all failures. */
1778 map_write(map, CMD(0xF0), chip->start);
1779 /* FIXME - should have reset delay before continuing */
1780
1781 if (++retry_cnt <= MAX_RETRIES)
1782 goto retry;
1783 }
1784 xip_enable(map, chip, adr);
1785
1786 return ret;
1787 }
1788
do_write_oneword(struct map_info * map,struct flchip * chip,unsigned long adr,map_word datum,int mode)1789 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1790 unsigned long adr, map_word datum,
1791 int mode)
1792 {
1793 int ret;
1794
1795 adr += chip->start;
1796
1797 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1798 datum.x[0]);
1799
1800 ret = do_write_oneword_start(map, chip, adr, mode);
1801 if (ret)
1802 return ret;
1803
1804 ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1805
1806 do_write_oneword_done(map, chip, adr, mode);
1807
1808 return ret;
1809 }
1810
1811
/*
 * mtd->_write implementation using single-word programming.  The
 * unaligned head and tail are handled by read-modify-write of the
 * containing bus word; the aligned middle is written one bus word at a
 * time, crossing chip boundaries as needed.
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			/* Chip busy: sleep until woken, then re-check */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Crossed into the next chip? */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			/* Chip busy: sleep until woken, then re-check */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		/* Read-modify-write the final partial word */
		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
1934
1935 #if !FORCE_WORD_WRITE
/*
 * Poll for completion of a buffered program started by do_write_buffer().
 * @adr/@datum identify the last word written so DQ polling can compare
 * against the expected data.  If the operation has been suspended the
 * caller's lock is dropped and we sleep until woken.  Called with
 * chip->mutex held; returns 0 on success, -EIO on timeout or on a
 * status-register error.
 */
static int __xipram do_write_buffer_wait(struct map_info *map,
					 struct flchip *chip, unsigned long adr,
					 map_word datum)
{
	unsigned long timeo;
	unsigned long u_write_timeout;
	int ret = 0;

	/*
	 * Timeout is calculated according to CFI data, if available.
	 * See more comments in cfi_cmdset_0002().
	 */
	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
	timeo = jiffies + u_write_timeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, &datum)) {
			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
			       __func__, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, &datum)) {
			/* Data matches; still check for a latched error */
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}
1990
/*
 * Issue the write-to-buffer-reset command sequence, aborting any failed
 * buffered program and returning the chip to read-array mode.
 */
static void __xipram do_write_buffer_reset(struct map_info *map,
					   struct flchip *chip,
					   struct cfi_private *cfi)
{
	/*
	 * Recovery from write-buffer programming failures requires
	 * the write-to-buffer-reset sequence.  Since the last part
	 * of the sequence also works as a normal reset, we can run
	 * the same commands regardless of why we are here.
	 * See e.g.
	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
	 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* FIXME - should have reset delay before continuing */
}
2012
2013 /*
2014 * FIXME: interleaved mode not tested, and probably not supported!
2015 */
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
/*
 * Program @len bytes (a whole number of bus words; the caller keeps us
 * inside one write-buffer block) at chip-relative @adr using the
 * write-buffer command sequence (0x25 load / 0x29 confirm).  On return
 * from the data loop, 'adr' points at the last word written and 'datum'
 * holds it, which is what the completion poll needs for DQ comparison.
 * Returns 0 on success or a negative errno.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);	/* word count minus one, per spec */
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	/* Point 'adr' at the last word written; 'datum' already holds it */
	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	ret = do_write_buffer_wait(map, chip, adr, datum);
	if (ret)
		do_write_buffer_reset(map, chip, cfi);

	xip_enable(map, chip, adr);

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2090
2091
/*
 * mtd->_write entry point using buffered programming.  Unaligned head
 * and sub-word tail bytes are delegated to cfi_amdstd_write_words();
 * the aligned middle is written in write-buffer-sized chunks that never
 * cross a write-block boundary.  *@retlen accumulates bytes written.
 */
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* Write-buffer size in bytes, from CFI query data */
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		/* Bytes needed to reach the next bus-word boundary */
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		/* Truncate to a whole number of bus words */
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Remaining tail (one word, or a partial word) goes word-at-a-time */
	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
2166 #endif /* !FORCE_WORD_WRITE */
2167
2168 /*
2169 * Wait for the flash chip to become ready to write data
2170 *
2171 * This is only called during the panic_write() path. When panic_write()
2172 * is called, the kernel is in the process of a panic, and will soon be
2173 * dead. Therefore we don't take any locks, and attempt to get access
2174 * to the chip as soon as possible.
2175 */
/*
 * Wait for the flash chip to become ready to write data
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 *
 * Returns 0 once the chip is ready, or -EBUSY if it never became ready
 * after repeated resets.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thinks the chip is idle, and no toggle bits
	 * are changing, then the chip is actually idle for sure.
	 */
	if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
		return 0;

	/*
	 * Try several times to reset the chip and then wait for it
	 * to become idle. The upper limit of a few milliseconds of
	 * delay isn't a big problem: the kernel is dying anyway. It
	 * is more important to save the messages.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* send the reset command */
		map_write(map, CMD(0xF0), chip->start);

		/* wait for the chip to become ready, busy-polling in 1us steps */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, chip, adr, NULL))
				return 0;

			udelay(1);
		}

		retries--;
	}

	/* the chip never became ready */
	return -EBUSY;
}
2216
2217 /*
2218 * Write out one word of data to a single flash chip during a kernel panic
2219 *
2220 * This is only called during the panic_write() path. When panic_write()
2221 * is called, the kernel is in the process of a panic, and will soon be
2222 * dead. Therefore we don't take any locks, and attempt to get access
2223 * to the chip as soon as possible.
2224 *
2225 * The implementation of this routine is intentionally similar to
2226 * do_write_oneword(), in order to ease code maintenance.
2227 */
/*
 * Write out one word of data to a single flash chip during a kernel panic
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 *
 * The implementation of this routine is intentionally similar to
 * do_write_oneword(), in order to ease code maintenance.
 *
 * Returns 0 on success (or NOP), a negative errno on failure.
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret;
	int i;

	adr += chip->start;

	/* ret is 0 here on success; reused as the final status below */
	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

 retry:
	/* Standard AMD single-word program sequence: AA / 55 / A0, then data */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	/* Busy-poll for completion; no sleeping in the panic path */
	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, chip, adr, NULL))
			break;

		udelay(1);
	}

	/* Final check compares against the expected datum as well */
	if (!chip_ready(map, chip, adr, &datum) ||
	    cfi_check_err_status(map, chip, adr)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES)
			goto retry;

		ret = -EIO;
	}

 op_done:
	DISABLE_VPP(map);
	return ret;
}
2290
2291 /*
2292 * Write out some data during a kernel panic
2293 *
2294 * This is used by the mtdoops driver to save the dying messages from a
2295 * kernel which has panic'd.
2296 *
2297 * This routine ignores all of the locking used throughout the rest of the
2298 * driver, in order to ensure that the data gets written out no matter what
2299 * state this driver (and the flash chip itself) was in when the kernel crashed.
2300 *
2301 * The implementation of this routine is intentionally similar to
2302 * cfi_amdstd_write_words(), in order to ease code maintenance.
2303 */
/*
 * Write out some data during a kernel panic
 *
 * This is used by the mtdoops driver to save the dying messages from a
 * kernel which has panic'd.
 *
 * This routine ignores all of the locking used throughout the rest of the
 * driver, in order to ensure that the data gets written out no matter what
 * state this driver (and the flash chip itself) was in when the kernel crashed.
 *
 * The implementation of this routine is intentionally similar to
 * cfi_amdstd_write_words(), in order to ease code maintenance.
 */
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, chipstart;
	int ret;
	int chipnum;

	/* Locate the chip containing 'to' and the offset within it */
	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus aligned, do the first byte write */
	if (ofs & (map_bankwidth(map) - 1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
		int i = ofs - bus_ofs;	/* byte offset within the bus word */
		int n = 0;
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
		if (ret)
			return ret;

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		/* Merge new bytes into the old word, then program it */
		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		/* Advance to the next chip if we crossed a chip boundary */
		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;

			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map) - 1)) {
		map_word tmp_buf;

		ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
		if (ret)
			return ret;

		/* Read-modify-write the final partial word */
		tmp_buf = map_read(map, ofs + chipstart);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
					     ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
2402
2403
2404 /*
2405 * Handle devices with one erase region, that only implement
2406 * the chip erase command.
2407 */
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
/*
 * Erase the entire chip using the AMD chip-erase sequence
 * (AA/55/80/AA/55/10).  Completion is polled against the all-ones
 * pattern; on error the chip is reset and the sequence retried up to
 * MAX_RETRIES times.  Returns 0 on success or -EIO.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret;
	int retry_cnt = 0;
	/* After a full erase the array reads back all-ones */
	map_word datum = map_word_ff(map);

	/* Poll at the first unlock address; any array address would do */
	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(map->size - 1);

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, chip, adr, &datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2507
2508
/*
 * Erase a single @len-byte block at chip-relative @adr using the AMD
 * sector-erase sequence (AA/55/80/AA/55 + sector_erase_cmd at the
 * sector address).  Completion is polled against the all-ones pattern;
 * on error the chip is reset and the sequence retried up to MAX_RETRIES
 * times.  Returns 0 on success or -EIO.
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int ret;
	int retry_cnt = 0;
	/* After erase the sector reads back all-ones */
	map_word datum = map_word_ff(map);

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
		 __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, cfi->sector_erase_cmd, adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, chip, adr, &datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2606
2607
cfi_amdstd_erase_varsize(struct mtd_info * mtd,struct erase_info * instr)2608 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2609 {
2610 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2611 instr->len, NULL);
2612 }
2613
2614
cfi_amdstd_erase_chip(struct mtd_info * mtd,struct erase_info * instr)2615 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2616 {
2617 struct map_info *map = mtd->priv;
2618 struct cfi_private *cfi = map->fldrv_priv;
2619
2620 if (instr->addr != 0)
2621 return -EINVAL;
2622
2623 if (instr->len != mtd->size)
2624 return -EINVAL;
2625
2626 return do_erase_chip(map, &cfi->chips[0]);
2627 }
2628
/*
 * Softlock the sector at chip-relative @adr on an Atmel chip using the
 * AA/55/80/AA/55 prefix followed by the 0x40 lock command written to
 * the sector's absolute map address.  Returns 0 on success or a
 * negative errno from get_chip().
 */
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* Lock command targets the absolute sector address */
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
2663
do_atmel_unlock(struct map_info * map,struct flchip * chip,unsigned long adr,int len,void * thunk)2664 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2665 unsigned long adr, int len, void *thunk)
2666 {
2667 struct cfi_private *cfi = map->fldrv_priv;
2668 int ret;
2669
2670 mutex_lock(&chip->mutex);
2671 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2672 if (ret)
2673 goto out_unlock;
2674 chip->state = FL_UNLOCKING;
2675
2676 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2677
2678 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2679 cfi->device_type, NULL);
2680 map_write(map, CMD(0x70), adr);
2681
2682 chip->state = FL_READY;
2683 put_chip(map, chip, adr + chip->start);
2684 ret = 0;
2685
2686 out_unlock:
2687 mutex_unlock(&chip->mutex);
2688 return ret;
2689 }
2690
/*
 * mtd->_lock entry point: apply the Atmel softlock to every sector in
 * [ofs, ofs + len).
 */
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

	ret = cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
	return ret;
}
2695
/*
 * mtd->_unlock entry point: remove the Atmel softlock from every
 * sector in [ofs, ofs + len).
 */
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

	ret = cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
	return ret;
}
2700
2701 /*
2702 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2703 */
2704
/*
 * Per-sector bookkeeping used by cfi_ppb_unlock(): since PPB unlocking
 * clears every sector's PPB, the previous lock state of sectors outside
 * the requested range is saved here so it can be restored afterwards.
 */
struct ppb_lock {
	struct flchip *chip;	/* chip this sector lives on */
	unsigned long adr;	/* chip-relative sector address */
	int locked;		/* nonzero if the sector was locked */
};

/* 'thunk' selectors for do_ppb_xxlock() */
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
2714
/*
 * Perform one PPB operation, selected by @thunk: lock the sector at
 * @adr (DO_XXLOCK_ONEBLOCK_LOCK), unlock ALL sectors of the chip
 * (DO_XXLOCK_ONEBLOCK_UNLOCK — per-sector unlock is not supported), or
 * query the sector's lock state (DO_XXLOCK_ONEBLOCK_GETLOCK, returning
 * nonzero when locked).  Enters PPB command mode (AA/55/C0), performs
 * the operation, polls for readiness, then exits with 0x90/0x00.
 */
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
					struct flchip *chip,
					unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	int ret;

	adr += chip->start;
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* PPB entry command */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		chip->state = FL_LOCKING;
		map_write(map, CMD(0xA0), adr);
		map_write(map, CMD(0x00), adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * Unlocking of one specific sector is not supported, so we
		 * have to unlock all sectors of this device instead
		 */
		chip->state = FL_UNLOCKING;
		map_write(map, CMD(0x80), chip->start);
		map_write(map, CMD(0x30), chip->start);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
		chip->state = FL_JEDEC_QUERY;
		/* Return locked status: 0->locked, 1->unlocked */
		ret = !cfi_read_query(map, adr);
	} else
		BUG();

	/*
	 * Wait for some time as unlocking of all sectors takes quite long
	 */
	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
	for (;;) {
		if (chip_ready(map, chip, adr, NULL))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}

		UDELAY(map, chip, adr, 1);
	}

	/* Exit BC commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
2787
cfi_ppb_lock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2788 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2789 uint64_t len)
2790 {
2791 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2792 DO_XXLOCK_ONEBLOCK_LOCK);
2793 }
2794
cfi_ppb_unlock(struct mtd_info * mtd,loff_t ofs,uint64_t len)2795 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2796 uint64_t len)
2797 {
2798 struct mtd_erase_region_info *regions = mtd->eraseregions;
2799 struct map_info *map = mtd->priv;
2800 struct cfi_private *cfi = map->fldrv_priv;
2801 struct ppb_lock *sect;
2802 unsigned long adr;
2803 loff_t offset;
2804 uint64_t length;
2805 int chipnum;
2806 int i;
2807 int sectors;
2808 int ret;
2809 int max_sectors;
2810
2811 /*
2812 * PPB unlocking always unlocks all sectors of the flash chip.
2813 * We need to re-lock all previously locked sectors. So lets
2814 * first check the locking status of all sectors and save
2815 * it for future use.
2816 */
2817 max_sectors = 0;
2818 for (i = 0; i < mtd->numeraseregions; i++)
2819 max_sectors += regions[i].numblocks;
2820
2821 sect = kzalloc_objs(struct ppb_lock, max_sectors);
2822 if (!sect)
2823 return -ENOMEM;
2824
2825 /*
2826 * This code to walk all sectors is a slightly modified version
2827 * of the cfi_varsize_frob() code.
2828 */
2829 i = 0;
2830 chipnum = 0;
2831 adr = 0;
2832 sectors = 0;
2833 offset = 0;
2834 length = mtd->size;
2835
2836 while (length) {
2837 int size = regions[i].erasesize;
2838
2839 /*
2840 * Only test sectors that shall not be unlocked. The other
2841 * sectors shall be unlocked, so lets keep their locking
2842 * status at "unlocked" (locked=0) for the final re-locking.
2843 */
2844 if ((offset < ofs) || (offset >= (ofs + len))) {
2845 sect[sectors].chip = &cfi->chips[chipnum];
2846 sect[sectors].adr = adr;
2847 sect[sectors].locked = do_ppb_xxlock(
2848 map, &cfi->chips[chipnum], adr, 0,
2849 DO_XXLOCK_ONEBLOCK_GETLOCK);
2850 }
2851
2852 adr += size;
2853 offset += size;
2854 length -= size;
2855
2856 if (offset == regions[i].offset + size * regions[i].numblocks)
2857 i++;
2858
2859 if (adr >> cfi->chipshift) {
2860 if (offset >= (ofs + len))
2861 break;
2862 adr = 0;
2863 chipnum++;
2864
2865 if (chipnum >= cfi->numchips)
2866 break;
2867 }
2868
2869 sectors++;
2870 if (sectors >= max_sectors) {
2871 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2872 max_sectors);
2873 kfree(sect);
2874 return -EINVAL;
2875 }
2876 }
2877
2878 /* Now unlock the whole chip */
2879 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2880 DO_XXLOCK_ONEBLOCK_UNLOCK);
2881 if (ret) {
2882 kfree(sect);
2883 return ret;
2884 }
2885
2886 /*
2887 * PPB unlocking always unlocks all sectors of the flash chip.
2888 * We need to re-lock all previously locked sectors.
2889 */
2890 for (i = 0; i < sectors; i++) {
2891 if (sect[i].locked)
2892 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2893 DO_XXLOCK_ONEBLOCK_LOCK);
2894 }
2895
2896 kfree(sect);
2897 return ret;
2898 }
2899
cfi_ppb_is_locked(struct mtd_info * mtd,loff_t ofs,uint64_t len)2900 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2901 uint64_t len)
2902 {
2903 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2904 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2905 }
2906
/*
 * mtd->_sync entry point: wait until every chip is idle, mark them all
 * FL_SYNCING to hold off new operations, then release them again.
 * NOTE(review): 'ret' is never modified here, so the first loop always
 * visits every chip; the '!ret' guard mirrors cfi_amdstd_suspend().
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			/* Idle: claim the chip for syncing */
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			fallthrough;
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
2967
2968
cfi_amdstd_suspend(struct mtd_info * mtd)2969 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2970 {
2971 struct map_info *map = mtd->priv;
2972 struct cfi_private *cfi = map->fldrv_priv;
2973 int i;
2974 struct flchip *chip;
2975 int ret = 0;
2976
2977 for (i=0; !ret && i<cfi->numchips; i++) {
2978 chip = &cfi->chips[i];
2979
2980 mutex_lock(&chip->mutex);
2981
2982 switch(chip->state) {
2983 case FL_READY:
2984 case FL_STATUS:
2985 case FL_CFI_QUERY:
2986 case FL_JEDEC_QUERY:
2987 chip->oldstate = chip->state;
2988 chip->state = FL_PM_SUSPENDED;
2989 /* No need to wake_up() on this state change -
2990 * as the whole point is that nobody can do anything
2991 * with the chip now anyway.
2992 */
2993 break;
2994 case FL_PM_SUSPENDED:
2995 break;
2996
2997 default:
2998 ret = -EAGAIN;
2999 break;
3000 }
3001 mutex_unlock(&chip->mutex);
3002 }
3003
3004 /* Unlock the chips again */
3005
3006 if (ret) {
3007 for (i--; i >=0; i--) {
3008 chip = &cfi->chips[i];
3009
3010 mutex_lock(&chip->mutex);
3011
3012 if (chip->state == FL_PM_SUSPENDED) {
3013 chip->state = chip->oldstate;
3014 wake_up(&chip->wq);
3015 }
3016 mutex_unlock(&chip->mutex);
3017 }
3018 }
3019
3020 return ret;
3021 }
3022
3023
cfi_amdstd_resume(struct mtd_info * mtd)3024 static void cfi_amdstd_resume(struct mtd_info *mtd)
3025 {
3026 struct map_info *map = mtd->priv;
3027 struct cfi_private *cfi = map->fldrv_priv;
3028 int i;
3029 struct flchip *chip;
3030
3031 for (i=0; i<cfi->numchips; i++) {
3032
3033 chip = &cfi->chips[i];
3034
3035 mutex_lock(&chip->mutex);
3036
3037 if (chip->state == FL_PM_SUSPENDED) {
3038 chip->state = FL_READY;
3039 map_write(map, CMD(0xF0), chip->start);
3040 wake_up(&chip->wq);
3041 }
3042 else
3043 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3044
3045 mutex_unlock(&chip->mutex);
3046 }
3047 }
3048
3049
3050 /*
3051 * Ensure that the flash device is put back into read array mode before
3052 * unloading the driver or rebooting. On some systems, rebooting while
3053 * the flash is in query/program/erase mode will prevent the CPU from
3054 * fetching the bootloader code, requiring a hard reset or power cycle.
3055 */
cfi_amdstd_reset(struct mtd_info * mtd)3056 static int cfi_amdstd_reset(struct mtd_info *mtd)
3057 {
3058 struct map_info *map = mtd->priv;
3059 struct cfi_private *cfi = map->fldrv_priv;
3060 int i, ret;
3061 struct flchip *chip;
3062
3063 for (i = 0; i < cfi->numchips; i++) {
3064
3065 chip = &cfi->chips[i];
3066
3067 mutex_lock(&chip->mutex);
3068
3069 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3070 if (!ret) {
3071 map_write(map, CMD(0xF0), chip->start);
3072 chip->state = FL_SHUTDOWN;
3073 put_chip(map, chip, chip->start);
3074 }
3075
3076 mutex_unlock(&chip->mutex);
3077 }
3078
3079 return 0;
3080 }
3081
3082
cfi_amdstd_reboot(struct notifier_block * nb,unsigned long val,void * v)3083 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3084 void *v)
3085 {
3086 struct mtd_info *mtd;
3087
3088 mtd = container_of(nb, struct mtd_info, reboot_notifier);
3089 cfi_amdstd_reset(mtd);
3090 return NOTIFY_DONE;
3091 }
3092
3093
cfi_amdstd_destroy(struct mtd_info * mtd)3094 static void cfi_amdstd_destroy(struct mtd_info *mtd)
3095 {
3096 struct map_info *map = mtd->priv;
3097 struct cfi_private *cfi = map->fldrv_priv;
3098
3099 cfi_amdstd_reset(mtd);
3100 unregister_reboot_notifier(&mtd->reboot_notifier);
3101 kfree(cfi->cmdset_priv);
3102 kfree(cfi->cfiq);
3103 kfree(cfi);
3104 kfree(mtd->eraseregions);
3105 }
3106
3107 MODULE_LICENSE("GPL");
3108 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3109 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3110 MODULE_ALIAS("cfi_cmdset_0006");
3111 MODULE_ALIAS("cfi_cmdset_0701");
3112