1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Common Flash Interface support: 4 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 5 * 6 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 7 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 8 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 9 * 10 * 2_by_8 routines added by Simon Munton 11 * 12 * 4_by_16 work by Carolyn J. Smith 13 * 14 * XIP support hooks by Vitaly Wool (based on code for Intel flash 15 * by Nicolas Pitre) 16 * 17 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 18 * 19 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 20 */ 21 22 #include <linux/module.h> 23 #include <linux/types.h> 24 #include <linux/kernel.h> 25 #include <linux/sched.h> 26 #include <asm/io.h> 27 #include <asm/byteorder.h> 28 29 #include <linux/errno.h> 30 #include <linux/slab.h> 31 #include <linux/delay.h> 32 #include <linux/interrupt.h> 33 #include <linux/reboot.h> 34 #include <linux/of.h> 35 #include <linux/mtd/map.h> 36 #include <linux/mtd/mtd.h> 37 #include <linux/mtd/cfi.h> 38 #include <linux/mtd/xip.h> 39 40 #define AMD_BOOTLOC_BUG 41 #define FORCE_WORD_WRITE 0 42 43 #define MAX_RETRIES 3 44 45 #define SST49LF004B 0x0060 46 #define SST49LF040B 0x0050 47 #define SST49LF008A 0x005a 48 #define AT49BV6416 0x00d6 49 #define S29GL064N_MN12 0x0c01 50 51 /* 52 * Status Register bit description. Used by flash devices that don't 53 * support DQ polling (e.g. HyperFlash) 54 */ 55 #define CFI_SR_DRB BIT(7) 56 #define CFI_SR_ESB BIT(5) 57 #define CFI_SR_PSB BIT(4) 58 #define CFI_SR_WBASB BIT(3) 59 #define CFI_SR_SLSB BIT(1) 60 61 enum cfi_quirks { 62 CFI_QUIRK_DQ_TRUE_DATA = BIT(0), 63 }; 64 65 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 66 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 67 #if !FORCE_WORD_WRITE 68 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 69 #endif 70 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 71 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 72 static void cfi_amdstd_sync (struct mtd_info *); 73 static int cfi_amdstd_suspend (struct mtd_info *); 74 static void cfi_amdstd_resume (struct mtd_info *); 75 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 76 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t, 77 size_t *, struct otp_info *); 78 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t, 79 size_t *, struct otp_info *); 80 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 81 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t, 82 size_t *, u_char *); 83 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t, 84 size_t *, u_char *); 85 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t, 86 size_t *, const u_char *); 87 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t); 88 89 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 90 size_t *retlen, const u_char *buf); 91 92 static void cfi_amdstd_destroy(struct mtd_info *); 93 94 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 95 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 96 97 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long 
adr, int mode); 98 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 99 #include "fwh_lock.h" 100 101 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 102 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 103 104 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 105 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 106 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); 107 108 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 109 .probe = NULL, /* Not usable directly */ 110 .destroy = cfi_amdstd_destroy, 111 .name = "cfi_cmdset_0002", 112 .module = THIS_MODULE 113 }; 114 115 /* 116 * Use status register to poll for Erase/write completion when DQ is not 117 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in 118 * CFI Primary Vendor-Specific Extended Query table 1.5 119 */ 120 static int cfi_use_status_reg(struct cfi_private *cfi) 121 { 122 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 123 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; 124 125 return extp && extp->MinorVersion >= '5' && 126 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; 127 } 128 129 static int cfi_check_err_status(struct map_info *map, struct flchip *chip, 130 unsigned long adr) 131 { 132 struct cfi_private *cfi = map->fldrv_priv; 133 map_word status; 134 135 if (!cfi_use_status_reg(cfi)) 136 return 0; 137 138 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 139 cfi->device_type, NULL); 140 status = map_read(map, adr); 141 142 /* The error bits are invalid while the chip's busy */ 143 if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB))) 144 return 0; 145 146 if (map_word_bitsset(map, status, CMD(0x3a))) { 147 unsigned long chipstatus = MERGESTATUS(status); 148 149 if (chipstatus & CFI_SR_ESB) 150 pr_err("%s erase operation failed, status %lx\n", 151 map->name, chipstatus); 152 if (chipstatus & CFI_SR_PSB) 153 pr_err("%s program operation failed, status %lx\n", 154 map->name, chipstatus); 155 if (chipstatus & CFI_SR_WBASB) 156 pr_err("%s buffer program command aborted, status %lx\n", 157 map->name, chipstatus); 158 if (chipstatus & CFI_SR_SLSB) 159 pr_err("%s sector write protected, status %lx\n", 160 map->name, chipstatus); 161 162 /* Erase/Program status bits are set on the operation failure */ 163 if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB)) 164 return 1; 165 } 166 return 0; 167 } 168 169 /* #define DEBUG_CFI_FEATURES */ 170 171 172 #ifdef DEBUG_CFI_FEATURES 173 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 174 { 175 const char* erase_suspend[3] = { 176 "Not supported", "Read only", "Read/write" 177 }; 178 const char* top_bottom[6] = { 179 "No WP", "8x8KiB sectors at top & bottom, no WP", 180 "Bottom boot", "Top boot", 181 "Uniform, Bottom WP", "Uniform, Top WP" 182 }; 183 184 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 185 printk(" Address sensitive unlock: %s\n", 186 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 187 188 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 189 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 190 else 191 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 192 193 if (extp->BlkProt == 0) 194 printk(" Block protection: Not supported\n"); 195 else 196 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 197 198 199 printk(" Temporary block unprotect: %s\n", 200 extp->TmpBlkUnprotect ? 
"Supported" : "Not supported"); 201 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 202 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 203 printk(" Burst mode: %s\n", 204 extp->BurstMode ? "Supported" : "Not supported"); 205 if (extp->PageMode == 0) 206 printk(" Page mode: Not supported\n"); 207 else 208 printk(" Page mode: %d word page\n", extp->PageMode << 2); 209 210 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 211 extp->VppMin >> 4, extp->VppMin & 0xf); 212 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 213 extp->VppMax >> 4, extp->VppMax & 0xf); 214 215 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 216 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 217 else 218 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 219 } 220 #endif 221 222 #ifdef AMD_BOOTLOC_BUG 223 /* Wheee. Bring me the head of someone at AMD. */ 224 static void fixup_amd_bootblock(struct mtd_info *mtd) 225 { 226 struct map_info *map = mtd->priv; 227 struct cfi_private *cfi = map->fldrv_priv; 228 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 229 __u8 major = extp->MajorVersion; 230 __u8 minor = extp->MinorVersion; 231 232 if (((major << 8) | minor) < 0x3131) { 233 /* CFI version 1.0 => don't trust bootloc */ 234 235 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 236 map->name, cfi->mfr, cfi->id); 237 238 /* AFAICS all 29LV400 with a bottom boot block have a device ID 239 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 240 * These were badly detected as they have the 0x80 bit set 241 * so treat them as a special case. 242 */ 243 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 244 245 /* Macronix added CFI to their 2nd generation 246 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 247 * Fujitsu, Spansion, EON, ESI and older Macronix) 248 * has CFI. 249 * 250 * Therefore also check the manufacturer. 251 * This reduces the risk of false detection due to 252 * the 8-bit device ID. 253 */ 254 (cfi->mfr == CFI_MFR_MACRONIX)) { 255 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 256 " detected\n", map->name); 257 extp->TopBottom = 2; /* bottom boot */ 258 } else 259 if (cfi->id & 0x80) { 260 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 261 extp->TopBottom = 3; /* top boot */ 262 } else { 263 extp->TopBottom = 2; /* bottom boot */ 264 } 265 266 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 267 " deduced %s from Device ID\n", map->name, major, minor, 268 extp->TopBottom == 2 ? 
"bottom" : "top"); 269 } 270 } 271 #endif 272 273 #if !FORCE_WORD_WRITE 274 static void fixup_use_write_buffers(struct mtd_info *mtd) 275 { 276 struct map_info *map = mtd->priv; 277 struct cfi_private *cfi = map->fldrv_priv; 278 279 if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201) 280 return; 281 282 if (cfi->cfiq->BufWriteTimeoutTyp) { 283 pr_debug("Using buffer write method\n"); 284 mtd->_write = cfi_amdstd_write_buffers; 285 } 286 } 287 #endif /* !FORCE_WORD_WRITE */ 288 289 /* Atmel chips don't use the same PRI format as AMD chips */ 290 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 291 { 292 struct map_info *map = mtd->priv; 293 struct cfi_private *cfi = map->fldrv_priv; 294 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 295 struct cfi_pri_atmel atmel_pri; 296 297 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 298 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 299 300 if (atmel_pri.Features & 0x02) 301 extp->EraseSuspend = 2; 302 303 /* Some chips got it backwards... */ 304 if (cfi->id == AT49BV6416) { 305 if (atmel_pri.BottomBoot) 306 extp->TopBottom = 3; 307 else 308 extp->TopBottom = 2; 309 } else { 310 if (atmel_pri.BottomBoot) 311 extp->TopBottom = 2; 312 else 313 extp->TopBottom = 3; 314 } 315 316 /* burst write mode not supported */ 317 cfi->cfiq->BufWriteTimeoutTyp = 0; 318 cfi->cfiq->BufWriteTimeoutMax = 0; 319 } 320 321 static void fixup_use_secsi(struct mtd_info *mtd) 322 { 323 /* Setup for chips with a secsi area */ 324 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 325 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 326 } 327 328 static void fixup_use_erase_chip(struct mtd_info *mtd) 329 { 330 struct map_info *map = mtd->priv; 331 struct cfi_private *cfi = map->fldrv_priv; 332 if ((cfi->cfiq->NumEraseRegions == 1) && 333 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 334 mtd->_erase = cfi_amdstd_erase_chip; 335 } 336 337 } 338 339 /* 340 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 341 * locked by default. 342 */ 343 static void fixup_use_atmel_lock(struct mtd_info *mtd) 344 { 345 mtd->_lock = cfi_atmel_lock; 346 mtd->_unlock = cfi_atmel_unlock; 347 mtd->flags |= MTD_POWERUP_LOCK; 348 } 349 350 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 351 { 352 struct map_info *map = mtd->priv; 353 struct cfi_private *cfi = map->fldrv_priv; 354 355 /* 356 * These flashes report two separate eraseblock regions based on the 357 * sector_erase-size and block_erase-size, although they both operate on the 358 * same memory. This is not allowed according to CFI, so we just pick the 359 * sector_erase-size. 
360 */ 361 cfi->cfiq->NumEraseRegions = 1; 362 } 363 364 static void fixup_sst39vf(struct mtd_info *mtd) 365 { 366 struct map_info *map = mtd->priv; 367 struct cfi_private *cfi = map->fldrv_priv; 368 369 fixup_old_sst_eraseregion(mtd); 370 371 cfi->addr_unlock1 = 0x5555; 372 cfi->addr_unlock2 = 0x2AAA; 373 } 374 375 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 376 { 377 struct map_info *map = mtd->priv; 378 struct cfi_private *cfi = map->fldrv_priv; 379 380 fixup_old_sst_eraseregion(mtd); 381 382 cfi->addr_unlock1 = 0x555; 383 cfi->addr_unlock2 = 0x2AA; 384 385 cfi->sector_erase_cmd = CMD(0x50); 386 } 387 388 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 389 { 390 struct map_info *map = mtd->priv; 391 struct cfi_private *cfi = map->fldrv_priv; 392 393 fixup_sst39vf_rev_b(mtd); 394 395 /* 396 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 397 * it should report a size of 8KBytes (0x0020*256). 398 */ 399 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 400 pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", 401 mtd->name); 402 } 403 404 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 405 { 406 struct map_info *map = mtd->priv; 407 struct cfi_private *cfi = map->fldrv_priv; 408 409 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 410 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 411 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", 412 mtd->name); 413 } 414 } 415 416 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 417 { 418 struct map_info *map = mtd->priv; 419 struct cfi_private *cfi = map->fldrv_priv; 420 421 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 422 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 423 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", 424 mtd->name); 425 } 426 } 427 428 static void fixup_s29ns512p_sectors(struct mtd_info *mtd) 429 { 430 struct map_info *map = mtd->priv; 431 struct cfi_private *cfi = map->fldrv_priv; 432 433 /* 434 * S29NS512P flash uses more than 8bits to report number of sectors, 435 * which is not permitted by CFI. 
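	 *
	 * The replacement value below uses the encoding that
	 * cfi_amdstd_setup() decodes: the low 16 bits hold the number of
	 * sectors minus one (0x01ff -> 512 sectors) and the upper 16 bits
	 * hold the sector size in 256-byte units (0x0200 -> 128 KiB).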
436 */ 437 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; 438 pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", 439 mtd->name); 440 } 441 442 static void fixup_quirks(struct mtd_info *mtd) 443 { 444 struct map_info *map = mtd->priv; 445 struct cfi_private *cfi = map->fldrv_priv; 446 447 if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12) 448 cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA; 449 } 450 451 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 452 static struct cfi_fixup cfi_nopri_fixup_table[] = { 453 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 454 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 455 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 456 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 457 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 458 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 459 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 460 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 461 { 0, 0, NULL } 462 }; 463 464 static struct cfi_fixup cfi_fixup_table[] = { 465 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 466 #ifdef AMD_BOOTLOC_BUG 467 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 468 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 469 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 470 #endif 471 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 472 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 473 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 474 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 475 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 476 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 477 { CFI_MFR_AMD, S29GL064N_MN12, fixup_s29gl064n_sectors }, 478 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 479 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 480 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 481 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, 482 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 483 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 484 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 485 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 486 #if !FORCE_WORD_WRITE 487 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 488 #endif 489 { CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks }, 490 { 0, 0, NULL } 491 }; 492 static struct cfi_fixup jedec_fixup_table[] = { 493 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 494 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 495 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 496 { 0, 0, NULL } 497 }; 498 499 static struct cfi_fixup fixup_table[] = { 500 /* The CFI vendor ids and the JEDEC vendor IDs appear 501 * to be common. It is like the devices id's are as 502 * well. This table is to pick all cases where 503 * we know that is the case. 504 */ 505 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 506 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 507 { 0, 0, NULL } 508 }; 509 510 511 static void cfi_fixup_major_minor(struct cfi_private *cfi, 512 struct cfi_pri_amdstd *extp) 513 { 514 if (cfi->mfr == CFI_MFR_SAMSUNG) { 515 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 516 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 517 /* 518 * Samsung K8P2815UQB and K8D6x16UxM chips 519 * report major=0 / minor=0. 520 * K8D3x16UxC chips report major=3 / minor=3. 
521 */ 522 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 523 " Extended Query version to 1.%c\n", 524 extp->MinorVersion); 525 extp->MajorVersion = '1'; 526 } 527 } 528 529 /* 530 * SST 38VF640x chips report major=0xFF / minor=0xFF. 531 */ 532 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 533 extp->MajorVersion = '1'; 534 extp->MinorVersion = '0'; 535 } 536 } 537 538 static int is_m29ew(struct cfi_private *cfi) 539 { 540 if (cfi->mfr == CFI_MFR_INTEL && 541 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || 542 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) 543 return 1; 544 return 0; 545 } 546 547 /* 548 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: 549 * Some revisions of the M29EW suffer from erase suspend hang ups. In 550 * particular, it can occur when the sequence 551 * Erase Confirm -> Suspend -> Program -> Resume 552 * causes a lockup due to internal timing issues. The consequence is that the 553 * erase cannot be resumed without inserting a dummy command after programming 554 * and prior to resuming. [...] The work-around is to issue a dummy write cycle 555 * that writes an F0 command code before the RESUME command. 556 */ 557 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, 558 unsigned long adr) 559 { 560 struct cfi_private *cfi = map->fldrv_priv; 561 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ 562 if (is_m29ew(cfi)) 563 map_write(map, CMD(0xF0), adr); 564 } 565 566 /* 567 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: 568 * 569 * Some revisions of the M29EW (for example, A1 and A2 step revisions) 570 * are affected by a problem that could cause a hang up when an ERASE SUSPEND 571 * command is issued after an ERASE RESUME operation without waiting for a 572 * minimum delay. The result is that once the ERASE seems to be completed 573 * (no bits are toggling), the contents of the Flash memory block on which 574 * the erase was ongoing could be inconsistent with the expected values 575 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 576 * values), causing a consequent failure of the ERASE operation. 577 * The occurrence of this issue could be high, especially when file system 578 * operations on the Flash are intensive. As a result, it is recommended 579 * that a patch be applied. Intensive file system operations can cause many 580 * calls to the garbage routine to free Flash space (also by erasing physical 581 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME 582 * commands can occur. The problem disappears when a delay is inserted after 583 * the RESUME command by using the udelay() function available in Linux. 584 * The DELAY value must be tuned based on the customer's platform. 585 * The maximum value that fixes the problem in all cases is 500us. 586 * But, in our experience, a delay of 30 µs to 50 µs is sufficient 587 * in most cases. 588 * We have chosen 500µs because this latency is acceptable. 
589 */ 590 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) 591 { 592 /* 593 * Resolving the Delay After Resume Issue see Micron TN-13-07 594 * Worst case delay must be 500µs but 30-50µs should be ok as well 595 */ 596 if (is_m29ew(cfi)) 597 cfi_udelay(500); 598 } 599 600 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 601 { 602 struct cfi_private *cfi = map->fldrv_priv; 603 struct device_node __maybe_unused *np = map->device_node; 604 struct mtd_info *mtd; 605 int i; 606 607 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 608 if (!mtd) 609 return NULL; 610 mtd->priv = map; 611 mtd->type = MTD_NORFLASH; 612 613 /* Fill in the default mtd operations */ 614 mtd->_erase = cfi_amdstd_erase_varsize; 615 mtd->_write = cfi_amdstd_write_words; 616 mtd->_read = cfi_amdstd_read; 617 mtd->_sync = cfi_amdstd_sync; 618 mtd->_suspend = cfi_amdstd_suspend; 619 mtd->_resume = cfi_amdstd_resume; 620 mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg; 621 mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg; 622 mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info; 623 mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info; 624 mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg; 625 mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg; 626 mtd->flags = MTD_CAP_NORFLASH; 627 mtd->name = map->name; 628 mtd->writesize = 1; 629 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 630 631 pr_debug("MTD %s(): write buffer size %d\n", __func__, 632 mtd->writebufsize); 633 634 mtd->_panic_write = cfi_amdstd_panic_write; 635 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 636 637 if (cfi->cfi_mode==CFI_MODE_CFI){ 638 unsigned char bootloc; 639 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 640 struct cfi_pri_amdstd *extp; 641 642 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 643 if (extp) { 644 /* 645 * It's a real CFI chip, not one for which the probe 646 * routine faked a CFI structure. 
647 */ 648 cfi_fixup_major_minor(cfi, extp); 649 650 /* 651 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 652 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 653 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 654 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 655 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf 656 */ 657 if (extp->MajorVersion != '1' || 658 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { 659 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 660 "version %c.%c (%#02x/%#02x).\n", 661 extp->MajorVersion, extp->MinorVersion, 662 extp->MajorVersion, extp->MinorVersion); 663 kfree(extp); 664 kfree(mtd); 665 return NULL; 666 } 667 668 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 669 extp->MajorVersion, extp->MinorVersion); 670 671 /* Install our own private info structure */ 672 cfi->cmdset_priv = extp; 673 674 /* Apply cfi device specific fixups */ 675 cfi_fixup(mtd, cfi_fixup_table); 676 677 #ifdef DEBUG_CFI_FEATURES 678 /* Tell the user about it in lots of lovely detail */ 679 cfi_tell_features(extp); 680 #endif 681 682 #ifdef CONFIG_OF 683 if (np && of_property_read_bool( 684 np, "use-advanced-sector-protection") 685 && extp->BlkProtUnprot == 8) { 686 printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); 687 mtd->_lock = cfi_ppb_lock; 688 mtd->_unlock = cfi_ppb_unlock; 689 mtd->_is_locked = cfi_ppb_is_locked; 690 } 691 #endif 692 693 bootloc = extp->TopBottom; 694 if ((bootloc < 2) || (bootloc > 5)) { 695 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 696 "bank location (%d). Assuming bottom.\n", 697 map->name, bootloc); 698 bootloc = 2; 699 } 700 701 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 702 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 703 704 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 705 int j = (cfi->cfiq->NumEraseRegions-1)-i; 706 707 swap(cfi->cfiq->EraseRegionInfo[i], 708 cfi->cfiq->EraseRegionInfo[j]); 709 } 710 } 711 /* Set the default CFI lock/unlock addresses */ 712 cfi->addr_unlock1 = 0x555; 713 cfi->addr_unlock2 = 0x2aa; 714 } 715 cfi_fixup(mtd, cfi_nopri_fixup_table); 716 717 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 718 kfree(mtd); 719 return NULL; 720 } 721 722 } /* CFI mode */ 723 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 724 /* Apply jedec specific fixups */ 725 cfi_fixup(mtd, jedec_fixup_table); 726 } 727 /* Apply generic fixups */ 728 cfi_fixup(mtd, fixup_table); 729 730 for (i=0; i< cfi->numchips; i++) { 731 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 732 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 733 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 734 /* 735 * First calculate the timeout max according to timeout field 736 * of struct cfi_ident that probed from chip's CFI aera, if 737 * available. Specify a minimum of 2000us, in case the CFI data 738 * is wrong. 
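		 *
		 * Both CFI fields are log2 encodings, so the maximum is
		 * computed as a shift below: with hypothetical values
		 * BufWriteTimeoutTyp = 8 and BufWriteTimeoutMax = 4 it comes
		 * out as 1 << (8 + 4) = 4096 us, while 1 << (5 + 2) = 128 us
		 * would be raised to the 2000 us floor afterwards.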
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i, j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].offset = (j * devsize) + offset;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].erasesize = ersize;
			mtd->eraseregions[(j * cfi->cfiq->NumEraseRegions) + i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by bits that keep toggling or by bits held at the
 * wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
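 *
 * (Background, per the AMD/Spansion toggle-bit convention: while an embedded
 * program or erase algorithm is running, DQ6 toggles on every consecutive
 * read, so the two back-to-back reads below differ; once the operation has
 * completed, reads return stable array data again.)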
830 */ 831 static int __xipram chip_ready(struct map_info *map, struct flchip *chip, 832 unsigned long addr, map_word *expected) 833 { 834 struct cfi_private *cfi = map->fldrv_priv; 835 map_word oldd, curd; 836 int ret; 837 838 if (cfi_use_status_reg(cfi)) { 839 map_word ready = CMD(CFI_SR_DRB); 840 /* 841 * For chips that support status register, check device 842 * ready bit 843 */ 844 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 845 cfi->device_type, NULL); 846 curd = map_read(map, addr); 847 848 return map_word_andequal(map, curd, ready, ready); 849 } 850 851 oldd = map_read(map, addr); 852 curd = map_read(map, addr); 853 854 ret = map_word_equal(map, oldd, curd); 855 856 if (!ret || !expected) 857 return ret; 858 859 return map_word_equal(map, curd, *expected); 860 } 861 862 static int __xipram chip_good(struct map_info *map, struct flchip *chip, 863 unsigned long addr, map_word *expected) 864 { 865 struct cfi_private *cfi = map->fldrv_priv; 866 map_word *datum = expected; 867 868 if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA) 869 datum = NULL; 870 871 return chip_ready(map, chip, addr, datum); 872 } 873 874 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 875 { 876 DECLARE_WAITQUEUE(wait, current); 877 struct cfi_private *cfi = map->fldrv_priv; 878 unsigned long timeo; 879 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 880 881 resettime: 882 timeo = jiffies + HZ; 883 retry: 884 switch (chip->state) { 885 886 case FL_STATUS: 887 for (;;) { 888 if (chip_ready(map, chip, adr, NULL)) 889 break; 890 891 if (time_after(jiffies, timeo)) { 892 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 893 return -EIO; 894 } 895 mutex_unlock(&chip->mutex); 896 cfi_udelay(1); 897 mutex_lock(&chip->mutex); 898 /* Someone else might have been playing with it. */ 899 goto retry; 900 } 901 return 0; 902 903 case FL_READY: 904 case FL_CFI_QUERY: 905 case FL_JEDEC_QUERY: 906 return 0; 907 908 case FL_ERASING: 909 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 910 !(mode == FL_READY || mode == FL_POINT || 911 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 912 goto sleep; 913 914 /* Do not allow suspend iff read/write to EB address */ 915 if ((adr & chip->in_progress_block_mask) == 916 chip->in_progress_block_addr) 917 goto sleep; 918 919 /* Erase suspend */ 920 /* It's harmless to issue the Erase-Suspend and Erase-Resume 921 * commands when the erase algorithm isn't in progress. */ 922 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 923 chip->oldstate = FL_ERASING; 924 chip->state = FL_ERASE_SUSPENDING; 925 chip->erase_suspended = 1; 926 for (;;) { 927 if (chip_ready(map, chip, adr, NULL)) 928 break; 929 930 if (time_after(jiffies, timeo)) { 931 /* Should have suspended the erase by now. 932 * Send an Erase-Resume command as either 933 * there was an error (so leave the erase 934 * routine to recover from it) or we trying to 935 * use the erase-in-progress sector. */ 936 put_chip(map, chip, adr); 937 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 938 return -EIO; 939 } 940 941 mutex_unlock(&chip->mutex); 942 cfi_udelay(1); 943 mutex_lock(&chip->mutex); 944 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 945 So we can just loop here. 
*/ 946 } 947 chip->state = FL_READY; 948 return 0; 949 950 case FL_XIP_WHILE_ERASING: 951 if (mode != FL_READY && mode != FL_POINT && 952 (!cfip || !(cfip->EraseSuspend&2))) 953 goto sleep; 954 chip->oldstate = chip->state; 955 chip->state = FL_READY; 956 return 0; 957 958 case FL_SHUTDOWN: 959 /* The machine is rebooting */ 960 return -EIO; 961 962 case FL_POINT: 963 /* Only if there's no operation suspended... */ 964 if (mode == FL_READY && chip->oldstate == FL_READY) 965 return 0; 966 fallthrough; 967 default: 968 sleep: 969 set_current_state(TASK_UNINTERRUPTIBLE); 970 add_wait_queue(&chip->wq, &wait); 971 mutex_unlock(&chip->mutex); 972 schedule(); 973 remove_wait_queue(&chip->wq, &wait); 974 mutex_lock(&chip->mutex); 975 goto resettime; 976 } 977 } 978 979 980 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 981 { 982 struct cfi_private *cfi = map->fldrv_priv; 983 984 switch(chip->oldstate) { 985 case FL_ERASING: 986 cfi_fixup_m29ew_erase_suspend(map, 987 chip->in_progress_block_addr); 988 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 989 cfi_fixup_m29ew_delay_after_resume(cfi); 990 chip->oldstate = FL_READY; 991 chip->state = FL_ERASING; 992 break; 993 994 case FL_XIP_WHILE_ERASING: 995 chip->state = chip->oldstate; 996 chip->oldstate = FL_READY; 997 break; 998 999 case FL_READY: 1000 case FL_STATUS: 1001 break; 1002 default: 1003 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 1004 } 1005 wake_up(&chip->wq); 1006 } 1007 1008 #ifdef CONFIG_MTD_XIP 1009 1010 /* 1011 * No interrupt what so ever can be serviced while the flash isn't in array 1012 * mode. This is ensured by the xip_disable() and xip_enable() functions 1013 * enclosing any code path where the flash is known not to be in array mode. 1014 * And within a XIP disabled code path, only functions marked with __xipram 1015 * may be called and nothing else (it's a good thing to inspect generated 1016 * assembly to make sure inline functions were actually inlined and that gcc 1017 * didn't emit calls to its own support functions). Also configuring MTD CFI 1018 * support to a single buswidth and a single interleave is also recommended. 1019 */ 1020 1021 static void xip_disable(struct map_info *map, struct flchip *chip, 1022 unsigned long adr) 1023 { 1024 /* TODO: chips with no XIP use should ignore and return */ 1025 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 1026 local_irq_disable(); 1027 } 1028 1029 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 1030 unsigned long adr) 1031 { 1032 struct cfi_private *cfi = map->fldrv_priv; 1033 1034 if (chip->state != FL_POINT && chip->state != FL_READY) { 1035 map_write(map, CMD(0xf0), adr); 1036 chip->state = FL_READY; 1037 } 1038 (void) map_read(map, adr); 1039 xip_iprefetch(); 1040 local_irq_enable(); 1041 } 1042 1043 /* 1044 * When a delay is required for the flash operation to complete, the 1045 * xip_udelay() function is polling for both the given timeout and pending 1046 * (but still masked) hardware interrupts. Whenever there is an interrupt 1047 * pending then the flash erase operation is suspended, array mode restored 1048 * and interrupts unmasked. Task scheduling might also happen at that 1049 * point. The CPU eventually returns from the interrupt or the call to 1050 * schedule() and the suspended flash operation is resumed for the remaining 1051 * of the delay period. 
1052 * 1053 * Warning: this function _will_ fool interrupt latency tracing tools. 1054 */ 1055 1056 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 1057 unsigned long adr, int usec) 1058 { 1059 struct cfi_private *cfi = map->fldrv_priv; 1060 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 1061 map_word status, OK = CMD(0x80); 1062 unsigned long suspended, start = xip_currtime(); 1063 flstate_t oldstate; 1064 1065 do { 1066 cpu_relax(); 1067 if (xip_irqpending() && extp && 1068 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 1069 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 1070 /* 1071 * Let's suspend the erase operation when supported. 1072 * Note that we currently don't try to suspend 1073 * interleaved chips if there is already another 1074 * operation suspended (imagine what happens 1075 * when one chip was already done with the current 1076 * operation while another chip suspended it, then 1077 * we resume the whole thing at once). Yes, it 1078 * can happen! 1079 */ 1080 map_write(map, CMD(0xb0), adr); 1081 usec -= xip_elapsed_since(start); 1082 suspended = xip_currtime(); 1083 do { 1084 if (xip_elapsed_since(suspended) > 100000) { 1085 /* 1086 * The chip doesn't want to suspend 1087 * after waiting for 100 msecs. 1088 * This is a critical error but there 1089 * is not much we can do here. 1090 */ 1091 return; 1092 } 1093 status = map_read(map, adr); 1094 } while (!map_word_andequal(map, status, OK, OK)); 1095 1096 /* Suspend succeeded */ 1097 oldstate = chip->state; 1098 if (!map_word_bitsset(map, status, CMD(0x40))) 1099 break; 1100 chip->state = FL_XIP_WHILE_ERASING; 1101 chip->erase_suspended = 1; 1102 map_write(map, CMD(0xf0), adr); 1103 (void) map_read(map, adr); 1104 xip_iprefetch(); 1105 local_irq_enable(); 1106 mutex_unlock(&chip->mutex); 1107 xip_iprefetch(); 1108 cond_resched(); 1109 1110 /* 1111 * We're back. However someone else might have 1112 * decided to go write to the chip if we are in 1113 * a suspended erase state. If so let's wait 1114 * until it's done. 1115 */ 1116 mutex_lock(&chip->mutex); 1117 while (chip->state != FL_XIP_WHILE_ERASING) { 1118 DECLARE_WAITQUEUE(wait, current); 1119 set_current_state(TASK_UNINTERRUPTIBLE); 1120 add_wait_queue(&chip->wq, &wait); 1121 mutex_unlock(&chip->mutex); 1122 schedule(); 1123 remove_wait_queue(&chip->wq, &wait); 1124 mutex_lock(&chip->mutex); 1125 } 1126 /* Disallow XIP again */ 1127 local_irq_disable(); 1128 1129 /* Correct Erase Suspend Hangups for M29EW */ 1130 cfi_fixup_m29ew_erase_suspend(map, adr); 1131 /* Resume the write or erase operation */ 1132 map_write(map, cfi->sector_erase_cmd, adr); 1133 chip->state = oldstate; 1134 start = xip_currtime(); 1135 } else if (usec >= 1000000/HZ) { 1136 /* 1137 * Try to save on CPU power when waiting delay 1138 * is at least a system timer tick period. 1139 * No need to be extremely accurate here. 1140 */ 1141 xip_cpu_idle(); 1142 } 1143 status = map_read(map, adr); 1144 } while (!map_word_andequal(map, status, OK, OK) 1145 && xip_elapsed_since(start) < usec); 1146 } 1147 1148 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1149 1150 /* 1151 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1152 * the flash is actively programming or erasing since we have to poll for 1153 * the operation to complete anyway. We can't do that in a generic way with 1154 * a XIP setup so do it before the actual flash operation in this case 1155 * and stub it out from INVALIDATE_CACHE_UDELAY. 
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example, the code that suspends the current process on concurrent access
 * is never executed, because xip_udelay() always returns with the same chip
 * state it was entered with. This is why no special care is taken over the
 * add_wait_queue() or schedule() calls found inside a couple of
 * xip_disable()'d code paths, such as do_erase_oneblock(): the queueing and
 * scheduling always happen inside xip_udelay() instead.
 *
 * Similarly, get_chip() and put_chip() always happen to be executed with
 * chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash in
 * array mode, so many of the cases they handle are never exercised and XIP
 * is not affected.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned.
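	 * For example, with a (hypothetical) 4-byte bank width the mask below
	 * rounds an address such as 0x1002 down to 0x1000.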
*/ 1212 cmd_addr = adr & ~(map_bankwidth(map)-1); 1213 1214 mutex_lock(&chip->mutex); 1215 ret = get_chip(map, chip, cmd_addr, FL_READY); 1216 if (ret) { 1217 mutex_unlock(&chip->mutex); 1218 return ret; 1219 } 1220 1221 if (chip->state != FL_POINT && chip->state != FL_READY) { 1222 map_write(map, CMD(0xf0), cmd_addr); 1223 chip->state = FL_READY; 1224 } 1225 1226 map_copy_from(map, buf, adr, len); 1227 1228 put_chip(map, chip, cmd_addr); 1229 1230 mutex_unlock(&chip->mutex); 1231 return 0; 1232 } 1233 1234 1235 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1236 { 1237 struct map_info *map = mtd->priv; 1238 struct cfi_private *cfi = map->fldrv_priv; 1239 unsigned long ofs; 1240 int chipnum; 1241 int ret = 0; 1242 1243 /* ofs: offset within the first chip that the first read should start */ 1244 chipnum = (from >> cfi->chipshift); 1245 ofs = from - (chipnum << cfi->chipshift); 1246 1247 while (len) { 1248 unsigned long thislen; 1249 1250 if (chipnum >= cfi->numchips) 1251 break; 1252 1253 if ((len + ofs -1) >> cfi->chipshift) 1254 thislen = (1<<cfi->chipshift) - ofs; 1255 else 1256 thislen = len; 1257 1258 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1259 if (ret) 1260 break; 1261 1262 *retlen += thislen; 1263 len -= thislen; 1264 buf += thislen; 1265 1266 ofs = 0; 1267 chipnum++; 1268 } 1269 return ret; 1270 } 1271 1272 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 1273 loff_t adr, size_t len, u_char *buf, size_t grouplen); 1274 1275 static inline void otp_enter(struct map_info *map, struct flchip *chip, 1276 loff_t adr, size_t len) 1277 { 1278 struct cfi_private *cfi = map->fldrv_priv; 1279 1280 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1281 cfi->device_type, NULL); 1282 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1283 cfi->device_type, NULL); 1284 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, 1285 cfi->device_type, NULL); 1286 1287 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1288 } 1289 1290 static inline void otp_exit(struct map_info *map, struct flchip *chip, 1291 loff_t adr, size_t len) 1292 { 1293 struct cfi_private *cfi = map->fldrv_priv; 1294 1295 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1296 cfi->device_type, NULL); 1297 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1298 cfi->device_type, NULL); 1299 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, 1300 cfi->device_type, NULL); 1301 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, 1302 cfi->device_type, NULL); 1303 1304 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1305 } 1306 1307 static inline int do_read_secsi_onechip(struct map_info *map, 1308 struct flchip *chip, loff_t adr, 1309 size_t len, u_char *buf, 1310 size_t grouplen) 1311 { 1312 DECLARE_WAITQUEUE(wait, current); 1313 1314 retry: 1315 mutex_lock(&chip->mutex); 1316 1317 if (chip->state != FL_READY){ 1318 set_current_state(TASK_UNINTERRUPTIBLE); 1319 add_wait_queue(&chip->wq, &wait); 1320 1321 mutex_unlock(&chip->mutex); 1322 1323 schedule(); 1324 remove_wait_queue(&chip->wq, &wait); 1325 1326 goto retry; 1327 } 1328 1329 adr += chip->start; 1330 1331 chip->state = FL_READY; 1332 1333 otp_enter(map, chip, adr, len); 1334 map_copy_from(map, buf, adr, len); 1335 otp_exit(map, chip, adr, len); 1336 1337 wake_up(&chip->wq); 1338 mutex_unlock(&chip->mutex); 1339 1340 return 0; 1341 } 1342 1343 static int 
cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1344 { 1345 struct map_info *map = mtd->priv; 1346 struct cfi_private *cfi = map->fldrv_priv; 1347 unsigned long ofs; 1348 int chipnum; 1349 int ret = 0; 1350 1351 /* ofs: offset within the first chip that the first read should start */ 1352 /* 8 secsi bytes per chip */ 1353 chipnum=from>>3; 1354 ofs=from & 7; 1355 1356 while (len) { 1357 unsigned long thislen; 1358 1359 if (chipnum >= cfi->numchips) 1360 break; 1361 1362 if ((len + ofs -1) >> 3) 1363 thislen = (1<<3) - ofs; 1364 else 1365 thislen = len; 1366 1367 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, 1368 thislen, buf, 0); 1369 if (ret) 1370 break; 1371 1372 *retlen += thislen; 1373 len -= thislen; 1374 buf += thislen; 1375 1376 ofs = 0; 1377 chipnum++; 1378 } 1379 return ret; 1380 } 1381 1382 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1383 unsigned long adr, map_word datum, 1384 int mode); 1385 1386 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr, 1387 size_t len, u_char *buf, size_t grouplen) 1388 { 1389 int ret; 1390 while (len) { 1391 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); 1392 int gap = adr - bus_ofs; 1393 int n = min_t(int, len, map_bankwidth(map) - gap); 1394 map_word datum = map_word_ff(map); 1395 1396 if (n != map_bankwidth(map)) { 1397 /* partial write of a word, load old contents */ 1398 otp_enter(map, chip, bus_ofs, map_bankwidth(map)); 1399 datum = map_read(map, bus_ofs); 1400 otp_exit(map, chip, bus_ofs, map_bankwidth(map)); 1401 } 1402 1403 datum = map_word_load_partial(map, datum, buf, gap, n); 1404 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 1405 if (ret) 1406 return ret; 1407 1408 adr += n; 1409 buf += n; 1410 len -= n; 1411 } 1412 1413 return 0; 1414 } 1415 1416 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, 1417 size_t len, u_char *buf, size_t grouplen) 1418 { 1419 struct cfi_private *cfi = map->fldrv_priv; 1420 uint8_t lockreg; 1421 unsigned long timeo; 1422 int ret; 1423 1424 /* make sure area matches group boundaries */ 1425 if ((adr != 0) || (len != grouplen)) 1426 return -EINVAL; 1427 1428 mutex_lock(&chip->mutex); 1429 ret = get_chip(map, chip, chip->start, FL_LOCKING); 1430 if (ret) { 1431 mutex_unlock(&chip->mutex); 1432 return ret; 1433 } 1434 chip->state = FL_LOCKING; 1435 1436 /* Enter lock register command */ 1437 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1438 cfi->device_type, NULL); 1439 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1440 cfi->device_type, NULL); 1441 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, 1442 cfi->device_type, NULL); 1443 1444 /* read lock register */ 1445 lockreg = cfi_read_query(map, 0); 1446 1447 /* set bit 0 to protect extended memory block */ 1448 lockreg &= ~0x01; 1449 1450 /* set bit 0 to protect extended memory block */ 1451 /* write lock register */ 1452 map_write(map, CMD(0xA0), chip->start); 1453 map_write(map, CMD(lockreg), chip->start); 1454 1455 /* wait for chip to become ready */ 1456 timeo = jiffies + msecs_to_jiffies(2); 1457 for (;;) { 1458 if (chip_ready(map, chip, adr, NULL)) 1459 break; 1460 1461 if (time_after(jiffies, timeo)) { 1462 pr_err("Waiting for chip to be ready timed out.\n"); 1463 ret = -EIO; 1464 break; 1465 } 1466 UDELAY(map, chip, 0, 1); 1467 } 1468 1469 /* exit protection commands */ 1470 map_write(map, CMD(0x90), chip->start); 1471 
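	/*
	 * Second cycle of the exit sequence: following the 0x90 cycle above,
	 * writing 0x00 returns the chip from the lock-register command set to
	 * normal read/array mode (the same 0x90/0x00 pair is used by
	 * otp_exit() and cfi_amdstd_otp_walk()).
	 */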
map_write(map, CMD(0x00), chip->start); 1472 1473 chip->state = FL_READY; 1474 put_chip(map, chip, chip->start); 1475 mutex_unlock(&chip->mutex); 1476 1477 return ret; 1478 } 1479 1480 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, 1481 size_t *retlen, u_char *buf, 1482 otp_op_t action, int user_regs) 1483 { 1484 struct map_info *map = mtd->priv; 1485 struct cfi_private *cfi = map->fldrv_priv; 1486 int ofs_factor = cfi->interleave * cfi->device_type; 1487 unsigned long base; 1488 int chipnum; 1489 struct flchip *chip; 1490 uint8_t otp, lockreg; 1491 int ret; 1492 1493 size_t user_size, factory_size, otpsize; 1494 loff_t user_offset, factory_offset, otpoffset; 1495 int user_locked = 0, otplocked; 1496 1497 *retlen = 0; 1498 1499 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { 1500 chip = &cfi->chips[chipnum]; 1501 factory_size = 0; 1502 user_size = 0; 1503 1504 /* Micron M29EW family */ 1505 if (is_m29ew(cfi)) { 1506 base = chip->start; 1507 1508 /* check whether secsi area is factory locked 1509 or user lockable */ 1510 mutex_lock(&chip->mutex); 1511 ret = get_chip(map, chip, base, FL_CFI_QUERY); 1512 if (ret) { 1513 mutex_unlock(&chip->mutex); 1514 return ret; 1515 } 1516 cfi_qry_mode_on(base, map, cfi); 1517 otp = cfi_read_query(map, base + 0x3 * ofs_factor); 1518 cfi_qry_mode_off(base, map, cfi); 1519 put_chip(map, chip, base); 1520 mutex_unlock(&chip->mutex); 1521 1522 if (otp & 0x80) { 1523 /* factory locked */ 1524 factory_offset = 0; 1525 factory_size = 0x100; 1526 } else { 1527 /* customer lockable */ 1528 user_offset = 0; 1529 user_size = 0x100; 1530 1531 mutex_lock(&chip->mutex); 1532 ret = get_chip(map, chip, base, FL_LOCKING); 1533 if (ret) { 1534 mutex_unlock(&chip->mutex); 1535 return ret; 1536 } 1537 1538 /* Enter lock register command */ 1539 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, 1540 chip->start, map, cfi, 1541 cfi->device_type, NULL); 1542 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, 1543 chip->start, map, cfi, 1544 cfi->device_type, NULL); 1545 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, 1546 chip->start, map, cfi, 1547 cfi->device_type, NULL); 1548 /* read lock register */ 1549 lockreg = cfi_read_query(map, 0); 1550 /* exit protection commands */ 1551 map_write(map, CMD(0x90), chip->start); 1552 map_write(map, CMD(0x00), chip->start); 1553 put_chip(map, chip, chip->start); 1554 mutex_unlock(&chip->mutex); 1555 1556 user_locked = ((lockreg & 0x01) == 0x00); 1557 } 1558 } 1559 1560 otpsize = user_regs ? user_size : factory_size; 1561 if (!otpsize) 1562 continue; 1563 otpoffset = user_regs ? user_offset : factory_offset; 1564 otplocked = user_regs ? user_locked : 1; 1565 1566 if (!action) { 1567 /* return otpinfo */ 1568 struct otp_info *otpinfo; 1569 len -= sizeof(*otpinfo); 1570 if (len <= 0) 1571 return -ENOSPC; 1572 otpinfo = (struct otp_info *)buf; 1573 otpinfo->start = from; 1574 otpinfo->length = otpsize; 1575 otpinfo->locked = otplocked; 1576 buf += sizeof(*otpinfo); 1577 *retlen += sizeof(*otpinfo); 1578 from += otpsize; 1579 } else if ((from < otpsize) && (len > 0)) { 1580 size_t size; 1581 size = (len < otpsize - from) ? 
len : otpsize - from; 1582 ret = action(map, chip, otpoffset + from, size, buf, 1583 otpsize); 1584 if (ret < 0) 1585 return ret; 1586 1587 buf += size; 1588 len -= size; 1589 *retlen += size; 1590 from = 0; 1591 } else { 1592 from -= otpsize; 1593 } 1594 } 1595 return 0; 1596 } 1597 1598 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len, 1599 size_t *retlen, struct otp_info *buf) 1600 { 1601 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1602 NULL, 0); 1603 } 1604 1605 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len, 1606 size_t *retlen, struct otp_info *buf) 1607 { 1608 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1609 NULL, 1); 1610 } 1611 1612 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 1613 size_t len, size_t *retlen, 1614 u_char *buf) 1615 { 1616 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1617 buf, do_read_secsi_onechip, 0); 1618 } 1619 1620 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, 1621 size_t len, size_t *retlen, 1622 u_char *buf) 1623 { 1624 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1625 buf, do_read_secsi_onechip, 1); 1626 } 1627 1628 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 1629 size_t len, size_t *retlen, 1630 const u_char *buf) 1631 { 1632 return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf, 1633 do_otp_write, 1); 1634 } 1635 1636 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 1637 size_t len) 1638 { 1639 size_t retlen; 1640 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL, 1641 do_otp_lock, 1); 1642 } 1643 1644 static int __xipram do_write_oneword_once(struct map_info *map, 1645 struct flchip *chip, 1646 unsigned long adr, map_word datum, 1647 int mode, struct cfi_private *cfi) 1648 { 1649 unsigned long timeo; 1650 /* 1651 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1652 * have a max write time of a few hundreds usec). However, we should 1653 * use the maximum timeout value given by the chip at probe time 1654 * instead. Unfortunately, struct flchip does have a field for 1655 * maximum timeout, only for typical which can be far too short 1656 * depending of the conditions. The ' + 1' is to avoid having a 1657 * timeout of 0 jiffies if HZ is smaller than 1000. 1658 */ 1659 unsigned long uWriteTimeout = (HZ / 1000) + 1; 1660 int ret = 0; 1661 1662 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1663 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1664 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1665 map_write(map, datum, adr); 1666 chip->state = mode; 1667 1668 INVALIDATE_CACHE_UDELAY(map, chip, 1669 adr, map_bankwidth(map), 1670 chip->word_write_time); 1671 1672 /* See comment above for timeout value. */ 1673 timeo = jiffies + uWriteTimeout; 1674 for (;;) { 1675 if (chip->state != mode) { 1676 /* Someone's suspended the write. 
Sleep */ 1677 DECLARE_WAITQUEUE(wait, current); 1678 1679 set_current_state(TASK_UNINTERRUPTIBLE); 1680 add_wait_queue(&chip->wq, &wait); 1681 mutex_unlock(&chip->mutex); 1682 schedule(); 1683 remove_wait_queue(&chip->wq, &wait); 1684 timeo = jiffies + (HZ / 2); /* FIXME */ 1685 mutex_lock(&chip->mutex); 1686 continue; 1687 } 1688 1689 /* 1690 * We check "time_after" and "!chip_good" before checking 1691 * "chip_good" to avoid the failure due to scheduling. 1692 */ 1693 if (time_after(jiffies, timeo) && 1694 !chip_good(map, chip, adr, &datum)) { 1695 xip_enable(map, chip, adr); 1696 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1697 xip_disable(map, chip, adr); 1698 ret = -EIO; 1699 break; 1700 } 1701 1702 if (chip_good(map, chip, adr, &datum)) { 1703 if (cfi_check_err_status(map, chip, adr)) 1704 ret = -EIO; 1705 break; 1706 } 1707 1708 /* Latency issues. Drop the lock, wait a while and retry */ 1709 UDELAY(map, chip, adr, 1); 1710 } 1711 1712 return ret; 1713 } 1714 1715 static int __xipram do_write_oneword_start(struct map_info *map, 1716 struct flchip *chip, 1717 unsigned long adr, int mode) 1718 { 1719 int ret; 1720 1721 mutex_lock(&chip->mutex); 1722 1723 ret = get_chip(map, chip, adr, mode); 1724 if (ret) { 1725 mutex_unlock(&chip->mutex); 1726 return ret; 1727 } 1728 1729 if (mode == FL_OTP_WRITE) 1730 otp_enter(map, chip, adr, map_bankwidth(map)); 1731 1732 return ret; 1733 } 1734 1735 static void __xipram do_write_oneword_done(struct map_info *map, 1736 struct flchip *chip, 1737 unsigned long adr, int mode) 1738 { 1739 if (mode == FL_OTP_WRITE) 1740 otp_exit(map, chip, adr, map_bankwidth(map)); 1741 1742 chip->state = FL_READY; 1743 DISABLE_VPP(map); 1744 put_chip(map, chip, adr); 1745 1746 mutex_unlock(&chip->mutex); 1747 } 1748 1749 static int __xipram do_write_oneword_retry(struct map_info *map, 1750 struct flchip *chip, 1751 unsigned long adr, map_word datum, 1752 int mode) 1753 { 1754 struct cfi_private *cfi = map->fldrv_priv; 1755 int ret = 0; 1756 map_word oldd; 1757 int retry_cnt = 0; 1758 1759 /* 1760 * Check for a NOP for the case when the datum to write is already 1761 * present - it saves time and works around buggy chips that corrupt 1762 * data at other locations when 0xff is written to a location that 1763 * already contains 0xff. 1764 */ 1765 oldd = map_read(map, adr); 1766 if (map_word_equal(map, oldd, datum)) { 1767 pr_debug("MTD %s(): NOP\n", __func__); 1768 return ret; 1769 } 1770 1771 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1772 ENABLE_VPP(map); 1773 xip_disable(map, chip, adr); 1774 1775 retry: 1776 ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); 1777 if (ret) { 1778 /* reset on all failures. 
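		 * 0xF0 is the AMD-style read/reset command: it returns the
		 * array to read mode so the word write can be retried below.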
*/ 1779 map_write(map, CMD(0xF0), chip->start); 1780 /* FIXME - should have reset delay before continuing */ 1781 1782 if (++retry_cnt <= MAX_RETRIES) 1783 goto retry; 1784 } 1785 xip_enable(map, chip, adr); 1786 1787 return ret; 1788 } 1789 1790 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1791 unsigned long adr, map_word datum, 1792 int mode) 1793 { 1794 int ret; 1795 1796 adr += chip->start; 1797 1798 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, 1799 datum.x[0]); 1800 1801 ret = do_write_oneword_start(map, chip, adr, mode); 1802 if (ret) 1803 return ret; 1804 1805 ret = do_write_oneword_retry(map, chip, adr, datum, mode); 1806 1807 do_write_oneword_done(map, chip, adr, mode); 1808 1809 return ret; 1810 } 1811 1812 1813 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1814 size_t *retlen, const u_char *buf) 1815 { 1816 struct map_info *map = mtd->priv; 1817 struct cfi_private *cfi = map->fldrv_priv; 1818 int ret; 1819 int chipnum; 1820 unsigned long ofs, chipstart; 1821 DECLARE_WAITQUEUE(wait, current); 1822 1823 chipnum = to >> cfi->chipshift; 1824 ofs = to - (chipnum << cfi->chipshift); 1825 chipstart = cfi->chips[chipnum].start; 1826 1827 /* If it's not bus-aligned, do the first byte write */ 1828 if (ofs & (map_bankwidth(map)-1)) { 1829 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1830 int i = ofs - bus_ofs; 1831 int n = 0; 1832 map_word tmp_buf; 1833 1834 retry: 1835 mutex_lock(&cfi->chips[chipnum].mutex); 1836 1837 if (cfi->chips[chipnum].state != FL_READY) { 1838 set_current_state(TASK_UNINTERRUPTIBLE); 1839 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1840 1841 mutex_unlock(&cfi->chips[chipnum].mutex); 1842 1843 schedule(); 1844 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1845 goto retry; 1846 } 1847 1848 /* Load 'tmp_buf' with old contents of flash */ 1849 tmp_buf = map_read(map, bus_ofs+chipstart); 1850 1851 mutex_unlock(&cfi->chips[chipnum].mutex); 1852 1853 /* Number of bytes to copy from buffer */ 1854 n = min_t(int, len, map_bankwidth(map)-i); 1855 1856 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1857 1858 ret = do_write_oneword(map, &cfi->chips[chipnum], 1859 bus_ofs, tmp_buf, FL_WRITING); 1860 if (ret) 1861 return ret; 1862 1863 ofs += n; 1864 buf += n; 1865 (*retlen) += n; 1866 len -= n; 1867 1868 if (ofs >> cfi->chipshift) { 1869 chipnum ++; 1870 ofs = 0; 1871 if (chipnum == cfi->numchips) 1872 return 0; 1873 } 1874 } 1875 1876 /* We are now aligned, write as much as possible */ 1877 while(len >= map_bankwidth(map)) { 1878 map_word datum; 1879 1880 datum = map_word_load(map, buf); 1881 1882 ret = do_write_oneword(map, &cfi->chips[chipnum], 1883 ofs, datum, FL_WRITING); 1884 if (ret) 1885 return ret; 1886 1887 ofs += map_bankwidth(map); 1888 buf += map_bankwidth(map); 1889 (*retlen) += map_bankwidth(map); 1890 len -= map_bankwidth(map); 1891 1892 if (ofs >> cfi->chipshift) { 1893 chipnum ++; 1894 ofs = 0; 1895 if (chipnum == cfi->numchips) 1896 return 0; 1897 chipstart = cfi->chips[chipnum].start; 1898 } 1899 } 1900 1901 /* Write the trailing bytes if any */ 1902 if (len & (map_bankwidth(map)-1)) { 1903 map_word tmp_buf; 1904 1905 retry1: 1906 mutex_lock(&cfi->chips[chipnum].mutex); 1907 1908 if (cfi->chips[chipnum].state != FL_READY) { 1909 set_current_state(TASK_UNINTERRUPTIBLE); 1910 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1911 1912 mutex_unlock(&cfi->chips[chipnum].mutex); 1913 1914 schedule(); 1915 remove_wait_queue(&cfi->chips[chipnum].wq, 
&wait); 1916 goto retry1; 1917 } 1918 1919 tmp_buf = map_read(map, ofs + chipstart); 1920 1921 mutex_unlock(&cfi->chips[chipnum].mutex); 1922 1923 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1924 1925 ret = do_write_oneword(map, &cfi->chips[chipnum], 1926 ofs, tmp_buf, FL_WRITING); 1927 if (ret) 1928 return ret; 1929 1930 (*retlen) += len; 1931 } 1932 1933 return 0; 1934 } 1935 1936 #if !FORCE_WORD_WRITE 1937 static int __xipram do_write_buffer_wait(struct map_info *map, 1938 struct flchip *chip, unsigned long adr, 1939 map_word datum) 1940 { 1941 unsigned long timeo; 1942 unsigned long u_write_timeout; 1943 int ret = 0; 1944 1945 /* 1946 * Timeout is calculated according to CFI data, if available. 1947 * See more comments in cfi_cmdset_0002(). 1948 */ 1949 u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max); 1950 timeo = jiffies + u_write_timeout; 1951 1952 for (;;) { 1953 if (chip->state != FL_WRITING) { 1954 /* Someone's suspended the write. Sleep */ 1955 DECLARE_WAITQUEUE(wait, current); 1956 1957 set_current_state(TASK_UNINTERRUPTIBLE); 1958 add_wait_queue(&chip->wq, &wait); 1959 mutex_unlock(&chip->mutex); 1960 schedule(); 1961 remove_wait_queue(&chip->wq, &wait); 1962 timeo = jiffies + (HZ / 2); /* FIXME */ 1963 mutex_lock(&chip->mutex); 1964 continue; 1965 } 1966 1967 /* 1968 * We check "time_after" and "!chip_good" before checking 1969 * "chip_good" to avoid the failure due to scheduling. 1970 */ 1971 if (time_after(jiffies, timeo) && 1972 !chip_good(map, chip, adr, &datum)) { 1973 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", 1974 __func__, adr); 1975 ret = -EIO; 1976 break; 1977 } 1978 1979 if (chip_good(map, chip, adr, &datum)) { 1980 if (cfi_check_err_status(map, chip, adr)) 1981 ret = -EIO; 1982 break; 1983 } 1984 1985 /* Latency issues. Drop the lock, wait a while and retry */ 1986 UDELAY(map, chip, adr, 1); 1987 } 1988 1989 return ret; 1990 } 1991 1992 static void __xipram do_write_buffer_reset(struct map_info *map, 1993 struct flchip *chip, 1994 struct cfi_private *cfi) 1995 { 1996 /* 1997 * Recovery from write-buffer programming failures requires 1998 * the write-to-buffer-reset sequence. Since the last part 1999 * of the sequence also works as a normal reset, we can run 2000 * the same commands regardless of why we are here. 2001 * See e.g. 2002 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 2003 */ 2004 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2005 cfi->device_type, NULL); 2006 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2007 cfi->device_type, NULL); 2008 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 2009 cfi->device_type, NULL); 2010 2011 /* FIXME - should have reset delay before continuing */ 2012 } 2013 2014 /* 2015 * FIXME: interleaved mode not tested, and probably not supported! 
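 *
 * For orientation, a sketch of the buffered-program command sequence that
 * do_write_buffer() below issues (this is a summary of the code that
 * follows, not a datasheet quote):
 *
 *	0xAA -> addr_unlock1, 0x55 -> addr_unlock2	unlock cycles
 *	0x25 -> cmd_adr					write-to-buffer
 *	(words - 1) -> cmd_adr				word count
 *	data -> adr, adr + bankwidth, ...		fill the buffer
 *	0x29 -> cmd_adr					program confirm
 *
 * followed by DQ polling in do_write_buffer_wait() and, on failure, the
 * write-to-buffer-reset sequence in do_write_buffer_reset().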
2016 */ 2017 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 2018 unsigned long adr, const u_char *buf, 2019 int len) 2020 { 2021 struct cfi_private *cfi = map->fldrv_priv; 2022 int ret; 2023 unsigned long cmd_adr; 2024 int z, words; 2025 map_word datum; 2026 2027 adr += chip->start; 2028 cmd_adr = adr; 2029 2030 mutex_lock(&chip->mutex); 2031 ret = get_chip(map, chip, adr, FL_WRITING); 2032 if (ret) { 2033 mutex_unlock(&chip->mutex); 2034 return ret; 2035 } 2036 2037 datum = map_word_load(map, buf); 2038 2039 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 2040 __func__, adr, datum.x[0]); 2041 2042 XIP_INVAL_CACHED_RANGE(map, adr, len); 2043 ENABLE_VPP(map); 2044 xip_disable(map, chip, cmd_adr); 2045 2046 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2047 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2048 2049 /* Write Buffer Load */ 2050 map_write(map, CMD(0x25), cmd_adr); 2051 2052 chip->state = FL_WRITING_TO_BUFFER; 2053 2054 /* Write length of data to come */ 2055 words = len / map_bankwidth(map); 2056 map_write(map, CMD(words - 1), cmd_adr); 2057 /* Write data */ 2058 z = 0; 2059 while(z < words * map_bankwidth(map)) { 2060 datum = map_word_load(map, buf); 2061 map_write(map, datum, adr + z); 2062 2063 z += map_bankwidth(map); 2064 buf += map_bankwidth(map); 2065 } 2066 z -= map_bankwidth(map); 2067 2068 adr += z; 2069 2070 /* Write Buffer Program Confirm: GO GO GO */ 2071 map_write(map, CMD(0x29), cmd_adr); 2072 chip->state = FL_WRITING; 2073 2074 INVALIDATE_CACHE_UDELAY(map, chip, 2075 adr, map_bankwidth(map), 2076 chip->word_write_time); 2077 2078 ret = do_write_buffer_wait(map, chip, adr, datum); 2079 if (ret) 2080 do_write_buffer_reset(map, chip, cfi); 2081 2082 xip_enable(map, chip, adr); 2083 2084 chip->state = FL_READY; 2085 DISABLE_VPP(map); 2086 put_chip(map, chip, adr); 2087 mutex_unlock(&chip->mutex); 2088 2089 return ret; 2090 } 2091 2092 2093 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 2094 size_t *retlen, const u_char *buf) 2095 { 2096 struct map_info *map = mtd->priv; 2097 struct cfi_private *cfi = map->fldrv_priv; 2098 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 2099 int ret; 2100 int chipnum; 2101 unsigned long ofs; 2102 2103 chipnum = to >> cfi->chipshift; 2104 ofs = to - (chipnum << cfi->chipshift); 2105 2106 /* If it's not bus-aligned, do the first word write */ 2107 if (ofs & (map_bankwidth(map)-1)) { 2108 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 2109 if (local_len > len) 2110 local_len = len; 2111 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2112 local_len, retlen, buf); 2113 if (ret) 2114 return ret; 2115 ofs += local_len; 2116 buf += local_len; 2117 len -= local_len; 2118 2119 if (ofs >> cfi->chipshift) { 2120 chipnum ++; 2121 ofs = 0; 2122 if (chipnum == cfi->numchips) 2123 return 0; 2124 } 2125 } 2126 2127 /* Write buffer is worth it only if more than one word to write... 
*/ 2128 while (len >= map_bankwidth(map) * 2) { 2129 /* We must not cross write block boundaries */ 2130 int size = wbufsize - (ofs & (wbufsize-1)); 2131 2132 if (size > len) 2133 size = len; 2134 if (size % map_bankwidth(map)) 2135 size -= size % map_bankwidth(map); 2136 2137 ret = do_write_buffer(map, &cfi->chips[chipnum], 2138 ofs, buf, size); 2139 if (ret) 2140 return ret; 2141 2142 ofs += size; 2143 buf += size; 2144 (*retlen) += size; 2145 len -= size; 2146 2147 if (ofs >> cfi->chipshift) { 2148 chipnum ++; 2149 ofs = 0; 2150 if (chipnum == cfi->numchips) 2151 return 0; 2152 } 2153 } 2154 2155 if (len) { 2156 size_t retlen_dregs = 0; 2157 2158 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2159 len, &retlen_dregs, buf); 2160 2161 *retlen += retlen_dregs; 2162 return ret; 2163 } 2164 2165 return 0; 2166 } 2167 #endif /* !FORCE_WORD_WRITE */ 2168 2169 /* 2170 * Wait for the flash chip to become ready to write data 2171 * 2172 * This is only called during the panic_write() path. When panic_write() 2173 * is called, the kernel is in the process of a panic, and will soon be 2174 * dead. Therefore we don't take any locks, and attempt to get access 2175 * to the chip as soon as possible. 2176 */ 2177 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2178 unsigned long adr) 2179 { 2180 struct cfi_private *cfi = map->fldrv_priv; 2181 int retries = 10; 2182 int i; 2183 2184 /* 2185 * If the driver thinks the chip is idle, and no toggle bits 2186 * are changing, then the chip is actually idle for sure. 2187 */ 2188 if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL)) 2189 return 0; 2190 2191 /* 2192 * Try several times to reset the chip and then wait for it 2193 * to become idle. The upper limit of a few milliseconds of 2194 * delay isn't a big problem: the kernel is dying anyway. It 2195 * is more important to save the messages. 2196 */ 2197 while (retries > 0) { 2198 const unsigned long timeo = (HZ / 1000) + 1; 2199 2200 /* send the reset command */ 2201 map_write(map, CMD(0xF0), chip->start); 2202 2203 /* wait for the chip to become ready */ 2204 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2205 if (chip_ready(map, chip, adr, NULL)) 2206 return 0; 2207 2208 udelay(1); 2209 } 2210 2211 retries--; 2212 } 2213 2214 /* the chip never became ready */ 2215 return -EBUSY; 2216 } 2217 2218 /* 2219 * Write out one word of data to a single flash chip during a kernel panic 2220 * 2221 * This is only called during the panic_write() path. When panic_write() 2222 * is called, the kernel is in the process of a panic, and will soon be 2223 * dead. Therefore we don't take any locks, and attempt to get access 2224 * to the chip as soon as possible. 2225 * 2226 * The implementation of this routine is intentionally similar to 2227 * do_write_oneword(), in order to ease code maintenance. 
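 *
 * One descriptive note (not part of the original comment): instead of
 * sleeping on chip->wq under chip->mutex as do_write_oneword() does, this
 * path busy-waits with chip_ready()/udelay() via cfi_amdstd_panic_wait(),
 * because no locks may be taken once the kernel is panicking.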
2228 */ 2229 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2230 unsigned long adr, map_word datum) 2231 { 2232 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2233 struct cfi_private *cfi = map->fldrv_priv; 2234 int retry_cnt = 0; 2235 map_word oldd; 2236 int ret; 2237 int i; 2238 2239 adr += chip->start; 2240 2241 ret = cfi_amdstd_panic_wait(map, chip, adr); 2242 if (ret) 2243 return ret; 2244 2245 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2246 __func__, adr, datum.x[0]); 2247 2248 /* 2249 * Check for a NOP for the case when the datum to write is already 2250 * present - it saves time and works around buggy chips that corrupt 2251 * data at other locations when 0xff is written to a location that 2252 * already contains 0xff. 2253 */ 2254 oldd = map_read(map, adr); 2255 if (map_word_equal(map, oldd, datum)) { 2256 pr_debug("MTD %s(): NOP\n", __func__); 2257 goto op_done; 2258 } 2259 2260 ENABLE_VPP(map); 2261 2262 retry: 2263 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2264 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2265 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2266 map_write(map, datum, adr); 2267 2268 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2269 if (chip_ready(map, chip, adr, NULL)) 2270 break; 2271 2272 udelay(1); 2273 } 2274 2275 if (!chip_ready(map, chip, adr, &datum) || 2276 cfi_check_err_status(map, chip, adr)) { 2277 /* reset on all failures. */ 2278 map_write(map, CMD(0xF0), chip->start); 2279 /* FIXME - should have reset delay before continuing */ 2280 2281 if (++retry_cnt <= MAX_RETRIES) 2282 goto retry; 2283 2284 ret = -EIO; 2285 } 2286 2287 op_done: 2288 DISABLE_VPP(map); 2289 return ret; 2290 } 2291 2292 /* 2293 * Write out some data during a kernel panic 2294 * 2295 * This is used by the mtdoops driver to save the dying messages from a 2296 * kernel which has panic'd. 2297 * 2298 * This routine ignores all of the locking used throughout the rest of the 2299 * driver, in order to ensure that the data gets written out no matter what 2300 * state this driver (and the flash chip itself) was in when the kernel crashed. 2301 * 2302 * The implementation of this routine is intentionally similar to 2303 * cfi_amdstd_write_words(), in order to ease code maintenance. 
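 *
 * Caller sketch (illustrative only; mtdoops goes through the MTD core
 * rather than calling this driver directly):
 *
 *	size_t retlen;
 *	int err = mtd_panic_write(mtd, ofs, len, &retlen, buf);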
2304 */ 2305 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2306 size_t *retlen, const u_char *buf) 2307 { 2308 struct map_info *map = mtd->priv; 2309 struct cfi_private *cfi = map->fldrv_priv; 2310 unsigned long ofs, chipstart; 2311 int ret; 2312 int chipnum; 2313 2314 chipnum = to >> cfi->chipshift; 2315 ofs = to - (chipnum << cfi->chipshift); 2316 chipstart = cfi->chips[chipnum].start; 2317 2318 /* If it's not bus aligned, do the first byte write */ 2319 if (ofs & (map_bankwidth(map) - 1)) { 2320 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2321 int i = ofs - bus_ofs; 2322 int n = 0; 2323 map_word tmp_buf; 2324 2325 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2326 if (ret) 2327 return ret; 2328 2329 /* Load 'tmp_buf' with old contents of flash */ 2330 tmp_buf = map_read(map, bus_ofs + chipstart); 2331 2332 /* Number of bytes to copy from buffer */ 2333 n = min_t(int, len, map_bankwidth(map) - i); 2334 2335 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2336 2337 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2338 bus_ofs, tmp_buf); 2339 if (ret) 2340 return ret; 2341 2342 ofs += n; 2343 buf += n; 2344 (*retlen) += n; 2345 len -= n; 2346 2347 if (ofs >> cfi->chipshift) { 2348 chipnum++; 2349 ofs = 0; 2350 if (chipnum == cfi->numchips) 2351 return 0; 2352 } 2353 } 2354 2355 /* We are now aligned, write as much as possible */ 2356 while (len >= map_bankwidth(map)) { 2357 map_word datum; 2358 2359 datum = map_word_load(map, buf); 2360 2361 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2362 ofs, datum); 2363 if (ret) 2364 return ret; 2365 2366 ofs += map_bankwidth(map); 2367 buf += map_bankwidth(map); 2368 (*retlen) += map_bankwidth(map); 2369 len -= map_bankwidth(map); 2370 2371 if (ofs >> cfi->chipshift) { 2372 chipnum++; 2373 ofs = 0; 2374 if (chipnum == cfi->numchips) 2375 return 0; 2376 2377 chipstart = cfi->chips[chipnum].start; 2378 } 2379 } 2380 2381 /* Write the trailing bytes if any */ 2382 if (len & (map_bankwidth(map) - 1)) { 2383 map_word tmp_buf; 2384 2385 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2386 if (ret) 2387 return ret; 2388 2389 tmp_buf = map_read(map, ofs + chipstart); 2390 2391 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2392 2393 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2394 ofs, tmp_buf); 2395 if (ret) 2396 return ret; 2397 2398 (*retlen) += len; 2399 } 2400 2401 return 0; 2402 } 2403 2404 2405 /* 2406 * Handle devices with one erase region, that only implement 2407 * the chip erase command. 
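 *
 * For reference, the sequence issued below is the standard six-cycle
 * chip-erase command (a summary of the code that follows):
 *
 *	0xAA, 0x55, 0x80, 0xAA, 0x55, 0x10
 *
 * sent via cfi_send_gen_cmd() at the usual unlock addresses; completion
 * is then polled with chip_ready() under a 20 second software timeout.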
2408 */ 2409 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2410 { 2411 struct cfi_private *cfi = map->fldrv_priv; 2412 unsigned long timeo; 2413 unsigned long int adr; 2414 DECLARE_WAITQUEUE(wait, current); 2415 int ret; 2416 int retry_cnt = 0; 2417 map_word datum = map_word_ff(map); 2418 2419 adr = cfi->addr_unlock1; 2420 2421 mutex_lock(&chip->mutex); 2422 ret = get_chip(map, chip, adr, FL_ERASING); 2423 if (ret) { 2424 mutex_unlock(&chip->mutex); 2425 return ret; 2426 } 2427 2428 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2429 __func__, chip->start); 2430 2431 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2432 ENABLE_VPP(map); 2433 xip_disable(map, chip, adr); 2434 2435 retry: 2436 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2437 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2438 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2439 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2440 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2441 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2442 2443 chip->state = FL_ERASING; 2444 chip->erase_suspended = 0; 2445 chip->in_progress_block_addr = adr; 2446 chip->in_progress_block_mask = ~(map->size - 1); 2447 2448 INVALIDATE_CACHE_UDELAY(map, chip, 2449 adr, map->size, 2450 chip->erase_time*500); 2451 2452 timeo = jiffies + (HZ*20); 2453 2454 for (;;) { 2455 if (chip->state != FL_ERASING) { 2456 /* Someone's suspended the erase. Sleep */ 2457 set_current_state(TASK_UNINTERRUPTIBLE); 2458 add_wait_queue(&chip->wq, &wait); 2459 mutex_unlock(&chip->mutex); 2460 schedule(); 2461 remove_wait_queue(&chip->wq, &wait); 2462 mutex_lock(&chip->mutex); 2463 continue; 2464 } 2465 if (chip->erase_suspended) { 2466 /* This erase was suspended and resumed. 2467 Adjust the timeout */ 2468 timeo = jiffies + (HZ*20); /* FIXME */ 2469 chip->erase_suspended = 0; 2470 } 2471 2472 if (chip_ready(map, chip, adr, &datum)) { 2473 if (cfi_check_err_status(map, chip, adr)) 2474 ret = -EIO; 2475 break; 2476 } 2477 2478 if (time_after(jiffies, timeo)) { 2479 printk(KERN_WARNING "MTD %s(): software timeout\n", 2480 __func__); 2481 ret = -EIO; 2482 break; 2483 } 2484 2485 /* Latency issues. Drop the lock, wait a while and retry */ 2486 UDELAY(map, chip, adr, 1000000/HZ); 2487 } 2488 /* Did we succeed? */ 2489 if (ret) { 2490 /* reset on all failures. 
*/ 2491 map_write(map, CMD(0xF0), chip->start); 2492 /* FIXME - should have reset delay before continuing */ 2493 2494 if (++retry_cnt <= MAX_RETRIES) { 2495 ret = 0; 2496 goto retry; 2497 } 2498 } 2499 2500 chip->state = FL_READY; 2501 xip_enable(map, chip, adr); 2502 DISABLE_VPP(map); 2503 put_chip(map, chip, adr); 2504 mutex_unlock(&chip->mutex); 2505 2506 return ret; 2507 } 2508 2509 2510 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2511 { 2512 struct cfi_private *cfi = map->fldrv_priv; 2513 unsigned long timeo; 2514 DECLARE_WAITQUEUE(wait, current); 2515 int ret; 2516 int retry_cnt = 0; 2517 map_word datum = map_word_ff(map); 2518 2519 adr += chip->start; 2520 2521 mutex_lock(&chip->mutex); 2522 ret = get_chip(map, chip, adr, FL_ERASING); 2523 if (ret) { 2524 mutex_unlock(&chip->mutex); 2525 return ret; 2526 } 2527 2528 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2529 __func__, adr); 2530 2531 XIP_INVAL_CACHED_RANGE(map, adr, len); 2532 ENABLE_VPP(map); 2533 xip_disable(map, chip, adr); 2534 2535 retry: 2536 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2537 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2538 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2539 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2540 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2541 map_write(map, cfi->sector_erase_cmd, adr); 2542 2543 chip->state = FL_ERASING; 2544 chip->erase_suspended = 0; 2545 chip->in_progress_block_addr = adr; 2546 chip->in_progress_block_mask = ~(len - 1); 2547 2548 INVALIDATE_CACHE_UDELAY(map, chip, 2549 adr, len, 2550 chip->erase_time*500); 2551 2552 timeo = jiffies + (HZ*20); 2553 2554 for (;;) { 2555 if (chip->state != FL_ERASING) { 2556 /* Someone's suspended the erase. Sleep */ 2557 set_current_state(TASK_UNINTERRUPTIBLE); 2558 add_wait_queue(&chip->wq, &wait); 2559 mutex_unlock(&chip->mutex); 2560 schedule(); 2561 remove_wait_queue(&chip->wq, &wait); 2562 mutex_lock(&chip->mutex); 2563 continue; 2564 } 2565 if (chip->erase_suspended) { 2566 /* This erase was suspended and resumed. 2567 Adjust the timeout */ 2568 timeo = jiffies + (HZ*20); /* FIXME */ 2569 chip->erase_suspended = 0; 2570 } 2571 2572 if (chip_ready(map, chip, adr, &datum)) { 2573 if (cfi_check_err_status(map, chip, adr)) 2574 ret = -EIO; 2575 break; 2576 } 2577 2578 if (time_after(jiffies, timeo)) { 2579 printk(KERN_WARNING "MTD %s(): software timeout\n", 2580 __func__); 2581 ret = -EIO; 2582 break; 2583 } 2584 2585 /* Latency issues. Drop the lock, wait a while and retry */ 2586 UDELAY(map, chip, adr, 1000000/HZ); 2587 } 2588 /* Did we succeed? */ 2589 if (ret) { 2590 /* reset on all failures. 
*/ 2591 map_write(map, CMD(0xF0), chip->start); 2592 /* FIXME - should have reset delay before continuing */ 2593 2594 if (++retry_cnt <= MAX_RETRIES) { 2595 ret = 0; 2596 goto retry; 2597 } 2598 } 2599 2600 chip->state = FL_READY; 2601 xip_enable(map, chip, adr); 2602 DISABLE_VPP(map); 2603 put_chip(map, chip, adr); 2604 mutex_unlock(&chip->mutex); 2605 return ret; 2606 } 2607 2608 2609 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2610 { 2611 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr, 2612 instr->len, NULL); 2613 } 2614 2615 2616 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2617 { 2618 struct map_info *map = mtd->priv; 2619 struct cfi_private *cfi = map->fldrv_priv; 2620 2621 if (instr->addr != 0) 2622 return -EINVAL; 2623 2624 if (instr->len != mtd->size) 2625 return -EINVAL; 2626 2627 return do_erase_chip(map, &cfi->chips[0]); 2628 } 2629 2630 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2631 unsigned long adr, int len, void *thunk) 2632 { 2633 struct cfi_private *cfi = map->fldrv_priv; 2634 int ret; 2635 2636 mutex_lock(&chip->mutex); 2637 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2638 if (ret) 2639 goto out_unlock; 2640 chip->state = FL_LOCKING; 2641 2642 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2643 2644 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2645 cfi->device_type, NULL); 2646 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2647 cfi->device_type, NULL); 2648 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2649 cfi->device_type, NULL); 2650 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2651 cfi->device_type, NULL); 2652 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2653 cfi->device_type, NULL); 2654 map_write(map, CMD(0x40), chip->start + adr); 2655 2656 chip->state = FL_READY; 2657 put_chip(map, chip, adr + chip->start); 2658 ret = 0; 2659 2660 out_unlock: 2661 mutex_unlock(&chip->mutex); 2662 return ret; 2663 } 2664 2665 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2666 unsigned long adr, int len, void *thunk) 2667 { 2668 struct cfi_private *cfi = map->fldrv_priv; 2669 int ret; 2670 2671 mutex_lock(&chip->mutex); 2672 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2673 if (ret) 2674 goto out_unlock; 2675 chip->state = FL_UNLOCKING; 2676 2677 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2678 2679 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2680 cfi->device_type, NULL); 2681 map_write(map, CMD(0x70), adr); 2682 2683 chip->state = FL_READY; 2684 put_chip(map, chip, adr + chip->start); 2685 ret = 0; 2686 2687 out_unlock: 2688 mutex_unlock(&chip->mutex); 2689 return ret; 2690 } 2691 2692 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2693 { 2694 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2695 } 2696 2697 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2698 { 2699 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2700 } 2701 2702 /* 2703 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2704 */ 2705 2706 struct ppb_lock { 2707 struct flchip *chip; 2708 unsigned long adr; 2709 int locked; 2710 }; 2711 2712 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2713 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2714 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2715 
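/*
 * do_ppb_xxlock() below is invoked per sector through cfi_varsize_frob(),
 * with one of the DO_XXLOCK_ONEBLOCK_* cookies above passed as the thunk
 * argument to select lock, unlock or get-lock-status behaviour.
 * Illustrative use (this is how cfi_ppb_lock() further down drives it):
 *
 *	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
 *			       DO_XXLOCK_ONEBLOCK_LOCK);
 */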
2716 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2717 struct flchip *chip, 2718 unsigned long adr, int len, void *thunk) 2719 { 2720 struct cfi_private *cfi = map->fldrv_priv; 2721 unsigned long timeo; 2722 int ret; 2723 2724 adr += chip->start; 2725 mutex_lock(&chip->mutex); 2726 ret = get_chip(map, chip, adr, FL_LOCKING); 2727 if (ret) { 2728 mutex_unlock(&chip->mutex); 2729 return ret; 2730 } 2731 2732 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2733 2734 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2735 cfi->device_type, NULL); 2736 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2737 cfi->device_type, NULL); 2738 /* PPB entry command */ 2739 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2740 cfi->device_type, NULL); 2741 2742 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2743 chip->state = FL_LOCKING; 2744 map_write(map, CMD(0xA0), adr); 2745 map_write(map, CMD(0x00), adr); 2746 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2747 /* 2748 * Unlocking of one specific sector is not supported, so we 2749 * have to unlock all sectors of this device instead 2750 */ 2751 chip->state = FL_UNLOCKING; 2752 map_write(map, CMD(0x80), chip->start); 2753 map_write(map, CMD(0x30), chip->start); 2754 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2755 chip->state = FL_JEDEC_QUERY; 2756 /* Return locked status: 0->locked, 1->unlocked */ 2757 ret = !cfi_read_query(map, adr); 2758 } else 2759 BUG(); 2760 2761 /* 2762 * Wait for some time as unlocking of all sectors takes quite long 2763 */ 2764 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2765 for (;;) { 2766 if (chip_ready(map, chip, adr, NULL)) 2767 break; 2768 2769 if (time_after(jiffies, timeo)) { 2770 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2771 ret = -EIO; 2772 break; 2773 } 2774 2775 UDELAY(map, chip, adr, 1); 2776 } 2777 2778 /* Exit BC commands */ 2779 map_write(map, CMD(0x90), chip->start); 2780 map_write(map, CMD(0x00), chip->start); 2781 2782 chip->state = FL_READY; 2783 put_chip(map, chip, adr); 2784 mutex_unlock(&chip->mutex); 2785 2786 return ret; 2787 } 2788 2789 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2790 uint64_t len) 2791 { 2792 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2793 DO_XXLOCK_ONEBLOCK_LOCK); 2794 } 2795 2796 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2797 uint64_t len) 2798 { 2799 struct mtd_erase_region_info *regions = mtd->eraseregions; 2800 struct map_info *map = mtd->priv; 2801 struct cfi_private *cfi = map->fldrv_priv; 2802 struct ppb_lock *sect; 2803 unsigned long adr; 2804 loff_t offset; 2805 uint64_t length; 2806 int chipnum; 2807 int i; 2808 int sectors; 2809 int ret; 2810 int max_sectors; 2811 2812 /* 2813 * PPB unlocking always unlocks all sectors of the flash chip. 2814 * We need to re-lock all previously locked sectors. So lets 2815 * first check the locking status of all sectors and save 2816 * it for future use. 2817 */ 2818 max_sectors = 0; 2819 for (i = 0; i < mtd->numeraseregions; i++) 2820 max_sectors += regions[i].numblocks; 2821 2822 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL); 2823 if (!sect) 2824 return -ENOMEM; 2825 2826 /* 2827 * This code to walk all sectors is a slightly modified version 2828 * of the cfi_varsize_frob() code. 
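 *
 * (Descriptive note: cfi_varsize_frob() itself is not used for this pass
 * because the walk must also record, in sect[], the lock state of sectors
 * outside the requested ofs..ofs+len range, so that they can be re-locked
 * after the chip-wide PPB unlock further down.)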
2829 */ 2830 i = 0; 2831 chipnum = 0; 2832 adr = 0; 2833 sectors = 0; 2834 offset = 0; 2835 length = mtd->size; 2836 2837 while (length) { 2838 int size = regions[i].erasesize; 2839 2840 /* 2841 * Only test sectors that shall not be unlocked. The other 2842 * sectors shall be unlocked, so lets keep their locking 2843 * status at "unlocked" (locked=0) for the final re-locking. 2844 */ 2845 if ((offset < ofs) || (offset >= (ofs + len))) { 2846 sect[sectors].chip = &cfi->chips[chipnum]; 2847 sect[sectors].adr = adr; 2848 sect[sectors].locked = do_ppb_xxlock( 2849 map, &cfi->chips[chipnum], adr, 0, 2850 DO_XXLOCK_ONEBLOCK_GETLOCK); 2851 } 2852 2853 adr += size; 2854 offset += size; 2855 length -= size; 2856 2857 if (offset == regions[i].offset + size * regions[i].numblocks) 2858 i++; 2859 2860 if (adr >> cfi->chipshift) { 2861 if (offset >= (ofs + len)) 2862 break; 2863 adr = 0; 2864 chipnum++; 2865 2866 if (chipnum >= cfi->numchips) 2867 break; 2868 } 2869 2870 sectors++; 2871 if (sectors >= max_sectors) { 2872 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2873 max_sectors); 2874 kfree(sect); 2875 return -EINVAL; 2876 } 2877 } 2878 2879 /* Now unlock the whole chip */ 2880 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2881 DO_XXLOCK_ONEBLOCK_UNLOCK); 2882 if (ret) { 2883 kfree(sect); 2884 return ret; 2885 } 2886 2887 /* 2888 * PPB unlocking always unlocks all sectors of the flash chip. 2889 * We need to re-lock all previously locked sectors. 2890 */ 2891 for (i = 0; i < sectors; i++) { 2892 if (sect[i].locked) 2893 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, 2894 DO_XXLOCK_ONEBLOCK_LOCK); 2895 } 2896 2897 kfree(sect); 2898 return ret; 2899 } 2900 2901 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2902 uint64_t len) 2903 { 2904 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2905 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2906 } 2907 2908 static void cfi_amdstd_sync (struct mtd_info *mtd) 2909 { 2910 struct map_info *map = mtd->priv; 2911 struct cfi_private *cfi = map->fldrv_priv; 2912 int i; 2913 struct flchip *chip; 2914 int ret = 0; 2915 DECLARE_WAITQUEUE(wait, current); 2916 2917 for (i=0; !ret && i<cfi->numchips; i++) { 2918 chip = &cfi->chips[i]; 2919 2920 retry: 2921 mutex_lock(&chip->mutex); 2922 2923 switch(chip->state) { 2924 case FL_READY: 2925 case FL_STATUS: 2926 case FL_CFI_QUERY: 2927 case FL_JEDEC_QUERY: 2928 chip->oldstate = chip->state; 2929 chip->state = FL_SYNCING; 2930 /* No need to wake_up() on this state change - 2931 * as the whole point is that nobody can do anything 2932 * with the chip now anyway. 
2933 */ 2934 fallthrough; 2935 case FL_SYNCING: 2936 mutex_unlock(&chip->mutex); 2937 break; 2938 2939 default: 2940 /* Not an idle state */ 2941 set_current_state(TASK_UNINTERRUPTIBLE); 2942 add_wait_queue(&chip->wq, &wait); 2943 2944 mutex_unlock(&chip->mutex); 2945 2946 schedule(); 2947 2948 remove_wait_queue(&chip->wq, &wait); 2949 2950 goto retry; 2951 } 2952 } 2953 2954 /* Unlock the chips again */ 2955 2956 for (i--; i >=0; i--) { 2957 chip = &cfi->chips[i]; 2958 2959 mutex_lock(&chip->mutex); 2960 2961 if (chip->state == FL_SYNCING) { 2962 chip->state = chip->oldstate; 2963 wake_up(&chip->wq); 2964 } 2965 mutex_unlock(&chip->mutex); 2966 } 2967 } 2968 2969 2970 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2971 { 2972 struct map_info *map = mtd->priv; 2973 struct cfi_private *cfi = map->fldrv_priv; 2974 int i; 2975 struct flchip *chip; 2976 int ret = 0; 2977 2978 for (i=0; !ret && i<cfi->numchips; i++) { 2979 chip = &cfi->chips[i]; 2980 2981 mutex_lock(&chip->mutex); 2982 2983 switch(chip->state) { 2984 case FL_READY: 2985 case FL_STATUS: 2986 case FL_CFI_QUERY: 2987 case FL_JEDEC_QUERY: 2988 chip->oldstate = chip->state; 2989 chip->state = FL_PM_SUSPENDED; 2990 /* No need to wake_up() on this state change - 2991 * as the whole point is that nobody can do anything 2992 * with the chip now anyway. 2993 */ 2994 break; 2995 case FL_PM_SUSPENDED: 2996 break; 2997 2998 default: 2999 ret = -EAGAIN; 3000 break; 3001 } 3002 mutex_unlock(&chip->mutex); 3003 } 3004 3005 /* Unlock the chips again */ 3006 3007 if (ret) { 3008 for (i--; i >=0; i--) { 3009 chip = &cfi->chips[i]; 3010 3011 mutex_lock(&chip->mutex); 3012 3013 if (chip->state == FL_PM_SUSPENDED) { 3014 chip->state = chip->oldstate; 3015 wake_up(&chip->wq); 3016 } 3017 mutex_unlock(&chip->mutex); 3018 } 3019 } 3020 3021 return ret; 3022 } 3023 3024 3025 static void cfi_amdstd_resume(struct mtd_info *mtd) 3026 { 3027 struct map_info *map = mtd->priv; 3028 struct cfi_private *cfi = map->fldrv_priv; 3029 int i; 3030 struct flchip *chip; 3031 3032 for (i=0; i<cfi->numchips; i++) { 3033 3034 chip = &cfi->chips[i]; 3035 3036 mutex_lock(&chip->mutex); 3037 3038 if (chip->state == FL_PM_SUSPENDED) { 3039 chip->state = FL_READY; 3040 map_write(map, CMD(0xF0), chip->start); 3041 wake_up(&chip->wq); 3042 } 3043 else 3044 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 3045 3046 mutex_unlock(&chip->mutex); 3047 } 3048 } 3049 3050 3051 /* 3052 * Ensure that the flash device is put back into read array mode before 3053 * unloading the driver or rebooting. On some systems, rebooting while 3054 * the flash is in query/program/erase mode will prevent the CPU from 3055 * fetching the bootloader code, requiring a hard reset or power cycle. 
3056 */ 3057 static int cfi_amdstd_reset(struct mtd_info *mtd) 3058 { 3059 struct map_info *map = mtd->priv; 3060 struct cfi_private *cfi = map->fldrv_priv; 3061 int i, ret; 3062 struct flchip *chip; 3063 3064 for (i = 0; i < cfi->numchips; i++) { 3065 3066 chip = &cfi->chips[i]; 3067 3068 mutex_lock(&chip->mutex); 3069 3070 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 3071 if (!ret) { 3072 map_write(map, CMD(0xF0), chip->start); 3073 chip->state = FL_SHUTDOWN; 3074 put_chip(map, chip, chip->start); 3075 } 3076 3077 mutex_unlock(&chip->mutex); 3078 } 3079 3080 return 0; 3081 } 3082 3083 3084 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 3085 void *v) 3086 { 3087 struct mtd_info *mtd; 3088 3089 mtd = container_of(nb, struct mtd_info, reboot_notifier); 3090 cfi_amdstd_reset(mtd); 3091 return NOTIFY_DONE; 3092 } 3093 3094 3095 static void cfi_amdstd_destroy(struct mtd_info *mtd) 3096 { 3097 struct map_info *map = mtd->priv; 3098 struct cfi_private *cfi = map->fldrv_priv; 3099 3100 cfi_amdstd_reset(mtd); 3101 unregister_reboot_notifier(&mtd->reboot_notifier); 3102 kfree(cfi->cmdset_priv); 3103 kfree(cfi->cfiq); 3104 kfree(cfi); 3105 kfree(mtd->eraseregions); 3106 } 3107 3108 MODULE_LICENSE("GPL"); 3109 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 3110 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 3111 MODULE_ALIAS("cfi_cmdset_0006"); 3112 MODULE_ALIAS("cfi_cmdset_0701"); 3113