/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_RETRIES 3

#define SST49LF004B	0x0060
#define SST49LF040B	0x0050
#define SST49LF008A	0x005a
#define AT49BV6416	0x00d6

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB	BIT(7)
#define CFI_SR_ESB	BIT(5)
#define CFI_SR_PSB	BIT(4)
#define CFI_SR_WBASB	BIT(3)
#define CFI_SR_SLSB	BIT(1)

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip,
		     unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use status register to poll for Erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query table 1.5
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp->MinorVersion >= '5' &&
		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip's busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on the operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur.
 * The problem disappears when a delay is inserted after the RESUME command
 * by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
			mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the maximum timeout from the timeout fields
		 * of struct cfi_ident probed from the chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI data
		 * is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word d, t;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		d = map_read(map, addr);

		return map_word_andequal(map, d, ready, ready);
	}

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);

		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return map_word_equal(map, oldd, curd) &&
	       map_word_equal(map, curd, expected);
}

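/*
 * Wait for the chip to become ready for an operation of type 'mode',
 * suspending an in-progress erase when the chip and the requested mode
 * allow it.  Called with chip->mutex held; returns 0 once the chip is
 * ours, or a negative error code.
 */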
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend if read/write to EB address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}

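/*
 * Undo get_chip(): resume an erase that was suspended on our behalf,
 * restore the previous chip state and wake up anybody waiting for the chip.
 */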
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also, configuring MTD CFI
 * support for a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

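/*
 * Read a range that lies entirely within a single chip, switching the chip
 * back into read (array) mode first if necessary.
 */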
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect extended memory block (0 = protected) */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

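/*
 * Walk the OTP (SecSi) region of each chip.  With a NULL action, otp_info
 * entries are filled in for the factory (user_regs == 0) or user
 * (user_regs == 1) region; otherwise 'action' is applied to the requested
 * range of that region.
 */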
static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;
			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}

static int __xipram do_write_oneword_once(struct map_info *map,
					  struct flchip *chip,
					  unsigned long adr, map_word datum,
					  int mode, struct cfi_private *cfi)
{
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usecs). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip does not have a field for the
	 * maximum timeout, only for the typical, which can be far too short
	 * depending on the conditions. The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write.  Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, datum)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}

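/*
 * Prepare for a one-word program: grab the chip and, for OTP writes, switch
 * into the SecSi (OTP) address space before programming starts.
 */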
static int __xipram do_write_oneword_start(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	int ret;

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	return ret;
}

static void __xipram do_write_oneword_done(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);

	mutex_unlock(&chip->mutex);
}

static int __xipram do_write_oneword_retry(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, map_word datum,
					   int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
	if (ret) {
		/* reset on all failures. */
*/ 1782 map_write(map, CMD(0xF0), chip->start); 1783 /* FIXME - should have reset delay before continuing */ 1784 1785 if (++retry_cnt <= MAX_RETRIES) { 1786 ret = 0; 1787 goto retry; 1788 } 1789 } 1790 xip_enable(map, chip, adr); 1791 1792 return ret; 1793 } 1794 1795 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1796 unsigned long adr, map_word datum, 1797 int mode) 1798 { 1799 int ret; 1800 1801 adr += chip->start; 1802 1803 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, 1804 datum.x[0]); 1805 1806 ret = do_write_oneword_start(map, chip, adr, mode); 1807 if (ret) 1808 return ret; 1809 1810 ret = do_write_oneword_retry(map, chip, adr, datum, mode); 1811 1812 do_write_oneword_done(map, chip, adr, mode); 1813 1814 return ret; 1815 } 1816 1817 1818 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1819 size_t *retlen, const u_char *buf) 1820 { 1821 struct map_info *map = mtd->priv; 1822 struct cfi_private *cfi = map->fldrv_priv; 1823 int ret; 1824 int chipnum; 1825 unsigned long ofs, chipstart; 1826 DECLARE_WAITQUEUE(wait, current); 1827 1828 chipnum = to >> cfi->chipshift; 1829 ofs = to - (chipnum << cfi->chipshift); 1830 chipstart = cfi->chips[chipnum].start; 1831 1832 /* If it's not bus-aligned, do the first byte write */ 1833 if (ofs & (map_bankwidth(map)-1)) { 1834 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1835 int i = ofs - bus_ofs; 1836 int n = 0; 1837 map_word tmp_buf; 1838 1839 retry: 1840 mutex_lock(&cfi->chips[chipnum].mutex); 1841 1842 if (cfi->chips[chipnum].state != FL_READY) { 1843 set_current_state(TASK_UNINTERRUPTIBLE); 1844 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1845 1846 mutex_unlock(&cfi->chips[chipnum].mutex); 1847 1848 schedule(); 1849 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1850 goto retry; 1851 } 1852 1853 /* Load 'tmp_buf' with old contents of flash */ 1854 tmp_buf = map_read(map, bus_ofs+chipstart); 1855 1856 mutex_unlock(&cfi->chips[chipnum].mutex); 1857 1858 /* Number of bytes to copy from buffer */ 1859 n = min_t(int, len, map_bankwidth(map)-i); 1860 1861 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1862 1863 ret = do_write_oneword(map, &cfi->chips[chipnum], 1864 bus_ofs, tmp_buf, FL_WRITING); 1865 if (ret) 1866 return ret; 1867 1868 ofs += n; 1869 buf += n; 1870 (*retlen) += n; 1871 len -= n; 1872 1873 if (ofs >> cfi->chipshift) { 1874 chipnum ++; 1875 ofs = 0; 1876 if (chipnum == cfi->numchips) 1877 return 0; 1878 } 1879 } 1880 1881 /* We are now aligned, write as much as possible */ 1882 while(len >= map_bankwidth(map)) { 1883 map_word datum; 1884 1885 datum = map_word_load(map, buf); 1886 1887 ret = do_write_oneword(map, &cfi->chips[chipnum], 1888 ofs, datum, FL_WRITING); 1889 if (ret) 1890 return ret; 1891 1892 ofs += map_bankwidth(map); 1893 buf += map_bankwidth(map); 1894 (*retlen) += map_bankwidth(map); 1895 len -= map_bankwidth(map); 1896 1897 if (ofs >> cfi->chipshift) { 1898 chipnum ++; 1899 ofs = 0; 1900 if (chipnum == cfi->numchips) 1901 return 0; 1902 chipstart = cfi->chips[chipnum].start; 1903 } 1904 } 1905 1906 /* Write the trailing bytes if any */ 1907 if (len & (map_bankwidth(map)-1)) { 1908 map_word tmp_buf; 1909 1910 retry1: 1911 mutex_lock(&cfi->chips[chipnum].mutex); 1912 1913 if (cfi->chips[chipnum].state != FL_READY) { 1914 set_current_state(TASK_UNINTERRUPTIBLE); 1915 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1916 1917 mutex_unlock(&cfi->chips[chipnum].mutex); 1918 1919 schedule(); 1920 
remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1921 goto retry1; 1922 } 1923 1924 tmp_buf = map_read(map, ofs + chipstart); 1925 1926 mutex_unlock(&cfi->chips[chipnum].mutex); 1927 1928 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1929 1930 ret = do_write_oneword(map, &cfi->chips[chipnum], 1931 ofs, tmp_buf, FL_WRITING); 1932 if (ret) 1933 return ret; 1934 1935 (*retlen) += len; 1936 } 1937 1938 return 0; 1939 } 1940 1941 #if !FORCE_WORD_WRITE 1942 static int __xipram do_write_buffer_wait(struct map_info *map, 1943 struct flchip *chip, unsigned long adr, 1944 map_word datum) 1945 { 1946 unsigned long timeo; 1947 unsigned long u_write_timeout; 1948 int ret = 0; 1949 1950 /* 1951 * Timeout is calculated according to CFI data, if available. 1952 * See more comments in cfi_cmdset_0002(). 1953 */ 1954 u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max); 1955 timeo = jiffies + u_write_timeout; 1956 1957 for (;;) { 1958 if (chip->state != FL_WRITING) { 1959 /* Someone's suspended the write. Sleep */ 1960 DECLARE_WAITQUEUE(wait, current); 1961 1962 set_current_state(TASK_UNINTERRUPTIBLE); 1963 add_wait_queue(&chip->wq, &wait); 1964 mutex_unlock(&chip->mutex); 1965 schedule(); 1966 remove_wait_queue(&chip->wq, &wait); 1967 timeo = jiffies + (HZ / 2); /* FIXME */ 1968 mutex_lock(&chip->mutex); 1969 continue; 1970 } 1971 1972 /* 1973 * We check "time_after" and "!chip_good" before checking 1974 * "chip_good" to avoid the failure due to scheduling. 1975 */ 1976 if (time_after(jiffies, timeo) && 1977 !chip_good(map, chip, adr, datum)) { 1978 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", 1979 __func__, adr); 1980 ret = -EIO; 1981 break; 1982 } 1983 1984 if (chip_good(map, chip, adr, datum)) { 1985 if (cfi_check_err_status(map, chip, adr)) 1986 ret = -EIO; 1987 break; 1988 } 1989 1990 /* Latency issues. Drop the lock, wait a while and retry */ 1991 UDELAY(map, chip, adr, 1); 1992 } 1993 1994 return ret; 1995 } 1996 1997 static void __xipram do_write_buffer_reset(struct map_info *map, 1998 struct flchip *chip, 1999 struct cfi_private *cfi) 2000 { 2001 /* 2002 * Recovery from write-buffer programming failures requires 2003 * the write-to-buffer-reset sequence. Since the last part 2004 * of the sequence also works as a normal reset, we can run 2005 * the same commands regardless of why we are here. 2006 * See e.g. 2007 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 2008 */ 2009 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2010 cfi->device_type, NULL); 2011 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2012 cfi->device_type, NULL); 2013 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 2014 cfi->device_type, NULL); 2015 2016 /* FIXME - should have reset delay before continuing */ 2017 } 2018 2019 /* 2020 * FIXME: interleaved mode not tested, and probably not supported! 
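 *
 * The buffered programming sequence issued below is the standard AMD one:
 * the two unlock cycles (0xAA/0x55), the Write Buffer Load command (0x25)
 * at the target sector address (cmd_adr), the word count minus one, the
 * data words themselves, and finally the Program Buffer to Flash confirm
 * command (0x29).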
2021 */ 2022 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 2023 unsigned long adr, const u_char *buf, 2024 int len) 2025 { 2026 struct cfi_private *cfi = map->fldrv_priv; 2027 int ret; 2028 unsigned long cmd_adr; 2029 int z, words; 2030 map_word datum; 2031 2032 adr += chip->start; 2033 cmd_adr = adr; 2034 2035 mutex_lock(&chip->mutex); 2036 ret = get_chip(map, chip, adr, FL_WRITING); 2037 if (ret) { 2038 mutex_unlock(&chip->mutex); 2039 return ret; 2040 } 2041 2042 datum = map_word_load(map, buf); 2043 2044 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 2045 __func__, adr, datum.x[0]); 2046 2047 XIP_INVAL_CACHED_RANGE(map, adr, len); 2048 ENABLE_VPP(map); 2049 xip_disable(map, chip, cmd_adr); 2050 2051 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2052 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2053 2054 /* Write Buffer Load */ 2055 map_write(map, CMD(0x25), cmd_adr); 2056 2057 chip->state = FL_WRITING_TO_BUFFER; 2058 2059 /* Write length of data to come */ 2060 words = len / map_bankwidth(map); 2061 map_write(map, CMD(words - 1), cmd_adr); 2062 /* Write data */ 2063 z = 0; 2064 while(z < words * map_bankwidth(map)) { 2065 datum = map_word_load(map, buf); 2066 map_write(map, datum, adr + z); 2067 2068 z += map_bankwidth(map); 2069 buf += map_bankwidth(map); 2070 } 2071 z -= map_bankwidth(map); 2072 2073 adr += z; 2074 2075 /* Write Buffer Program Confirm: GO GO GO */ 2076 map_write(map, CMD(0x29), cmd_adr); 2077 chip->state = FL_WRITING; 2078 2079 INVALIDATE_CACHE_UDELAY(map, chip, 2080 adr, map_bankwidth(map), 2081 chip->word_write_time); 2082 2083 ret = do_write_buffer_wait(map, chip, adr, datum); 2084 if (ret) 2085 do_write_buffer_reset(map, chip, cfi); 2086 2087 xip_enable(map, chip, adr); 2088 2089 chip->state = FL_READY; 2090 DISABLE_VPP(map); 2091 put_chip(map, chip, adr); 2092 mutex_unlock(&chip->mutex); 2093 2094 return ret; 2095 } 2096 2097 2098 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 2099 size_t *retlen, const u_char *buf) 2100 { 2101 struct map_info *map = mtd->priv; 2102 struct cfi_private *cfi = map->fldrv_priv; 2103 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 2104 int ret; 2105 int chipnum; 2106 unsigned long ofs; 2107 2108 chipnum = to >> cfi->chipshift; 2109 ofs = to - (chipnum << cfi->chipshift); 2110 2111 /* If it's not bus-aligned, do the first word write */ 2112 if (ofs & (map_bankwidth(map)-1)) { 2113 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 2114 if (local_len > len) 2115 local_len = len; 2116 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2117 local_len, retlen, buf); 2118 if (ret) 2119 return ret; 2120 ofs += local_len; 2121 buf += local_len; 2122 len -= local_len; 2123 2124 if (ofs >> cfi->chipshift) { 2125 chipnum ++; 2126 ofs = 0; 2127 if (chipnum == cfi->numchips) 2128 return 0; 2129 } 2130 } 2131 2132 /* Write buffer is worth it only if more than one word to write... 
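 * Each chunk handed to do_write_buffer() is clipped so that it never
 * crosses a wbufsize boundary and stays a whole multiple of the bus
 * width; anything shorter than two words (an unaligned head, or the
 * trailing bytes) is handled by cfi_amdstd_write_words() instead.
 *
 * Illustrative example (values are hypothetical): with wbufsize = 32
 * and ofs = 0x1c, the first chunk is limited to 32 - (0x1c & 31) = 4
 * bytes so that it ends exactly on the write-buffer boundary.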
*/ 2133 while (len >= map_bankwidth(map) * 2) { 2134 /* We must not cross write block boundaries */ 2135 int size = wbufsize - (ofs & (wbufsize-1)); 2136 2137 if (size > len) 2138 size = len; 2139 if (size % map_bankwidth(map)) 2140 size -= size % map_bankwidth(map); 2141 2142 ret = do_write_buffer(map, &cfi->chips[chipnum], 2143 ofs, buf, size); 2144 if (ret) 2145 return ret; 2146 2147 ofs += size; 2148 buf += size; 2149 (*retlen) += size; 2150 len -= size; 2151 2152 if (ofs >> cfi->chipshift) { 2153 chipnum ++; 2154 ofs = 0; 2155 if (chipnum == cfi->numchips) 2156 return 0; 2157 } 2158 } 2159 2160 if (len) { 2161 size_t retlen_dregs = 0; 2162 2163 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2164 len, &retlen_dregs, buf); 2165 2166 *retlen += retlen_dregs; 2167 return ret; 2168 } 2169 2170 return 0; 2171 } 2172 #endif /* !FORCE_WORD_WRITE */ 2173 2174 /* 2175 * Wait for the flash chip to become ready to write data 2176 * 2177 * This is only called during the panic_write() path. When panic_write() 2178 * is called, the kernel is in the process of a panic, and will soon be 2179 * dead. Therefore we don't take any locks, and attempt to get access 2180 * to the chip as soon as possible. 2181 */ 2182 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2183 unsigned long adr) 2184 { 2185 struct cfi_private *cfi = map->fldrv_priv; 2186 int retries = 10; 2187 int i; 2188 2189 /* 2190 * If the driver thinks the chip is idle, and no toggle bits 2191 * are changing, then the chip is actually idle for sure. 2192 */ 2193 if (chip->state == FL_READY && chip_ready(map, chip, adr)) 2194 return 0; 2195 2196 /* 2197 * Try several times to reset the chip and then wait for it 2198 * to become idle. The upper limit of a few milliseconds of 2199 * delay isn't a big problem: the kernel is dying anyway. It 2200 * is more important to save the messages. 2201 */ 2202 while (retries > 0) { 2203 const unsigned long timeo = (HZ / 1000) + 1; 2204 2205 /* send the reset command */ 2206 map_write(map, CMD(0xF0), chip->start); 2207 2208 /* wait for the chip to become ready */ 2209 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2210 if (chip_ready(map, chip, adr)) 2211 return 0; 2212 2213 udelay(1); 2214 } 2215 2216 retries--; 2217 } 2218 2219 /* the chip never became ready */ 2220 return -EBUSY; 2221 } 2222 2223 /* 2224 * Write out one word of data to a single flash chip during a kernel panic 2225 * 2226 * This is only called during the panic_write() path. When panic_write() 2227 * is called, the kernel is in the process of a panic, and will soon be 2228 * dead. Therefore we don't take any locks, and attempt to get access 2229 * to the chip as soon as possible. 2230 * 2231 * The implementation of this routine is intentionally similar to 2232 * do_write_oneword(), in order to ease code maintenance. 
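 * The main differences are that it never sleeps and never takes
 * chip->mutex: it busy-waits with udelay() and relies on
 * cfi_amdstd_panic_wait() instead of get_chip() to bring the chip into
 * a usable state.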
2233 */ 2234 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2235 unsigned long adr, map_word datum) 2236 { 2237 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2238 struct cfi_private *cfi = map->fldrv_priv; 2239 int retry_cnt = 0; 2240 map_word oldd; 2241 int ret; 2242 int i; 2243 2244 adr += chip->start; 2245 2246 ret = cfi_amdstd_panic_wait(map, chip, adr); 2247 if (ret) 2248 return ret; 2249 2250 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2251 __func__, adr, datum.x[0]); 2252 2253 /* 2254 * Check for a NOP for the case when the datum to write is already 2255 * present - it saves time and works around buggy chips that corrupt 2256 * data at other locations when 0xff is written to a location that 2257 * already contains 0xff. 2258 */ 2259 oldd = map_read(map, adr); 2260 if (map_word_equal(map, oldd, datum)) { 2261 pr_debug("MTD %s(): NOP\n", __func__); 2262 goto op_done; 2263 } 2264 2265 ENABLE_VPP(map); 2266 2267 retry: 2268 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2269 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2270 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2271 map_write(map, datum, adr); 2272 2273 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2274 if (chip_ready(map, chip, adr)) 2275 break; 2276 2277 udelay(1); 2278 } 2279 2280 if (!chip_good(map, chip, adr, datum) || 2281 cfi_check_err_status(map, chip, adr)) { 2282 /* reset on all failures. */ 2283 map_write(map, CMD(0xF0), chip->start); 2284 /* FIXME - should have reset delay before continuing */ 2285 2286 if (++retry_cnt <= MAX_RETRIES) 2287 goto retry; 2288 2289 ret = -EIO; 2290 } 2291 2292 op_done: 2293 DISABLE_VPP(map); 2294 return ret; 2295 } 2296 2297 /* 2298 * Write out some data during a kernel panic 2299 * 2300 * This is used by the mtdoops driver to save the dying messages from a 2301 * kernel which has panic'd. 2302 * 2303 * This routine ignores all of the locking used throughout the rest of the 2304 * driver, in order to ensure that the data gets written out no matter what 2305 * state this driver (and the flash chip itself) was in when the kernel crashed. 2306 * 2307 * The implementation of this routine is intentionally similar to 2308 * cfi_amdstd_write_words(), in order to ease code maintenance. 
2309 */ 2310 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2311 size_t *retlen, const u_char *buf) 2312 { 2313 struct map_info *map = mtd->priv; 2314 struct cfi_private *cfi = map->fldrv_priv; 2315 unsigned long ofs, chipstart; 2316 int ret; 2317 int chipnum; 2318 2319 chipnum = to >> cfi->chipshift; 2320 ofs = to - (chipnum << cfi->chipshift); 2321 chipstart = cfi->chips[chipnum].start; 2322 2323 /* If it's not bus aligned, do the first byte write */ 2324 if (ofs & (map_bankwidth(map) - 1)) { 2325 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2326 int i = ofs - bus_ofs; 2327 int n = 0; 2328 map_word tmp_buf; 2329 2330 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2331 if (ret) 2332 return ret; 2333 2334 /* Load 'tmp_buf' with old contents of flash */ 2335 tmp_buf = map_read(map, bus_ofs + chipstart); 2336 2337 /* Number of bytes to copy from buffer */ 2338 n = min_t(int, len, map_bankwidth(map) - i); 2339 2340 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2341 2342 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2343 bus_ofs, tmp_buf); 2344 if (ret) 2345 return ret; 2346 2347 ofs += n; 2348 buf += n; 2349 (*retlen) += n; 2350 len -= n; 2351 2352 if (ofs >> cfi->chipshift) { 2353 chipnum++; 2354 ofs = 0; 2355 if (chipnum == cfi->numchips) 2356 return 0; 2357 } 2358 } 2359 2360 /* We are now aligned, write as much as possible */ 2361 while (len >= map_bankwidth(map)) { 2362 map_word datum; 2363 2364 datum = map_word_load(map, buf); 2365 2366 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2367 ofs, datum); 2368 if (ret) 2369 return ret; 2370 2371 ofs += map_bankwidth(map); 2372 buf += map_bankwidth(map); 2373 (*retlen) += map_bankwidth(map); 2374 len -= map_bankwidth(map); 2375 2376 if (ofs >> cfi->chipshift) { 2377 chipnum++; 2378 ofs = 0; 2379 if (chipnum == cfi->numchips) 2380 return 0; 2381 2382 chipstart = cfi->chips[chipnum].start; 2383 } 2384 } 2385 2386 /* Write the trailing bytes if any */ 2387 if (len & (map_bankwidth(map) - 1)) { 2388 map_word tmp_buf; 2389 2390 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2391 if (ret) 2392 return ret; 2393 2394 tmp_buf = map_read(map, ofs + chipstart); 2395 2396 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2397 2398 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2399 ofs, tmp_buf); 2400 if (ret) 2401 return ret; 2402 2403 (*retlen) += len; 2404 } 2405 2406 return 0; 2407 } 2408 2409 2410 /* 2411 * Handle devices with one erase region, that only implement 2412 * the chip erase command. 
2413 */ 2414 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2415 { 2416 struct cfi_private *cfi = map->fldrv_priv; 2417 unsigned long timeo = jiffies + HZ; 2418 unsigned long int adr; 2419 DECLARE_WAITQUEUE(wait, current); 2420 int ret; 2421 int retry_cnt = 0; 2422 2423 adr = cfi->addr_unlock1; 2424 2425 mutex_lock(&chip->mutex); 2426 ret = get_chip(map, chip, adr, FL_ERASING); 2427 if (ret) { 2428 mutex_unlock(&chip->mutex); 2429 return ret; 2430 } 2431 2432 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2433 __func__, chip->start); 2434 2435 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2436 ENABLE_VPP(map); 2437 xip_disable(map, chip, adr); 2438 2439 retry: 2440 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2441 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2442 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2443 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2444 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2445 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2446 2447 chip->state = FL_ERASING; 2448 chip->erase_suspended = 0; 2449 chip->in_progress_block_addr = adr; 2450 chip->in_progress_block_mask = ~(map->size - 1); 2451 2452 INVALIDATE_CACHE_UDELAY(map, chip, 2453 adr, map->size, 2454 chip->erase_time*500); 2455 2456 timeo = jiffies + (HZ*20); 2457 2458 for (;;) { 2459 if (chip->state != FL_ERASING) { 2460 /* Someone's suspended the erase. Sleep */ 2461 set_current_state(TASK_UNINTERRUPTIBLE); 2462 add_wait_queue(&chip->wq, &wait); 2463 mutex_unlock(&chip->mutex); 2464 schedule(); 2465 remove_wait_queue(&chip->wq, &wait); 2466 mutex_lock(&chip->mutex); 2467 continue; 2468 } 2469 if (chip->erase_suspended) { 2470 /* This erase was suspended and resumed. 2471 Adjust the timeout */ 2472 timeo = jiffies + (HZ*20); /* FIXME */ 2473 chip->erase_suspended = 0; 2474 } 2475 2476 if (chip_good(map, chip, adr, map_word_ff(map))) { 2477 if (cfi_check_err_status(map, chip, adr)) 2478 ret = -EIO; 2479 break; 2480 } 2481 2482 if (time_after(jiffies, timeo)) { 2483 printk(KERN_WARNING "MTD %s(): software timeout\n", 2484 __func__); 2485 ret = -EIO; 2486 break; 2487 } 2488 2489 /* Latency issues. Drop the lock, wait a while and retry */ 2490 UDELAY(map, chip, adr, 1000000/HZ); 2491 } 2492 /* Did we succeed? */ 2493 if (ret) { 2494 /* reset on all failures. 
*/ 2495 map_write(map, CMD(0xF0), chip->start); 2496 /* FIXME - should have reset delay before continuing */ 2497 2498 if (++retry_cnt <= MAX_RETRIES) { 2499 ret = 0; 2500 goto retry; 2501 } 2502 } 2503 2504 chip->state = FL_READY; 2505 xip_enable(map, chip, adr); 2506 DISABLE_VPP(map); 2507 put_chip(map, chip, adr); 2508 mutex_unlock(&chip->mutex); 2509 2510 return ret; 2511 } 2512 2513 2514 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2515 { 2516 struct cfi_private *cfi = map->fldrv_priv; 2517 unsigned long timeo = jiffies + HZ; 2518 DECLARE_WAITQUEUE(wait, current); 2519 int ret; 2520 int retry_cnt = 0; 2521 2522 adr += chip->start; 2523 2524 mutex_lock(&chip->mutex); 2525 ret = get_chip(map, chip, adr, FL_ERASING); 2526 if (ret) { 2527 mutex_unlock(&chip->mutex); 2528 return ret; 2529 } 2530 2531 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2532 __func__, adr); 2533 2534 XIP_INVAL_CACHED_RANGE(map, adr, len); 2535 ENABLE_VPP(map); 2536 xip_disable(map, chip, adr); 2537 2538 retry: 2539 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2540 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2541 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2542 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2543 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2544 map_write(map, cfi->sector_erase_cmd, adr); 2545 2546 chip->state = FL_ERASING; 2547 chip->erase_suspended = 0; 2548 chip->in_progress_block_addr = adr; 2549 chip->in_progress_block_mask = ~(len - 1); 2550 2551 INVALIDATE_CACHE_UDELAY(map, chip, 2552 adr, len, 2553 chip->erase_time*500); 2554 2555 timeo = jiffies + (HZ*20); 2556 2557 for (;;) { 2558 if (chip->state != FL_ERASING) { 2559 /* Someone's suspended the erase. Sleep */ 2560 set_current_state(TASK_UNINTERRUPTIBLE); 2561 add_wait_queue(&chip->wq, &wait); 2562 mutex_unlock(&chip->mutex); 2563 schedule(); 2564 remove_wait_queue(&chip->wq, &wait); 2565 mutex_lock(&chip->mutex); 2566 continue; 2567 } 2568 if (chip->erase_suspended) { 2569 /* This erase was suspended and resumed. 2570 Adjust the timeout */ 2571 timeo = jiffies + (HZ*20); /* FIXME */ 2572 chip->erase_suspended = 0; 2573 } 2574 2575 if (chip_good(map, chip, adr, map_word_ff(map))) { 2576 if (cfi_check_err_status(map, chip, adr)) 2577 ret = -EIO; 2578 break; 2579 } 2580 2581 if (time_after(jiffies, timeo)) { 2582 printk(KERN_WARNING "MTD %s(): software timeout\n", 2583 __func__); 2584 ret = -EIO; 2585 break; 2586 } 2587 2588 /* Latency issues. Drop the lock, wait a while and retry */ 2589 UDELAY(map, chip, adr, 1000000/HZ); 2590 } 2591 /* Did we succeed? */ 2592 if (ret) { 2593 /* reset on all failures. 
*/ 2594 map_write(map, CMD(0xF0), chip->start); 2595 /* FIXME - should have reset delay before continuing */ 2596 2597 if (++retry_cnt <= MAX_RETRIES) { 2598 ret = 0; 2599 goto retry; 2600 } 2601 } 2602 2603 chip->state = FL_READY; 2604 xip_enable(map, chip, adr); 2605 DISABLE_VPP(map); 2606 put_chip(map, chip, adr); 2607 mutex_unlock(&chip->mutex); 2608 return ret; 2609 } 2610 2611 2612 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2613 { 2614 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr, 2615 instr->len, NULL); 2616 } 2617 2618 2619 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2620 { 2621 struct map_info *map = mtd->priv; 2622 struct cfi_private *cfi = map->fldrv_priv; 2623 2624 if (instr->addr != 0) 2625 return -EINVAL; 2626 2627 if (instr->len != mtd->size) 2628 return -EINVAL; 2629 2630 return do_erase_chip(map, &cfi->chips[0]); 2631 } 2632 2633 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2634 unsigned long adr, int len, void *thunk) 2635 { 2636 struct cfi_private *cfi = map->fldrv_priv; 2637 int ret; 2638 2639 mutex_lock(&chip->mutex); 2640 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2641 if (ret) 2642 goto out_unlock; 2643 chip->state = FL_LOCKING; 2644 2645 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2646 2647 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2648 cfi->device_type, NULL); 2649 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2650 cfi->device_type, NULL); 2651 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2652 cfi->device_type, NULL); 2653 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2654 cfi->device_type, NULL); 2655 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2656 cfi->device_type, NULL); 2657 map_write(map, CMD(0x40), chip->start + adr); 2658 2659 chip->state = FL_READY; 2660 put_chip(map, chip, adr + chip->start); 2661 ret = 0; 2662 2663 out_unlock: 2664 mutex_unlock(&chip->mutex); 2665 return ret; 2666 } 2667 2668 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2669 unsigned long adr, int len, void *thunk) 2670 { 2671 struct cfi_private *cfi = map->fldrv_priv; 2672 int ret; 2673 2674 mutex_lock(&chip->mutex); 2675 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2676 if (ret) 2677 goto out_unlock; 2678 chip->state = FL_UNLOCKING; 2679 2680 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2681 2682 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2683 cfi->device_type, NULL); 2684 map_write(map, CMD(0x70), adr); 2685 2686 chip->state = FL_READY; 2687 put_chip(map, chip, adr + chip->start); 2688 ret = 0; 2689 2690 out_unlock: 2691 mutex_unlock(&chip->mutex); 2692 return ret; 2693 } 2694 2695 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2696 { 2697 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2698 } 2699 2700 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2701 { 2702 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2703 } 2704 2705 /* 2706 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2707 */ 2708 2709 struct ppb_lock { 2710 struct flchip *chip; 2711 unsigned long adr; 2712 int locked; 2713 }; 2714 2715 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2716 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2717 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2718 
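/*
 * do_ppb_xxlock() enters the PPB command mode (0xAA/0x55 unlock followed
 * by the 0xC0 entry command) and then, depending on the thunk value,
 * programs the PPB for one sector (LOCK), clears the PPBs of the whole
 * chip (UNLOCK - per-sector unlocking is not supported by the hardware),
 * or reads back the lock status of one sector (GETLOCK). The 0x90/0x00
 * writes at the end exit the command mode again.
 *
 * A hedged usage sketch from the MTD API side, assuming the PPB handlers
 * have been installed as mtd->_lock/_unlock/_is_locked for this device:
 *
 *	if (mtd_is_locked(mtd, ofs, mtd->erasesize) == 1)
 *		mtd_unlock(mtd, ofs, mtd->erasesize);
 */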
2719 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2720 struct flchip *chip, 2721 unsigned long adr, int len, void *thunk) 2722 { 2723 struct cfi_private *cfi = map->fldrv_priv; 2724 unsigned long timeo; 2725 int ret; 2726 2727 adr += chip->start; 2728 mutex_lock(&chip->mutex); 2729 ret = get_chip(map, chip, adr, FL_LOCKING); 2730 if (ret) { 2731 mutex_unlock(&chip->mutex); 2732 return ret; 2733 } 2734 2735 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2736 2737 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2738 cfi->device_type, NULL); 2739 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2740 cfi->device_type, NULL); 2741 /* PPB entry command */ 2742 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2743 cfi->device_type, NULL); 2744 2745 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2746 chip->state = FL_LOCKING; 2747 map_write(map, CMD(0xA0), adr); 2748 map_write(map, CMD(0x00), adr); 2749 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2750 /* 2751 * Unlocking of one specific sector is not supported, so we 2752 * have to unlock all sectors of this device instead 2753 */ 2754 chip->state = FL_UNLOCKING; 2755 map_write(map, CMD(0x80), chip->start); 2756 map_write(map, CMD(0x30), chip->start); 2757 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2758 chip->state = FL_JEDEC_QUERY; 2759 /* Return locked status: 0->locked, 1->unlocked */ 2760 ret = !cfi_read_query(map, adr); 2761 } else 2762 BUG(); 2763 2764 /* 2765 * Wait for some time as unlocking of all sectors takes quite long 2766 */ 2767 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2768 for (;;) { 2769 if (chip_ready(map, chip, adr)) 2770 break; 2771 2772 if (time_after(jiffies, timeo)) { 2773 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2774 ret = -EIO; 2775 break; 2776 } 2777 2778 UDELAY(map, chip, adr, 1); 2779 } 2780 2781 /* Exit BC commands */ 2782 map_write(map, CMD(0x90), chip->start); 2783 map_write(map, CMD(0x00), chip->start); 2784 2785 chip->state = FL_READY; 2786 put_chip(map, chip, adr); 2787 mutex_unlock(&chip->mutex); 2788 2789 return ret; 2790 } 2791 2792 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2793 uint64_t len) 2794 { 2795 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2796 DO_XXLOCK_ONEBLOCK_LOCK); 2797 } 2798 2799 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2800 uint64_t len) 2801 { 2802 struct mtd_erase_region_info *regions = mtd->eraseregions; 2803 struct map_info *map = mtd->priv; 2804 struct cfi_private *cfi = map->fldrv_priv; 2805 struct ppb_lock *sect; 2806 unsigned long adr; 2807 loff_t offset; 2808 uint64_t length; 2809 int chipnum; 2810 int i; 2811 int sectors; 2812 int ret; 2813 int max_sectors; 2814 2815 /* 2816 * PPB unlocking always unlocks all sectors of the flash chip. 2817 * We need to re-lock all previously locked sectors. So lets 2818 * first check the locking status of all sectors and save 2819 * it for future use. 2820 */ 2821 max_sectors = 0; 2822 for (i = 0; i < mtd->numeraseregions; i++) 2823 max_sectors += regions[i].numblocks; 2824 2825 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL); 2826 if (!sect) 2827 return -ENOMEM; 2828 2829 /* 2830 * This code to walk all sectors is a slightly modified version 2831 * of the cfi_varsize_frob() code. 
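 * For every sector outside the requested [ofs, ofs + len) range the
 * current PPB state is queried and recorded; the whole chip is then
 * unlocked, and finally the sectors that were locked before (and were
 * not meant to be unlocked) are locked again one by one.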
2832 */ 2833 i = 0; 2834 chipnum = 0; 2835 adr = 0; 2836 sectors = 0; 2837 offset = 0; 2838 length = mtd->size; 2839 2840 while (length) { 2841 int size = regions[i].erasesize; 2842 2843 /* 2844 * Only test sectors that shall not be unlocked. The other 2845 * sectors shall be unlocked, so lets keep their locking 2846 * status at "unlocked" (locked=0) for the final re-locking. 2847 */ 2848 if ((offset < ofs) || (offset >= (ofs + len))) { 2849 sect[sectors].chip = &cfi->chips[chipnum]; 2850 sect[sectors].adr = adr; 2851 sect[sectors].locked = do_ppb_xxlock( 2852 map, &cfi->chips[chipnum], adr, 0, 2853 DO_XXLOCK_ONEBLOCK_GETLOCK); 2854 } 2855 2856 adr += size; 2857 offset += size; 2858 length -= size; 2859 2860 if (offset == regions[i].offset + size * regions[i].numblocks) 2861 i++; 2862 2863 if (adr >> cfi->chipshift) { 2864 if (offset >= (ofs + len)) 2865 break; 2866 adr = 0; 2867 chipnum++; 2868 2869 if (chipnum >= cfi->numchips) 2870 break; 2871 } 2872 2873 sectors++; 2874 if (sectors >= max_sectors) { 2875 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2876 max_sectors); 2877 kfree(sect); 2878 return -EINVAL; 2879 } 2880 } 2881 2882 /* Now unlock the whole chip */ 2883 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2884 DO_XXLOCK_ONEBLOCK_UNLOCK); 2885 if (ret) { 2886 kfree(sect); 2887 return ret; 2888 } 2889 2890 /* 2891 * PPB unlocking always unlocks all sectors of the flash chip. 2892 * We need to re-lock all previously locked sectors. 2893 */ 2894 for (i = 0; i < sectors; i++) { 2895 if (sect[i].locked) 2896 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, 2897 DO_XXLOCK_ONEBLOCK_LOCK); 2898 } 2899 2900 kfree(sect); 2901 return ret; 2902 } 2903 2904 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2905 uint64_t len) 2906 { 2907 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2908 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2909 } 2910 2911 static void cfi_amdstd_sync (struct mtd_info *mtd) 2912 { 2913 struct map_info *map = mtd->priv; 2914 struct cfi_private *cfi = map->fldrv_priv; 2915 int i; 2916 struct flchip *chip; 2917 int ret = 0; 2918 DECLARE_WAITQUEUE(wait, current); 2919 2920 for (i=0; !ret && i<cfi->numchips; i++) { 2921 chip = &cfi->chips[i]; 2922 2923 retry: 2924 mutex_lock(&chip->mutex); 2925 2926 switch(chip->state) { 2927 case FL_READY: 2928 case FL_STATUS: 2929 case FL_CFI_QUERY: 2930 case FL_JEDEC_QUERY: 2931 chip->oldstate = chip->state; 2932 chip->state = FL_SYNCING; 2933 /* No need to wake_up() on this state change - 2934 * as the whole point is that nobody can do anything 2935 * with the chip now anyway. 
2936 */ 2937 fallthrough; 2938 case FL_SYNCING: 2939 mutex_unlock(&chip->mutex); 2940 break; 2941 2942 default: 2943 /* Not an idle state */ 2944 set_current_state(TASK_UNINTERRUPTIBLE); 2945 add_wait_queue(&chip->wq, &wait); 2946 2947 mutex_unlock(&chip->mutex); 2948 2949 schedule(); 2950 2951 remove_wait_queue(&chip->wq, &wait); 2952 2953 goto retry; 2954 } 2955 } 2956 2957 /* Unlock the chips again */ 2958 2959 for (i--; i >=0; i--) { 2960 chip = &cfi->chips[i]; 2961 2962 mutex_lock(&chip->mutex); 2963 2964 if (chip->state == FL_SYNCING) { 2965 chip->state = chip->oldstate; 2966 wake_up(&chip->wq); 2967 } 2968 mutex_unlock(&chip->mutex); 2969 } 2970 } 2971 2972 2973 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2974 { 2975 struct map_info *map = mtd->priv; 2976 struct cfi_private *cfi = map->fldrv_priv; 2977 int i; 2978 struct flchip *chip; 2979 int ret = 0; 2980 2981 for (i=0; !ret && i<cfi->numchips; i++) { 2982 chip = &cfi->chips[i]; 2983 2984 mutex_lock(&chip->mutex); 2985 2986 switch(chip->state) { 2987 case FL_READY: 2988 case FL_STATUS: 2989 case FL_CFI_QUERY: 2990 case FL_JEDEC_QUERY: 2991 chip->oldstate = chip->state; 2992 chip->state = FL_PM_SUSPENDED; 2993 /* No need to wake_up() on this state change - 2994 * as the whole point is that nobody can do anything 2995 * with the chip now anyway. 2996 */ 2997 case FL_PM_SUSPENDED: 2998 break; 2999 3000 default: 3001 ret = -EAGAIN; 3002 break; 3003 } 3004 mutex_unlock(&chip->mutex); 3005 } 3006 3007 /* Unlock the chips again */ 3008 3009 if (ret) { 3010 for (i--; i >=0; i--) { 3011 chip = &cfi->chips[i]; 3012 3013 mutex_lock(&chip->mutex); 3014 3015 if (chip->state == FL_PM_SUSPENDED) { 3016 chip->state = chip->oldstate; 3017 wake_up(&chip->wq); 3018 } 3019 mutex_unlock(&chip->mutex); 3020 } 3021 } 3022 3023 return ret; 3024 } 3025 3026 3027 static void cfi_amdstd_resume(struct mtd_info *mtd) 3028 { 3029 struct map_info *map = mtd->priv; 3030 struct cfi_private *cfi = map->fldrv_priv; 3031 int i; 3032 struct flchip *chip; 3033 3034 for (i=0; i<cfi->numchips; i++) { 3035 3036 chip = &cfi->chips[i]; 3037 3038 mutex_lock(&chip->mutex); 3039 3040 if (chip->state == FL_PM_SUSPENDED) { 3041 chip->state = FL_READY; 3042 map_write(map, CMD(0xF0), chip->start); 3043 wake_up(&chip->wq); 3044 } 3045 else 3046 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 3047 3048 mutex_unlock(&chip->mutex); 3049 } 3050 } 3051 3052 3053 /* 3054 * Ensure that the flash device is put back into read array mode before 3055 * unloading the driver or rebooting. On some systems, rebooting while 3056 * the flash is in query/program/erase mode will prevent the CPU from 3057 * fetching the bootloader code, requiring a hard reset or power cycle. 
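 *
 * cfi_amdstd_reset() below does exactly that: for every chip it takes the
 * chip lock, writes the 0xF0 read array/reset command to the chip base and
 * marks the chip FL_SHUTDOWN so that no further operations are started.
 * It is called from the reboot notifier and from cfi_amdstd_destroy().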
3058 */ 3059 static int cfi_amdstd_reset(struct mtd_info *mtd) 3060 { 3061 struct map_info *map = mtd->priv; 3062 struct cfi_private *cfi = map->fldrv_priv; 3063 int i, ret; 3064 struct flchip *chip; 3065 3066 for (i = 0; i < cfi->numchips; i++) { 3067 3068 chip = &cfi->chips[i]; 3069 3070 mutex_lock(&chip->mutex); 3071 3072 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 3073 if (!ret) { 3074 map_write(map, CMD(0xF0), chip->start); 3075 chip->state = FL_SHUTDOWN; 3076 put_chip(map, chip, chip->start); 3077 } 3078 3079 mutex_unlock(&chip->mutex); 3080 } 3081 3082 return 0; 3083 } 3084 3085 3086 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 3087 void *v) 3088 { 3089 struct mtd_info *mtd; 3090 3091 mtd = container_of(nb, struct mtd_info, reboot_notifier); 3092 cfi_amdstd_reset(mtd); 3093 return NOTIFY_DONE; 3094 } 3095 3096 3097 static void cfi_amdstd_destroy(struct mtd_info *mtd) 3098 { 3099 struct map_info *map = mtd->priv; 3100 struct cfi_private *cfi = map->fldrv_priv; 3101 3102 cfi_amdstd_reset(mtd); 3103 unregister_reboot_notifier(&mtd->reboot_notifier); 3104 kfree(cfi->cmdset_priv); 3105 kfree(cfi->cfiq); 3106 kfree(cfi); 3107 kfree(mtd->eraseregions); 3108 } 3109 3110 MODULE_LICENSE("GPL"); 3111 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 3112 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 3113 MODULE_ALIAS("cfi_cmdset_0006"); 3114 MODULE_ALIAS("cfi_cmdset_0701"); 3115