1 /* 2 * Common Flash Interface support: 3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 4 * 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 8 * 9 * 2_by_8 routines added by Simon Munton 10 * 11 * 4_by_16 work by Carolyn J. Smith 12 * 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 * by Nicolas Pitre) 15 * 16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 17 * 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 19 * 20 * This code is GPL 21 */ 22 23 #include <linux/module.h> 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/sched.h> 27 #include <linux/init.h> 28 #include <asm/io.h> 29 #include <asm/byteorder.h> 30 31 #include <linux/errno.h> 32 #include <linux/slab.h> 33 #include <linux/delay.h> 34 #include <linux/interrupt.h> 35 #include <linux/reboot.h> 36 #include <linux/mtd/map.h> 37 #include <linux/mtd/mtd.h> 38 #include <linux/mtd/cfi.h> 39 #include <linux/mtd/xip.h> 40 41 #define AMD_BOOTLOC_BUG 42 #define FORCE_WORD_WRITE 0 43 44 #define MAX_WORD_RETRIES 3 45 46 #define SST49LF004B 0x0060 47 #define SST49LF040B 0x0050 48 #define SST49LF008A 0x005a 49 #define AT49BV6416 0x00d6 50 51 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 52 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 53 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 54 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 55 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 56 static void cfi_amdstd_sync (struct mtd_info *); 57 static int cfi_amdstd_suspend (struct mtd_info *); 58 static void cfi_amdstd_resume (struct mtd_info *); 59 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 60 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61 62 static void cfi_amdstd_destroy(struct mtd_info *); 63 64 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 65 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 66 67 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 68 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 69 #include "fwh_lock.h" 70 71 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 72 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 73 74 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 75 .probe = NULL, /* Not usable directly */ 76 .destroy = cfi_amdstd_destroy, 77 .name = "cfi_cmdset_0002", 78 .module = THIS_MODULE 79 }; 80 81 82 /* #define DEBUG_CFI_FEATURES */ 83 84 85 #ifdef DEBUG_CFI_FEATURES 86 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 87 { 88 const char* erase_suspend[3] = { 89 "Not supported", "Read only", "Read/write" 90 }; 91 const char* top_bottom[6] = { 92 "No WP", "8x8KiB sectors at top & bottom, no WP", 93 "Bottom boot", "Top boot", 94 "Uniform, Bottom WP", "Uniform, Top WP" 95 }; 96 97 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 98 printk(" Address sensitive unlock: %s\n", 99 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 100 101 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 102 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 103 else 104 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 105 106 if (extp->BlkProt == 0) 107 printk(" Block protection: Not supported\n"); 108 else 109 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 110 111 112 printk(" Temporary block unprotect: %s\n", 113 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 114 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 115 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 116 printk(" Burst mode: %s\n", 117 extp->BurstMode ? "Supported" : "Not supported"); 118 if (extp->PageMode == 0) 119 printk(" Page mode: Not supported\n"); 120 else 121 printk(" Page mode: %d word page\n", extp->PageMode << 2); 122 123 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 124 extp->VppMin >> 4, extp->VppMin & 0xf); 125 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 126 extp->VppMax >> 4, extp->VppMax & 0xf); 127 128 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 129 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 130 else 131 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 132 } 133 #endif 134 135 #ifdef AMD_BOOTLOC_BUG 136 /* Wheee. Bring me the head of someone at AMD. */ 137 static void fixup_amd_bootblock(struct mtd_info *mtd) 138 { 139 struct map_info *map = mtd->priv; 140 struct cfi_private *cfi = map->fldrv_priv; 141 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 142 __u8 major = extp->MajorVersion; 143 __u8 minor = extp->MinorVersion; 144 145 if (((major << 8) | minor) < 0x3131) { 146 /* CFI version 1.0 => don't trust bootloc */ 147 148 DEBUG(MTD_DEBUG_LEVEL1, 149 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 150 map->name, cfi->mfr, cfi->id); 151 152 /* AFAICS all 29LV400 with a bottom boot block have a device ID 153 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 154 * These were badly detected as they have the 0x80 bit set 155 * so treat them as a special case. 156 */ 157 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 158 159 /* Macronix added CFI to their 2nd generation 160 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 161 * Fujitsu, Spansion, EON, ESI and older Macronix) 162 * has CFI. 163 * 164 * Therefore also check the manufacturer. 165 * This reduces the risk of false detection due to 166 * the 8-bit device ID. 167 */ 168 (cfi->mfr == CFI_MFR_MACRONIX)) { 169 DEBUG(MTD_DEBUG_LEVEL1, 170 "%s: Macronix MX29LV400C with bottom boot block" 171 " detected\n", map->name); 172 extp->TopBottom = 2; /* bottom boot */ 173 } else 174 if (cfi->id & 0x80) { 175 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 176 extp->TopBottom = 3; /* top boot */ 177 } else { 178 extp->TopBottom = 2; /* bottom boot */ 179 } 180 181 DEBUG(MTD_DEBUG_LEVEL1, 182 "%s: AMD CFI PRI V%c.%c has no boot block field;" 183 " deduced %s from Device ID\n", map->name, major, minor, 184 extp->TopBottom == 2 ? "bottom" : "top"); 185 } 186 } 187 #endif 188 189 static void fixup_use_write_buffers(struct mtd_info *mtd) 190 { 191 struct map_info *map = mtd->priv; 192 struct cfi_private *cfi = map->fldrv_priv; 193 if (cfi->cfiq->BufWriteTimeoutTyp) { 194 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); 195 mtd->write = cfi_amdstd_write_buffers; 196 } 197 } 198 199 /* Atmel chips don't use the same PRI format as AMD chips */ 200 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 201 { 202 struct map_info *map = mtd->priv; 203 struct cfi_private *cfi = map->fldrv_priv; 204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 205 struct cfi_pri_atmel atmel_pri; 206 207 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 208 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 209 210 if (atmel_pri.Features & 0x02) 211 extp->EraseSuspend = 2; 212 213 /* Some chips got it backwards... */ 214 if (cfi->id == AT49BV6416) { 215 if (atmel_pri.BottomBoot) 216 extp->TopBottom = 3; 217 else 218 extp->TopBottom = 2; 219 } else { 220 if (atmel_pri.BottomBoot) 221 extp->TopBottom = 2; 222 else 223 extp->TopBottom = 3; 224 } 225 226 /* burst write mode not supported */ 227 cfi->cfiq->BufWriteTimeoutTyp = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0; 229 } 230 231 static void fixup_use_secsi(struct mtd_info *mtd) 232 { 233 /* Setup for chips with a secsi area */ 234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 236 } 237 238 static void fixup_use_erase_chip(struct mtd_info *mtd) 239 { 240 struct map_info *map = mtd->priv; 241 struct cfi_private *cfi = map->fldrv_priv; 242 if ((cfi->cfiq->NumEraseRegions == 1) && 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 244 mtd->erase = cfi_amdstd_erase_chip; 245 } 246 247 } 248 249 /* 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 251 * locked by default. 252 */ 253 static void fixup_use_atmel_lock(struct mtd_info *mtd) 254 { 255 mtd->lock = cfi_atmel_lock; 256 mtd->unlock = cfi_atmel_unlock; 257 mtd->flags |= MTD_POWERUP_LOCK; 258 } 259 260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 261 { 262 struct map_info *map = mtd->priv; 263 struct cfi_private *cfi = map->fldrv_priv; 264 265 /* 266 * These flashes report two separate eraseblock regions based on the 267 * sector_erase-size and block_erase-size, although they both operate on the 268 * same memory. This is not allowed according to CFI, so we just pick the 269 * sector_erase-size. 270 */ 271 cfi->cfiq->NumEraseRegions = 1; 272 } 273 274 static void fixup_sst39vf(struct mtd_info *mtd) 275 { 276 struct map_info *map = mtd->priv; 277 struct cfi_private *cfi = map->fldrv_priv; 278 279 fixup_old_sst_eraseregion(mtd); 280 281 cfi->addr_unlock1 = 0x5555; 282 cfi->addr_unlock2 = 0x2AAA; 283 } 284 285 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 286 { 287 struct map_info *map = mtd->priv; 288 struct cfi_private *cfi = map->fldrv_priv; 289 290 fixup_old_sst_eraseregion(mtd); 291 292 cfi->addr_unlock1 = 0x555; 293 cfi->addr_unlock2 = 0x2AA; 294 295 cfi->sector_erase_cmd = CMD(0x50); 296 } 297 298 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 299 { 300 struct map_info *map = mtd->priv; 301 struct cfi_private *cfi = map->fldrv_priv; 302 303 fixup_sst39vf_rev_b(mtd); 304 305 /* 306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 307 * it should report a size of 8KBytes (0x0020*256). 308 */ 309 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 311 } 312 313 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 314 { 315 struct map_info *map = mtd->priv; 316 struct cfi_private *cfi = map->fldrv_priv; 317 318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 320 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name); 321 } 322 } 323 324 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 325 { 326 struct map_info *map = mtd->priv; 327 struct cfi_private *cfi = map->fldrv_priv; 328 329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 331 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name); 332 } 333 } 334 335 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 336 static struct cfi_fixup cfi_nopri_fixup_table[] = { 337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 338 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 339 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 340 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 341 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 342 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 343 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 344 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 345 { 0, 0, NULL } 346 }; 347 348 static struct cfi_fixup cfi_fixup_table[] = { 349 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 350 #ifdef AMD_BOOTLOC_BUG 351 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 352 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 353 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 354 #endif 355 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 356 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 357 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 358 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 359 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 360 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 361 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, 362 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 363 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 364 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 365 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 366 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 367 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 368 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 369 #if !FORCE_WORD_WRITE 370 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 371 #endif 372 { 0, 0, NULL } 373 }; 374 static struct cfi_fixup jedec_fixup_table[] = { 375 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 376 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 377 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 378 { 0, 0, NULL } 379 }; 380 381 static struct cfi_fixup fixup_table[] = { 382 /* The CFI vendor ids and the JEDEC vendor IDs appear 383 * to be common. It is like the devices id's are as 384 * well. This table is to pick all cases where 385 * we know that is the case. 386 */ 387 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 388 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 389 { 0, 0, NULL } 390 }; 391 392 393 static void cfi_fixup_major_minor(struct cfi_private *cfi, 394 struct cfi_pri_amdstd *extp) 395 { 396 if (cfi->mfr == CFI_MFR_SAMSUNG) { 397 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 398 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 399 /* 400 * Samsung K8P2815UQB and K8D6x16UxM chips 401 * report major=0 / minor=0. 402 * K8D3x16UxC chips report major=3 / minor=3. 403 */ 404 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 405 " Extended Query version to 1.%c\n", 406 extp->MinorVersion); 407 extp->MajorVersion = '1'; 408 } 409 } 410 411 /* 412 * SST 38VF640x chips report major=0xFF / minor=0xFF. 413 */ 414 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 415 extp->MajorVersion = '1'; 416 extp->MinorVersion = '0'; 417 } 418 } 419 420 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 421 { 422 struct cfi_private *cfi = map->fldrv_priv; 423 struct mtd_info *mtd; 424 int i; 425 426 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 427 if (!mtd) { 428 printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); 429 return NULL; 430 } 431 mtd->priv = map; 432 mtd->type = MTD_NORFLASH; 433 434 /* Fill in the default mtd operations */ 435 mtd->erase = cfi_amdstd_erase_varsize; 436 mtd->write = cfi_amdstd_write_words; 437 mtd->read = cfi_amdstd_read; 438 mtd->sync = cfi_amdstd_sync; 439 mtd->suspend = cfi_amdstd_suspend; 440 mtd->resume = cfi_amdstd_resume; 441 mtd->flags = MTD_CAP_NORFLASH; 442 mtd->name = map->name; 443 mtd->writesize = 1; 444 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 445 446 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", 447 __func__, mtd->writebufsize); 448 449 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 450 451 if (cfi->cfi_mode==CFI_MODE_CFI){ 452 unsigned char bootloc; 453 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 454 struct cfi_pri_amdstd *extp; 455 456 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 457 if (extp) { 458 /* 459 * It's a real CFI chip, not one for which the probe 460 * routine faked a CFI structure. 461 */ 462 cfi_fixup_major_minor(cfi, extp); 463 464 /* 465 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4 466 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 467 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 468 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 469 */ 470 if (extp->MajorVersion != '1' || 471 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) { 472 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 473 "version %c.%c (%#02x/%#02x).\n", 474 extp->MajorVersion, extp->MinorVersion, 475 extp->MajorVersion, extp->MinorVersion); 476 kfree(extp); 477 kfree(mtd); 478 return NULL; 479 } 480 481 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 482 extp->MajorVersion, extp->MinorVersion); 483 484 /* Install our own private info structure */ 485 cfi->cmdset_priv = extp; 486 487 /* Apply cfi device specific fixups */ 488 cfi_fixup(mtd, cfi_fixup_table); 489 490 #ifdef DEBUG_CFI_FEATURES 491 /* Tell the user about it in lots of lovely detail */ 492 cfi_tell_features(extp); 493 #endif 494 495 bootloc = extp->TopBottom; 496 if ((bootloc < 2) || (bootloc > 5)) { 497 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 498 "bank location (%d). Assuming bottom.\n", 499 map->name, bootloc); 500 bootloc = 2; 501 } 502 503 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 504 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 505 506 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 507 int j = (cfi->cfiq->NumEraseRegions-1)-i; 508 __u32 swap; 509 510 swap = cfi->cfiq->EraseRegionInfo[i]; 511 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 512 cfi->cfiq->EraseRegionInfo[j] = swap; 513 } 514 } 515 /* Set the default CFI lock/unlock addresses */ 516 cfi->addr_unlock1 = 0x555; 517 cfi->addr_unlock2 = 0x2aa; 518 } 519 cfi_fixup(mtd, cfi_nopri_fixup_table); 520 521 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 522 kfree(mtd); 523 return NULL; 524 } 525 526 } /* CFI mode */ 527 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 528 /* Apply jedec specific fixups */ 529 cfi_fixup(mtd, jedec_fixup_table); 530 } 531 /* Apply generic fixups */ 532 cfi_fixup(mtd, fixup_table); 533 534 for (i=0; i< cfi->numchips; i++) { 535 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 536 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 537 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 538 cfi->chips[i].ref_point_counter = 0; 539 init_waitqueue_head(&(cfi->chips[i].wq)); 540 } 541 542 map->fldrv = &cfi_amdstd_chipdrv; 543 544 return cfi_amdstd_setup(mtd); 545 } 546 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 547 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 548 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 549 EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 550 EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 551 552 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 553 { 554 struct map_info *map = mtd->priv; 555 struct cfi_private *cfi = map->fldrv_priv; 556 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; 557 unsigned long offset = 0; 558 int i,j; 559 560 printk(KERN_NOTICE "number of %s chips: %d\n", 561 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 562 /* Select the correct geometry setup */ 563 mtd->size = devsize * cfi->numchips; 564 565 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 566 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 567 * mtd->numeraseregions, GFP_KERNEL); 568 if (!mtd->eraseregions) { 569 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); 570 goto setup_err; 571 } 572 573 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 574 unsigned long ernum, ersize; 575 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 576 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 577 578 if (mtd->erasesize < ersize) { 579 mtd->erasesize = ersize; 580 } 581 for (j=0; j<cfi->numchips; j++) { 582 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 583 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 584 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 585 } 586 offset += (ersize * ernum); 587 } 588 if (offset != devsize) { 589 /* Argh */ 590 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); 591 goto setup_err; 592 } 593 594 __module_get(THIS_MODULE); 595 register_reboot_notifier(&mtd->reboot_notifier); 596 return mtd; 597 598 setup_err: 599 kfree(mtd->eraseregions); 600 kfree(mtd); 601 kfree(cfi->cmdset_priv); 602 kfree(cfi->cfiq); 603 return NULL; 604 } 605 606 /* 607 * Return true if the chip is ready. 608 * 609 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 610 * non-suspended sector) and is indicated by no toggle bits toggling. 611 * 612 * Note that anything more complicated than checking if no bits are toggling 613 * (including checking DQ5 for an error status) is tricky to get working 614 * correctly and is therefore not done (particularly with interleaved chips 615 * as each chip must be checked independently of the others). 616 */ 617 static int __xipram chip_ready(struct map_info *map, unsigned long addr) 618 { 619 map_word d, t; 620 621 d = map_read(map, addr); 622 t = map_read(map, addr); 623 624 return map_word_equal(map, d, t); 625 } 626 627 /* 628 * Return true if the chip is ready and has the correct value. 629 * 630 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 631 * non-suspended sector) and it is indicated by no bits toggling. 632 * 633 * Error are indicated by toggling bits or bits held with the wrong value, 634 * or with bits toggling. 635 * 636 * Note that anything more complicated than checking if no bits are toggling 637 * (including checking DQ5 for an error status) is tricky to get working 638 * correctly and is therefore not done (particularly with interleaved chips 639 * as each chip must be checked independently of the others). 640 * 641 */ 642 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) 643 { 644 map_word oldd, curd; 645 646 oldd = map_read(map, addr); 647 curd = map_read(map, addr); 648 649 return map_word_equal(map, oldd, curd) && 650 map_word_equal(map, curd, expected); 651 } 652 653 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 654 { 655 DECLARE_WAITQUEUE(wait, current); 656 struct cfi_private *cfi = map->fldrv_priv; 657 unsigned long timeo; 658 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 659 660 resettime: 661 timeo = jiffies + HZ; 662 retry: 663 switch (chip->state) { 664 665 case FL_STATUS: 666 for (;;) { 667 if (chip_ready(map, adr)) 668 break; 669 670 if (time_after(jiffies, timeo)) { 671 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 672 return -EIO; 673 } 674 mutex_unlock(&chip->mutex); 675 cfi_udelay(1); 676 mutex_lock(&chip->mutex); 677 /* Someone else might have been playing with it. */ 678 goto retry; 679 } 680 681 case FL_READY: 682 case FL_CFI_QUERY: 683 case FL_JEDEC_QUERY: 684 return 0; 685 686 case FL_ERASING: 687 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 688 !(mode == FL_READY || mode == FL_POINT || 689 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 690 goto sleep; 691 692 /* We could check to see if we're trying to access the sector 693 * that is currently being erased. However, no user will try 694 * anything like that so we just wait for the timeout. */ 695 696 /* Erase suspend */ 697 /* It's harmless to issue the Erase-Suspend and Erase-Resume 698 * commands when the erase algorithm isn't in progress. */ 699 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 700 chip->oldstate = FL_ERASING; 701 chip->state = FL_ERASE_SUSPENDING; 702 chip->erase_suspended = 1; 703 for (;;) { 704 if (chip_ready(map, adr)) 705 break; 706 707 if (time_after(jiffies, timeo)) { 708 /* Should have suspended the erase by now. 709 * Send an Erase-Resume command as either 710 * there was an error (so leave the erase 711 * routine to recover from it) or we trying to 712 * use the erase-in-progress sector. */ 713 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 714 chip->state = FL_ERASING; 715 chip->oldstate = FL_READY; 716 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 717 return -EIO; 718 } 719 720 mutex_unlock(&chip->mutex); 721 cfi_udelay(1); 722 mutex_lock(&chip->mutex); 723 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 724 So we can just loop here. */ 725 } 726 chip->state = FL_READY; 727 return 0; 728 729 case FL_XIP_WHILE_ERASING: 730 if (mode != FL_READY && mode != FL_POINT && 731 (!cfip || !(cfip->EraseSuspend&2))) 732 goto sleep; 733 chip->oldstate = chip->state; 734 chip->state = FL_READY; 735 return 0; 736 737 case FL_SHUTDOWN: 738 /* The machine is rebooting */ 739 return -EIO; 740 741 case FL_POINT: 742 /* Only if there's no operation suspended... */ 743 if (mode == FL_READY && chip->oldstate == FL_READY) 744 return 0; 745 746 default: 747 sleep: 748 set_current_state(TASK_UNINTERRUPTIBLE); 749 add_wait_queue(&chip->wq, &wait); 750 mutex_unlock(&chip->mutex); 751 schedule(); 752 remove_wait_queue(&chip->wq, &wait); 753 mutex_lock(&chip->mutex); 754 goto resettime; 755 } 756 } 757 758 759 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 760 { 761 struct cfi_private *cfi = map->fldrv_priv; 762 763 switch(chip->oldstate) { 764 case FL_ERASING: 765 chip->state = chip->oldstate; 766 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 767 chip->oldstate = FL_READY; 768 chip->state = FL_ERASING; 769 break; 770 771 case FL_XIP_WHILE_ERASING: 772 chip->state = chip->oldstate; 773 chip->oldstate = FL_READY; 774 break; 775 776 case FL_READY: 777 case FL_STATUS: 778 /* We should really make set_vpp() count, rather than doing this */ 779 DISABLE_VPP(map); 780 break; 781 default: 782 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 783 } 784 wake_up(&chip->wq); 785 } 786 787 #ifdef CONFIG_MTD_XIP 788 789 /* 790 * No interrupt what so ever can be serviced while the flash isn't in array 791 * mode. This is ensured by the xip_disable() and xip_enable() functions 792 * enclosing any code path where the flash is known not to be in array mode. 793 * And within a XIP disabled code path, only functions marked with __xipram 794 * may be called and nothing else (it's a good thing to inspect generated 795 * assembly to make sure inline functions were actually inlined and that gcc 796 * didn't emit calls to its own support functions). Also configuring MTD CFI 797 * support to a single buswidth and a single interleave is also recommended. 798 */ 799 800 static void xip_disable(struct map_info *map, struct flchip *chip, 801 unsigned long adr) 802 { 803 /* TODO: chips with no XIP use should ignore and return */ 804 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 805 local_irq_disable(); 806 } 807 808 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 809 unsigned long adr) 810 { 811 struct cfi_private *cfi = map->fldrv_priv; 812 813 if (chip->state != FL_POINT && chip->state != FL_READY) { 814 map_write(map, CMD(0xf0), adr); 815 chip->state = FL_READY; 816 } 817 (void) map_read(map, adr); 818 xip_iprefetch(); 819 local_irq_enable(); 820 } 821 822 /* 823 * When a delay is required for the flash operation to complete, the 824 * xip_udelay() function is polling for both the given timeout and pending 825 * (but still masked) hardware interrupts. Whenever there is an interrupt 826 * pending then the flash erase operation is suspended, array mode restored 827 * and interrupts unmasked. Task scheduling might also happen at that 828 * point. The CPU eventually returns from the interrupt or the call to 829 * schedule() and the suspended flash operation is resumed for the remaining 830 * of the delay period. 831 * 832 * Warning: this function _will_ fool interrupt latency tracing tools. 833 */ 834 835 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 836 unsigned long adr, int usec) 837 { 838 struct cfi_private *cfi = map->fldrv_priv; 839 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 840 map_word status, OK = CMD(0x80); 841 unsigned long suspended, start = xip_currtime(); 842 flstate_t oldstate; 843 844 do { 845 cpu_relax(); 846 if (xip_irqpending() && extp && 847 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 848 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 849 /* 850 * Let's suspend the erase operation when supported. 851 * Note that we currently don't try to suspend 852 * interleaved chips if there is already another 853 * operation suspended (imagine what happens 854 * when one chip was already done with the current 855 * operation while another chip suspended it, then 856 * we resume the whole thing at once). Yes, it 857 * can happen! 858 */ 859 map_write(map, CMD(0xb0), adr); 860 usec -= xip_elapsed_since(start); 861 suspended = xip_currtime(); 862 do { 863 if (xip_elapsed_since(suspended) > 100000) { 864 /* 865 * The chip doesn't want to suspend 866 * after waiting for 100 msecs. 867 * This is a critical error but there 868 * is not much we can do here. 869 */ 870 return; 871 } 872 status = map_read(map, adr); 873 } while (!map_word_andequal(map, status, OK, OK)); 874 875 /* Suspend succeeded */ 876 oldstate = chip->state; 877 if (!map_word_bitsset(map, status, CMD(0x40))) 878 break; 879 chip->state = FL_XIP_WHILE_ERASING; 880 chip->erase_suspended = 1; 881 map_write(map, CMD(0xf0), adr); 882 (void) map_read(map, adr); 883 xip_iprefetch(); 884 local_irq_enable(); 885 mutex_unlock(&chip->mutex); 886 xip_iprefetch(); 887 cond_resched(); 888 889 /* 890 * We're back. However someone else might have 891 * decided to go write to the chip if we are in 892 * a suspended erase state. If so let's wait 893 * until it's done. 894 */ 895 mutex_lock(&chip->mutex); 896 while (chip->state != FL_XIP_WHILE_ERASING) { 897 DECLARE_WAITQUEUE(wait, current); 898 set_current_state(TASK_UNINTERRUPTIBLE); 899 add_wait_queue(&chip->wq, &wait); 900 mutex_unlock(&chip->mutex); 901 schedule(); 902 remove_wait_queue(&chip->wq, &wait); 903 mutex_lock(&chip->mutex); 904 } 905 /* Disallow XIP again */ 906 local_irq_disable(); 907 908 /* Resume the write or erase operation */ 909 map_write(map, cfi->sector_erase_cmd, adr); 910 chip->state = oldstate; 911 start = xip_currtime(); 912 } else if (usec >= 1000000/HZ) { 913 /* 914 * Try to save on CPU power when waiting delay 915 * is at least a system timer tick period. 916 * No need to be extremely accurate here. 917 */ 918 xip_cpu_idle(); 919 } 920 status = map_read(map, adr); 921 } while (!map_word_andequal(map, status, OK, OK) 922 && xip_elapsed_since(start) < usec); 923 } 924 925 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 926 927 /* 928 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 929 * the flash is actively programming or erasing since we have to poll for 930 * the operation to complete anyway. We can't do that in a generic way with 931 * a XIP setup so do it before the actual flash operation in this case 932 * and stub it out from INVALIDATE_CACHE_UDELAY. 933 */ 934 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 935 INVALIDATE_CACHED_RANGE(map, from, size) 936 937 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 938 UDELAY(map, chip, adr, usec) 939 940 /* 941 * Extra notes: 942 * 943 * Activating this XIP support changes the way the code works a bit. For 944 * example the code to suspend the current process when concurrent access 945 * happens is never executed because xip_udelay() will always return with the 946 * same chip state as it was entered with. This is why there is no care for 947 * the presence of add_wait_queue() or schedule() calls from within a couple 948 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 949 * The queueing and scheduling are always happening within xip_udelay(). 950 * 951 * Similarly, get_chip() and put_chip() just happen to always be executed 952 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 953 * is in array mode, therefore never executing many cases therein and not 954 * causing any problem with XIP. 955 */ 956 957 #else 958 959 #define xip_disable(map, chip, adr) 960 #define xip_enable(map, chip, adr) 961 #define XIP_INVAL_CACHED_RANGE(x...) 962 963 #define UDELAY(map, chip, adr, usec) \ 964 do { \ 965 mutex_unlock(&chip->mutex); \ 966 cfi_udelay(usec); \ 967 mutex_lock(&chip->mutex); \ 968 } while (0) 969 970 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 971 do { \ 972 mutex_unlock(&chip->mutex); \ 973 INVALIDATE_CACHED_RANGE(map, adr, len); \ 974 cfi_udelay(usec); \ 975 mutex_lock(&chip->mutex); \ 976 } while (0) 977 978 #endif 979 980 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 981 { 982 unsigned long cmd_addr; 983 struct cfi_private *cfi = map->fldrv_priv; 984 int ret; 985 986 adr += chip->start; 987 988 /* Ensure cmd read/writes are aligned. */ 989 cmd_addr = adr & ~(map_bankwidth(map)-1); 990 991 mutex_lock(&chip->mutex); 992 ret = get_chip(map, chip, cmd_addr, FL_READY); 993 if (ret) { 994 mutex_unlock(&chip->mutex); 995 return ret; 996 } 997 998 if (chip->state != FL_POINT && chip->state != FL_READY) { 999 map_write(map, CMD(0xf0), cmd_addr); 1000 chip->state = FL_READY; 1001 } 1002 1003 map_copy_from(map, buf, adr, len); 1004 1005 put_chip(map, chip, cmd_addr); 1006 1007 mutex_unlock(&chip->mutex); 1008 return 0; 1009 } 1010 1011 1012 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1013 { 1014 struct map_info *map = mtd->priv; 1015 struct cfi_private *cfi = map->fldrv_priv; 1016 unsigned long ofs; 1017 int chipnum; 1018 int ret = 0; 1019 1020 /* ofs: offset within the first chip that the first read should start */ 1021 1022 chipnum = (from >> cfi->chipshift); 1023 ofs = from - (chipnum << cfi->chipshift); 1024 1025 1026 *retlen = 0; 1027 1028 while (len) { 1029 unsigned long thislen; 1030 1031 if (chipnum >= cfi->numchips) 1032 break; 1033 1034 if ((len + ofs -1) >> cfi->chipshift) 1035 thislen = (1<<cfi->chipshift) - ofs; 1036 else 1037 thislen = len; 1038 1039 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1040 if (ret) 1041 break; 1042 1043 *retlen += thislen; 1044 len -= thislen; 1045 buf += thislen; 1046 1047 ofs = 0; 1048 chipnum++; 1049 } 1050 return ret; 1051 } 1052 1053 1054 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1055 { 1056 DECLARE_WAITQUEUE(wait, current); 1057 unsigned long timeo = jiffies + HZ; 1058 struct cfi_private *cfi = map->fldrv_priv; 1059 1060 retry: 1061 mutex_lock(&chip->mutex); 1062 1063 if (chip->state != FL_READY){ 1064 set_current_state(TASK_UNINTERRUPTIBLE); 1065 add_wait_queue(&chip->wq, &wait); 1066 1067 mutex_unlock(&chip->mutex); 1068 1069 schedule(); 1070 remove_wait_queue(&chip->wq, &wait); 1071 timeo = jiffies + HZ; 1072 1073 goto retry; 1074 } 1075 1076 adr += chip->start; 1077 1078 chip->state = FL_READY; 1079 1080 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1081 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1082 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1083 1084 map_copy_from(map, buf, adr, len); 1085 1086 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1087 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1088 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1089 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1090 1091 wake_up(&chip->wq); 1092 mutex_unlock(&chip->mutex); 1093 1094 return 0; 1095 } 1096 1097 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1098 { 1099 struct map_info *map = mtd->priv; 1100 struct cfi_private *cfi = map->fldrv_priv; 1101 unsigned long ofs; 1102 int chipnum; 1103 int ret = 0; 1104 1105 1106 /* ofs: offset within the first chip that the first read should start */ 1107 1108 /* 8 secsi bytes per chip */ 1109 chipnum=from>>3; 1110 ofs=from & 7; 1111 1112 1113 *retlen = 0; 1114 1115 while (len) { 1116 unsigned long thislen; 1117 1118 if (chipnum >= cfi->numchips) 1119 break; 1120 1121 if ((len + ofs -1) >> 3) 1122 thislen = (1<<3) - ofs; 1123 else 1124 thislen = len; 1125 1126 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1127 if (ret) 1128 break; 1129 1130 *retlen += thislen; 1131 len -= thislen; 1132 buf += thislen; 1133 1134 ofs = 0; 1135 chipnum++; 1136 } 1137 return ret; 1138 } 1139 1140 1141 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) 1142 { 1143 struct cfi_private *cfi = map->fldrv_priv; 1144 unsigned long timeo = jiffies + HZ; 1145 /* 1146 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1147 * have a max write time of a few hundreds usec). However, we should 1148 * use the maximum timeout value given by the chip at probe time 1149 * instead. Unfortunately, struct flchip does have a field for 1150 * maximum timeout, only for typical which can be far too short 1151 * depending of the conditions. The ' + 1' is to avoid having a 1152 * timeout of 0 jiffies if HZ is smaller than 1000. 1153 */ 1154 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1155 int ret = 0; 1156 map_word oldd; 1157 int retry_cnt = 0; 1158 1159 adr += chip->start; 1160 1161 mutex_lock(&chip->mutex); 1162 ret = get_chip(map, chip, adr, FL_WRITING); 1163 if (ret) { 1164 mutex_unlock(&chip->mutex); 1165 return ret; 1166 } 1167 1168 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1169 __func__, adr, datum.x[0] ); 1170 1171 /* 1172 * Check for a NOP for the case when the datum to write is already 1173 * present - it saves time and works around buggy chips that corrupt 1174 * data at other locations when 0xff is written to a location that 1175 * already contains 0xff. 1176 */ 1177 oldd = map_read(map, adr); 1178 if (map_word_equal(map, oldd, datum)) { 1179 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", 1180 __func__); 1181 goto op_done; 1182 } 1183 1184 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1185 ENABLE_VPP(map); 1186 xip_disable(map, chip, adr); 1187 retry: 1188 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1189 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1190 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1191 map_write(map, datum, adr); 1192 chip->state = FL_WRITING; 1193 1194 INVALIDATE_CACHE_UDELAY(map, chip, 1195 adr, map_bankwidth(map), 1196 chip->word_write_time); 1197 1198 /* See comment above for timeout value. */ 1199 timeo = jiffies + uWriteTimeout; 1200 for (;;) { 1201 if (chip->state != FL_WRITING) { 1202 /* Someone's suspended the write. Sleep */ 1203 DECLARE_WAITQUEUE(wait, current); 1204 1205 set_current_state(TASK_UNINTERRUPTIBLE); 1206 add_wait_queue(&chip->wq, &wait); 1207 mutex_unlock(&chip->mutex); 1208 schedule(); 1209 remove_wait_queue(&chip->wq, &wait); 1210 timeo = jiffies + (HZ / 2); /* FIXME */ 1211 mutex_lock(&chip->mutex); 1212 continue; 1213 } 1214 1215 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ 1216 xip_enable(map, chip, adr); 1217 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1218 xip_disable(map, chip, adr); 1219 break; 1220 } 1221 1222 if (chip_ready(map, adr)) 1223 break; 1224 1225 /* Latency issues. Drop the lock, wait a while and retry */ 1226 UDELAY(map, chip, adr, 1); 1227 } 1228 /* Did we succeed? */ 1229 if (!chip_good(map, adr, datum)) { 1230 /* reset on all failures. */ 1231 map_write( map, CMD(0xF0), chip->start ); 1232 /* FIXME - should have reset delay before continuing */ 1233 1234 if (++retry_cnt <= MAX_WORD_RETRIES) 1235 goto retry; 1236 1237 ret = -EIO; 1238 } 1239 xip_enable(map, chip, adr); 1240 op_done: 1241 chip->state = FL_READY; 1242 put_chip(map, chip, adr); 1243 mutex_unlock(&chip->mutex); 1244 1245 return ret; 1246 } 1247 1248 1249 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1250 size_t *retlen, const u_char *buf) 1251 { 1252 struct map_info *map = mtd->priv; 1253 struct cfi_private *cfi = map->fldrv_priv; 1254 int ret = 0; 1255 int chipnum; 1256 unsigned long ofs, chipstart; 1257 DECLARE_WAITQUEUE(wait, current); 1258 1259 *retlen = 0; 1260 if (!len) 1261 return 0; 1262 1263 chipnum = to >> cfi->chipshift; 1264 ofs = to - (chipnum << cfi->chipshift); 1265 chipstart = cfi->chips[chipnum].start; 1266 1267 /* If it's not bus-aligned, do the first byte write */ 1268 if (ofs & (map_bankwidth(map)-1)) { 1269 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1270 int i = ofs - bus_ofs; 1271 int n = 0; 1272 map_word tmp_buf; 1273 1274 retry: 1275 mutex_lock(&cfi->chips[chipnum].mutex); 1276 1277 if (cfi->chips[chipnum].state != FL_READY) { 1278 set_current_state(TASK_UNINTERRUPTIBLE); 1279 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1280 1281 mutex_unlock(&cfi->chips[chipnum].mutex); 1282 1283 schedule(); 1284 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1285 goto retry; 1286 } 1287 1288 /* Load 'tmp_buf' with old contents of flash */ 1289 tmp_buf = map_read(map, bus_ofs+chipstart); 1290 1291 mutex_unlock(&cfi->chips[chipnum].mutex); 1292 1293 /* Number of bytes to copy from buffer */ 1294 n = min_t(int, len, map_bankwidth(map)-i); 1295 1296 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1297 1298 ret = do_write_oneword(map, &cfi->chips[chipnum], 1299 bus_ofs, tmp_buf); 1300 if (ret) 1301 return ret; 1302 1303 ofs += n; 1304 buf += n; 1305 (*retlen) += n; 1306 len -= n; 1307 1308 if (ofs >> cfi->chipshift) { 1309 chipnum ++; 1310 ofs = 0; 1311 if (chipnum == cfi->numchips) 1312 return 0; 1313 } 1314 } 1315 1316 /* We are now aligned, write as much as possible */ 1317 while(len >= map_bankwidth(map)) { 1318 map_word datum; 1319 1320 datum = map_word_load(map, buf); 1321 1322 ret = do_write_oneword(map, &cfi->chips[chipnum], 1323 ofs, datum); 1324 if (ret) 1325 return ret; 1326 1327 ofs += map_bankwidth(map); 1328 buf += map_bankwidth(map); 1329 (*retlen) += map_bankwidth(map); 1330 len -= map_bankwidth(map); 1331 1332 if (ofs >> cfi->chipshift) { 1333 chipnum ++; 1334 ofs = 0; 1335 if (chipnum == cfi->numchips) 1336 return 0; 1337 chipstart = cfi->chips[chipnum].start; 1338 } 1339 } 1340 1341 /* Write the trailing bytes if any */ 1342 if (len & (map_bankwidth(map)-1)) { 1343 map_word tmp_buf; 1344 1345 retry1: 1346 mutex_lock(&cfi->chips[chipnum].mutex); 1347 1348 if (cfi->chips[chipnum].state != FL_READY) { 1349 set_current_state(TASK_UNINTERRUPTIBLE); 1350 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1351 1352 mutex_unlock(&cfi->chips[chipnum].mutex); 1353 1354 schedule(); 1355 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1356 goto retry1; 1357 } 1358 1359 tmp_buf = map_read(map, ofs + chipstart); 1360 1361 mutex_unlock(&cfi->chips[chipnum].mutex); 1362 1363 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1364 1365 ret = do_write_oneword(map, &cfi->chips[chipnum], 1366 ofs, tmp_buf); 1367 if (ret) 1368 return ret; 1369 1370 (*retlen) += len; 1371 } 1372 1373 return 0; 1374 } 1375 1376 1377 /* 1378 * FIXME: interleaved mode not tested, and probably not supported! 1379 */ 1380 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1381 unsigned long adr, const u_char *buf, 1382 int len) 1383 { 1384 struct cfi_private *cfi = map->fldrv_priv; 1385 unsigned long timeo = jiffies + HZ; 1386 /* see comments in do_write_oneword() regarding uWriteTimeo. */ 1387 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1388 int ret = -EIO; 1389 unsigned long cmd_adr; 1390 int z, words; 1391 map_word datum; 1392 1393 adr += chip->start; 1394 cmd_adr = adr; 1395 1396 mutex_lock(&chip->mutex); 1397 ret = get_chip(map, chip, adr, FL_WRITING); 1398 if (ret) { 1399 mutex_unlock(&chip->mutex); 1400 return ret; 1401 } 1402 1403 datum = map_word_load(map, buf); 1404 1405 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1406 __func__, adr, datum.x[0] ); 1407 1408 XIP_INVAL_CACHED_RANGE(map, adr, len); 1409 ENABLE_VPP(map); 1410 xip_disable(map, chip, cmd_adr); 1411 1412 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1413 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1414 1415 /* Write Buffer Load */ 1416 map_write(map, CMD(0x25), cmd_adr); 1417 1418 chip->state = FL_WRITING_TO_BUFFER; 1419 1420 /* Write length of data to come */ 1421 words = len / map_bankwidth(map); 1422 map_write(map, CMD(words - 1), cmd_adr); 1423 /* Write data */ 1424 z = 0; 1425 while(z < words * map_bankwidth(map)) { 1426 datum = map_word_load(map, buf); 1427 map_write(map, datum, adr + z); 1428 1429 z += map_bankwidth(map); 1430 buf += map_bankwidth(map); 1431 } 1432 z -= map_bankwidth(map); 1433 1434 adr += z; 1435 1436 /* Write Buffer Program Confirm: GO GO GO */ 1437 map_write(map, CMD(0x29), cmd_adr); 1438 chip->state = FL_WRITING; 1439 1440 INVALIDATE_CACHE_UDELAY(map, chip, 1441 adr, map_bankwidth(map), 1442 chip->word_write_time); 1443 1444 timeo = jiffies + uWriteTimeout; 1445 1446 for (;;) { 1447 if (chip->state != FL_WRITING) { 1448 /* Someone's suspended the write. Sleep */ 1449 DECLARE_WAITQUEUE(wait, current); 1450 1451 set_current_state(TASK_UNINTERRUPTIBLE); 1452 add_wait_queue(&chip->wq, &wait); 1453 mutex_unlock(&chip->mutex); 1454 schedule(); 1455 remove_wait_queue(&chip->wq, &wait); 1456 timeo = jiffies + (HZ / 2); /* FIXME */ 1457 mutex_lock(&chip->mutex); 1458 continue; 1459 } 1460 1461 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1462 break; 1463 1464 if (chip_ready(map, adr)) { 1465 xip_enable(map, chip, adr); 1466 goto op_done; 1467 } 1468 1469 /* Latency issues. Drop the lock, wait a while and retry */ 1470 UDELAY(map, chip, adr, 1); 1471 } 1472 1473 /* reset on all failures. */ 1474 map_write( map, CMD(0xF0), chip->start ); 1475 xip_enable(map, chip, adr); 1476 /* FIXME - should have reset delay before continuing */ 1477 1478 printk(KERN_WARNING "MTD %s(): software timeout\n", 1479 __func__ ); 1480 1481 ret = -EIO; 1482 op_done: 1483 chip->state = FL_READY; 1484 put_chip(map, chip, adr); 1485 mutex_unlock(&chip->mutex); 1486 1487 return ret; 1488 } 1489 1490 1491 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1492 size_t *retlen, const u_char *buf) 1493 { 1494 struct map_info *map = mtd->priv; 1495 struct cfi_private *cfi = map->fldrv_priv; 1496 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1497 int ret = 0; 1498 int chipnum; 1499 unsigned long ofs; 1500 1501 *retlen = 0; 1502 if (!len) 1503 return 0; 1504 1505 chipnum = to >> cfi->chipshift; 1506 ofs = to - (chipnum << cfi->chipshift); 1507 1508 /* If it's not bus-aligned, do the first word write */ 1509 if (ofs & (map_bankwidth(map)-1)) { 1510 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1511 if (local_len > len) 1512 local_len = len; 1513 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1514 local_len, retlen, buf); 1515 if (ret) 1516 return ret; 1517 ofs += local_len; 1518 buf += local_len; 1519 len -= local_len; 1520 1521 if (ofs >> cfi->chipshift) { 1522 chipnum ++; 1523 ofs = 0; 1524 if (chipnum == cfi->numchips) 1525 return 0; 1526 } 1527 } 1528 1529 /* Write buffer is worth it only if more than one word to write... */ 1530 while (len >= map_bankwidth(map) * 2) { 1531 /* We must not cross write block boundaries */ 1532 int size = wbufsize - (ofs & (wbufsize-1)); 1533 1534 if (size > len) 1535 size = len; 1536 if (size % map_bankwidth(map)) 1537 size -= size % map_bankwidth(map); 1538 1539 ret = do_write_buffer(map, &cfi->chips[chipnum], 1540 ofs, buf, size); 1541 if (ret) 1542 return ret; 1543 1544 ofs += size; 1545 buf += size; 1546 (*retlen) += size; 1547 len -= size; 1548 1549 if (ofs >> cfi->chipshift) { 1550 chipnum ++; 1551 ofs = 0; 1552 if (chipnum == cfi->numchips) 1553 return 0; 1554 } 1555 } 1556 1557 if (len) { 1558 size_t retlen_dregs = 0; 1559 1560 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1561 len, &retlen_dregs, buf); 1562 1563 *retlen += retlen_dregs; 1564 return ret; 1565 } 1566 1567 return 0; 1568 } 1569 1570 1571 /* 1572 * Handle devices with one erase region, that only implement 1573 * the chip erase command. 1574 */ 1575 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 1576 { 1577 struct cfi_private *cfi = map->fldrv_priv; 1578 unsigned long timeo = jiffies + HZ; 1579 unsigned long int adr; 1580 DECLARE_WAITQUEUE(wait, current); 1581 int ret = 0; 1582 1583 adr = cfi->addr_unlock1; 1584 1585 mutex_lock(&chip->mutex); 1586 ret = get_chip(map, chip, adr, FL_WRITING); 1587 if (ret) { 1588 mutex_unlock(&chip->mutex); 1589 return ret; 1590 } 1591 1592 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1593 __func__, chip->start ); 1594 1595 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 1596 ENABLE_VPP(map); 1597 xip_disable(map, chip, adr); 1598 1599 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1600 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1601 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1602 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1603 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1604 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1605 1606 chip->state = FL_ERASING; 1607 chip->erase_suspended = 0; 1608 chip->in_progress_block_addr = adr; 1609 1610 INVALIDATE_CACHE_UDELAY(map, chip, 1611 adr, map->size, 1612 chip->erase_time*500); 1613 1614 timeo = jiffies + (HZ*20); 1615 1616 for (;;) { 1617 if (chip->state != FL_ERASING) { 1618 /* Someone's suspended the erase. Sleep */ 1619 set_current_state(TASK_UNINTERRUPTIBLE); 1620 add_wait_queue(&chip->wq, &wait); 1621 mutex_unlock(&chip->mutex); 1622 schedule(); 1623 remove_wait_queue(&chip->wq, &wait); 1624 mutex_lock(&chip->mutex); 1625 continue; 1626 } 1627 if (chip->erase_suspended) { 1628 /* This erase was suspended and resumed. 1629 Adjust the timeout */ 1630 timeo = jiffies + (HZ*20); /* FIXME */ 1631 chip->erase_suspended = 0; 1632 } 1633 1634 if (chip_ready(map, adr)) 1635 break; 1636 1637 if (time_after(jiffies, timeo)) { 1638 printk(KERN_WARNING "MTD %s(): software timeout\n", 1639 __func__ ); 1640 break; 1641 } 1642 1643 /* Latency issues. Drop the lock, wait a while and retry */ 1644 UDELAY(map, chip, adr, 1000000/HZ); 1645 } 1646 /* Did we succeed? */ 1647 if (!chip_good(map, adr, map_word_ff(map))) { 1648 /* reset on all failures. */ 1649 map_write( map, CMD(0xF0), chip->start ); 1650 /* FIXME - should have reset delay before continuing */ 1651 1652 ret = -EIO; 1653 } 1654 1655 chip->state = FL_READY; 1656 xip_enable(map, chip, adr); 1657 put_chip(map, chip, adr); 1658 mutex_unlock(&chip->mutex); 1659 1660 return ret; 1661 } 1662 1663 1664 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 1665 { 1666 struct cfi_private *cfi = map->fldrv_priv; 1667 unsigned long timeo = jiffies + HZ; 1668 DECLARE_WAITQUEUE(wait, current); 1669 int ret = 0; 1670 1671 adr += chip->start; 1672 1673 mutex_lock(&chip->mutex); 1674 ret = get_chip(map, chip, adr, FL_ERASING); 1675 if (ret) { 1676 mutex_unlock(&chip->mutex); 1677 return ret; 1678 } 1679 1680 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", 1681 __func__, adr ); 1682 1683 XIP_INVAL_CACHED_RANGE(map, adr, len); 1684 ENABLE_VPP(map); 1685 xip_disable(map, chip, adr); 1686 1687 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1688 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1689 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1690 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1691 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1692 map_write(map, cfi->sector_erase_cmd, adr); 1693 1694 chip->state = FL_ERASING; 1695 chip->erase_suspended = 0; 1696 chip->in_progress_block_addr = adr; 1697 1698 INVALIDATE_CACHE_UDELAY(map, chip, 1699 adr, len, 1700 chip->erase_time*500); 1701 1702 timeo = jiffies + (HZ*20); 1703 1704 for (;;) { 1705 if (chip->state != FL_ERASING) { 1706 /* Someone's suspended the erase. Sleep */ 1707 set_current_state(TASK_UNINTERRUPTIBLE); 1708 add_wait_queue(&chip->wq, &wait); 1709 mutex_unlock(&chip->mutex); 1710 schedule(); 1711 remove_wait_queue(&chip->wq, &wait); 1712 mutex_lock(&chip->mutex); 1713 continue; 1714 } 1715 if (chip->erase_suspended) { 1716 /* This erase was suspended and resumed. 1717 Adjust the timeout */ 1718 timeo = jiffies + (HZ*20); /* FIXME */ 1719 chip->erase_suspended = 0; 1720 } 1721 1722 if (chip_ready(map, adr)) { 1723 xip_enable(map, chip, adr); 1724 break; 1725 } 1726 1727 if (time_after(jiffies, timeo)) { 1728 xip_enable(map, chip, adr); 1729 printk(KERN_WARNING "MTD %s(): software timeout\n", 1730 __func__ ); 1731 break; 1732 } 1733 1734 /* Latency issues. Drop the lock, wait a while and retry */ 1735 UDELAY(map, chip, adr, 1000000/HZ); 1736 } 1737 /* Did we succeed? */ 1738 if (!chip_good(map, adr, map_word_ff(map))) { 1739 /* reset on all failures. */ 1740 map_write( map, CMD(0xF0), chip->start ); 1741 /* FIXME - should have reset delay before continuing */ 1742 1743 ret = -EIO; 1744 } 1745 1746 chip->state = FL_READY; 1747 put_chip(map, chip, adr); 1748 mutex_unlock(&chip->mutex); 1749 return ret; 1750 } 1751 1752 1753 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 1754 { 1755 unsigned long ofs, len; 1756 int ret; 1757 1758 ofs = instr->addr; 1759 len = instr->len; 1760 1761 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); 1762 if (ret) 1763 return ret; 1764 1765 instr->state = MTD_ERASE_DONE; 1766 mtd_erase_callback(instr); 1767 1768 return 0; 1769 } 1770 1771 1772 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 1773 { 1774 struct map_info *map = mtd->priv; 1775 struct cfi_private *cfi = map->fldrv_priv; 1776 int ret = 0; 1777 1778 if (instr->addr != 0) 1779 return -EINVAL; 1780 1781 if (instr->len != mtd->size) 1782 return -EINVAL; 1783 1784 ret = do_erase_chip(map, &cfi->chips[0]); 1785 if (ret) 1786 return ret; 1787 1788 instr->state = MTD_ERASE_DONE; 1789 mtd_erase_callback(instr); 1790 1791 return 0; 1792 } 1793 1794 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 1795 unsigned long adr, int len, void *thunk) 1796 { 1797 struct cfi_private *cfi = map->fldrv_priv; 1798 int ret; 1799 1800 mutex_lock(&chip->mutex); 1801 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 1802 if (ret) 1803 goto out_unlock; 1804 chip->state = FL_LOCKING; 1805 1806 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", 1807 __func__, adr, len); 1808 1809 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1810 cfi->device_type, NULL); 1811 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1812 cfi->device_type, NULL); 1813 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 1814 cfi->device_type, NULL); 1815 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1816 cfi->device_type, NULL); 1817 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1818 cfi->device_type, NULL); 1819 map_write(map, CMD(0x40), chip->start + adr); 1820 1821 chip->state = FL_READY; 1822 put_chip(map, chip, adr + chip->start); 1823 ret = 0; 1824 1825 out_unlock: 1826 mutex_unlock(&chip->mutex); 1827 return ret; 1828 } 1829 1830 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 1831 unsigned long adr, int len, void *thunk) 1832 { 1833 struct cfi_private *cfi = map->fldrv_priv; 1834 int ret; 1835 1836 mutex_lock(&chip->mutex); 1837 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 1838 if (ret) 1839 goto out_unlock; 1840 chip->state = FL_UNLOCKING; 1841 1842 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", 1843 __func__, adr, len); 1844 1845 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1846 cfi->device_type, NULL); 1847 map_write(map, CMD(0x70), adr); 1848 1849 chip->state = FL_READY; 1850 put_chip(map, chip, adr + chip->start); 1851 ret = 0; 1852 1853 out_unlock: 1854 mutex_unlock(&chip->mutex); 1855 return ret; 1856 } 1857 1858 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1859 { 1860 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 1861 } 1862 1863 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 1864 { 1865 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 1866 } 1867 1868 1869 static void cfi_amdstd_sync (struct mtd_info *mtd) 1870 { 1871 struct map_info *map = mtd->priv; 1872 struct cfi_private *cfi = map->fldrv_priv; 1873 int i; 1874 struct flchip *chip; 1875 int ret = 0; 1876 DECLARE_WAITQUEUE(wait, current); 1877 1878 for (i=0; !ret && i<cfi->numchips; i++) { 1879 chip = &cfi->chips[i]; 1880 1881 retry: 1882 mutex_lock(&chip->mutex); 1883 1884 switch(chip->state) { 1885 case FL_READY: 1886 case FL_STATUS: 1887 case FL_CFI_QUERY: 1888 case FL_JEDEC_QUERY: 1889 chip->oldstate = chip->state; 1890 chip->state = FL_SYNCING; 1891 /* No need to wake_up() on this state change - 1892 * as the whole point is that nobody can do anything 1893 * with the chip now anyway. 1894 */ 1895 case FL_SYNCING: 1896 mutex_unlock(&chip->mutex); 1897 break; 1898 1899 default: 1900 /* Not an idle state */ 1901 set_current_state(TASK_UNINTERRUPTIBLE); 1902 add_wait_queue(&chip->wq, &wait); 1903 1904 mutex_unlock(&chip->mutex); 1905 1906 schedule(); 1907 1908 remove_wait_queue(&chip->wq, &wait); 1909 1910 goto retry; 1911 } 1912 } 1913 1914 /* Unlock the chips again */ 1915 1916 for (i--; i >=0; i--) { 1917 chip = &cfi->chips[i]; 1918 1919 mutex_lock(&chip->mutex); 1920 1921 if (chip->state == FL_SYNCING) { 1922 chip->state = chip->oldstate; 1923 wake_up(&chip->wq); 1924 } 1925 mutex_unlock(&chip->mutex); 1926 } 1927 } 1928 1929 1930 static int cfi_amdstd_suspend(struct mtd_info *mtd) 1931 { 1932 struct map_info *map = mtd->priv; 1933 struct cfi_private *cfi = map->fldrv_priv; 1934 int i; 1935 struct flchip *chip; 1936 int ret = 0; 1937 1938 for (i=0; !ret && i<cfi->numchips; i++) { 1939 chip = &cfi->chips[i]; 1940 1941 mutex_lock(&chip->mutex); 1942 1943 switch(chip->state) { 1944 case FL_READY: 1945 case FL_STATUS: 1946 case FL_CFI_QUERY: 1947 case FL_JEDEC_QUERY: 1948 chip->oldstate = chip->state; 1949 chip->state = FL_PM_SUSPENDED; 1950 /* No need to wake_up() on this state change - 1951 * as the whole point is that nobody can do anything 1952 * with the chip now anyway. 1953 */ 1954 case FL_PM_SUSPENDED: 1955 break; 1956 1957 default: 1958 ret = -EAGAIN; 1959 break; 1960 } 1961 mutex_unlock(&chip->mutex); 1962 } 1963 1964 /* Unlock the chips again */ 1965 1966 if (ret) { 1967 for (i--; i >=0; i--) { 1968 chip = &cfi->chips[i]; 1969 1970 mutex_lock(&chip->mutex); 1971 1972 if (chip->state == FL_PM_SUSPENDED) { 1973 chip->state = chip->oldstate; 1974 wake_up(&chip->wq); 1975 } 1976 mutex_unlock(&chip->mutex); 1977 } 1978 } 1979 1980 return ret; 1981 } 1982 1983 1984 static void cfi_amdstd_resume(struct mtd_info *mtd) 1985 { 1986 struct map_info *map = mtd->priv; 1987 struct cfi_private *cfi = map->fldrv_priv; 1988 int i; 1989 struct flchip *chip; 1990 1991 for (i=0; i<cfi->numchips; i++) { 1992 1993 chip = &cfi->chips[i]; 1994 1995 mutex_lock(&chip->mutex); 1996 1997 if (chip->state == FL_PM_SUSPENDED) { 1998 chip->state = FL_READY; 1999 map_write(map, CMD(0xF0), chip->start); 2000 wake_up(&chip->wq); 2001 } 2002 else 2003 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 2004 2005 mutex_unlock(&chip->mutex); 2006 } 2007 } 2008 2009 2010 /* 2011 * Ensure that the flash device is put back into read array mode before 2012 * unloading the driver or rebooting. On some systems, rebooting while 2013 * the flash is in query/program/erase mode will prevent the CPU from 2014 * fetching the bootloader code, requiring a hard reset or power cycle. 2015 */ 2016 static int cfi_amdstd_reset(struct mtd_info *mtd) 2017 { 2018 struct map_info *map = mtd->priv; 2019 struct cfi_private *cfi = map->fldrv_priv; 2020 int i, ret; 2021 struct flchip *chip; 2022 2023 for (i = 0; i < cfi->numchips; i++) { 2024 2025 chip = &cfi->chips[i]; 2026 2027 mutex_lock(&chip->mutex); 2028 2029 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2030 if (!ret) { 2031 map_write(map, CMD(0xF0), chip->start); 2032 chip->state = FL_SHUTDOWN; 2033 put_chip(map, chip, chip->start); 2034 } 2035 2036 mutex_unlock(&chip->mutex); 2037 } 2038 2039 return 0; 2040 } 2041 2042 2043 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 2044 void *v) 2045 { 2046 struct mtd_info *mtd; 2047 2048 mtd = container_of(nb, struct mtd_info, reboot_notifier); 2049 cfi_amdstd_reset(mtd); 2050 return NOTIFY_DONE; 2051 } 2052 2053 2054 static void cfi_amdstd_destroy(struct mtd_info *mtd) 2055 { 2056 struct map_info *map = mtd->priv; 2057 struct cfi_private *cfi = map->fldrv_priv; 2058 2059 cfi_amdstd_reset(mtd); 2060 unregister_reboot_notifier(&mtd->reboot_notifier); 2061 kfree(cfi->cmdset_priv); 2062 kfree(cfi->cfiq); 2063 kfree(cfi); 2064 kfree(mtd->eraseregions); 2065 } 2066 2067 MODULE_LICENSE("GPL"); 2068 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 2069 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 2070 MODULE_ALIAS("cfi_cmdset_0006"); 2071 MODULE_ALIAS("cfi_cmdset_0701"); 2072