/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
"Not required" : "Required"); 100 101 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 102 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 103 else 104 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 105 106 if (extp->BlkProt == 0) 107 printk(" Block protection: Not supported\n"); 108 else 109 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 110 111 112 printk(" Temporary block unprotect: %s\n", 113 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 114 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 115 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 116 printk(" Burst mode: %s\n", 117 extp->BurstMode ? "Supported" : "Not supported"); 118 if (extp->PageMode == 0) 119 printk(" Page mode: Not supported\n"); 120 else 121 printk(" Page mode: %d word page\n", extp->PageMode << 2); 122 123 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 124 extp->VppMin >> 4, extp->VppMin & 0xf); 125 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 126 extp->VppMax >> 4, extp->VppMax & 0xf); 127 128 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 129 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 130 else 131 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 132 } 133 #endif 134 135 #ifdef AMD_BOOTLOC_BUG 136 /* Wheee. Bring me the head of someone at AMD. */ 137 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) 138 { 139 struct map_info *map = mtd->priv; 140 struct cfi_private *cfi = map->fldrv_priv; 141 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 142 __u8 major = extp->MajorVersion; 143 __u8 minor = extp->MinorVersion; 144 145 if (((major << 8) | minor) < 0x3131) { 146 /* CFI version 1.0 => don't trust bootloc */ 147 148 DEBUG(MTD_DEBUG_LEVEL1, 149 "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 150 map->name, cfi->mfr, cfi->id); 151 152 /* AFAICS all 29LV400 with a bottom boot block have a device ID 153 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 154 * These were badly detected as they have the 0x80 bit set 155 * so treat them as a special case. 156 */ 157 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 158 159 /* Macronix added CFI to their 2nd generation 160 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 161 * Fujitsu, Spansion, EON, ESI and older Macronix) 162 * has CFI. 163 * 164 * Therefore also check the manufacturer. 165 * This reduces the risk of false detection due to 166 * the 8-bit device ID. 167 */ 168 (cfi->mfr == CFI_MFR_MACRONIX)) { 169 DEBUG(MTD_DEBUG_LEVEL1, 170 "%s: Macronix MX29LV400C with bottom boot block" 171 " detected\n", map->name); 172 extp->TopBottom = 2; /* bottom boot */ 173 } else 174 if (cfi->id & 0x80) { 175 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 176 extp->TopBottom = 3; /* top boot */ 177 } else { 178 extp->TopBottom = 2; /* bottom boot */ 179 } 180 181 DEBUG(MTD_DEBUG_LEVEL1, 182 "%s: AMD CFI PRI V%c.%c has no boot block field;" 183 " deduced %s from Device ID\n", map->name, major, minor, 184 extp->TopBottom == 2 ? 
"bottom" : "top"); 185 } 186 } 187 #endif 188 189 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) 190 { 191 struct map_info *map = mtd->priv; 192 struct cfi_private *cfi = map->fldrv_priv; 193 if (cfi->cfiq->BufWriteTimeoutTyp) { 194 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); 195 mtd->write = cfi_amdstd_write_buffers; 196 } 197 } 198 199 /* Atmel chips don't use the same PRI format as AMD chips */ 200 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 201 { 202 struct map_info *map = mtd->priv; 203 struct cfi_private *cfi = map->fldrv_priv; 204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 205 struct cfi_pri_atmel atmel_pri; 206 207 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 208 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 209 210 if (atmel_pri.Features & 0x02) 211 extp->EraseSuspend = 2; 212 213 /* Some chips got it backwards... */ 214 if (cfi->id == AT49BV6416) { 215 if (atmel_pri.BottomBoot) 216 extp->TopBottom = 3; 217 else 218 extp->TopBottom = 2; 219 } else { 220 if (atmel_pri.BottomBoot) 221 extp->TopBottom = 2; 222 else 223 extp->TopBottom = 3; 224 } 225 226 /* burst write mode not supported */ 227 cfi->cfiq->BufWriteTimeoutTyp = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0; 229 } 230 231 static void fixup_use_secsi(struct mtd_info *mtd, void *param) 232 { 233 /* Setup for chips with a secsi area */ 234 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 235 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 236 } 237 238 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) 239 { 240 struct map_info *map = mtd->priv; 241 struct cfi_private *cfi = map->fldrv_priv; 242 if ((cfi->cfiq->NumEraseRegions == 1) && 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 244 mtd->erase = cfi_amdstd_erase_chip; 245 } 246 247 } 248 249 /* 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 251 * locked by default. 252 */ 253 static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param) 254 { 255 mtd->lock = cfi_atmel_lock; 256 mtd->unlock = cfi_atmel_unlock; 257 mtd->flags |= MTD_POWERUP_LOCK; 258 } 259 260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 261 { 262 struct map_info *map = mtd->priv; 263 struct cfi_private *cfi = map->fldrv_priv; 264 265 /* 266 * These flashes report two seperate eraseblock regions based on the 267 * sector_erase-size and block_erase-size, although they both operate on the 268 * same memory. This is not allowed according to CFI, so we just pick the 269 * sector_erase-size. 

static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It appears the device IDs are common
	 * as well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}
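
/*
 * This is the entry point for chips whose primary vendor command set is
 * 0x0002 (or the aliased 0x0006/0x0701, see below).  The generic CFI probe
 * code resolves the command set ID to this function, so a map driver
 * typically reaches it indirectly, e.g.:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", map);
 *
 * with the probe ending up here once the CFI query data has been read.
 */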
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
			 *      http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
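
/*
 * get_chip()/put_chip() arbitrate access to a single chip among concurrent
 * callers.  When the chip is busy erasing and the extended query table
 * advertises erase-suspend for the requested mode, get_chip() suspends the
 * erase with the Erase-Suspend command (0xB0) so the caller can read or
 * program another sector; put_chip() later resumes it with Erase-Resume
 * (0x30).  Callers that cannot be served sleep on the chip's wait queue
 * until woken.
 */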
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
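
/*
 * Reads need no command sequence: once the chip is in array (read) mode,
 * its contents appear directly in the mapped address space.  We only issue
 * a reset (0xF0) to force array mode if the chip isn't already there, then
 * copy the data out.
 */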
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}


static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
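
/*
 * Word programming uses the standard three-cycle unlock followed by the
 * Program command and the data itself:
 *
 *	0xAA -> addr_unlock1, 0x55 -> addr_unlock2, 0xA0 -> addr_unlock1,
 *	datum -> target address
 *
 * Completion is detected by polling with chip_ready()/chip_good(), and a
 * failed write is retried up to MAX_WORD_RETRIES times.
 */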
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
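
/*
 * cfi_amdstd_write_words() builds arbitrary byte-range writes on top of
 * the word-programming primitive above.  An unaligned head or tail is
 * handled by reading the containing bus word, merging in the new bytes
 * with map_word_load_partial() and programming the merged word back;
 * the aligned middle is programmed one bus word at a time.
 */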
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
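
/*
 * Buffered writes use the Write-to-Buffer sequence: after the two unlock
 * cycles, 0x25 (Write Buffer Load) is written to the sector address,
 * followed by the word count minus one, the data words themselves, and
 * finally 0x29 (Write Buffer Program Confirm) to start programming.
 */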
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
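
/*
 * The write buffer holds at most wbufsize bytes, where
 * wbufsize = interleave << MaxBufWriteSize (MaxBufWriteSize being the
 * log2 value from the CFI query).  Requests are split so that no buffered
 * write crosses a write-buffer boundary; unaligned leading and trailing
 * fragments fall back to word writes.
 */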
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}


/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
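/*
 * Chip erase is the six-cycle sequence: two unlock cycles, 0x80 (Erase
 * setup), two more unlock cycles, then 0x10 (Chip Erase) at the unlock
 * address.  Completion is polled with chip_ready()/chip_good() against
 * an all-ones pattern.
 */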
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
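
/*
 * Sector erase shares the six-cycle prefix with chip erase, but the final
 * cycle writes 0x30 to an address within the sector to be erased instead
 * of 0x10 to the unlock address.
 */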
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
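
/*
 * The MTD erase entry points delegate the per-block walk to
 * cfi_varsize_frob(), the generic CFI helper that iterates over the erase
 * regions and invokes the given callback (here do_erase_oneblock()) for
 * each block in the requested range.
 */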
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}


static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}


/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");