/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 *
 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
"Not required" : "Required"); 103 104 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 105 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 106 else 107 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 108 109 if (extp->BlkProt == 0) 110 printk(" Block protection: Not supported\n"); 111 else 112 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 113 114 115 printk(" Temporary block unprotect: %s\n", 116 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 117 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 118 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 119 printk(" Burst mode: %s\n", 120 extp->BurstMode ? "Supported" : "Not supported"); 121 if (extp->PageMode == 0) 122 printk(" Page mode: Not supported\n"); 123 else 124 printk(" Page mode: %d word page\n", extp->PageMode << 2); 125 126 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 127 extp->VppMin >> 4, extp->VppMin & 0xf); 128 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 129 extp->VppMax >> 4, extp->VppMax & 0xf); 130 131 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 132 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 133 else 134 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 135 } 136 #endif 137 138 #ifdef AMD_BOOTLOC_BUG 139 /* Wheee. Bring me the head of someone at AMD. */ 140 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) 141 { 142 struct map_info *map = mtd->priv; 143 struct cfi_private *cfi = map->fldrv_priv; 144 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 145 __u8 major = extp->MajorVersion; 146 __u8 minor = extp->MinorVersion; 147 148 if (((major << 8) | minor) < 0x3131) { 149 /* CFI version 1.0 => don't trust bootloc */ 150 if (cfi->id & 0x80) { 151 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. 
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	if (atmel_pri.BottomBoot)
		extp->TopBottom = 2;
	else
		extp->TopBottom = 3;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It appears the device IDs are as
	 * well.  This table picks all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
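
/*
 * Note: cfi_fixup() (see cfi.h) walks one of these tables and calls
 * every entry whose manufacturer and device IDs match the probed chip;
 * CFI_MFR_ANY and CFI_ID_ANY act as wildcards.  A minimal sketch of a
 * new entry, using a hypothetical device ID purely as an example:
 *
 *	{ CFI_MFR_AMD, 0x1234, fixup_my_quirk, NULL },
 *
 * Since all matching entries run, in table order, device-specific
 * quirks and the generic wildcard fixups can coexist in one table.
 */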

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming bottom.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
			 (cfi->cfiq->InterfaceDesc ==
			  CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
			 (cfi->cfiq->InterfaceDesc ==
			  CFI_INTERFACE_X16_BY_X32_ASYNC)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
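
/*
 * Build the mtd_info geometry from the CFI query data.  The device
 * size is (1 << DevSize) bytes scaled by the interleave, and each
 * chip's erase regions are replicated across all chips in the map:
 * region i of chip j starts at (j * devsize) plus the region's offset
 * within one chip.
 */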
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken.  See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
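
/*
 * The two back-to-back reads in chip_ready() rely on the AMD "toggle
 * bit" status scheme: while an embedded program/erase algorithm is
 * running, reads return status and DQ6 flips on every read, so
 * consecutive reads differ; once the algorithm finishes, reads return
 * stable array data and the two values match.
 */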
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
		    )))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased.  However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
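
/*
 * All the operation routines below follow the same locking pattern
 * around these two helpers (a sketch, not a separate API):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands, poll with chip_ready()/chip_good() ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 */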
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
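
/*
 * SecSi (Secured Silicon) sector access.  The 0xAA/0x55/0x88 unlock
 * sequence below switches the chip's address space over to the SecSi
 * region, and 0xAA/0x55/0x90 followed by 0x00 switches back to the
 * normal array.  Only 8 bytes per chip are exposed here; see the
 * chipnum/ofs arithmetic in cfi_amdstd_secsi_read() below.
 */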
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;


	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from>>3;
	ofs = from & 7;


	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
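
/*
 * Program one bus word.  The command cycles issued below are the
 * standard AMD embedded-program sequence:
 *
 *	0xAA -> addr_unlock1,  0x55 -> addr_unlock2,	(unlock)
 *	0xA0 -> addr_unlock1,				(program setup)
 *	datum -> target address
 *
 * after which the chip programs autonomously and is polled with
 * chip_ready()/chip_good().  Remember that NOR programming can only
 * clear bits (1 -> 0); the erased state is all 0xFF.
 */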
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip has no field for the
	 * maximum timeout, only for the typical one, which can be far too
	 * short depending on the conditions.  The ' + 1' is to avoid having
	 * a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
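
/*
 * Word-write entry point.  Heads and tails that are not bus-aligned
 * are handled by read-modify-write: the containing bus word is read
 * back, map_word_load_partial() merges in the new bytes, and the
 * merged word is programmed.  Reprogramming the untouched bytes with
 * their current contents is harmless since it never tries to set a
 * cleared bit.
 */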
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
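
/*
 * Buffered write.  The sequence issued below is the standard AMD
 * write-buffer protocol: unlock (0xAA/0x55), Write Buffer Load (0x25)
 * at the sector address, a word count of (words - 1), the data words
 * themselves, and finally Write Buffer Program Confirm (0x29), which
 * starts the embedded algorithm for the whole buffer at once.
 */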
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	      __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write(map, CMD(0xF0), chip->start);
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
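
/*
 * Both erase paths below share the standard six-cycle AMD prefix:
 * 0xAA/0x55 unlock, 0x80 (erase setup), then 0xAA/0x55 again.  The
 * final cycle selects the operation: 0x10 at addr_unlock1 erases the
 * whole chip, while 0x30 written to a sector address erases just that
 * sector.
 */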
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, chip->start);

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	      __func__, adr);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__);
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
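
/*
 * cfi_varsize_frob() (from cfi.h) checks that [ofs, ofs+len) is
 * aligned to erase-block boundaries and then invokes the given
 * callback (do_erase_oneblock() here) once per block, coping with
 * the different block sizes of the various erase regions.
 */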
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
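
/*
 * sync and suspend below share one scheme: each idle chip is parked in
 * a holding state (FL_SYNCING or FL_PM_SUSPENDED) under its mutex so
 * that no new operation can start; busy chips make sync wait on the
 * chip's wait queue, while suspend fails the whole call with -EAGAIN
 * and the rollback loop restores every chip already parked.
 */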
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}


static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}


static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");