// SPDX-License-Identifier: GPL-2.0-only
/*
 * Overview:
 *   This is the generic MTD driver for NAND flash devices. It should be
 *   capable of working with almost all NAND chips currently available.
 *
 * Additional technical information is available on
 * http://www.linux-mtd.infradead.org/doc/nand.html
 *
 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 * Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 * TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/nand-ecc-sw-bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>

#include "internals.h"

/*
 * Map a page offset within a block to its pairing information for the
 * "distance 3" pairing scheme: a page is paired with the page dist away,
 * where dist is 3 except near the block end, where it shrinks to 2.
 */
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
				       struct mtd_pairing_info *info)
{
	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
	int dist = 3;

	/* The very last page pairs at distance 2 instead of 3 */
	if (page == lastpage)
		dist = 2;

	/* Page 0 and all odd pages belong to group 0 */
	if (!page || (page & 1)) {
		info->group = 0;
		info->pair = (page + 1) / 2;
	} else {
		info->group = 1;
		info->pair = (page + 1 - dist) / 2;
	}

	return 0;
}

/*
 * Inverse of nand_pairing_dist3_get_info(): convert (group, pair) back
 * into a page offset within the erase block, or -EINVAL if out of range.
 */
static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct mtd_pairing_info *info)
{
	int lastpair
= ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	if (!info->group && !info->pair)
		return 0;

	/* The last pair uses distance 2, mirroring get_info() */
	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	/* Reject (group, pair) combinations past the end of the block */
	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}

/* Pairing scheme implementing the distance-3 layout described above */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};

/*
 * Validate that both @ofs and @len are aligned on an erase-block
 * boundary. Returns 0 when aligned, -EINVAL otherwise.
 */
static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nand_extract_bits - Copy unaligned bits from one buffer to another one
 * @dst: destination buffer
 * @dst_off: bit offset at which the writing starts
 * @src: source buffer
 * @src_off: bit offset at which the reading starts
 * @nbits: number of bits to copy from @src to @dst
 *
 * Copy bits from one memory region to another (overlap authorized).
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	/* Normalize: advance both pointers to the byte holding the first bit */
	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		/* Copy at most one byte's worth, limited by both bit offsets */
		n = min3(8 - dst_off, 8 - src_off, nbits);

		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);

/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *	PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 * NOTE(review): the check below accepts cs == nanddev_ntargets(),
	 * which looks one past the valid range — confirm against callers
	 * before tightening to '>='.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);

/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);

/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}

/**
 * nand_bbm_get_next_page - Get the next page for bad block markers
 * @chip: NAND chip object
 * @page: First page to start checking for bad block marker usage
 *
 * Returns an integer that corresponds to the page offset within a block, for
 * a page that is used to store bad block markers. If no more pages are
 * available, -EINVAL is returned.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	/* No BBM placement flags set: default to the first page only */
	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}

/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Check every page within the block that may carry a BB marker */
	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * With the default 8 badblockbits any non-0xFF marker byte
		 * means bad; otherwise count set bits against the threshold.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}

/**
 * nand_region_is_secured() - Check if the region is secured
 * @chip: NAND chip object
 * @offset: Offset of the region to check
 * @size: Size of the region to check
 *
 * Checks if the region is secured by comparing the offset and size with the
 * list of secure regions obtained from DT. Returns true if the region is
 * secured else false.
 */
static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
{
	int i;

	/* Skip touching the secure regions if present */
	for (i = 0; i < chip->nr_secure_regions; i++) {
		const struct nand_secure_region *region = &chip->secure_regions[i];

		/* Continue only when [offset, offset+size) misses the region */
		if (offset + size <= region->offset ||
		    offset >= region->offset + region->size)
			continue;

		pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
			 __func__, offset, offset + size);

		return true;
	}

	return false;
}

/*
 * Check the bad block marker(s) for the block containing @ofs, honouring
 * the NAND_NO_BBM_QUIRK option and refusing to touch secure regions.
 */
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, ofs, mtd->erasesize))
		return -EIO;

	/* Expert analysis mode: report every block as good */
	if (mtd_check_expert_analysis_mode())
		return 0;

	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}

/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			mutex_lock(&chip->controller->lock);
			return;
		}
		mutex_unlock(&chip->lock);

		/* Drop the chip lock while sleeping so resume can proceed */
		wait_event(chip->resume_wq, !chip->suspended);
	}
}

/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @chip: NAND chip object
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 */
static int nand_check_wp(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* controller responsible for NAND write protect */
	if (chip->controller->controller_wp)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* WP bit set in STATUS means writable; clear means protected */
	return status & NAND_STATUS_WP ? 0 : 1;
}

/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Returns a pointer just past the last consumed client byte.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is
the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: offset must be even, write both marker bytes */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Write the marker to every page that is supposed to carry one */
	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error but still try the remaining pages */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *	NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 * @allowbbt: 1, if it is allowed to access the bbt area
 *
 * Check, if the block is bad.
Either by reading the bad block table or
 * calling of the scan function.
 */
static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
	/* Return info from the table */
	if (chip->bbt)
		return nand_isbad_bbt(chip, ofs, allowbbt);

	return nand_isbad_bbm(chip, ofs);
}

/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned.
 *
 * This helper is intended to be used when the controller does not have access
 * to the NAND R/B pin.
 *
 * Be aware that calling this helper from an ->exec_op() implementation means
 * ->exec_op() must be re-entrant.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_interface_config *conf;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	conf = nand_get_interface_config(chip);
	ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/*
	 * +1 below is necessary because if we are now in the last fraction
	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
	 * small jiffy fraction - possibly leading to false timeout
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true,
					false);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is 10us,
		 * use this as polling delay before doing something smarter (ie.
		 * deriving a delay from the timeout value, timeout_ms/ratio).
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * We have to exit READ_STATUS mode in order to read real data on the
	 * bus in case the WAITRDY instruction is preceding a DATA_IN
	 * instruction.
	 */
	nand_exit_status_op(chip);

	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);

/**
 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
 * @chip: NAND chip structure
 * @gpiod: GPIO descriptor of R/B pin
 * @timeout_ms: Timeout in ms
 *
 * Poll the R/B GPIO pin until it becomes ready. If that does not happen
 * within the specified timeout, -ETIMEDOUT is returned.
 *
 * This helper is intended to be used when the controller has access to the
 * NAND R/B pin over GPIO.
 *
 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
 */
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
		      unsigned long timeout_ms)
{

	/*
	 * Wait until R/B pin indicates chip is ready or timeout occurs.
	 * +1 below is necessary because if we are now in the last fraction
	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
	 * small jiffy fraction - possibly leading to false timeout.
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		if (gpiod_get_value_cansleep(gpiod))
			return 0;

		cond_resched();
	} while (time_before(jiffies, timeout_ms));

	/* Last-chance sample after the deadline to avoid a spurious timeout */
	return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);

/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context.
May happen when in panic and trying to write
 * an oops through mtdoops.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}

/* True when the chip advertises GET_FEATURES support for @addr */
static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.get_feature_list));
}

/* True when the chip advertises SET_FEATURES support for @addr */
static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.set_feature_list));
}

/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}

/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * A nand_reset_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	/* Encode the requested mode and interface type for SET_FEATURES */
	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}

/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
 *                                NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
	}

	/* Walk the modes downward until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}

/**
 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
 *                                  NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can
eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	/* Walk the modes downward until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}

/**
 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
 *                            NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
static int nand_choose_best_timings(struct nand_chip *chip,
				    struct nand_interface_config *iface)
{
	int ret;

	/* Try the fastest timings: NV-DDR */
	ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
	if (!ret)
		return 0;

	/* Fallback to SDR timings otherwise */
	return nand_choose_best_sdr_timings(chip, iface, NULL);
}

/**
 * nand_choose_interface_config - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver. Eventually let the NAND manufacturer driver propose his own
 * set of timings.
 *
 * After this function nand_chip->interface_config is initialized with the best
 * timing mode available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_interface_config(struct nand_chip *chip)
{
	struct nand_interface_config *iface;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	if (chip->ops.choose_interface_config)
		ret = chip->ops.choose_interface_config(chip, iface);
	else
		ret = nand_choose_best_timings(chip, iface);

	/*
	 * On success @iface ownership moves to the chip (the timing helpers
	 * store it in chip->best_interface_config; presumably the
	 * ->choose_interface_config() hook does the same — verify in
	 * implementers), so it is only freed on failure.
	 */
	if (ret)
		kfree(iface);

	return ret;
}

/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}

/*
 * Small-page (<= 512 byte) page read through ->exec_op(): issue
 * READ0/READ1/READOOB, wait for ready, then read @len bytes at
 * @offset_in_page of @page into @buf.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0.
*/ 1153 if (!len) 1154 op.ninstrs--; 1155 1156 if (offset_in_page >= mtd->writesize) 1157 instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB; 1158 else if (offset_in_page >= 256 && 1159 !(chip->options & NAND_BUSWIDTH_16)) 1160 instrs[0].ctx.cmd.opcode = NAND_CMD_READ1; 1161 1162 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1163 if (ret < 0) 1164 return ret; 1165 1166 addrs[1] = page; 1167 addrs[2] = page >> 8; 1168 1169 if (chip->options & NAND_ROW_ADDR_3) { 1170 addrs[3] = page >> 16; 1171 instrs[1].ctx.addr.naddrs++; 1172 } 1173 1174 return nand_exec_op(chip, &op); 1175 } 1176 1177 static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, 1178 unsigned int offset_in_page, void *buf, 1179 unsigned int len) 1180 { 1181 const struct nand_interface_config *conf = 1182 nand_get_interface_config(chip); 1183 u8 addrs[5]; 1184 struct nand_op_instr instrs[] = { 1185 NAND_OP_CMD(NAND_CMD_READ0, 0), 1186 NAND_OP_ADDR(4, addrs, 0), 1187 NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)), 1188 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 1189 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1190 NAND_OP_DATA_IN(len, buf, 0), 1191 }; 1192 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1193 int ret; 1194 1195 /* Drop the DATA_IN instruction if len is set to 0. 
*/ 1196 if (!len) 1197 op.ninstrs--; 1198 1199 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1200 if (ret < 0) 1201 return ret; 1202 1203 addrs[2] = page; 1204 addrs[3] = page >> 8; 1205 1206 if (chip->options & NAND_ROW_ADDR_3) { 1207 addrs[4] = page >> 16; 1208 instrs[1].ctx.addr.naddrs++; 1209 } 1210 1211 return nand_exec_op(chip, &op); 1212 } 1213 1214 static void rawnand_cap_cont_reads(struct nand_chip *chip) 1215 { 1216 struct nand_memory_organization *memorg; 1217 unsigned int pages_per_lun, first_lun, last_lun; 1218 1219 memorg = nanddev_get_memorg(&chip->base); 1220 pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun; 1221 first_lun = chip->cont_read.first_page / pages_per_lun; 1222 last_lun = chip->cont_read.last_page / pages_per_lun; 1223 1224 /* Prevent sequential cache reads across LUN boundaries */ 1225 if (first_lun != last_lun) 1226 chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1; 1227 else 1228 chip->cont_read.pause_page = chip->cont_read.last_page; 1229 } 1230 1231 static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page, 1232 unsigned int offset_in_page, void *buf, 1233 unsigned int len, bool check_only) 1234 { 1235 const struct nand_interface_config *conf = 1236 nand_get_interface_config(chip); 1237 u8 addrs[5]; 1238 struct nand_op_instr start_instrs[] = { 1239 NAND_OP_CMD(NAND_CMD_READ0, 0), 1240 NAND_OP_ADDR(4, addrs, 0), 1241 NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)), 1242 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0), 1243 NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)), 1244 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 1245 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1246 NAND_OP_DATA_IN(len, buf, 0), 1247 }; 1248 struct nand_op_instr cont_instrs[] = { 1249 NAND_OP_CMD(page == chip->cont_read.pause_page ? 
1250 NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ, 1251 NAND_COMMON_TIMING_NS(conf, tWB_max)), 1252 NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 1253 NAND_COMMON_TIMING_NS(conf, tRR_min)), 1254 NAND_OP_DATA_IN(len, buf, 0), 1255 }; 1256 struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs); 1257 struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs); 1258 int ret; 1259 1260 if (!len) { 1261 start_op.ninstrs--; 1262 cont_op.ninstrs--; 1263 } 1264 1265 ret = nand_fill_column_cycles(chip, addrs, offset_in_page); 1266 if (ret < 0) 1267 return ret; 1268 1269 addrs[2] = page; 1270 addrs[3] = page >> 8; 1271 1272 if (chip->options & NAND_ROW_ADDR_3) { 1273 addrs[4] = page >> 16; 1274 start_instrs[1].ctx.addr.naddrs++; 1275 } 1276 1277 /* Check if cache reads are supported */ 1278 if (check_only) { 1279 if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op)) 1280 return -EOPNOTSUPP; 1281 1282 return 0; 1283 } 1284 1285 if (page == chip->cont_read.first_page) 1286 ret = nand_exec_op(chip, &start_op); 1287 else 1288 ret = nand_exec_op(chip, &cont_op); 1289 if (ret) 1290 return ret; 1291 1292 if (!chip->cont_read.ongoing) 1293 return 0; 1294 1295 if (page == chip->cont_read.pause_page && 1296 page != chip->cont_read.last_page) { 1297 chip->cont_read.first_page = chip->cont_read.pause_page + 1; 1298 rawnand_cap_cont_reads(chip); 1299 } else if (page == chip->cont_read.last_page) { 1300 chip->cont_read.ongoing = false; 1301 } 1302 1303 return 0; 1304 } 1305 1306 static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page) 1307 { 1308 return chip->cont_read.ongoing && page >= chip->cont_read.first_page; 1309 } 1310 1311 /** 1312 * nand_read_page_op - Do a READ PAGE operation 1313 * @chip: The NAND chip 1314 * @page: page to read 1315 * @offset_in_page: offset within the page 1316 * @buf: buffer used to store the data 1317 * @len: length of the buffer 1318 * 1319 * This function issues a READ PAGE 
operation. 1320 * This function does not select/unselect the CS line. 1321 * 1322 * Returns 0 on success, a negative error code otherwise. 1323 */ 1324 int nand_read_page_op(struct nand_chip *chip, unsigned int page, 1325 unsigned int offset_in_page, void *buf, unsigned int len) 1326 { 1327 struct mtd_info *mtd = nand_to_mtd(chip); 1328 1329 if (len && !buf) 1330 return -EINVAL; 1331 1332 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1333 return -EINVAL; 1334 1335 if (nand_has_exec_op(chip)) { 1336 if (mtd->writesize > 512) { 1337 if (rawnand_cont_read_ongoing(chip, page)) 1338 return nand_lp_exec_cont_read_page_op(chip, page, 1339 offset_in_page, 1340 buf, len, false); 1341 else 1342 return nand_lp_exec_read_page_op(chip, page, 1343 offset_in_page, buf, 1344 len); 1345 } 1346 1347 return nand_sp_exec_read_page_op(chip, page, offset_in_page, 1348 buf, len); 1349 } 1350 1351 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page); 1352 if (len) 1353 chip->legacy.read_buf(chip, buf, len); 1354 1355 return 0; 1356 } 1357 EXPORT_SYMBOL_GPL(nand_read_page_op); 1358 1359 /** 1360 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation 1361 * @chip: The NAND chip 1362 * @page: parameter page to read 1363 * @buf: buffer used to store the data 1364 * @len: length of the buffer 1365 * 1366 * This function issues a READ PARAMETER PAGE operation. 1367 * This function does not select/unselect the CS line. 1368 * 1369 * Returns 0 on success, a negative error code otherwise. 
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}

/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* NOTE(review): the legacy path ignores @force_8bit — confirm intended */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);

/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* OOB follows the data area, so reuse the page read path */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);

/*
 * Common helper for page program operations. With @prog unset, only the
 * first half (SEQIN + addresses + optional data) is executed, leaving the
 * page open so more data can be appended before nand_prog_page_end_op().
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
							      instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles after the column cycle(s) */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
1585 * 1586 * Returns 0 on success, a negative error code otherwise. 1587 */ 1588 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, 1589 unsigned int offset_in_page, const void *buf, 1590 unsigned int len) 1591 { 1592 struct mtd_info *mtd = nand_to_mtd(chip); 1593 1594 if (len && !buf) 1595 return -EINVAL; 1596 1597 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1598 return -EINVAL; 1599 1600 if (nand_has_exec_op(chip)) 1601 return nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1602 len, false); 1603 1604 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page); 1605 1606 if (buf) 1607 chip->legacy.write_buf(chip, buf, len); 1608 1609 return 0; 1610 } 1611 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op); 1612 1613 /** 1614 * nand_prog_page_end_op - ends a PROG PAGE operation 1615 * @chip: The NAND chip 1616 * 1617 * This function issues the second half of a PROG PAGE operation. 1618 * This function does not select/unselect the CS line. 1619 * 1620 * Returns 0 on success, a negative error code otherwise. 
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Fetch the chip status to detect a program failure */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte on success */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);

/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	/* A full page program always needs data, unlike the begin/end split */
	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte on success */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);

/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* NOTE(review): the legacy path ignores @force_8bit — confirm intended */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep only every other byte of the doubled stream */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		/* kfree(NULL) is a no-op on the SDR path */
		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Drop the DATA_IN instruction if the caller wants no status */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_exit_status_op);

/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Convert the eraseblock index into its first page number */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
								      instrs);

		/* Erase only takes row cycles; add a third one if needed */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		/* waitfunc() returns the status byte on success */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);

/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
									tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* waitfunc() returns the status byte on success */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* GET_FEATURE data bytes are received twice in NV-DDR mode */
		if (nand_interface_is_nvddr(conf)) {
			instrs[3].ctx.data.len *= 2;
			instrs[3].ctx.data.buf.in = ddrbuf;
		}

		ret = nand_exec_op(chip, &op);
		if (nand_interface_is_nvddr(conf)) {
			/* Keep only every other byte of the doubled stream */
			for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
				params[i] = ddrbuf[i * 2];
		}

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}

/*
 * Wait for the chip to become ready again, either through a WAIT_RDY
 * instruction or, on legacy controllers, by polling the R/B pin (or just
 * sleeping for chip_delay when no pin is wired).
 *
 * NOTE(review): the parameters are named _ms/_ns but are converted with
 * PSEC_TO_MSEC()/PSEC_TO_NSEC() — confirm the callers' units.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}

/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path does not wait here; cmdfunc handles the delay itself */
	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);

/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only checks if the
 *		controller driver supports it
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Parameter payloads (ID, status, features, etc) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set and this also indicates that in
		 * case NV-DDR timings are being used the data will be received
		 * twice.
		 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		if (check_only) {
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* Keep only every other byte of the doubled stream */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		/* kfree(NULL) is a no-op on the SDR path */
		kfree(ddrbuf);

		return ret;
	}

	/* Legacy controllers cannot reject the transfer, so claim support */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);

/**
 * nand_write_data_op - Write data from the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send on the bus
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 *
 * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		/* Legacy 8-bit access: one byte per bus cycle. */
		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};

/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: this is an in/out parameter. If @instr has already been
 *                split, then @start_offset is the offset from which to start
 *                (either an address cycle or an offset in the data buffer).
2295 * Conversely, if the function returns true (ie. instr must be 2296 * split), this parameter is updated to point to the first 2297 * data/address cycle that has not been taken care of. 2298 * 2299 * Some NAND controllers are limited and cannot send X address cycles with a 2300 * unique operation, or cannot read/write more than Y bytes at the same time. 2301 * In this case, split the instruction that does not fit in a single 2302 * controller-operation into two or more chunks. 2303 * 2304 * Returns true if the instruction must be split, false otherwise. 2305 * The @start_offset parameter is also updated to the offset at which the next 2306 * bundle of instruction must start (if an address or a data instruction). 2307 */ 2308 static bool 2309 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat, 2310 const struct nand_op_instr *instr, 2311 unsigned int *start_offset) 2312 { 2313 switch (pat->type) { 2314 case NAND_OP_ADDR_INSTR: 2315 if (!pat->ctx.addr.maxcycles) 2316 break; 2317 2318 if (instr->ctx.addr.naddrs - *start_offset > 2319 pat->ctx.addr.maxcycles) { 2320 *start_offset += pat->ctx.addr.maxcycles; 2321 return true; 2322 } 2323 break; 2324 2325 case NAND_OP_DATA_IN_INSTR: 2326 case NAND_OP_DATA_OUT_INSTR: 2327 if (!pat->ctx.data.maxlen) 2328 break; 2329 2330 if (instr->ctx.data.len - *start_offset > 2331 pat->ctx.data.maxlen) { 2332 *start_offset += pat->ctx.data.maxlen; 2333 return true; 2334 } 2335 break; 2336 2337 default: 2338 break; 2339 } 2340 2341 return false; 2342 } 2343 2344 /** 2345 * nand_op_parser_match_pat - Checks if a pattern matches the instructions 2346 * remaining in the parser context 2347 * @pat: the pattern to test 2348 * @ctx: the parser context structure to match with the pattern @pat 2349 * 2350 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx. 2351 * Returns true if this is the case, false ortherwise. 
 * When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Arrow-mark the instructions that belong to the subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif

/*
 * Compare two parser contexts: the one consuming more instructions (and,
 * on a tie, ending deeper inside the last instruction) wins.
 */
static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
				  const struct nand_op_parser_ctx *b)
{
	if (a->subop.ninstrs < b->subop.ninstrs)
		return -1;
	else if (a->subop.ninstrs > b->subop.ninstrs)
		return 1;

	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
		return -1;
	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
		return 1;

	return 0;
}

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *              does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 *
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern and keep the best-scoring match. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/* A split instruction is re-visited by the next subop. */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);

/* True iff @instr moves data over the bus (either direction). */
static bool nand_instr_is_data(const struct nand_op_instr *instr)
{
	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
			 instr->type == NAND_OP_DATA_OUT_INSTR);
}

/* Bounds-check @instr_idx against the sub-operation. */
static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
				      unsigned int instr_idx)
{
	return subop && instr_idx < subop->ninstrs;
}

/* Only the first instruction of a subop may start at a non-zero offset. */
static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
					     unsigned int instr_idx)
{
	if (instr_idx)
		return 0;

	return subop->first_instr_start_off;
}

/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to issue.
2587 */ 2588 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, 2589 unsigned int instr_idx) 2590 { 2591 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2592 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2593 return 0; 2594 2595 return nand_subop_get_start_off(subop, instr_idx); 2596 } 2597 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off); 2598 2599 /** 2600 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert 2601 * @subop: The entire sub-operation 2602 * @instr_idx: Index of the instruction inside the sub-operation 2603 * 2604 * During driver development, one could be tempted to directly use the 2605 * ->addr->naddrs field of a data instruction. This is wrong as instructions 2606 * might be split. 2607 * 2608 * Given an address instruction, returns the number of address cycle to issue. 2609 */ 2610 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, 2611 unsigned int instr_idx) 2612 { 2613 int start_off, end_off; 2614 2615 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2616 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2617 return 0; 2618 2619 start_off = nand_subop_get_addr_start_off(subop, instr_idx); 2620 2621 if (instr_idx == subop->ninstrs - 1 && 2622 subop->last_instr_end_off) 2623 end_off = subop->last_instr_end_off; 2624 else 2625 end_off = subop->instrs[instr_idx].ctx.addr.naddrs; 2626 2627 return end_off - start_off; 2628 } 2629 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc); 2630 2631 /** 2632 * nand_subop_get_data_start_off - Get the start offset in a data array 2633 * @subop: The entire sub-operation 2634 * @instr_idx: Index of the instruction inside the sub-operation 2635 * 2636 * During driver development, one could be tempted to directly use the 2637 * ->data->buf.{in,out} field of data instructions. This is wrong as data 2638 * instructions might be split. 2639 * 2640 * Given a data instruction, returns the offset to start from. 
2641 */ 2642 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, 2643 unsigned int instr_idx) 2644 { 2645 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2646 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2647 return 0; 2648 2649 return nand_subop_get_start_off(subop, instr_idx); 2650 } 2651 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off); 2652 2653 /** 2654 * nand_subop_get_data_len - Get the number of bytes to retrieve 2655 * @subop: The entire sub-operation 2656 * @instr_idx: Index of the instruction inside the sub-operation 2657 * 2658 * During driver development, one could be tempted to directly use the 2659 * ->data->len field of a data instruction. This is wrong as data instructions 2660 * might be split. 2661 * 2662 * Returns the length of the chunk of data to send/receive. 2663 */ 2664 unsigned int nand_subop_get_data_len(const struct nand_subop *subop, 2665 unsigned int instr_idx) 2666 { 2667 int start_off = 0, end_off; 2668 2669 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2670 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2671 return 0; 2672 2673 start_off = nand_subop_get_data_start_off(subop, instr_idx); 2674 2675 if (instr_idx == subop->ninstrs - 1 && 2676 subop->last_instr_end_off) 2677 end_off = subop->last_instr_end_off; 2678 else 2679 end_off = subop->instrs[instr_idx].ctx.data.len; 2680 2681 return end_off - start_off; 2682 } 2683 EXPORT_SYMBOL_GPL(nand_subop_get_data_len); 2684 2685 /** 2686 * nand_reset - Reset and initialize a NAND device 2687 * @chip: The NAND chip 2688 * @chipnr: Internal die id 2689 * 2690 * Save the timings data structure, then apply SDR timings mode 0 (see 2691 * nand_reset_interface for details), do the reset operation, and apply 2692 * back the previous timings. 2693 * 2694 * Returns 0 on success, a negative error code otherwise. 
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Fall back to safe (mode 0) timings before resetting the die. */
	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/* Re-apply the previously negotiated interface settings. */
	ret = nand_setup_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);

/**
 * nand_get_features - wrapper to perform a GET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
 */
int nand_get_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_get_features(chip, addr))
		return -ENOTSUPP;

	/* Prefer the controller-specific legacy hook when provided. */
	if (chip->legacy.get_features)
		return chip->legacy.get_features(chip, addr, subfeature_param);

	return nand_get_features_op(chip, addr, subfeature_param);
}

/**
 * nand_set_features - wrapper to perform a SET_FEATURE
 * @chip: NAND chip info structure
 * @addr: feature address
 * @subfeature_param: the subfeature parameters, a four bytes array
 *
 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
 * operation cannot be handled.
2752 */ 2753 int nand_set_features(struct nand_chip *chip, int addr, 2754 u8 *subfeature_param) 2755 { 2756 if (!nand_supports_set_features(chip, addr)) 2757 return -ENOTSUPP; 2758 2759 if (chip->legacy.set_features) 2760 return chip->legacy.set_features(chip, addr, subfeature_param); 2761 2762 return nand_set_features_op(chip, addr, subfeature_param); 2763 } 2764 2765 /** 2766 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 2767 * @buf: buffer to test 2768 * @len: buffer length 2769 * @bitflips_threshold: maximum number of bitflips 2770 * 2771 * Check if a buffer contains only 0xff, which means the underlying region 2772 * has been erased and is ready to be programmed. 2773 * The bitflips_threshold specify the maximum number of bitflips before 2774 * considering the region is not erased. 2775 * Note: The logic of this function has been extracted from the memweight 2776 * implementation, except that nand_check_erased_buf function exit before 2777 * testing the whole buffer if the number of bitflips exceed the 2778 * bitflips_threshold value. 2779 * 2780 * Returns a positive number of bitflips less than or equal to 2781 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2782 * threshold. 
2783 */ 2784 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) 2785 { 2786 const unsigned char *bitmap = buf; 2787 int bitflips = 0; 2788 int weight; 2789 2790 for (; len && ((uintptr_t)bitmap) % sizeof(long); 2791 len--, bitmap++) { 2792 weight = hweight8(*bitmap); 2793 bitflips += BITS_PER_BYTE - weight; 2794 if (unlikely(bitflips > bitflips_threshold)) 2795 return -EBADMSG; 2796 } 2797 2798 for (; len >= sizeof(long); 2799 len -= sizeof(long), bitmap += sizeof(long)) { 2800 unsigned long d = *((unsigned long *)bitmap); 2801 if (d == ~0UL) 2802 continue; 2803 weight = hweight_long(d); 2804 bitflips += BITS_PER_LONG - weight; 2805 if (unlikely(bitflips > bitflips_threshold)) 2806 return -EBADMSG; 2807 } 2808 2809 for (; len > 0; len--, bitmap++) { 2810 weight = hweight8(*bitmap); 2811 bitflips += BITS_PER_BYTE - weight; 2812 if (unlikely(bitflips > bitflips_threshold)) 2813 return -EBADMSG; 2814 } 2815 2816 return bitflips; 2817 } 2818 2819 /** 2820 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 2821 * 0xff data 2822 * @data: data buffer to test 2823 * @datalen: data length 2824 * @ecc: ECC buffer 2825 * @ecclen: ECC length 2826 * @extraoob: extra OOB buffer 2827 * @extraooblen: extra OOB length 2828 * @bitflips_threshold: maximum number of bitflips 2829 * 2830 * Check if a data buffer and its associated ECC and OOB data contains only 2831 * 0xff pattern, which means the underlying region has been erased and is 2832 * ready to be programmed. 2833 * The bitflips_threshold specify the maximum number of bitflips before 2834 * considering the region as not erased. 2835 * 2836 * Note: 2837 * 1/ ECC algorithms are working on pre-defined block sizes which are usually 2838 * different from the NAND page size. When fixing bitflips, ECC engines will 2839 * report the number of errors per chunk, and the NAND core infrastructure 2840 * expect you to return the maximum number of bitflips for the whole page. 
 *    This is why you should always use this function on a single chunk and
 *    not on the whole page. After checking each chunk you should update your
 *    max_bitflips value accordingly.
 * 2/ When checking for bitflips in erased pages you should not only check
 *    the payload data but also their associated ECC data, because a user might
 *    have programmed almost all bits to 1 but a few. In this case, we
 *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
 *    this case.
 * 3/ The extraoob argument is optional, and should be used if some of your OOB
 *    data are protected by the ECC engine.
 *    It could also be used if you support subpages and want to attach some
 *    extra OOB data to an ECC chunk.
 *
 * Returns a positive number of bitflips less than or equal to
 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
 * threshold. In case of success, the passed buffers are filled with 0xff.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	/* The bitflip budget left shrinks after each buffer. */
	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* Present the chunk to the caller as clean 0xff. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);

/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false, false);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);

/**
 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
 * @chip: NAND chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * This is a raw page read, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be loaded in the NAND cache and sent over the
 * bus (from the NAND chip to the NAND controller) in a single
 * operation.
This is an alternative to nand_read_page_raw(), which 2953 * first reads the main data, and if the OOB data is requested too, 2954 * then reads more data on the bus. 2955 */ 2956 int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf, 2957 int oob_required, int page) 2958 { 2959 struct mtd_info *mtd = nand_to_mtd(chip); 2960 unsigned int size = mtd->writesize; 2961 u8 *read_buf = buf; 2962 int ret; 2963 2964 if (oob_required) { 2965 size += mtd->oobsize; 2966 2967 if (buf != chip->data_buf) 2968 read_buf = nand_get_data_buf(chip); 2969 } 2970 2971 ret = nand_read_page_op(chip, page, 0, read_buf, size); 2972 if (ret) 2973 return ret; 2974 2975 if (buf != chip->data_buf) 2976 memcpy(buf, read_buf, mtd->writesize); 2977 2978 return 0; 2979 } 2980 EXPORT_SYMBOL(nand_monolithic_read_page_raw); 2981 2982 /** 2983 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc 2984 * @chip: nand chip info structure 2985 * @buf: buffer to store read data 2986 * @oob_required: caller requires OOB data read to chip->oob_poi 2987 * @page: page number to read 2988 * 2989 * We need a special oob layout and handling even when OOB isn't used. 
2990 */ 2991 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, 2992 int oob_required, int page) 2993 { 2994 struct mtd_info *mtd = nand_to_mtd(chip); 2995 int eccsize = chip->ecc.size; 2996 int eccbytes = chip->ecc.bytes; 2997 uint8_t *oob = chip->oob_poi; 2998 int steps, size, ret; 2999 3000 ret = nand_read_page_op(chip, page, 0, NULL, 0); 3001 if (ret) 3002 return ret; 3003 3004 for (steps = chip->ecc.steps; steps > 0; steps--) { 3005 ret = nand_read_data_op(chip, buf, eccsize, false, false); 3006 if (ret) 3007 return ret; 3008 3009 buf += eccsize; 3010 3011 if (chip->ecc.prepad) { 3012 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 3013 false, false); 3014 if (ret) 3015 return ret; 3016 3017 oob += chip->ecc.prepad; 3018 } 3019 3020 ret = nand_read_data_op(chip, oob, eccbytes, false, false); 3021 if (ret) 3022 return ret; 3023 3024 oob += eccbytes; 3025 3026 if (chip->ecc.postpad) { 3027 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 3028 false, false); 3029 if (ret) 3030 return ret; 3031 3032 oob += chip->ecc.postpad; 3033 } 3034 } 3035 3036 size = mtd->oobsize - (oob - chip->oob_poi); 3037 if (size) { 3038 ret = nand_read_data_op(chip, oob, size, false, false); 3039 if (ret) 3040 return ret; 3041 } 3042 3043 return 0; 3044 } 3045 3046 /** 3047 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function 3048 * @chip: nand chip info structure 3049 * @buf: buffer to store read data 3050 * @oob_required: caller requires OOB data read to chip->oob_poi 3051 * @page: page number to read 3052 */ 3053 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf, 3054 int oob_required, int page) 3055 { 3056 struct mtd_info *mtd = nand_to_mtd(chip); 3057 int i, eccsize = chip->ecc.size, ret; 3058 int eccbytes = chip->ecc.bytes; 3059 int eccsteps = chip->ecc.steps; 3060 uint8_t *p = buf; 3061 uint8_t *ecc_calc = chip->ecc.calc_buf; 3062 uint8_t *ecc_code = chip->ecc.code_buf; 3063 unsigned int max_bitflips = 
0; 3064 3065 chip->ecc.read_page_raw(chip, buf, 1, page); 3066 3067 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 3068 chip->ecc.calculate(chip, p, &ecc_calc[i]); 3069 3070 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 3071 chip->ecc.total); 3072 if (ret) 3073 return ret; 3074 3075 eccsteps = chip->ecc.steps; 3076 p = buf; 3077 3078 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3079 int stat; 3080 3081 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 3082 if (stat < 0) { 3083 mtd->ecc_stats.failed++; 3084 } else { 3085 mtd->ecc_stats.corrected += stat; 3086 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3087 } 3088 } 3089 return max_bitflips; 3090 } 3091 3092 /** 3093 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function 3094 * @chip: nand chip info structure 3095 * @data_offs: offset of requested data within the page 3096 * @readlen: data length 3097 * @bufpoi: buffer to store read data 3098 * @page: page number to read 3099 */ 3100 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs, 3101 uint32_t readlen, uint8_t *bufpoi, int page) 3102 { 3103 struct mtd_info *mtd = nand_to_mtd(chip); 3104 int start_step, end_step, num_steps, ret; 3105 uint8_t *p; 3106 int data_col_addr, i, gaps = 0; 3107 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 3108 int busw = (chip->options & NAND_BUSWIDTH_16) ? 
2 : 1; 3109 int index, section = 0; 3110 unsigned int max_bitflips = 0; 3111 struct mtd_oob_region oobregion = { }; 3112 3113 /* Column address within the page aligned to ECC size (256bytes) */ 3114 start_step = data_offs / chip->ecc.size; 3115 end_step = (data_offs + readlen - 1) / chip->ecc.size; 3116 num_steps = end_step - start_step + 1; 3117 index = start_step * chip->ecc.bytes; 3118 3119 /* Data size aligned to ECC ecc.size */ 3120 datafrag_len = num_steps * chip->ecc.size; 3121 eccfrag_len = num_steps * chip->ecc.bytes; 3122 3123 data_col_addr = start_step * chip->ecc.size; 3124 /* If we read not a page aligned data */ 3125 p = bufpoi + data_col_addr; 3126 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len); 3127 if (ret) 3128 return ret; 3129 3130 /* Calculate ECC */ 3131 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 3132 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]); 3133 3134 /* 3135 * The performance is faster if we position offsets according to 3136 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 3137 */ 3138 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion); 3139 if (ret) 3140 return ret; 3141 3142 if (oobregion.length < eccfrag_len) 3143 gaps = 1; 3144 3145 if (gaps) { 3146 ret = nand_change_read_column_op(chip, mtd->writesize, 3147 chip->oob_poi, mtd->oobsize, 3148 false); 3149 if (ret) 3150 return ret; 3151 } else { 3152 /* 3153 * Send the command to read the particular ECC bytes take care 3154 * about buswidth alignment in read_buf. 
3155 */ 3156 aligned_pos = oobregion.offset & ~(busw - 1); 3157 aligned_len = eccfrag_len; 3158 if (oobregion.offset & (busw - 1)) 3159 aligned_len++; 3160 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & 3161 (busw - 1)) 3162 aligned_len++; 3163 3164 ret = nand_change_read_column_op(chip, 3165 mtd->writesize + aligned_pos, 3166 &chip->oob_poi[aligned_pos], 3167 aligned_len, false); 3168 if (ret) 3169 return ret; 3170 } 3171 3172 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf, 3173 chip->oob_poi, index, eccfrag_len); 3174 if (ret) 3175 return ret; 3176 3177 p = bufpoi + data_col_addr; 3178 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 3179 int stat; 3180 3181 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i], 3182 &chip->ecc.calc_buf[i]); 3183 if (stat == -EBADMSG && 3184 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3185 /* check for empty pages with bitflips */ 3186 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 3187 &chip->ecc.code_buf[i], 3188 chip->ecc.bytes, 3189 NULL, 0, 3190 chip->ecc.strength); 3191 } 3192 3193 if (stat < 0) { 3194 mtd->ecc_stats.failed++; 3195 } else { 3196 mtd->ecc_stats.corrected += stat; 3197 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3198 } 3199 } 3200 return max_bitflips; 3201 } 3202 3203 /** 3204 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 3205 * @chip: nand chip info structure 3206 * @buf: buffer to store read data 3207 * @oob_required: caller requires OOB data read to chip->oob_poi 3208 * @page: page number to read 3209 * 3210 * Not for syndrome calculating ECC controllers which need a special oob layout. 
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read each chunk while the HW engine computes its ECC. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	/* Extract the on-flash ECC bytes from the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}

/**
 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
 *                                  data read from OOB area
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to
chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the ECC data to be
 * extracted from the OOB before the actual data is read.
 */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor to start of page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	/* Pull the stored ECC bytes out of the just-read OOB buffer */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/*
	 * Single pass over the data: the controller already knows the stored
	 * ECC, so each chunk can be corrected right after it is transferred.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	/* Max corrected bitflips per ECC step (MTD convention) */
	return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);

/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf:
buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Per-step on-flash footprint: prepad + ECC bytes + postpad */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Data and ECC are interleaved on flash (data-[pad]-ecc-[pad] per
	 * step), so read and correct one step at a time in flash order.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the HW engine to syndrome-read mode for the ECC bytes */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}

/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: NAND chip object
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Returns the destination address advanced by @len. The mode decides
 * whether the raw OOB bytes or only the "free" bytes (as described by
 * the OOB layout) are copied out.
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Straight copy from the in-core OOB buffer */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Copy only the free (non-ECC) bytes per the OOB layout */
		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		/* Mode already validated by the caller; anything else is a bug */
		BUG();
	}
	return NULL;
}

/*
 * Decide whether the [page, page + readlen) window can be served by a
 * continuous (sequential cache) read and, if so, record the first/last
 * full pages of the burst. Partially-read head/tail pages are excluded:
 * only whole pages benefit from the continuous read protocol.
 */
static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
				      u32 readlen, int col)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int end_page, end_col;

	chip->cont_read.ongoing = false;

	if (!chip->controller->supported_op.cont_read)
		return;

	end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
	end_col = (col + readlen) % mtd->writesize;

	/* A partial first page cannot be part of the burst */
	if (col)
		page++;

	/* A partial last page cannot be part of the burst either */
	if (end_col && end_page)
		end_page--;

	/* Not worth it for fewer than two full pages */
	if (page + 1 > end_page)
		return;

	chip->cont_read.first_page = page;
	chip->cont_read.last_page = end_page;
	chip->cont_read.ongoing = true;

	rawnand_cap_cont_reads(chip);
}

static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned
int page)
{
	/* Only act when @page is the current start of the planned burst */
	if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
		return;

	chip->cont_read.first_page++;
	/* Never let the burst start on the pause page; step over it */
	if (chip->cont_read.first_page == chip->cont_read.pause_page)
		chip->cont_read.first_page++;
	/* Burst shrank to nothing: disable continuous reads */
	if (chip->cont_read.first_page >= chip->cont_read.last_page)
		chip->cont_read.ongoing = false;
}

/**
 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
 * @chip: NAND chip object
 * @retry_mode: the retry mode to use
 *
 * Some vendors supply a special command to shift the Vt threshold, to be used
 * when there are too many bitflips in a page (i.e., ECC error). After setting
 * a new threshold, the host should retry reading the page.
 */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	/* Valid modes are 0 .. read_retries - 1 */
	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}

/*
 * Wait for the chip to become ready again after a read, but only on
 * chips that flagged NAND_NEED_READRDY. Uses the interface's tR_max as
 * the timeout.
 */
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_interface_config *conf;

	if (!(chip->options & NAND_NEED_READRDY))
		return;

	conf = nand_get_interface_config(chip);
	WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
}

/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Column offset within the first page */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	rawnand_enable_cont_reads(chip, page, readlen, col);

	while (1) {
		/* Snapshot stats so per-page ECC failures can be detected */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Partial-page reads always bounce; DMA-capable controllers
		 * also bounce when the caller's buffer is unsuitable for DMA.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				/*
				 * Cache the page only when it was read whole,
				 * cleanly, and with ECC applied; otherwise the
				 * cached copy would be unreliable.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/* New ECC failure on this page: try read-retry modes */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Serve the read straight from the page cache */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);

			rawnand_cont_read_skip_first_page(chip, page);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @chip: nand chip info structure
 * @page: page number to read
 */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);

/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	/* Per-step OOB footprint: prepad + ECC bytes + postpad */
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	/* Position past the first data chunk; OOB pieces follow each chunk */
	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		/* Seek only from the second step onward (sndrnd set below) */
		if (sndrnd) {
			/* NOTE(review): inner 'ret' shadows the outer one */
			int ret;

			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize
> 512)
				/* Large page: column change within the page */
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				/* Small page: issue a fresh read at offset */
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	/* Trailing OOB bytes not covered by any ECC step */
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false, false);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @chip: nand chip info structure
 * @page: page number to write
 */
int nand_write_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
				 mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);

/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad ....
ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: all OOB sits contiguously after the last chunk */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		/* Reposition only from the second step onward (sndcmd below) */
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small page chips cannot change the write
				 * column: pad the skipped data area with 0xff
				 * (no-op for NAND programming) instead.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Trailing OOB bytes not covered by any ECC step */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	/* Snapshot stats to detect new ECC failures at the end */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description
structure
 *
 * NAND read data and/or out-of-band data.
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_ecc_stats old_stats;
	int ret;

	ops->retlen = 0;

	if (ops->mode != MTD_OPS_PLACE_OOB &&
	    ops->mode != MTD_OPS_AUTO_OOB &&
	    ops->mode != MTD_OPS_RAW)
		return -ENOTSUPP;

	/* Serialize against other users of the chip */
	nand_get_device(chip);

	old_stats = mtd->ecc_stats;

	/* OOB-only reads take the dedicated path */
	if (!ops->datbuf)
		ret = nand_do_read_oob(chip, from, ops);
	else
		ret = nand_do_read_ops(chip, from, ops);

	/* Report per-operation ECC stats when the caller asked for them */
	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	nand_release_device(chip);
	return ret;
}

/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}

/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Send the main data first... */
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* ...then latch the OOB bytes if the caller provided them... */
	if (oob_required) {
		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
					 false);
		if (ret)
			return ret;
	}

	/* ...and finally issue the program command */
	return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);

/**
 * nand_monolithic_write_page_raw - Monolithic page write in raw mode
 * @chip: NAND chip info structure
 * @buf: data buffer to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * This is a raw page write, ie. without any error detection/correction.
 * Monolithic means we are requesting all the relevant data (main plus
 * eventually OOB) to be sent over the bus and effectively programmed
 * into the NAND chip arrays in a single operation. This is an
 * alternative to nand_write_page_raw(), which first sends the main
 * data, then eventually send the OOB data by latching more data
 * cycles on the NAND bus, and finally sends the program command to
 * synchronize the NAND chip cache.
 */
int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *write_buf = (u8 *)buf;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Data and OOB must be contiguous for a single transfer:
		 * stage them in the core's data buffer (OOB already lives
		 * right after it) unless the caller passed that buffer.
		 */
		if (buf != chip->data_buf) {
			write_buf = nand_get_data_buf(chip);
			memcpy(write_buf, buf, mtd->writesize);
		}
	}

	return nand_prog_page_op(chip, page, 0, write_buf, size);
}
EXPORT_SYMBOL(nand_monolithic_write_page_raw);

/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Write in flash order: data chunk, then its prepad/ECC/postpad
	 * bytes, for every ECC step.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Remaining OOB bytes not covered by any ECC step */
	size =
mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	/* Software ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Place the computed ECC bytes into the OOB buffer per the layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Delegate the actual transfer to the raw writer (oob forced on) */
	return chip->ecc.write_page_raw(chip, buf, 1, page);
}

/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Let the controller compute the ECC while each chunk is written */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip,
NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Place the computed ECC bytes into the OOB buffer per the layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}


/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip:	nand chip info structure
 * @offset:	column address of subpage within the page
 * @data_len:	data length
 * @buf:	data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	/* Range of ECC steps actually touched by [offset, offset+data_len) */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf,
ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}


/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Write in flash order: data chunk, prepad, then the HW-computed
	 * ECC bytes, then postpad, for every ECC step.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}

/**
 * nand_write_page - write one page
 * @chip: NAND chip descriptor
 * @offset: address offset within the page
 * @data_len: length of actual data to be written
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct nand_chip *chip, uint32_t offset,
			   int data_len, const uint8_t *buf, int oob_required,
			   int page, int raw)
{
	struct
mtd_info *mtd = nand_to_mtd(chip);
	int status, subpage;

	/* Use the subpage path only when supported and actually needed */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
		chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(chip, buf, oob_required,
						  page);
	else if (subpage)
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
	else
		status = chip->ecc.write_page(chip, buf, oob_required, page);

	if (status < 0)
		return status;

	return 0;
}

/* True when x is not aligned to the subpage size */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ?
1 : 0; 4424 4425 ops->retlen = 0; 4426 if (!writelen) 4427 return 0; 4428 4429 /* Reject writes, which are not page aligned */ 4430 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 4431 pr_notice("%s: attempt to write non page aligned data\n", 4432 __func__); 4433 return -EINVAL; 4434 } 4435 4436 /* Check if the region is secured */ 4437 if (nand_region_is_secured(chip, to, writelen)) 4438 return -EIO; 4439 4440 column = to & (mtd->writesize - 1); 4441 4442 chipnr = (int)(to >> chip->chip_shift); 4443 nand_select_target(chip, chipnr); 4444 4445 /* Check, if it is write protected */ 4446 if (nand_check_wp(chip)) { 4447 ret = -EIO; 4448 goto err_out; 4449 } 4450 4451 realpage = (int)(to >> chip->page_shift); 4452 page = realpage & chip->pagemask; 4453 4454 /* Invalidate the page cache, when we write to the cached page */ 4455 if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) && 4456 ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len)) 4457 chip->pagecache.page = -1; 4458 4459 /* Don't allow multipage oob writes with offset */ 4460 if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) { 4461 ret = -EINVAL; 4462 goto err_out; 4463 } 4464 4465 while (1) { 4466 int bytes = mtd->writesize; 4467 uint8_t *wbuf = buf; 4468 int use_bounce_buf; 4469 int part_pagewr = (column || writelen < mtd->writesize); 4470 4471 if (part_pagewr) 4472 use_bounce_buf = 1; 4473 else if (chip->options & NAND_USES_DMA) 4474 use_bounce_buf = !virt_addr_valid(buf) || 4475 !IS_ALIGNED((unsigned long)buf, 4476 chip->buf_align); 4477 else 4478 use_bounce_buf = 0; 4479 4480 /* 4481 * Copy the data from the initial buffer when doing partial page 4482 * writes or when a bounce buffer is required. 
4483 */ 4484 if (use_bounce_buf) { 4485 pr_debug("%s: using write bounce buffer for buf@%p\n", 4486 __func__, buf); 4487 if (part_pagewr) 4488 bytes = min_t(int, bytes - column, writelen); 4489 wbuf = nand_get_data_buf(chip); 4490 memset(wbuf, 0xff, mtd->writesize); 4491 memcpy(&wbuf[column], buf, bytes); 4492 } 4493 4494 if (unlikely(oob)) { 4495 size_t len = min(oobwritelen, oobmaxlen); 4496 oob = nand_fill_oob(chip, oob, len, ops); 4497 oobwritelen -= len; 4498 } else { 4499 /* We still need to erase leftover OOB data */ 4500 memset(chip->oob_poi, 0xff, mtd->oobsize); 4501 } 4502 4503 ret = nand_write_page(chip, column, bytes, wbuf, 4504 oob_required, page, 4505 (ops->mode == MTD_OPS_RAW)); 4506 if (ret) 4507 break; 4508 4509 writelen -= bytes; 4510 if (!writelen) 4511 break; 4512 4513 column = 0; 4514 buf += bytes; 4515 realpage++; 4516 4517 page = realpage & chip->pagemask; 4518 /* Check, if we cross a chip boundary */ 4519 if (!page) { 4520 chipnr++; 4521 nand_deselect_target(chip); 4522 nand_select_target(chip, chipnr); 4523 } 4524 } 4525 4526 ops->retlen = ops->len - writelen; 4527 if (unlikely(oob)) 4528 ops->oobretlen = ops->ooblen; 4529 4530 err_out: 4531 nand_deselect_target(chip); 4532 return ret; 4533 } 4534 4535 /** 4536 * panic_nand_write - [MTD Interface] NAND write with ECC 4537 * @mtd: MTD device structure 4538 * @to: offset to write to 4539 * @len: number of bytes to write 4540 * @retlen: pointer to variable to store the number of written bytes 4541 * @buf: the data to write 4542 * 4543 * NAND write with ECC. Used when performing writes in interrupt context, this 4544 * may for example be called by mtdoops when writing an oops while in panic. 
4545 */ 4546 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, 4547 size_t *retlen, const uint8_t *buf) 4548 { 4549 struct nand_chip *chip = mtd_to_nand(mtd); 4550 int chipnr = (int)(to >> chip->chip_shift); 4551 struct mtd_oob_ops ops; 4552 int ret; 4553 4554 nand_select_target(chip, chipnr); 4555 4556 /* Wait for the device to get ready */ 4557 panic_nand_wait(chip, 400); 4558 4559 memset(&ops, 0, sizeof(ops)); 4560 ops.len = len; 4561 ops.datbuf = (uint8_t *)buf; 4562 ops.mode = MTD_OPS_PLACE_OOB; 4563 4564 ret = nand_do_write_ops(chip, to, &ops); 4565 4566 *retlen = ops.retlen; 4567 return ret; 4568 } 4569 4570 /** 4571 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band 4572 * @mtd: MTD device structure 4573 * @to: offset to write to 4574 * @ops: oob operation description structure 4575 */ 4576 static int nand_write_oob(struct mtd_info *mtd, loff_t to, 4577 struct mtd_oob_ops *ops) 4578 { 4579 struct nand_chip *chip = mtd_to_nand(mtd); 4580 int ret = 0; 4581 4582 ops->retlen = 0; 4583 4584 nand_get_device(chip); 4585 4586 switch (ops->mode) { 4587 case MTD_OPS_PLACE_OOB: 4588 case MTD_OPS_AUTO_OOB: 4589 case MTD_OPS_RAW: 4590 break; 4591 4592 default: 4593 goto out; 4594 } 4595 4596 if (!ops->datbuf) 4597 ret = nand_do_write_oob(chip, to, ops); 4598 else 4599 ret = nand_do_write_ops(chip, to, ops); 4600 4601 out: 4602 nand_release_device(chip); 4603 return ret; 4604 } 4605 4606 /** 4607 * nand_erase - [MTD Interface] erase block(s) 4608 * @mtd: MTD device structure 4609 * @instr: erase instruction 4610 * 4611 * Erase one ore more blocks. 4612 */ 4613 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) 4614 { 4615 return nand_erase_nand(mtd_to_nand(mtd), instr, 0); 4616 } 4617 4618 /** 4619 * nand_erase_nand - [INTERN] erase block(s) 4620 * @chip: NAND chip object 4621 * @instr: erase instruction 4622 * @allowbbt: allow erasing the bbt area 4623 * 4624 * Erase one ore more blocks. 
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		loff_t ofs = (loff_t)page << chip->page_shift;

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
				__func__, (unsigned long long)ofs);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/*
		 * The erase op takes a block index local to the current
		 * target, hence the mask with pagemask before converting
		 * the page number to a block number.
		 */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			instr->fail_addr = ofs;
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
4724 */ 4725 static void nand_sync(struct mtd_info *mtd) 4726 { 4727 struct nand_chip *chip = mtd_to_nand(mtd); 4728 4729 pr_debug("%s: called\n", __func__); 4730 4731 /* Grab the lock and see if the device is available */ 4732 nand_get_device(chip); 4733 /* Release it and go back */ 4734 nand_release_device(chip); 4735 } 4736 4737 /** 4738 * nand_block_isbad - [MTD Interface] Check if block at offset is bad 4739 * @mtd: MTD device structure 4740 * @offs: offset relative to mtd start 4741 */ 4742 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 4743 { 4744 struct nand_chip *chip = mtd_to_nand(mtd); 4745 int chipnr = (int)(offs >> chip->chip_shift); 4746 int ret; 4747 4748 /* Select the NAND device */ 4749 nand_get_device(chip); 4750 4751 nand_select_target(chip, chipnr); 4752 4753 ret = nand_block_checkbad(chip, offs, 0); 4754 4755 nand_deselect_target(chip); 4756 nand_release_device(chip); 4757 4758 return ret; 4759 } 4760 4761 /** 4762 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad 4763 * @mtd: MTD device structure 4764 * @ofs: offset relative to mtd start 4765 */ 4766 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) 4767 { 4768 int ret; 4769 4770 ret = nand_block_isbad(mtd, ofs); 4771 if (ret) { 4772 /* If it was bad already, return success and do nothing */ 4773 if (ret > 0) 4774 return 0; 4775 return ret; 4776 } 4777 4778 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs); 4779 } 4780 4781 /** 4782 * nand_suspend - [MTD Interface] Suspend the NAND flash 4783 * @mtd: MTD device structure 4784 * 4785 * Returns 0 for success or negative error code otherwise. 
4786 */ 4787 static int nand_suspend(struct mtd_info *mtd) 4788 { 4789 struct nand_chip *chip = mtd_to_nand(mtd); 4790 int ret = 0; 4791 4792 mutex_lock(&chip->lock); 4793 if (chip->ops.suspend) 4794 ret = chip->ops.suspend(chip); 4795 if (!ret) 4796 chip->suspended = 1; 4797 mutex_unlock(&chip->lock); 4798 4799 return ret; 4800 } 4801 4802 /** 4803 * nand_resume - [MTD Interface] Resume the NAND flash 4804 * @mtd: MTD device structure 4805 */ 4806 static void nand_resume(struct mtd_info *mtd) 4807 { 4808 struct nand_chip *chip = mtd_to_nand(mtd); 4809 4810 mutex_lock(&chip->lock); 4811 if (chip->suspended) { 4812 if (chip->ops.resume) 4813 chip->ops.resume(chip); 4814 chip->suspended = 0; 4815 } else { 4816 pr_err("%s called for a chip which is not in suspended state\n", 4817 __func__); 4818 } 4819 mutex_unlock(&chip->lock); 4820 4821 wake_up_all(&chip->resume_wq); 4822 } 4823 4824 /** 4825 * nand_shutdown - [MTD Interface] Finish the current NAND operation and 4826 * prevent further operations 4827 * @mtd: MTD device structure 4828 */ 4829 static void nand_shutdown(struct mtd_info *mtd) 4830 { 4831 nand_suspend(mtd); 4832 } 4833 4834 /** 4835 * nand_lock - [MTD Interface] Lock the NAND flash 4836 * @mtd: MTD device structure 4837 * @ofs: offset byte address 4838 * @len: number of bytes to lock (must be a multiple of block/page size) 4839 */ 4840 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4841 { 4842 struct nand_chip *chip = mtd_to_nand(mtd); 4843 4844 if (!chip->ops.lock_area) 4845 return -ENOTSUPP; 4846 4847 return chip->ops.lock_area(chip, ofs, len); 4848 } 4849 4850 /** 4851 * nand_unlock - [MTD Interface] Unlock the NAND flash 4852 * @mtd: MTD device structure 4853 * @ofs: offset byte address 4854 * @len: number of bytes to unlock (must be a multiple of block/page size) 4855 */ 4856 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4857 { 4858 struct nand_chip *chip = mtd_to_nand(mtd); 4859 4860 if 
(!chip->ops.unlock_area) 4861 return -ENOTSUPP; 4862 4863 return chip->ops.unlock_area(chip, ofs, len); 4864 } 4865 4866 /* Set default functions */ 4867 static void nand_set_defaults(struct nand_chip *chip) 4868 { 4869 /* If no controller is provided, use the dummy, legacy one. */ 4870 if (!chip->controller) { 4871 chip->controller = &chip->legacy.dummy_controller; 4872 nand_controller_init(chip->controller); 4873 } 4874 4875 nand_legacy_set_defaults(chip); 4876 4877 if (!chip->buf_align) 4878 chip->buf_align = 1; 4879 } 4880 4881 /* Sanitize ONFI strings so we can safely print them */ 4882 void sanitize_string(uint8_t *s, size_t len) 4883 { 4884 ssize_t i; 4885 4886 /* Null terminate */ 4887 s[len - 1] = 0; 4888 4889 /* Remove non printable chars */ 4890 for (i = 0; i < len - 1; i++) { 4891 if (s[i] < ' ' || s[i] > 127) 4892 s[i] = '?'; 4893 } 4894 4895 /* Remove trailing spaces */ 4896 strim(s); 4897 } 4898 4899 /* 4900 * nand_id_has_period - Check if an ID string has a given wraparound period 4901 * @id_data: the ID string 4902 * @arrlen: the length of the @id_data array 4903 * @period: the period of repitition 4904 * 4905 * Check if an ID string is repeated within a given sequence of bytes at 4906 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a 4907 * period of 3). This is a helper function for nand_id_len(). Returns non-zero 4908 * if the repetition has a period of @period; otherwise, returns zero. 4909 */ 4910 static int nand_id_has_period(u8 *id_data, int arrlen, int period) 4911 { 4912 int i, j; 4913 for (i = 0; i < period; i++) 4914 for (j = i + period; j < arrlen; j += period) 4915 if (id_data[i] != id_data[j]) 4916 return 0; 4917 return 1; 4918 } 4919 4920 /* 4921 * nand_id_len - Get the length of an ID string returned by CMD_READID 4922 * @id_data: the ID string 4923 * @arrlen: the length of the @id_data array 4924 4925 * Returns the length of the ID string, according to known wraparound/trailing 4926 * zero patterns. 
If no pattern exists, returns the length of the array. 4927 */ 4928 static int nand_id_len(u8 *id_data, int arrlen) 4929 { 4930 int last_nonzero, period; 4931 4932 /* Find last non-zero byte */ 4933 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--) 4934 if (id_data[last_nonzero]) 4935 break; 4936 4937 /* All zeros */ 4938 if (last_nonzero < 0) 4939 return 0; 4940 4941 /* Calculate wraparound period */ 4942 for (period = 1; period < arrlen; period++) 4943 if (nand_id_has_period(id_data, arrlen, period)) 4944 break; 4945 4946 /* There's a repeated pattern */ 4947 if (period < arrlen) 4948 return period; 4949 4950 /* There are trailing zeros */ 4951 if (last_nonzero < arrlen - 1) 4952 return last_nonzero + 1; 4953 4954 /* No pattern detected */ 4955 return arrlen; 4956 } 4957 4958 /* Extract the bits of per cell from the 3rd byte of the extended ID */ 4959 static int nand_get_bits_per_cell(u8 cellinfo) 4960 { 4961 int bits; 4962 4963 bits = cellinfo & NAND_CI_CELLTYPE_MSK; 4964 bits >>= NAND_CI_CELLTYPE_SHIFT; 4965 return bits + 1; 4966 } 4967 4968 /* 4969 * Many new NAND share similar device ID codes, which represent the size of the 4970 * chip. The rest of the parameters must be decoded according to generic or 4971 * manufacturer-specific "extended ID" decoding patterns. 
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/*
	 * The extended ID byte is consumed two bits at a time, low bits
	 * first: page size, OOB size, block size, then bus width.
	 */
	/* Calc pagesize */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 bytes per 512 data bytes */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);

/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching ID table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* OOB is 1/32 of the page size on these parts (16B per 512B). */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}

/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
	else
		chip->badblockpos = NAND_BBM_POS_SMALL;
}

/* An entry with a non-zero id_len carries a full device description. */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}

static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		/*
		 * NOTE(review): only strength and step_size are assigned
		 * here; the remaining nand_ecc_props fields are passed to
		 * nanddev_set_ecc_requirements() uninitialized — confirm
		 * the callee only consumes these two fields.
		 */
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		/* On kstrdup() failure the memorg updates above remain. */
		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}

/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the nand_ids
 * table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}

/*
 * Manufacturer initialization. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific initialization code in
 * their ->init() hook.
 */
static int nand_manufacturer_init(struct nand_chip *chip)
{
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)
		return 0;

	return chip->manufacturer.desc->ops->init(chip);
}

/*
 * Manufacturer cleanup.
This function is called for all NANDs including 5131 * ONFI and JEDEC compliant ones. 5132 * Manufacturer drivers should put all their specific cleanup code in their 5133 * ->cleanup() hook. 5134 */ 5135 static void nand_manufacturer_cleanup(struct nand_chip *chip) 5136 { 5137 /* Release manufacturer private data */ 5138 if (chip->manufacturer.desc && chip->manufacturer.desc->ops && 5139 chip->manufacturer.desc->ops->cleanup) 5140 chip->manufacturer.desc->ops->cleanup(chip); 5141 } 5142 5143 static const char * 5144 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) 5145 { 5146 return manufacturer_desc ? manufacturer_desc->name : "Unknown"; 5147 } 5148 5149 static void rawnand_check_data_only_read_support(struct nand_chip *chip) 5150 { 5151 /* Use an arbitrary size for the check */ 5152 if (!nand_read_data_op(chip, NULL, SZ_512, true, true)) 5153 chip->controller->supported_op.data_only_read = 1; 5154 } 5155 5156 static void rawnand_early_check_supported_ops(struct nand_chip *chip) 5157 { 5158 /* The supported_op fields should not be set by individual drivers */ 5159 WARN_ON_ONCE(chip->controller->supported_op.data_only_read); 5160 5161 if (!nand_has_exec_op(chip)) 5162 return; 5163 5164 rawnand_check_data_only_read_support(chip); 5165 } 5166 5167 static void rawnand_check_cont_read_support(struct nand_chip *chip) 5168 { 5169 struct mtd_info *mtd = nand_to_mtd(chip); 5170 5171 if (!chip->parameters.supports_read_cache) 5172 return; 5173 5174 if (chip->read_retries) 5175 return; 5176 5177 if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL, 5178 mtd->writesize, true)) 5179 chip->controller->supported_op.cont_read = 1; 5180 } 5181 5182 static void rawnand_late_check_supported_ops(struct nand_chip *chip) 5183 { 5184 /* The supported_op fields should not be set by individual drivers */ 5185 WARN_ON_ONCE(chip->controller->supported_op.cont_read); 5186 5187 /* 5188 * Too many devices do not support sequential cached reads with on-die 5189 * 
ECC correction enabled, so in this case refuse to perform the 5190 * automation. 5191 */ 5192 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) 5193 return; 5194 5195 if (!nand_has_exec_op(chip)) 5196 return; 5197 5198 rawnand_check_cont_read_support(chip); 5199 } 5200 5201 /* 5202 * Get the flash and manufacturer id and lookup if the type is supported. 5203 */ 5204 static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) 5205 { 5206 const struct nand_manufacturer_desc *manufacturer_desc; 5207 struct mtd_info *mtd = nand_to_mtd(chip); 5208 struct nand_memory_organization *memorg; 5209 int busw, ret; 5210 u8 *id_data = chip->id.data; 5211 u8 maf_id, dev_id; 5212 u64 targetsize; 5213 5214 /* 5215 * Let's start by initializing memorg fields that might be left 5216 * unassigned by the ID-based detection logic. 5217 */ 5218 memorg = nanddev_get_memorg(&chip->base); 5219 memorg->planes_per_lun = 1; 5220 memorg->luns_per_target = 1; 5221 5222 /* 5223 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 5224 * after power-up. 5225 */ 5226 ret = nand_reset(chip, 0); 5227 if (ret) 5228 return ret; 5229 5230 /* Select the device */ 5231 nand_select_target(chip, 0); 5232 5233 rawnand_early_check_supported_ops(chip); 5234 5235 /* Send the command for reading device ID */ 5236 ret = nand_readid_op(chip, 0, id_data, 2); 5237 if (ret) 5238 return ret; 5239 5240 /* Read manufacturer and device IDs */ 5241 maf_id = id_data[0]; 5242 dev_id = id_data[1]; 5243 5244 /* 5245 * Try again to make sure, as some systems the bus-hold or other 5246 * interface concerns can cause random data which looks like a 5247 * possibly credible NAND flash to appear. If the two results do 5248 * not match, ignore the device completely. 
5249 */ 5250 5251 /* Read entire ID string */ 5252 ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data)); 5253 if (ret) 5254 return ret; 5255 5256 if (id_data[0] != maf_id || id_data[1] != dev_id) { 5257 pr_info("second ID read did not match %02x,%02x against %02x,%02x\n", 5258 maf_id, dev_id, id_data[0], id_data[1]); 5259 return -ENODEV; 5260 } 5261 5262 chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); 5263 5264 /* Try to identify manufacturer */ 5265 manufacturer_desc = nand_get_manufacturer_desc(maf_id); 5266 chip->manufacturer.desc = manufacturer_desc; 5267 5268 if (!type) 5269 type = nand_flash_ids; 5270 5271 /* 5272 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic 5273 * override it. 5274 * This is required to make sure initial NAND bus width set by the 5275 * NAND controller driver is coherent with the real NAND bus width 5276 * (extracted by auto-detection code). 5277 */ 5278 busw = chip->options & NAND_BUSWIDTH_16; 5279 5280 /* 5281 * The flag is only set (never cleared), reset it to its default value 5282 * before starting auto-detection. 
5283 */ 5284 chip->options &= ~NAND_BUSWIDTH_16; 5285 5286 for (; type->name != NULL; type++) { 5287 if (is_full_id_nand(type)) { 5288 if (find_full_id_nand(chip, type)) 5289 goto ident_done; 5290 } else if (dev_id == type->dev_id) { 5291 break; 5292 } 5293 } 5294 5295 if (!type->name || !type->pagesize) { 5296 /* Check if the chip is ONFI compliant */ 5297 ret = nand_onfi_detect(chip); 5298 if (ret < 0) 5299 return ret; 5300 else if (ret) 5301 goto ident_done; 5302 5303 /* Check if the chip is JEDEC compliant */ 5304 ret = nand_jedec_detect(chip); 5305 if (ret < 0) 5306 return ret; 5307 else if (ret) 5308 goto ident_done; 5309 } 5310 5311 if (!type->name) 5312 return -ENODEV; 5313 5314 chip->parameters.model = kstrdup(type->name, GFP_KERNEL); 5315 if (!chip->parameters.model) 5316 return -ENOMEM; 5317 5318 if (!type->pagesize) 5319 nand_manufacturer_detect(chip); 5320 else 5321 nand_decode_id(chip, type); 5322 5323 /* Get chip options */ 5324 chip->options |= type->options; 5325 5326 memorg->eraseblocks_per_lun = 5327 DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20, 5328 memorg->pagesize * 5329 memorg->pages_per_eraseblock); 5330 5331 ident_done: 5332 if (!mtd->name) 5333 mtd->name = chip->parameters.model; 5334 5335 if (chip->options & NAND_BUSWIDTH_AUTO) { 5336 WARN_ON(busw & NAND_BUSWIDTH_16); 5337 nand_set_defaults(chip); 5338 } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { 5339 /* 5340 * Check, if buswidth is correct. Hardware drivers should set 5341 * chip correct! 5342 */ 5343 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", 5344 maf_id, dev_id); 5345 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), 5346 mtd->name); 5347 pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, 5348 (chip->options & NAND_BUSWIDTH_16) ? 
16 : 8); 5349 ret = -EINVAL; 5350 5351 goto free_detect_allocation; 5352 } 5353 5354 nand_decode_bbm_options(chip); 5355 5356 /* Calculate the address shift from the page size */ 5357 chip->page_shift = ffs(mtd->writesize) - 1; 5358 /* Convert chipsize to number of pages per chip -1 */ 5359 targetsize = nanddev_target_size(&chip->base); 5360 chip->pagemask = (targetsize >> chip->page_shift) - 1; 5361 5362 chip->bbt_erase_shift = chip->phys_erase_shift = 5363 ffs(mtd->erasesize) - 1; 5364 if (targetsize & 0xffffffff) 5365 chip->chip_shift = ffs((unsigned)targetsize) - 1; 5366 else { 5367 chip->chip_shift = ffs((unsigned)(targetsize >> 32)); 5368 chip->chip_shift += 32 - 1; 5369 } 5370 5371 if (chip->chip_shift - chip->page_shift > 16) 5372 chip->options |= NAND_ROW_ADDR_3; 5373 5374 chip->badblockbits = 8; 5375 5376 nand_legacy_adjust_cmdfunc(chip); 5377 5378 pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", 5379 maf_id, dev_id); 5380 pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), 5381 chip->parameters.model); 5382 pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", 5383 (int)(targetsize >> 20), nand_is_slc(chip) ? 
"SLC" : "MLC", 5384 mtd->erasesize >> 10, mtd->writesize, mtd->oobsize); 5385 return 0; 5386 5387 free_detect_allocation: 5388 kfree(chip->parameters.model); 5389 5390 return ret; 5391 } 5392 5393 static enum nand_ecc_engine_type 5394 of_get_rawnand_ecc_engine_type_legacy(struct device_node *np) 5395 { 5396 enum nand_ecc_legacy_mode { 5397 NAND_ECC_INVALID, 5398 NAND_ECC_NONE, 5399 NAND_ECC_SOFT, 5400 NAND_ECC_SOFT_BCH, 5401 NAND_ECC_HW, 5402 NAND_ECC_HW_SYNDROME, 5403 NAND_ECC_ON_DIE, 5404 }; 5405 const char * const nand_ecc_legacy_modes[] = { 5406 [NAND_ECC_NONE] = "none", 5407 [NAND_ECC_SOFT] = "soft", 5408 [NAND_ECC_SOFT_BCH] = "soft_bch", 5409 [NAND_ECC_HW] = "hw", 5410 [NAND_ECC_HW_SYNDROME] = "hw_syndrome", 5411 [NAND_ECC_ON_DIE] = "on-die", 5412 }; 5413 enum nand_ecc_legacy_mode eng_type; 5414 const char *pm; 5415 int err; 5416 5417 err = of_property_read_string(np, "nand-ecc-mode", &pm); 5418 if (err) 5419 return NAND_ECC_ENGINE_TYPE_INVALID; 5420 5421 for (eng_type = NAND_ECC_NONE; 5422 eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) { 5423 if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) { 5424 switch (eng_type) { 5425 case NAND_ECC_NONE: 5426 return NAND_ECC_ENGINE_TYPE_NONE; 5427 case NAND_ECC_SOFT: 5428 case NAND_ECC_SOFT_BCH: 5429 return NAND_ECC_ENGINE_TYPE_SOFT; 5430 case NAND_ECC_HW: 5431 case NAND_ECC_HW_SYNDROME: 5432 return NAND_ECC_ENGINE_TYPE_ON_HOST; 5433 case NAND_ECC_ON_DIE: 5434 return NAND_ECC_ENGINE_TYPE_ON_DIE; 5435 default: 5436 break; 5437 } 5438 } 5439 } 5440 5441 return NAND_ECC_ENGINE_TYPE_INVALID; 5442 } 5443 5444 static enum nand_ecc_placement 5445 of_get_rawnand_ecc_placement_legacy(struct device_node *np) 5446 { 5447 const char *pm; 5448 int err; 5449 5450 err = of_property_read_string(np, "nand-ecc-mode", &pm); 5451 if (!err) { 5452 if (!strcasecmp(pm, "hw_syndrome")) 5453 return NAND_ECC_PLACEMENT_INTERLEAVED; 5454 } 5455 5456 return NAND_ECC_PLACEMENT_UNKNOWN; 5457 } 5458 5459 static enum nand_ecc_algo 
of_get_rawnand_ecc_algo_legacy(struct device_node *np)
{
	const char *pm;
	int err;

	/*
	 * Legacy "nand-ecc-mode" parsing: only the "soft"/"soft_bch" values
	 * carry algorithm information.
	 */
	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (!err) {
		if (!strcasecmp(pm, "soft"))
			return NAND_ECC_ALGO_HAMMING;
		else if (!strcasecmp(pm, "soft_bch"))
			return NAND_ECC_ALGO_BCH;
	}

	return NAND_ECC_ALGO_UNKNOWN;
}

/*
 * Fill any ECC user_conf field still unset after the generic DT parsing by
 * falling back to the deprecated "nand-ecc-mode" property. Fields already
 * populated are left untouched.
 */
static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;

	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);

	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);

	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}

/*
 * Parse the "nand-bus-width" DT property. Only 8 and 16 are valid values;
 * 16 sets NAND_BUSWIDTH_16 in chip->options.
 */
static int of_get_nand_bus_width(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	u32 val;
	int ret;

	ret = of_property_read_u32(dn, "nand-bus-width", &val);
	if (ret == -EINVAL)
		/* Buswidth defaults to 8 if the property does not exist. */
		return 0;
	else if (ret)
		return ret;

	if (val == 16)
		chip->options |= NAND_BUSWIDTH_16;
	else if (val != 8)
		return -EINVAL;
	return 0;
}

/*
 * Parse the "secure-regions" DT property into chip->secure_regions, an
 * array of (offset, size) u64 pairs. The array is kcalloc'ed here and
 * freed on the nand_scan_tail() error path / in nand_cleanup().
 */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Each region is an (offset, size) pair, hence the division by 2. */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
				       GFP_KERNEL);
	if (!chip->secure_regions)
		return -ENOMEM;

	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}

/**
 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
 * @dev: Device that will be parsed. Also used for managed allocations.
 * @cs_array: Array of GPIO desc pointers allocated on success
 * @ncs_array: Number of entries in @cs_array updated on success.
 *
 * Return: 0 on success, an error otherwise.
 */
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
			     unsigned int *ncs_array)
{
	struct gpio_desc **descs;
	int ndescs, i;

	ndescs = gpiod_count(dev, "cs");
	if (ndescs < 0) {
		/* Missing cs-gpios is not an error: CS may be hardwired. */
		dev_dbg(dev, "No valid cs-gpios property\n");
		return 0;
	}

	/* devm allocation: lifetime is tied to @dev, no explicit free. */
	descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < ndescs; i++) {
		descs[i] = gpiod_get_index_optional(dev, "cs", i,
						    GPIOD_OUT_HIGH);
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	*ncs_array = ndescs;
	*cs_array = descs;

	return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);

/*
 * Parse all generic raw NAND DT properties and derive the initial ECC
 * engine configuration for this chip.
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |=
			NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* Generic DT parsing first, then legacy "nand-ecc-mode" fallback. */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}

/* Free the allocations performed during the identification phase. */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

/*
 * Initialize the software Hamming ECC engine for @chip and mirror the
 * resolved configuration (size/strength/total/steps/bytes) back into
 * chip->ecc so raw NAND code and the generic ECC core agree.
 */
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	/* Propagate the Smart Media byte ordering option to the engine. */
	if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);

/* Thin raw NAND wrapper around the generic SW Hamming calculation. */
int rawnand_sw_hamming_calculate(struct nand_chip *chip,
				 const unsigned char *buf,
				 unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_calculate(base, buf, code);
}
EXPORT_SYMBOL(rawnand_sw_hamming_calculate);

/* Thin raw NAND wrapper around the generic SW Hamming correction. */
int rawnand_sw_hamming_correct(struct nand_chip *chip,
			       unsigned char *buf,
			       unsigned char *read_ecc,
			       unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_hamming_correct);

/* Release the software Hamming ECC engine context of @chip. */
void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_hamming_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);

/*
 * Initialize the software BCH ECC engine for @chip and mirror the resolved
 * configuration back into chip->ecc (same contract as
 * rawnand_sw_hamming_init()).
 */
int rawnand_sw_bch_init(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;
	const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
	base->ecc.user_conf.step_size = chip->ecc.size;
	base->ecc.user_conf.strength = chip->ecc.strength;

	ret = nand_ecc_sw_bch_init_ctx(base);
	if (ret)
		return ret;

	chip->ecc.size = ecc_conf->step_size;
	chip->ecc.strength = ecc_conf->strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);

/* Thin raw NAND wrapper around the generic SW BCH calculation. */
static int rawnand_sw_bch_calculate(struct nand_chip *chip,
				    const unsigned char *buf,
				    unsigned char *code)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_calculate(base, buf, code);
}

/* Thin raw NAND wrapper around the generic SW BCH correction. */
int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
			   unsigned char *read_ecc, unsigned char *calc_ecc)
{
	struct nand_device *base = &chip->base;

	return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
}
EXPORT_SYMBOL(rawnand_sw_bch_correct);

/* Release the software BCH ECC engine context of @chip. */
void rawnand_sw_bch_cleanup(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;

	nand_ecc_sw_bch_cleanup_ctx(base);
}
EXPORT_SYMBOL(rawnand_sw_bch_cleanup);

/*
 * Fill in the default on-host (hardware) ECC page/OOB accessors for any hook
 * the controller driver left unset, based on the ECC byte placement.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case
	NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Subpage writes need the low-level hwctl/calculate hooks. */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The driver must provide either the low-level trio
		 * (hwctl/calculate/correct) or its own full page accessors;
		 * the generic hwecc helpers are unusable without the trio.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}

/*
 * Wire up the software ECC (Hamming or BCH) page accessors and initialize
 * the matching generic ECC engine.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Classic SW Hamming: 3 ECC bytes per 256-byte step, 1 bit. */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}

/**
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength are already set, check if they are supported
 * by the controller and the calculated ECC bytes fit within the chip's OOB.
 * On success, the calculated ECC bytes is set.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	/* Look for an exact (step, strength) match in the controller caps. */
	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}

/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulting reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			/* Must fit in OOB and correct at least req_corr bits. */
			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	/* INT_MAX still in place means no candidate ever qualified. */
	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength = 0, best_ecc_bytes = 0;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			/* Total correctable bits per page for this config. */
			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to following logic.
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if it is supported by this controller.
 * 2. If the user provided the nand-ecc-maximize property, then select maximum
 *    ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If available OOB size can't fit the chip
 *    requirement then fallback to the maximum ECC step size and ECC strength.
 *
 * On success, the chosen ECC settings are set.
 */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
		return -EINVAL;

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		return nand_maximize_ecc(chip, caps, oobavail);

	if (!nand_match_ecc_req(chip, caps, oobavail))
		return 0;

	/* Matching failed: fall back to the strongest fitting config. */
	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);

/* nand_ops hook: erase the eraseblock at @pos on the right die. */
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	unsigned int eb = nanddev_pos_to_row(nand, pos);
	int ret;

	/* Convert the row address into an eraseblock index. */
	eb >>= nand->rowconv.eraseblock_addr_shift;

	nand_select_target(chip, pos->target);
	ret = nand_erase_op(chip, eb);
	nand_deselect_target(chip);

	return ret;
}

/* nand_ops hook: write a bad-block marker for the block at @pos. */
static int rawnand_markbad(struct nand_device *nand,
			   const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);

	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
}

/* nand_ops hook: check the factory/host bad-block marker at @pos. */
static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	int ret;

	nand_select_target(chip, pos->target);
	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
	nand_deselect_target(chip);

	/*
	 * NOTE(review): the int result is returned through a bool, so a
	 * negative error code from nand_isbad_bbm() would read as "bad" —
	 * presumably intentional (fail safe); confirm against callers.
	 */
	return ret;
}

static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};

/**
 * nand_scan_tail - Scan for
the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One buffer holds a full page followed by its OOB area. */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * (SW ECC engines install their own layout during init, hence the
	 * exclusions below.)
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		/* ECC step larger than the page: HW ECC cannot work. */
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers are only needed by engines doing calc/correct. */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	rawnand_late_check_supported_ops(chip);

	/*
	 * Look for secure regions in the NAND chip. These regions are supposed
	 * to be protected by a secure element like Trustzone. So the read/write
	 * accesses to these regions will be blocked in the runtime by this
	 * driver.
	 */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}

/* Give the controller driver a chance to hook up chip-specific resources. */
static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);

	return 0;
}

/* Counterpart of nand_attach(): release controller per-chip resources. */
static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}

/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for.
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
6603 */ 6604 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips, 6605 struct nand_flash_dev *ids) 6606 { 6607 int ret; 6608 6609 if (!maxchips) 6610 return -EINVAL; 6611 6612 ret = nand_scan_ident(chip, maxchips, ids); 6613 if (ret) 6614 return ret; 6615 6616 ret = nand_attach(chip); 6617 if (ret) 6618 goto cleanup_ident; 6619 6620 ret = nand_scan_tail(chip); 6621 if (ret) 6622 goto detach_chip; 6623 6624 return 0; 6625 6626 detach_chip: 6627 nand_detach(chip); 6628 cleanup_ident: 6629 nand_scan_ident_cleanup(chip); 6630 6631 return ret; 6632 } 6633 EXPORT_SYMBOL(nand_scan_with_ids); 6634 6635 /** 6636 * nand_cleanup - [NAND Interface] Free resources held by the NAND device 6637 * @chip: NAND chip object 6638 */ 6639 void nand_cleanup(struct nand_chip *chip) 6640 { 6641 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) { 6642 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING) 6643 rawnand_sw_hamming_cleanup(chip); 6644 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH) 6645 rawnand_sw_bch_cleanup(chip); 6646 } 6647 6648 nanddev_cleanup(&chip->base); 6649 6650 /* Free secure regions data */ 6651 kfree(chip->secure_regions); 6652 6653 /* Free bad block table memory */ 6654 kfree(chip->bbt); 6655 kfree(chip->data_buf); 6656 kfree(chip->ecc.code_buf); 6657 kfree(chip->ecc.calc_buf); 6658 6659 /* Free bad block descriptor memory */ 6660 if (chip->badblock_pattern && chip->badblock_pattern->options 6661 & NAND_BBT_DYNAMICSTRUCT) 6662 kfree(chip->badblock_pattern); 6663 6664 /* Free the data interface */ 6665 kfree(chip->best_interface_config); 6666 6667 /* Free manufacturer priv data. */ 6668 nand_manufacturer_cleanup(chip); 6669 6670 /* Free controller specific allocations after chip identification */ 6671 nand_detach(chip); 6672 6673 /* Free identification phase allocations */ 6674 nand_scan_ident_cleanup(chip); 6675 } 6676 6677 EXPORT_SYMBOL_GPL(nand_cleanup); 6678 6679 MODULE_LICENSE("GPL"); 6680 MODULE_AUTHOR("Steven J. 
Hill <sjhill@realitydiluted.com>"); 6681 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 6682 MODULE_DESCRIPTION("Generic NAND flash driver code"); 6683