// SPDX-License-Identifier: GPL-2.0-only
/*
 * Overview:
 *	This is the generic MTD driver for NAND flash devices. It should be
 *	capable of working with almost all NAND chips currently available.
 *
 *	Additional technical information is available on
 *	http://www.linux-mtd.infradead.org/doc/nand.html
 *
 *	Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 *  Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 *  TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>

#include "internals.h"

/* Define default oob placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* The small-page layout exposes at most two ECC regions. */
	if (section > 1)
		return -ERANGE;

	if (!section) {
		/* First ECC group sits at the very start of the OOB area. */
		oobregion->offset = 0;
		if (mtd->oobsize == 16)
			oobregion->length = 4;
		else
			oobregion->length = 3;
	} else {
		/* 8-byte OOB devices only have the single region above. */
		if (mtd->oobsize == 8)
			return -ERANGE;

		/* Remaining ECC bytes start past the bad-block marker area. */
		oobregion->offset = 6;
		oobregion->length = ecc->total - 4;
	}

	return 0;
}

static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	/* At most two free regions on small-page devices. */
	if (section > 1)
		return -ERANGE;

	if (mtd->oobsize == 16) {
		if (section)
			return -ERANGE;

		oobregion->length = 8;
		oobregion->offset = 8;
	} else {
		oobregion->length = 2;
		if (!section)
			oobregion->offset = 3;
		else
			oobregion->offset = 6;
	}

	return 0;
}

const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
	.ecc = nand_ooblayout_ecc_sp,
	.free = nand_ooblayout_free_sp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);

static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* Large-page layout: a single ECC region at the end of the OOB. */
	if (section || !ecc->total)
		return -ERANGE;

	oobregion->length = ecc->total;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	/* Everything between the 2-byte BBM and the trailing ECC is free. */
	oobregion->length = mtd->oobsize - ecc->total - 2;
	oobregion->offset = 2;

	return 0;
}

const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.free = nand_ooblayout_free_lp,
};
EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);

/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
 * are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	/* Fixed ECC offsets used by the historical Hamming layout. */
	switch (mtd->oobsize) {
	case 64:
		oobregion->offset = 40;
		break;
	case 128:
		oobregion->offset = 80;
		break;
	default:
		return -EINVAL;
	}

	oobregion->length = ecc->total;
	if (oobregion->offset + oobregion->length > mtd->oobsize)
		return -ERANGE;

	return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
					  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ecc_offset = 0;

	if (section < 0 || section > 1)
		return -ERANGE;

	switch (mtd->oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -EINVAL;
	}

	if (section == 0) {
		/* Free bytes between the BBM and the fixed ECC area. */
		oobregion->offset = 2;
		oobregion->length = ecc_offset - 2;
	} else {
		/* Free bytes after the ECC area, up to the end of the OOB. */
		oobregion->offset = ecc_offset + ecc->total;
		oobregion->length = mtd->oobsize - oobregion->offset;
	}

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
	.ecc = nand_ooblayout_ecc_lp_hamming,
	.free = nand_ooblayout_free_lp_hamming,
};

static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
				       struct mtd_pairing_info *info)
{
	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
	int dist = 3;

	/* The last page of a block pairs at distance 2 instead of 3. */
	if (page == lastpage)
		dist = 2;

	if (!page || (page & 1)) {
		info->group = 0;
		info->pair = (page + 1) / 2;
	} else {
		info->group = 1;
		info->pair = (page + 1 - dist) / 2;
	}

	return 0;
}

static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
					const struct
					mtd_pairing_info *info)
{
	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
	int page = info->pair * 2;
	int dist = 3;

	if (!info->group && !info->pair)
		return 0;

	/* The very last pair uses distance 2 for its second group. */
	if (info->pair == lastpair && info->group)
		dist = 2;

	if (!info->group)
		page--;
	else if (info->pair)
		page += dist - 1;

	/* Reject pairs that would map past the end of the block. */
	if (page >= mtd->erasesize / mtd->writesize)
		return -EINVAL;

	return page;
}

const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};

static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}

/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 *	PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 */
	/* NOTE(review): '>' lets cs == nanddev_ntargets() through; confirm '>=' was not intended. */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);

/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);

/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}

/**
 * nand_bbm_get_next_page - Get the next page for bad block markers
 * @chip: NAND chip object
 * @page: First page to start checking for bad block marker usage
 *
 * Returns an integer that corresponds to the page offset within a block, for
 * a page that is used to store bad block markers. If no more pages are
 * available, -EINVAL is returned.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	/* Default to the first page if the chip declares no BBM page flags. */
	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}

/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Check every page that may carry a bad block marker. */
	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/* Fewer than badblockbits set bits means the block is bad. */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}

static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	/* Let the controller driver override the generic BBM check. */
	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}

/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 *
 * Return: -EBUSY if the chip has been suspended, 0 otherwise
 */
static int
nand_get_device(struct nand_chip *chip)
{
	mutex_lock(&chip->lock);
	/* A suspended chip must not be accessed; bail out with -EBUSY. */
	if (chip->suspended) {
		mutex_unlock(&chip->lock);
		return -EBUSY;
	}
	mutex_lock(&chip->controller->lock);

	return 0;
}

/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @chip: NAND chip object
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 */
static int nand_check_wp(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status & NAND_STATUS_WP ? 0 : 1;
}

/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/place modes copy the client bytes straight in. */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Auto mode scatters the bytes into the free OOB regions. */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}

/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	/* Write the OOB area, raw or via the ECC engine per requested mode. */
	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver.
 * It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit chips need an even offset and a 2-byte marker. */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	page_offset = nand_bbm_get_next_page(chip, 0);

	/* Write the marker to every page that is supposed to carry it. */
	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error but still try the remaining pages. */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}

/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	/* Let the controller driver override the default implementation. */
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		ret = nand_get_device(chip);
		if (ret)
			return ret;

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	/* Only bump the statistics when the block was marked successfully. */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Without a BBT nothing can be reserved. */
	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 * @allowbbt: 1, if its allowed to access the bbt area
 *
 * Check, if the block is bad.
 * Either by reading the bad block table or
 * calling of the scan function.
 */
static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
	/* Return info from the table */
	if (chip->bbt)
		return nand_isbad_bbt(chip, ofs, allowbbt);

	/* No BBT: fall back to reading the on-flash bad block marker. */
	return nand_isbad_bbm(chip, ofs);
}

/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * If that does not happen within the specified timeout, -ETIMEDOUT is
 * returned.
 *
 * This helper is intended to be used when the controller does not have access
 * to the NAND R/B pin.
 *
 * Be aware that calling this helper from an ->exec_op() implementation means
 * ->exec_op() must be re-entrant.
 *
 * Return 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_sdr_timings *timings;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg.
*/ 729 timings = nand_get_sdr_timings(&chip->data_interface); 730 ndelay(PSEC_TO_NSEC(timings->tWB_max)); 731 732 ret = nand_status_op(chip, NULL); 733 if (ret) 734 return ret; 735 736 /* 737 * +1 below is necessary because if we are now in the last fraction 738 * of jiffy and msecs_to_jiffies is 1 then we will wait only that 739 * small jiffy fraction - possibly leading to false timeout 740 */ 741 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; 742 do { 743 ret = nand_read_data_op(chip, &status, sizeof(status), true, 744 false); 745 if (ret) 746 break; 747 748 if (status & NAND_STATUS_READY) 749 break; 750 751 /* 752 * Typical lowest execution time for a tR on most NANDs is 10us, 753 * use this as polling delay before doing something smarter (ie. 754 * deriving a delay from the timeout value, timeout_ms/ratio). 755 */ 756 udelay(10); 757 } while (time_before(jiffies, timeout_ms)); 758 759 /* 760 * We have to exit READ_STATUS mode in order to read real data on the 761 * bus in case the WAITRDY instruction is preceding a DATA_IN 762 * instruction. 763 */ 764 nand_exit_status_op(chip); 765 766 if (ret) 767 return ret; 768 769 return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT; 770 }; 771 EXPORT_SYMBOL_GPL(nand_soft_waitrdy); 772 773 /** 774 * nand_gpio_waitrdy - Poll R/B GPIO pin until ready 775 * @chip: NAND chip structure 776 * @gpiod: GPIO descriptor of R/B pin 777 * @timeout_ms: Timeout in ms 778 * 779 * Poll the R/B GPIO pin until it becomes ready. If that does not happen 780 * whitin the specified timeout, -ETIMEDOUT is returned. 781 * 782 * This helper is intended to be used when the controller has access to the 783 * NAND R/B pin over GPIO. 784 * 785 * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise. 
786 */ 787 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, 788 unsigned long timeout_ms) 789 { 790 /* Wait until R/B pin indicates chip is ready or timeout occurs */ 791 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); 792 do { 793 if (gpiod_get_value_cansleep(gpiod)) 794 return 0; 795 796 cond_resched(); 797 } while (time_before(jiffies, timeout_ms)); 798 799 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT; 800 }; 801 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy); 802 803 /** 804 * panic_nand_wait - [GENERIC] wait until the command is done 805 * @chip: NAND chip structure 806 * @timeo: timeout 807 * 808 * Wait for command done. This is a helper function for nand_wait used when 809 * we are in interrupt context. May happen when in panic and trying to write 810 * an oops through mtdoops. 811 */ 812 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo) 813 { 814 int i; 815 for (i = 0; i < timeo; i++) { 816 if (chip->legacy.dev_ready) { 817 if (chip->legacy.dev_ready(chip)) 818 break; 819 } else { 820 int ret; 821 u8 status; 822 823 ret = nand_read_data_op(chip, &status, sizeof(status), 824 true, false); 825 if (ret) 826 return; 827 828 if (status & NAND_STATUS_READY) 829 break; 830 } 831 mdelay(1); 832 } 833 } 834 835 static bool nand_supports_get_features(struct nand_chip *chip, int addr) 836 { 837 return (chip->parameters.supports_set_get_features && 838 test_bit(addr, chip->parameters.get_feature_list)); 839 } 840 841 static bool nand_supports_set_features(struct nand_chip *chip, int addr) 842 { 843 return (chip->parameters.supports_set_get_features && 844 test_bit(addr, chip->parameters.set_feature_list)); 845 } 846 847 /** 848 * nand_reset_data_interface - Reset data interface and timings 849 * @chip: The NAND chip 850 * @chipnr: Internal die id 851 * 852 * Reset the Data interface and timings to ONFI mode 0. 853 * 854 * Returns 0 for success or negative error code otherwise. 
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	/* Switch the controller back to SDR timing mode 0. */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							  &chip->data_interface);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}

/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
		chip->onfi_timing_mode_default,
	};
	int ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = chip->controller->ops->setup_data_interface(chip, chipnr,
							  &chip->data_interface);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		return 0;

	/* Read back the timing mode to confirm the chip actually switched. */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->onfi_timing_mode_default) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->onfi_timing_mode_default);
		goto err_reset_chip;
	}

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_data_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}

/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	int modes, mode, ret;

	if (!nand_has_setup_data_iface(chip))
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	if (chip->parameters.onfi) {
		modes = chip->parameters.onfi->async_timing_mode;
	} else {
		if (!chip->onfi_timing_mode_default)
			return 0;

		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	/* Try the fastest advertised mode first, then fall back to slower ones. */
	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
		 * controller supports the requested timings.
		 */
		ret = chip->controller->ops->setup_data_interface(chip,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 &chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}

/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}

static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0.
	 */
	if (!len)
		op.ninstrs--;

	/* Pick the proper read command for the targeted area of the page. */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	/* Add a third row cycle on devices that need it. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	/* Add a third row cycle on devices that need it. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* Large and small page devices use different read sequences. */
		if (mtd->writesize > 512)
			return nand_lp_exec_read_page_op(chip, page,
							 offset_in_page, buf,
							 len);

		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
						 buf, len);
	}

	/* Legacy path for drivers without ->exec_op() support. */
	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);

/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Parameter pages are always read on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}

/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change.
	 */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);

/**
 * nand_read_oob_op - Do a READ OOB operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_oob: offset within the OOB area
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ OOB operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
		     unsigned int offset_in_oob, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_oob + len > mtd->oobsize)
		return -EINVAL;

	/* The OOB area is mapped right after the in-band data. */
	if (nand_has_exec_op(chip))
		return nand_read_page_op(chip, page,
					 mtd->writesize + offset_in_oob,
					 buf, len);

	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_oob_op);

/*
 * Core of the PROG PAGE sequence: SEQIN + address cycles + data out, and,
 * when @prog is true, PAGEPROG followed by a STATUS read whose byte is
 * returned (callers must check it against NAND_STATUS_FAIL).
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	/* Append the row cycles right after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	/* Return the raw STATUS byte so the caller can test NAND_STATUS_FAIL. */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}

/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog=false: PAGEPROG is issued later by nand_prog_page_end_op(). */
	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);

/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the STATUS byte to learn whether programming worked. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);

/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
1510 */ 1511 int nand_prog_page_op(struct nand_chip *chip, unsigned int page, 1512 unsigned int offset_in_page, const void *buf, 1513 unsigned int len) 1514 { 1515 struct mtd_info *mtd = nand_to_mtd(chip); 1516 int status; 1517 1518 if (!len || !buf) 1519 return -EINVAL; 1520 1521 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1522 return -EINVAL; 1523 1524 if (nand_has_exec_op(chip)) { 1525 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf, 1526 len, true); 1527 } else { 1528 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, 1529 page); 1530 chip->legacy.write_buf(chip, buf, len); 1531 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1); 1532 status = chip->legacy.waitfunc(chip); 1533 } 1534 1535 if (status & NAND_STATUS_FAIL) 1536 return -EIO; 1537 1538 return 0; 1539 } 1540 EXPORT_SYMBOL_GPL(nand_prog_page_op); 1541 1542 /** 1543 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation 1544 * @chip: The NAND chip 1545 * @offset_in_page: offset within the page 1546 * @buf: buffer containing the data to send to the NAND 1547 * @len: length of the buffer 1548 * @force_8bit: force 8-bit bus access 1549 * 1550 * This function issues a CHANGE WRITE COLUMN operation. 1551 * This function does not select/unselect the CS line. 1552 * 1553 * Returns 0 on success, a negative error code otherwise. 1554 */ 1555 int nand_change_write_column_op(struct nand_chip *chip, 1556 unsigned int offset_in_page, 1557 const void *buf, unsigned int len, 1558 bool force_8bit) 1559 { 1560 struct mtd_info *mtd = nand_to_mtd(chip); 1561 1562 if (len && !buf) 1563 return -EINVAL; 1564 1565 if (offset_in_page + len > mtd->writesize + mtd->oobsize) 1566 return -EINVAL; 1567 1568 /* Small page NANDs do not support column change. 
	 */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);

/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
			/* ID bytes are always transferred on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);

/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* A NULL @status means "send the command but skip the read". */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);

/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * This function sends a READ0 command to cancel the effect of the STATUS
 * command to avoid reading only the status until a new read command is sent.
 *
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}

/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
1727 */ 1728 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) 1729 { 1730 unsigned int page = eraseblock << 1731 (chip->phys_erase_shift - chip->page_shift); 1732 int ret; 1733 u8 status; 1734 1735 if (nand_has_exec_op(chip)) { 1736 const struct nand_sdr_timings *sdr = 1737 nand_get_sdr_timings(&chip->data_interface); 1738 u8 addrs[3] = { page, page >> 8, page >> 16 }; 1739 struct nand_op_instr instrs[] = { 1740 NAND_OP_CMD(NAND_CMD_ERASE1, 0), 1741 NAND_OP_ADDR(2, addrs, 0), 1742 NAND_OP_CMD(NAND_CMD_ERASE2, 1743 PSEC_TO_MSEC(sdr->tWB_max)), 1744 NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0), 1745 }; 1746 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); 1747 1748 if (chip->options & NAND_ROW_ADDR_3) 1749 instrs[1].ctx.addr.naddrs++; 1750 1751 ret = nand_exec_op(chip, &op); 1752 if (ret) 1753 return ret; 1754 1755 ret = nand_status_op(chip, &status); 1756 if (ret) 1757 return ret; 1758 } else { 1759 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page); 1760 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1); 1761 1762 ret = chip->legacy.waitfunc(chip); 1763 if (ret < 0) 1764 return ret; 1765 1766 status = ret; 1767 } 1768 1769 if (status & NAND_STATUS_FAIL) 1770 return -EIO; 1771 1772 return 0; 1773 } 1774 EXPORT_SYMBOL_GPL(nand_erase_op); 1775 1776 /** 1777 * nand_set_features_op - Do a SET FEATURES operation 1778 * @chip: The NAND chip 1779 * @feature: feature id 1780 * @data: 4 bytes of data 1781 * 1782 * This function sends a SET FEATURES command and waits for the NAND to be 1783 * ready before returning. 1784 * This function does not select/unselect the CS line. 1785 * 1786 * Returns 0 on success, a negative error code otherwise. 
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			/* Feature parameters always travel on an 8-bit bus. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			/* Feature parameters always travel on an 8-bit bus. */
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}

/*
 * Wait for the chip to become ready again, either through the R/B pin
 * (exec_op / dev_ready) or by applying a fixed delay.
 *
 * NOTE(review): the parameter names say @timeout_ms/@delay_ns are already in
 * milliseconds/nanoseconds, yet they are wrapped in PSEC_TO_MSEC()/
 * PSEC_TO_NSEC() below (a divide by 10^9), which would truncate both to 0
 * unless the callers actually pass picoseconds — verify against the callers.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}

/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(&chip->data_interface);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);

/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only checks if the
 *		controller driver supports it
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		/* Only ask the controller whether it could execute the op. */
		if (check_only)
			return nand_check_op(chip, &op);

		return nand_exec_op(chip, &op);
	}

	/* The legacy path has no way to check support; assume it works. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		/* Byte-wise reads to keep the transfer on an 8-bit bus. */
		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);

/**
 * nand_write_data_op - Write data to the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send on the bus
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function does a raw data write on the bus. Usually used after launching
 * another NAND operation like nand_write_page_begin_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		/* Byte-wise writes to keep the transfer on an 8-bit bus. */
		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);

/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};

/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: pointer to the instruction to check
 * @start_offset: this is an in/out parameter. If @instr has already been
 *		  split, then @start_offset is the offset from which to start
 *		  (either an address cycle or an offset in the data buffer).
 *		  Conversely, if the function returns true (ie. instr must be
 *		  split), this parameter is updated to point to the first
 *		  data/address cycle that has not been taken care of.
 *
 * Some NAND controllers are limited and cannot send X address cycles with a
 * unique operation, or cannot read/write more than Y bytes at the same time.
 * In this case, split the instruction that does not fit in a single
 * controller-operation into two or more chunks.
 *
 * Returns true if the instruction must be split, false otherwise.
 * The @start_offset parameter is also updated to the offset at which the next
 * bundle of instruction must start (if an address or a data instruction).
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit": never split. */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit": never split. */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}

/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false ortherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}

#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump every instruction of the operation, marking the current subop span. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop:\n");

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* "->" flags the instructions belonging to the current subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif

/*
 * Order two candidate sub-operations by progress: more instructions wins,
 * then a later split offset within the last instruction.
 */
static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
				  const struct nand_op_parser_ctx *b)
{
	if (a->subop.ninstrs < b->subop.ninstrs)
		return -1;
	else if (a->subop.ninstrs > b->subop.ninstrs)
		return 1;

	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
		return -1;
	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
		return 1;

	return 0;
}

/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences.
The supported sequences 2221 * are described in @parser, and the framework takes care of splitting @op into 2222 * multiple sub-operations (if required) and pass them back to the ->exec() 2223 * callback of the matching pattern if @check_only is set to false. 2224 * 2225 * NAND controller drivers should call this function from their own ->exec_op() 2226 * implementation. 2227 * 2228 * Returns 0 on success, a negative error code otherwise. A failure can be 2229 * caused by an unsupported operation (none of the supported patterns is able 2230 * to handle the requested operation), or an error returned by one of the 2231 * matching pattern->exec() hook. 2232 */ 2233 int nand_op_parser_exec_op(struct nand_chip *chip, 2234 const struct nand_op_parser *parser, 2235 const struct nand_operation *op, bool check_only) 2236 { 2237 struct nand_op_parser_ctx ctx = { 2238 .subop.instrs = op->instrs, 2239 .instrs = op->instrs, 2240 .ninstrs = op->ninstrs, 2241 }; 2242 unsigned int i; 2243 2244 while (ctx.subop.instrs < op->instrs + op->ninstrs) { 2245 const struct nand_op_parser_pattern *pattern; 2246 struct nand_op_parser_ctx best_ctx; 2247 int ret, best_pattern = -1; 2248 2249 for (i = 0; i < parser->npatterns; i++) { 2250 struct nand_op_parser_ctx test_ctx = ctx; 2251 2252 pattern = &parser->patterns[i]; 2253 if (!nand_op_parser_match_pat(pattern, &test_ctx)) 2254 continue; 2255 2256 if (best_pattern >= 0 && 2257 nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0) 2258 continue; 2259 2260 best_pattern = i; 2261 best_ctx = test_ctx; 2262 } 2263 2264 if (best_pattern < 0) { 2265 pr_debug("->exec_op() parser: pattern not found!\n"); 2266 return -ENOTSUPP; 2267 } 2268 2269 ctx = best_ctx; 2270 nand_op_parser_trace(&ctx); 2271 2272 if (!check_only) { 2273 pattern = &parser->patterns[best_pattern]; 2274 ret = pattern->exec(chip, &ctx.subop); 2275 if (ret) 2276 return ret; 2277 } 2278 2279 /* 2280 * Update the context structure by pointing to the start of the 2281 * next subop. 
2282 */ 2283 ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs; 2284 if (ctx.subop.last_instr_end_off) 2285 ctx.subop.instrs -= 1; 2286 2287 ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off; 2288 } 2289 2290 return 0; 2291 } 2292 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op); 2293 2294 static bool nand_instr_is_data(const struct nand_op_instr *instr) 2295 { 2296 return instr && (instr->type == NAND_OP_DATA_IN_INSTR || 2297 instr->type == NAND_OP_DATA_OUT_INSTR); 2298 } 2299 2300 static bool nand_subop_instr_is_valid(const struct nand_subop *subop, 2301 unsigned int instr_idx) 2302 { 2303 return subop && instr_idx < subop->ninstrs; 2304 } 2305 2306 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop, 2307 unsigned int instr_idx) 2308 { 2309 if (instr_idx) 2310 return 0; 2311 2312 return subop->first_instr_start_off; 2313 } 2314 2315 /** 2316 * nand_subop_get_addr_start_off - Get the start offset in an address array 2317 * @subop: The entire sub-operation 2318 * @instr_idx: Index of the instruction inside the sub-operation 2319 * 2320 * During driver development, one could be tempted to directly use the 2321 * ->addr.addrs field of address instructions. This is wrong as address 2322 * instructions might be split. 2323 * 2324 * Given an address instruction, returns the offset of the first cycle to issue. 
2325 */ 2326 unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop, 2327 unsigned int instr_idx) 2328 { 2329 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2330 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2331 return 0; 2332 2333 return nand_subop_get_start_off(subop, instr_idx); 2334 } 2335 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off); 2336 2337 /** 2338 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert 2339 * @subop: The entire sub-operation 2340 * @instr_idx: Index of the instruction inside the sub-operation 2341 * 2342 * During driver development, one could be tempted to directly use the 2343 * ->addr->naddrs field of a data instruction. This is wrong as instructions 2344 * might be split. 2345 * 2346 * Given an address instruction, returns the number of address cycle to issue. 2347 */ 2348 unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, 2349 unsigned int instr_idx) 2350 { 2351 int start_off, end_off; 2352 2353 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2354 subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)) 2355 return 0; 2356 2357 start_off = nand_subop_get_addr_start_off(subop, instr_idx); 2358 2359 if (instr_idx == subop->ninstrs - 1 && 2360 subop->last_instr_end_off) 2361 end_off = subop->last_instr_end_off; 2362 else 2363 end_off = subop->instrs[instr_idx].ctx.addr.naddrs; 2364 2365 return end_off - start_off; 2366 } 2367 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc); 2368 2369 /** 2370 * nand_subop_get_data_start_off - Get the start offset in a data array 2371 * @subop: The entire sub-operation 2372 * @instr_idx: Index of the instruction inside the sub-operation 2373 * 2374 * During driver development, one could be tempted to directly use the 2375 * ->data->buf.{in,out} field of data instructions. This is wrong as data 2376 * instructions might be split. 2377 * 2378 * Given a data instruction, returns the offset to start from. 
2379 */ 2380 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop, 2381 unsigned int instr_idx) 2382 { 2383 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2384 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2385 return 0; 2386 2387 return nand_subop_get_start_off(subop, instr_idx); 2388 } 2389 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off); 2390 2391 /** 2392 * nand_subop_get_data_len - Get the number of bytes to retrieve 2393 * @subop: The entire sub-operation 2394 * @instr_idx: Index of the instruction inside the sub-operation 2395 * 2396 * During driver development, one could be tempted to directly use the 2397 * ->data->len field of a data instruction. This is wrong as data instructions 2398 * might be split. 2399 * 2400 * Returns the length of the chunk of data to send/receive. 2401 */ 2402 unsigned int nand_subop_get_data_len(const struct nand_subop *subop, 2403 unsigned int instr_idx) 2404 { 2405 int start_off = 0, end_off; 2406 2407 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) || 2408 !nand_instr_is_data(&subop->instrs[instr_idx]))) 2409 return 0; 2410 2411 start_off = nand_subop_get_data_start_off(subop, instr_idx); 2412 2413 if (instr_idx == subop->ninstrs - 1 && 2414 subop->last_instr_end_off) 2415 end_off = subop->last_instr_end_off; 2416 else 2417 end_off = subop->instrs[instr_idx].ctx.data.len; 2418 2419 return end_off - start_off; 2420 } 2421 EXPORT_SYMBOL_GPL(nand_subop_get_data_len); 2422 2423 /** 2424 * nand_reset - Reset and initialize a NAND device 2425 * @chip: The NAND chip 2426 * @chipnr: Internal die id 2427 * 2428 * Save the timings data structure, then apply SDR timings mode 0 (see 2429 * nand_reset_data_interface for details), do the reset operation, and 2430 * apply back the previous timings. 2431 * 2432 * Returns 0 on success, a negative error code otherwise. 
2433 */ 2434 int nand_reset(struct nand_chip *chip, int chipnr) 2435 { 2436 struct nand_data_interface saved_data_intf = chip->data_interface; 2437 int ret; 2438 2439 ret = nand_reset_data_interface(chip, chipnr); 2440 if (ret) 2441 return ret; 2442 2443 /* 2444 * The CS line has to be released before we can apply the new NAND 2445 * interface settings, hence this weird nand_select_target() 2446 * nand_deselect_target() dance. 2447 */ 2448 nand_select_target(chip, chipnr); 2449 ret = nand_reset_op(chip); 2450 nand_deselect_target(chip); 2451 if (ret) 2452 return ret; 2453 2454 /* 2455 * A nand_reset_data_interface() put both the NAND chip and the NAND 2456 * controller in timings mode 0. If the default mode for this chip is 2457 * also 0, no need to proceed to the change again. Plus, at probe time, 2458 * nand_setup_data_interface() uses ->set/get_features() which would 2459 * fail anyway as the parameter page is not available yet. 2460 */ 2461 if (!chip->onfi_timing_mode_default) 2462 return 0; 2463 2464 chip->data_interface = saved_data_intf; 2465 ret = nand_setup_data_interface(chip, chipnr); 2466 if (ret) 2467 return ret; 2468 2469 return 0; 2470 } 2471 EXPORT_SYMBOL_GPL(nand_reset); 2472 2473 /** 2474 * nand_get_features - wrapper to perform a GET_FEATURE 2475 * @chip: NAND chip info structure 2476 * @addr: feature address 2477 * @subfeature_param: the subfeature parameters, a four bytes array 2478 * 2479 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2480 * operation cannot be handled. 
2481 */ 2482 int nand_get_features(struct nand_chip *chip, int addr, 2483 u8 *subfeature_param) 2484 { 2485 if (!nand_supports_get_features(chip, addr)) 2486 return -ENOTSUPP; 2487 2488 if (chip->legacy.get_features) 2489 return chip->legacy.get_features(chip, addr, subfeature_param); 2490 2491 return nand_get_features_op(chip, addr, subfeature_param); 2492 } 2493 2494 /** 2495 * nand_set_features - wrapper to perform a SET_FEATURE 2496 * @chip: NAND chip info structure 2497 * @addr: feature address 2498 * @subfeature_param: the subfeature parameters, a four bytes array 2499 * 2500 * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the 2501 * operation cannot be handled. 2502 */ 2503 int nand_set_features(struct nand_chip *chip, int addr, 2504 u8 *subfeature_param) 2505 { 2506 if (!nand_supports_set_features(chip, addr)) 2507 return -ENOTSUPP; 2508 2509 if (chip->legacy.set_features) 2510 return chip->legacy.set_features(chip, addr, subfeature_param); 2511 2512 return nand_set_features_op(chip, addr, subfeature_param); 2513 } 2514 2515 /** 2516 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data 2517 * @buf: buffer to test 2518 * @len: buffer length 2519 * @bitflips_threshold: maximum number of bitflips 2520 * 2521 * Check if a buffer contains only 0xff, which means the underlying region 2522 * has been erased and is ready to be programmed. 2523 * The bitflips_threshold specify the maximum number of bitflips before 2524 * considering the region is not erased. 2525 * Note: The logic of this function has been extracted from the memweight 2526 * implementation, except that nand_check_erased_buf function exit before 2527 * testing the whole buffer if the number of bitflips exceed the 2528 * bitflips_threshold value. 2529 * 2530 * Returns a positive number of bitflips less than or equal to 2531 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2532 * threshold. 
2533 */ 2534 static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) 2535 { 2536 const unsigned char *bitmap = buf; 2537 int bitflips = 0; 2538 int weight; 2539 2540 for (; len && ((uintptr_t)bitmap) % sizeof(long); 2541 len--, bitmap++) { 2542 weight = hweight8(*bitmap); 2543 bitflips += BITS_PER_BYTE - weight; 2544 if (unlikely(bitflips > bitflips_threshold)) 2545 return -EBADMSG; 2546 } 2547 2548 for (; len >= sizeof(long); 2549 len -= sizeof(long), bitmap += sizeof(long)) { 2550 unsigned long d = *((unsigned long *)bitmap); 2551 if (d == ~0UL) 2552 continue; 2553 weight = hweight_long(d); 2554 bitflips += BITS_PER_LONG - weight; 2555 if (unlikely(bitflips > bitflips_threshold)) 2556 return -EBADMSG; 2557 } 2558 2559 for (; len > 0; len--, bitmap++) { 2560 weight = hweight8(*bitmap); 2561 bitflips += BITS_PER_BYTE - weight; 2562 if (unlikely(bitflips > bitflips_threshold)) 2563 return -EBADMSG; 2564 } 2565 2566 return bitflips; 2567 } 2568 2569 /** 2570 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only 2571 * 0xff data 2572 * @data: data buffer to test 2573 * @datalen: data length 2574 * @ecc: ECC buffer 2575 * @ecclen: ECC length 2576 * @extraoob: extra OOB buffer 2577 * @extraooblen: extra OOB length 2578 * @bitflips_threshold: maximum number of bitflips 2579 * 2580 * Check if a data buffer and its associated ECC and OOB data contains only 2581 * 0xff pattern, which means the underlying region has been erased and is 2582 * ready to be programmed. 2583 * The bitflips_threshold specify the maximum number of bitflips before 2584 * considering the region as not erased. 2585 * 2586 * Note: 2587 * 1/ ECC algorithms are working on pre-defined block sizes which are usually 2588 * different from the NAND page size. When fixing bitflips, ECC engines will 2589 * report the number of errors per chunk, and the NAND core infrastructure 2590 * expect you to return the maximum number of bitflips for the whole page. 
2591 * This is why you should always use this function on a single chunk and 2592 * not on the whole page. After checking each chunk you should update your 2593 * max_bitflips value accordingly. 2594 * 2/ When checking for bitflips in erased pages you should not only check 2595 * the payload data but also their associated ECC data, because a user might 2596 * have programmed almost all bits to 1 but a few. In this case, we 2597 * shouldn't consider the chunk as erased, and checking ECC bytes prevent 2598 * this case. 2599 * 3/ The extraoob argument is optional, and should be used if some of your OOB 2600 * data are protected by the ECC engine. 2601 * It could also be used if you support subpages and want to attach some 2602 * extra OOB data to an ECC chunk. 2603 * 2604 * Returns a positive number of bitflips less than or equal to 2605 * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the 2606 * threshold. In case of success, the passed buffers are filled with 0xff. 2607 */ 2608 int nand_check_erased_ecc_chunk(void *data, int datalen, 2609 void *ecc, int ecclen, 2610 void *extraoob, int extraooblen, 2611 int bitflips_threshold) 2612 { 2613 int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0; 2614 2615 data_bitflips = nand_check_erased_buf(data, datalen, 2616 bitflips_threshold); 2617 if (data_bitflips < 0) 2618 return data_bitflips; 2619 2620 bitflips_threshold -= data_bitflips; 2621 2622 ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold); 2623 if (ecc_bitflips < 0) 2624 return ecc_bitflips; 2625 2626 bitflips_threshold -= ecc_bitflips; 2627 2628 extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen, 2629 bitflips_threshold); 2630 if (extraoob_bitflips < 0) 2631 return extraoob_bitflips; 2632 2633 if (data_bitflips) 2634 memset(data, 0xff, datalen); 2635 2636 if (ecc_bitflips) 2637 memset(ecc, 0xff, ecclen); 2638 2639 if (extraoob_bitflips) 2640 memset(extraoob, 0xff, extraooblen); 2641 2642 return data_bitflips + 
ecc_bitflips + extraoob_bitflips; 2643 } 2644 EXPORT_SYMBOL(nand_check_erased_ecc_chunk); 2645 2646 /** 2647 * nand_read_page_raw_notsupp - dummy read raw page function 2648 * @chip: nand chip info structure 2649 * @buf: buffer to store read data 2650 * @oob_required: caller requires OOB data read to chip->oob_poi 2651 * @page: page number to read 2652 * 2653 * Returns -ENOTSUPP unconditionally. 2654 */ 2655 int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, 2656 int oob_required, int page) 2657 { 2658 return -ENOTSUPP; 2659 } 2660 2661 /** 2662 * nand_read_page_raw - [INTERN] read raw page data without ecc 2663 * @chip: nand chip info structure 2664 * @buf: buffer to store read data 2665 * @oob_required: caller requires OOB data read to chip->oob_poi 2666 * @page: page number to read 2667 * 2668 * Not for syndrome calculating ECC controllers, which use a special oob layout. 2669 */ 2670 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, 2671 int page) 2672 { 2673 struct mtd_info *mtd = nand_to_mtd(chip); 2674 int ret; 2675 2676 ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize); 2677 if (ret) 2678 return ret; 2679 2680 if (oob_required) { 2681 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, 2682 false, false); 2683 if (ret) 2684 return ret; 2685 } 2686 2687 return 0; 2688 } 2689 EXPORT_SYMBOL(nand_read_page_raw); 2690 2691 /** 2692 * nand_monolithic_read_page_raw - Monolithic page read in raw mode 2693 * @chip: NAND chip info structure 2694 * @buf: buffer to store read data 2695 * @oob_required: caller requires OOB data read to chip->oob_poi 2696 * @page: page number to read 2697 * 2698 * This is a raw page read, ie. without any error detection/correction. 2699 * Monolithic means we are requesting all the relevant data (main plus 2700 * eventually OOB) to be loaded in the NAND cache and sent over the 2701 * bus (from the NAND chip to the NAND controller) in a single 2702 * operation. 
This is an alternative to nand_read_page_raw(), which
 * first reads the main data, and if the OOB data is requested too,
 * then reads more data on the bus.
 */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * The caller's buffer only covers the main area: borrow the
		 * chip's internal buffer so the OOB bytes land in
		 * chip->oob_poi (which aliases the tail of that buffer).
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	/*
	 * Copy the main data back to the caller's buffer only when we
	 * actually read into the internal buffer. The previous check
	 * (buf != chip->data_buf) triggered a memcpy() with identical
	 * source and destination whenever OOB was not required, which is
	 * undefined behavior for memcpy().
	 */
	if (read_buf != buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);

/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
2740 */ 2741 static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, 2742 int oob_required, int page) 2743 { 2744 struct mtd_info *mtd = nand_to_mtd(chip); 2745 int eccsize = chip->ecc.size; 2746 int eccbytes = chip->ecc.bytes; 2747 uint8_t *oob = chip->oob_poi; 2748 int steps, size, ret; 2749 2750 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2751 if (ret) 2752 return ret; 2753 2754 for (steps = chip->ecc.steps; steps > 0; steps--) { 2755 ret = nand_read_data_op(chip, buf, eccsize, false, false); 2756 if (ret) 2757 return ret; 2758 2759 buf += eccsize; 2760 2761 if (chip->ecc.prepad) { 2762 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 2763 false, false); 2764 if (ret) 2765 return ret; 2766 2767 oob += chip->ecc.prepad; 2768 } 2769 2770 ret = nand_read_data_op(chip, oob, eccbytes, false, false); 2771 if (ret) 2772 return ret; 2773 2774 oob += eccbytes; 2775 2776 if (chip->ecc.postpad) { 2777 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 2778 false, false); 2779 if (ret) 2780 return ret; 2781 2782 oob += chip->ecc.postpad; 2783 } 2784 } 2785 2786 size = mtd->oobsize - (oob - chip->oob_poi); 2787 if (size) { 2788 ret = nand_read_data_op(chip, oob, size, false, false); 2789 if (ret) 2790 return ret; 2791 } 2792 2793 return 0; 2794 } 2795 2796 /** 2797 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function 2798 * @chip: nand chip info structure 2799 * @buf: buffer to store read data 2800 * @oob_required: caller requires OOB data read to chip->oob_poi 2801 * @page: page number to read 2802 */ 2803 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf, 2804 int oob_required, int page) 2805 { 2806 struct mtd_info *mtd = nand_to_mtd(chip); 2807 int i, eccsize = chip->ecc.size, ret; 2808 int eccbytes = chip->ecc.bytes; 2809 int eccsteps = chip->ecc.steps; 2810 uint8_t *p = buf; 2811 uint8_t *ecc_calc = chip->ecc.calc_buf; 2812 uint8_t *ecc_code = chip->ecc.code_buf; 2813 unsigned int max_bitflips = 
0; 2814 2815 chip->ecc.read_page_raw(chip, buf, 1, page); 2816 2817 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 2818 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2819 2820 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2821 chip->ecc.total); 2822 if (ret) 2823 return ret; 2824 2825 eccsteps = chip->ecc.steps; 2826 p = buf; 2827 2828 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2829 int stat; 2830 2831 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 2832 if (stat < 0) { 2833 mtd->ecc_stats.failed++; 2834 } else { 2835 mtd->ecc_stats.corrected += stat; 2836 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2837 } 2838 } 2839 return max_bitflips; 2840 } 2841 2842 /** 2843 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function 2844 * @chip: nand chip info structure 2845 * @data_offs: offset of requested data within the page 2846 * @readlen: data length 2847 * @bufpoi: buffer to store read data 2848 * @page: page number to read 2849 */ 2850 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs, 2851 uint32_t readlen, uint8_t *bufpoi, int page) 2852 { 2853 struct mtd_info *mtd = nand_to_mtd(chip); 2854 int start_step, end_step, num_steps, ret; 2855 uint8_t *p; 2856 int data_col_addr, i, gaps = 0; 2857 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 2858 int busw = (chip->options & NAND_BUSWIDTH_16) ? 
2 : 1; 2859 int index, section = 0; 2860 unsigned int max_bitflips = 0; 2861 struct mtd_oob_region oobregion = { }; 2862 2863 /* Column address within the page aligned to ECC size (256bytes) */ 2864 start_step = data_offs / chip->ecc.size; 2865 end_step = (data_offs + readlen - 1) / chip->ecc.size; 2866 num_steps = end_step - start_step + 1; 2867 index = start_step * chip->ecc.bytes; 2868 2869 /* Data size aligned to ECC ecc.size */ 2870 datafrag_len = num_steps * chip->ecc.size; 2871 eccfrag_len = num_steps * chip->ecc.bytes; 2872 2873 data_col_addr = start_step * chip->ecc.size; 2874 /* If we read not a page aligned data */ 2875 p = bufpoi + data_col_addr; 2876 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len); 2877 if (ret) 2878 return ret; 2879 2880 /* Calculate ECC */ 2881 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) 2882 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]); 2883 2884 /* 2885 * The performance is faster if we position offsets according to 2886 * ecc.pos. Let's make sure that there are no gaps in ECC positions. 2887 */ 2888 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion); 2889 if (ret) 2890 return ret; 2891 2892 if (oobregion.length < eccfrag_len) 2893 gaps = 1; 2894 2895 if (gaps) { 2896 ret = nand_change_read_column_op(chip, mtd->writesize, 2897 chip->oob_poi, mtd->oobsize, 2898 false); 2899 if (ret) 2900 return ret; 2901 } else { 2902 /* 2903 * Send the command to read the particular ECC bytes take care 2904 * about buswidth alignment in read_buf. 
2905 */ 2906 aligned_pos = oobregion.offset & ~(busw - 1); 2907 aligned_len = eccfrag_len; 2908 if (oobregion.offset & (busw - 1)) 2909 aligned_len++; 2910 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) & 2911 (busw - 1)) 2912 aligned_len++; 2913 2914 ret = nand_change_read_column_op(chip, 2915 mtd->writesize + aligned_pos, 2916 &chip->oob_poi[aligned_pos], 2917 aligned_len, false); 2918 if (ret) 2919 return ret; 2920 } 2921 2922 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf, 2923 chip->oob_poi, index, eccfrag_len); 2924 if (ret) 2925 return ret; 2926 2927 p = bufpoi + data_col_addr; 2928 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { 2929 int stat; 2930 2931 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i], 2932 &chip->ecc.calc_buf[i]); 2933 if (stat == -EBADMSG && 2934 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 2935 /* check for empty pages with bitflips */ 2936 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 2937 &chip->ecc.code_buf[i], 2938 chip->ecc.bytes, 2939 NULL, 0, 2940 chip->ecc.strength); 2941 } 2942 2943 if (stat < 0) { 2944 mtd->ecc_stats.failed++; 2945 } else { 2946 mtd->ecc_stats.corrected += stat; 2947 max_bitflips = max_t(unsigned int, max_bitflips, stat); 2948 } 2949 } 2950 return max_bitflips; 2951 } 2952 2953 /** 2954 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 2955 * @chip: nand chip info structure 2956 * @buf: buffer to store read data 2957 * @oob_required: caller requires OOB data read to chip->oob_poi 2958 * @page: page number to read 2959 * 2960 * Not for syndrome calculating ECC controllers which need a special oob layout. 
2961 */ 2962 static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, 2963 int oob_required, int page) 2964 { 2965 struct mtd_info *mtd = nand_to_mtd(chip); 2966 int i, eccsize = chip->ecc.size, ret; 2967 int eccbytes = chip->ecc.bytes; 2968 int eccsteps = chip->ecc.steps; 2969 uint8_t *p = buf; 2970 uint8_t *ecc_calc = chip->ecc.calc_buf; 2971 uint8_t *ecc_code = chip->ecc.code_buf; 2972 unsigned int max_bitflips = 0; 2973 2974 ret = nand_read_page_op(chip, page, 0, NULL, 0); 2975 if (ret) 2976 return ret; 2977 2978 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 2979 chip->ecc.hwctl(chip, NAND_ECC_READ); 2980 2981 ret = nand_read_data_op(chip, p, eccsize, false, false); 2982 if (ret) 2983 return ret; 2984 2985 chip->ecc.calculate(chip, p, &ecc_calc[i]); 2986 } 2987 2988 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, 2989 false); 2990 if (ret) 2991 return ret; 2992 2993 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 2994 chip->ecc.total); 2995 if (ret) 2996 return ret; 2997 2998 eccsteps = chip->ecc.steps; 2999 p = buf; 3000 3001 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3002 int stat; 3003 3004 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]); 3005 if (stat == -EBADMSG && 3006 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3007 /* check for empty pages with bitflips */ 3008 stat = nand_check_erased_ecc_chunk(p, eccsize, 3009 &ecc_code[i], eccbytes, 3010 NULL, 0, 3011 chip->ecc.strength); 3012 } 3013 3014 if (stat < 0) { 3015 mtd->ecc_stats.failed++; 3016 } else { 3017 mtd->ecc_stats.corrected += stat; 3018 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3019 } 3020 } 3021 return max_bitflips; 3022 } 3023 3024 /** 3025 * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first 3026 * @chip: nand chip info structure 3027 * @buf: buffer to store read data 3028 * @oob_required: caller requires OOB data read to chip->oob_poi 3029 * 
@page: page number to read 3030 * 3031 * Hardware ECC for large page chips, require OOB to be read first. For this 3032 * ECC mode, the write_page method is re-used from ECC_HW. These methods 3033 * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with 3034 * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from 3035 * the data area, by overwriting the NAND manufacturer bad block markings. 3036 */ 3037 static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, 3038 int oob_required, int page) 3039 { 3040 struct mtd_info *mtd = nand_to_mtd(chip); 3041 int i, eccsize = chip->ecc.size, ret; 3042 int eccbytes = chip->ecc.bytes; 3043 int eccsteps = chip->ecc.steps; 3044 uint8_t *p = buf; 3045 uint8_t *ecc_code = chip->ecc.code_buf; 3046 uint8_t *ecc_calc = chip->ecc.calc_buf; 3047 unsigned int max_bitflips = 0; 3048 3049 /* Read the OOB area first */ 3050 ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); 3051 if (ret) 3052 return ret; 3053 3054 ret = nand_read_page_op(chip, page, 0, NULL, 0); 3055 if (ret) 3056 return ret; 3057 3058 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 3059 chip->ecc.total); 3060 if (ret) 3061 return ret; 3062 3063 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3064 int stat; 3065 3066 chip->ecc.hwctl(chip, NAND_ECC_READ); 3067 3068 ret = nand_read_data_op(chip, p, eccsize, false, false); 3069 if (ret) 3070 return ret; 3071 3072 chip->ecc.calculate(chip, p, &ecc_calc[i]); 3073 3074 stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); 3075 if (stat == -EBADMSG && 3076 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3077 /* check for empty pages with bitflips */ 3078 stat = nand_check_erased_ecc_chunk(p, eccsize, 3079 &ecc_code[i], eccbytes, 3080 NULL, 0, 3081 chip->ecc.strength); 3082 } 3083 3084 if (stat < 0) { 3085 mtd->ecc_stats.failed++; 3086 } else { 3087 mtd->ecc_stats.corrected += stat; 3088 max_bitflips = 
max_t(unsigned int, max_bitflips, stat); 3089 } 3090 } 3091 return max_bitflips; 3092 } 3093 3094 /** 3095 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read 3096 * @chip: nand chip info structure 3097 * @buf: buffer to store read data 3098 * @oob_required: caller requires OOB data read to chip->oob_poi 3099 * @page: page number to read 3100 * 3101 * The hw generator calculates the error syndrome automatically. Therefore we 3102 * need a special oob layout and handling. 3103 */ 3104 static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, 3105 int oob_required, int page) 3106 { 3107 struct mtd_info *mtd = nand_to_mtd(chip); 3108 int ret, i, eccsize = chip->ecc.size; 3109 int eccbytes = chip->ecc.bytes; 3110 int eccsteps = chip->ecc.steps; 3111 int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad; 3112 uint8_t *p = buf; 3113 uint8_t *oob = chip->oob_poi; 3114 unsigned int max_bitflips = 0; 3115 3116 ret = nand_read_page_op(chip, page, 0, NULL, 0); 3117 if (ret) 3118 return ret; 3119 3120 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3121 int stat; 3122 3123 chip->ecc.hwctl(chip, NAND_ECC_READ); 3124 3125 ret = nand_read_data_op(chip, p, eccsize, false, false); 3126 if (ret) 3127 return ret; 3128 3129 if (chip->ecc.prepad) { 3130 ret = nand_read_data_op(chip, oob, chip->ecc.prepad, 3131 false, false); 3132 if (ret) 3133 return ret; 3134 3135 oob += chip->ecc.prepad; 3136 } 3137 3138 chip->ecc.hwctl(chip, NAND_ECC_READSYN); 3139 3140 ret = nand_read_data_op(chip, oob, eccbytes, false, false); 3141 if (ret) 3142 return ret; 3143 3144 stat = chip->ecc.correct(chip, p, oob, NULL); 3145 3146 oob += eccbytes; 3147 3148 if (chip->ecc.postpad) { 3149 ret = nand_read_data_op(chip, oob, chip->ecc.postpad, 3150 false, false); 3151 if (ret) 3152 return ret; 3153 3154 oob += chip->ecc.postpad; 3155 } 3156 3157 if (stat == -EBADMSG && 3158 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { 3159 /* 
check for empty pages with bitflips */ 3160 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size, 3161 oob - eccpadbytes, 3162 eccpadbytes, 3163 NULL, 0, 3164 chip->ecc.strength); 3165 } 3166 3167 if (stat < 0) { 3168 mtd->ecc_stats.failed++; 3169 } else { 3170 mtd->ecc_stats.corrected += stat; 3171 max_bitflips = max_t(unsigned int, max_bitflips, stat); 3172 } 3173 } 3174 3175 /* Calculate remaining oob bytes */ 3176 i = mtd->oobsize - (oob - chip->oob_poi); 3177 if (i) { 3178 ret = nand_read_data_op(chip, oob, i, false, false); 3179 if (ret) 3180 return ret; 3181 } 3182 3183 return max_bitflips; 3184 } 3185 3186 /** 3187 * nand_transfer_oob - [INTERN] Transfer oob to client buffer 3188 * @chip: NAND chip object 3189 * @oob: oob destination address 3190 * @ops: oob ops structure 3191 * @len: size of oob to transfer 3192 */ 3193 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, 3194 struct mtd_oob_ops *ops, size_t len) 3195 { 3196 struct mtd_info *mtd = nand_to_mtd(chip); 3197 int ret; 3198 3199 switch (ops->mode) { 3200 3201 case MTD_OPS_PLACE_OOB: 3202 case MTD_OPS_RAW: 3203 memcpy(oob, chip->oob_poi + ops->ooboffs, len); 3204 return oob + len; 3205 3206 case MTD_OPS_AUTO_OOB: 3207 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi, 3208 ops->ooboffs, len); 3209 BUG_ON(ret); 3210 return oob + len; 3211 3212 default: 3213 BUG(); 3214 } 3215 return NULL; 3216 } 3217 3218 /** 3219 * nand_setup_read_retry - [INTERN] Set the READ RETRY mode 3220 * @chip: NAND chip object 3221 * @retry_mode: the retry mode to use 3222 * 3223 * Some vendors supply a special command to shift the Vt threshold, to be used 3224 * when there are too many bitflips in a page (i.e., ECC error). After setting 3225 * a new threshold, the host should retry reading the page. 
3226 */ 3227 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) 3228 { 3229 pr_debug("setting READ RETRY mode %d\n", retry_mode); 3230 3231 if (retry_mode >= chip->read_retries) 3232 return -EINVAL; 3233 3234 if (!chip->setup_read_retry) 3235 return -EOPNOTSUPP; 3236 3237 return chip->setup_read_retry(chip, retry_mode); 3238 } 3239 3240 static void nand_wait_readrdy(struct nand_chip *chip) 3241 { 3242 const struct nand_sdr_timings *sdr; 3243 3244 if (!(chip->options & NAND_NEED_READRDY)) 3245 return; 3246 3247 sdr = nand_get_sdr_timings(&chip->data_interface); 3248 WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0)); 3249 } 3250 3251 /** 3252 * nand_do_read_ops - [INTERN] Read data with ECC 3253 * @chip: NAND chip object 3254 * @from: offset to read from 3255 * @ops: oob ops structure 3256 * 3257 * Internal function. Called with chip held. 3258 */ 3259 static int nand_do_read_ops(struct nand_chip *chip, loff_t from, 3260 struct mtd_oob_ops *ops) 3261 { 3262 int chipnr, page, realpage, col, bytes, aligned, oob_required; 3263 struct mtd_info *mtd = nand_to_mtd(chip); 3264 int ret = 0; 3265 uint32_t readlen = ops->len; 3266 uint32_t oobreadlen = ops->ooblen; 3267 uint32_t max_oobsize = mtd_oobavail(mtd, ops); 3268 3269 uint8_t *bufpoi, *oob, *buf; 3270 int use_bounce_buf; 3271 unsigned int max_bitflips = 0; 3272 int retry_mode = 0; 3273 bool ecc_fail = false; 3274 3275 chipnr = (int)(from >> chip->chip_shift); 3276 nand_select_target(chip, chipnr); 3277 3278 realpage = (int)(from >> chip->page_shift); 3279 page = realpage & chip->pagemask; 3280 3281 col = (int)(from & (mtd->writesize - 1)); 3282 3283 buf = ops->datbuf; 3284 oob = ops->oobbuf; 3285 oob_required = oob ? 
1 : 0; 3286 3287 while (1) { 3288 unsigned int ecc_failures = mtd->ecc_stats.failed; 3289 3290 bytes = min(mtd->writesize - col, readlen); 3291 aligned = (bytes == mtd->writesize); 3292 3293 if (!aligned) 3294 use_bounce_buf = 1; 3295 else if (chip->options & NAND_USES_DMA) 3296 use_bounce_buf = !virt_addr_valid(buf) || 3297 !IS_ALIGNED((unsigned long)buf, 3298 chip->buf_align); 3299 else 3300 use_bounce_buf = 0; 3301 3302 /* Is the current page in the buffer? */ 3303 if (realpage != chip->pagecache.page || oob) { 3304 bufpoi = use_bounce_buf ? chip->data_buf : buf; 3305 3306 if (use_bounce_buf && aligned) 3307 pr_debug("%s: using read bounce buffer for buf@%p\n", 3308 __func__, buf); 3309 3310 read_retry: 3311 /* 3312 * Now read the page into the buffer. Absent an error, 3313 * the read methods return max bitflips per ecc step. 3314 */ 3315 if (unlikely(ops->mode == MTD_OPS_RAW)) 3316 ret = chip->ecc.read_page_raw(chip, bufpoi, 3317 oob_required, 3318 page); 3319 else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) && 3320 !oob) 3321 ret = chip->ecc.read_subpage(chip, col, bytes, 3322 bufpoi, page); 3323 else 3324 ret = chip->ecc.read_page(chip, bufpoi, 3325 oob_required, page); 3326 if (ret < 0) { 3327 if (use_bounce_buf) 3328 /* Invalidate page cache */ 3329 chip->pagecache.page = -1; 3330 break; 3331 } 3332 3333 /* 3334 * Copy back the data in the initial buffer when reading 3335 * partial pages or when a bounce buffer is required. 
3336 */ 3337 if (use_bounce_buf) { 3338 if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && 3339 !(mtd->ecc_stats.failed - ecc_failures) && 3340 (ops->mode != MTD_OPS_RAW)) { 3341 chip->pagecache.page = realpage; 3342 chip->pagecache.bitflips = ret; 3343 } else { 3344 /* Invalidate page cache */ 3345 chip->pagecache.page = -1; 3346 } 3347 memcpy(buf, bufpoi + col, bytes); 3348 } 3349 3350 if (unlikely(oob)) { 3351 int toread = min(oobreadlen, max_oobsize); 3352 3353 if (toread) { 3354 oob = nand_transfer_oob(chip, oob, ops, 3355 toread); 3356 oobreadlen -= toread; 3357 } 3358 } 3359 3360 nand_wait_readrdy(chip); 3361 3362 if (mtd->ecc_stats.failed - ecc_failures) { 3363 if (retry_mode + 1 < chip->read_retries) { 3364 retry_mode++; 3365 ret = nand_setup_read_retry(chip, 3366 retry_mode); 3367 if (ret < 0) 3368 break; 3369 3370 /* Reset failures; retry */ 3371 mtd->ecc_stats.failed = ecc_failures; 3372 goto read_retry; 3373 } else { 3374 /* No more retry modes; real failure */ 3375 ecc_fail = true; 3376 } 3377 } 3378 3379 buf += bytes; 3380 max_bitflips = max_t(unsigned int, max_bitflips, ret); 3381 } else { 3382 memcpy(buf, chip->data_buf + col, bytes); 3383 buf += bytes; 3384 max_bitflips = max_t(unsigned int, max_bitflips, 3385 chip->pagecache.bitflips); 3386 } 3387 3388 readlen -= bytes; 3389 3390 /* Reset to retry mode 0 */ 3391 if (retry_mode) { 3392 ret = nand_setup_read_retry(chip, 0); 3393 if (ret < 0) 3394 break; 3395 retry_mode = 0; 3396 } 3397 3398 if (!readlen) 3399 break; 3400 3401 /* For subsequent reads align to page boundary */ 3402 col = 0; 3403 /* Increment page address */ 3404 realpage++; 3405 3406 page = realpage & chip->pagemask; 3407 /* Check, if we cross a chip boundary */ 3408 if (!page) { 3409 chipnr++; 3410 nand_deselect_target(chip); 3411 nand_select_target(chip, chipnr); 3412 } 3413 } 3414 nand_deselect_target(chip); 3415 3416 ops->retlen = ops->len - (size_t) readlen; 3417 if (oob) 3418 ops->oobretlen = ops->ooblen - oobreadlen; 3419 3420 if 
(ret < 0) 3421 return ret; 3422 3423 if (ecc_fail) 3424 return -EBADMSG; 3425 3426 return max_bitflips; 3427 } 3428 3429 /** 3430 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function 3431 * @chip: nand chip info structure 3432 * @page: page number to read 3433 */ 3434 int nand_read_oob_std(struct nand_chip *chip, int page) 3435 { 3436 struct mtd_info *mtd = nand_to_mtd(chip); 3437 3438 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); 3439 } 3440 EXPORT_SYMBOL(nand_read_oob_std); 3441 3442 /** 3443 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC 3444 * with syndromes 3445 * @chip: nand chip info structure 3446 * @page: page number to read 3447 */ 3448 static int nand_read_oob_syndrome(struct nand_chip *chip, int page) 3449 { 3450 struct mtd_info *mtd = nand_to_mtd(chip); 3451 int length = mtd->oobsize; 3452 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 3453 int eccsize = chip->ecc.size; 3454 uint8_t *bufpoi = chip->oob_poi; 3455 int i, toread, sndrnd = 0, pos, ret; 3456 3457 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0); 3458 if (ret) 3459 return ret; 3460 3461 for (i = 0; i < chip->ecc.steps; i++) { 3462 if (sndrnd) { 3463 int ret; 3464 3465 pos = eccsize + i * (eccsize + chunk); 3466 if (mtd->writesize > 512) 3467 ret = nand_change_read_column_op(chip, pos, 3468 NULL, 0, 3469 false); 3470 else 3471 ret = nand_read_page_op(chip, page, pos, NULL, 3472 0); 3473 3474 if (ret) 3475 return ret; 3476 } else 3477 sndrnd = 1; 3478 toread = min_t(int, length, chunk); 3479 3480 ret = nand_read_data_op(chip, bufpoi, toread, false, false); 3481 if (ret) 3482 return ret; 3483 3484 bufpoi += toread; 3485 length -= toread; 3486 } 3487 if (length > 0) { 3488 ret = nand_read_data_op(chip, bufpoi, length, false, false); 3489 if (ret) 3490 return ret; 3491 } 3492 3493 return 0; 3494 } 3495 3496 /** 3497 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function 
3498 * @chip: nand chip info structure 3499 * @page: page number to write 3500 */ 3501 int nand_write_oob_std(struct nand_chip *chip, int page) 3502 { 3503 struct mtd_info *mtd = nand_to_mtd(chip); 3504 3505 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi, 3506 mtd->oobsize); 3507 } 3508 EXPORT_SYMBOL(nand_write_oob_std); 3509 3510 /** 3511 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC 3512 * with syndrome - only for large page flash 3513 * @chip: nand chip info structure 3514 * @page: page number to write 3515 */ 3516 static int nand_write_oob_syndrome(struct nand_chip *chip, int page) 3517 { 3518 struct mtd_info *mtd = nand_to_mtd(chip); 3519 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 3520 int eccsize = chip->ecc.size, length = mtd->oobsize; 3521 int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps; 3522 const uint8_t *bufpoi = chip->oob_poi; 3523 3524 /* 3525 * data-ecc-data-ecc ... ecc-oob 3526 * or 3527 * data-pad-ecc-pad-data-pad .... 
ecc-pad-oob 3528 */ 3529 if (!chip->ecc.prepad && !chip->ecc.postpad) { 3530 pos = steps * (eccsize + chunk); 3531 steps = 0; 3532 } else 3533 pos = eccsize; 3534 3535 ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0); 3536 if (ret) 3537 return ret; 3538 3539 for (i = 0; i < steps; i++) { 3540 if (sndcmd) { 3541 if (mtd->writesize <= 512) { 3542 uint32_t fill = 0xFFFFFFFF; 3543 3544 len = eccsize; 3545 while (len > 0) { 3546 int num = min_t(int, len, 4); 3547 3548 ret = nand_write_data_op(chip, &fill, 3549 num, false); 3550 if (ret) 3551 return ret; 3552 3553 len -= num; 3554 } 3555 } else { 3556 pos = eccsize + i * (eccsize + chunk); 3557 ret = nand_change_write_column_op(chip, pos, 3558 NULL, 0, 3559 false); 3560 if (ret) 3561 return ret; 3562 } 3563 } else 3564 sndcmd = 1; 3565 len = min_t(int, length, chunk); 3566 3567 ret = nand_write_data_op(chip, bufpoi, len, false); 3568 if (ret) 3569 return ret; 3570 3571 bufpoi += len; 3572 length -= len; 3573 } 3574 if (length > 0) { 3575 ret = nand_write_data_op(chip, bufpoi, length, false); 3576 if (ret) 3577 return ret; 3578 } 3579 3580 return nand_prog_page_end_op(chip); 3581 } 3582 3583 /** 3584 * nand_do_read_oob - [INTERN] NAND read out-of-band 3585 * @chip: NAND chip object 3586 * @from: offset to read from 3587 * @ops: oob operations description structure 3588 * 3589 * NAND read out-of-band data from the spare area. 
3590 */ 3591 static int nand_do_read_oob(struct nand_chip *chip, loff_t from, 3592 struct mtd_oob_ops *ops) 3593 { 3594 struct mtd_info *mtd = nand_to_mtd(chip); 3595 unsigned int max_bitflips = 0; 3596 int page, realpage, chipnr; 3597 struct mtd_ecc_stats stats; 3598 int readlen = ops->ooblen; 3599 int len; 3600 uint8_t *buf = ops->oobbuf; 3601 int ret = 0; 3602 3603 pr_debug("%s: from = 0x%08Lx, len = %i\n", 3604 __func__, (unsigned long long)from, readlen); 3605 3606 stats = mtd->ecc_stats; 3607 3608 len = mtd_oobavail(mtd, ops); 3609 3610 chipnr = (int)(from >> chip->chip_shift); 3611 nand_select_target(chip, chipnr); 3612 3613 /* Shift to get page */ 3614 realpage = (int)(from >> chip->page_shift); 3615 page = realpage & chip->pagemask; 3616 3617 while (1) { 3618 if (ops->mode == MTD_OPS_RAW) 3619 ret = chip->ecc.read_oob_raw(chip, page); 3620 else 3621 ret = chip->ecc.read_oob(chip, page); 3622 3623 if (ret < 0) 3624 break; 3625 3626 len = min(len, readlen); 3627 buf = nand_transfer_oob(chip, buf, ops, len); 3628 3629 nand_wait_readrdy(chip); 3630 3631 max_bitflips = max_t(unsigned int, max_bitflips, ret); 3632 3633 readlen -= len; 3634 if (!readlen) 3635 break; 3636 3637 /* Increment page address */ 3638 realpage++; 3639 3640 page = realpage & chip->pagemask; 3641 /* Check, if we cross a chip boundary */ 3642 if (!page) { 3643 chipnr++; 3644 nand_deselect_target(chip); 3645 nand_select_target(chip, chipnr); 3646 } 3647 } 3648 nand_deselect_target(chip); 3649 3650 ops->oobretlen = ops->ooblen - readlen; 3651 3652 if (ret < 0) 3653 return ret; 3654 3655 if (mtd->ecc_stats.failed - stats.failed) 3656 return -EBADMSG; 3657 3658 return max_bitflips; 3659 } 3660 3661 /** 3662 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band 3663 * @mtd: MTD device structure 3664 * @from: offset to read from 3665 * @ops: oob operation description structure 3666 * 3667 * NAND read data and/or out-of-band data. 
3668 */ 3669 static int nand_read_oob(struct mtd_info *mtd, loff_t from, 3670 struct mtd_oob_ops *ops) 3671 { 3672 struct nand_chip *chip = mtd_to_nand(mtd); 3673 int ret; 3674 3675 ops->retlen = 0; 3676 3677 if (ops->mode != MTD_OPS_PLACE_OOB && 3678 ops->mode != MTD_OPS_AUTO_OOB && 3679 ops->mode != MTD_OPS_RAW) 3680 return -ENOTSUPP; 3681 3682 ret = nand_get_device(chip); 3683 if (ret) 3684 return ret; 3685 3686 if (!ops->datbuf) 3687 ret = nand_do_read_oob(chip, from, ops); 3688 else 3689 ret = nand_do_read_ops(chip, from, ops); 3690 3691 nand_release_device(chip); 3692 return ret; 3693 } 3694 3695 /** 3696 * nand_write_page_raw_notsupp - dummy raw page write function 3697 * @chip: nand chip info structure 3698 * @buf: data buffer 3699 * @oob_required: must write chip->oob_poi to OOB 3700 * @page: page number to write 3701 * 3702 * Returns -ENOTSUPP unconditionally. 3703 */ 3704 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf, 3705 int oob_required, int page) 3706 { 3707 return -ENOTSUPP; 3708 } 3709 3710 /** 3711 * nand_write_page_raw - [INTERN] raw page write function 3712 * @chip: nand chip info structure 3713 * @buf: data buffer 3714 * @oob_required: must write chip->oob_poi to OOB 3715 * @page: page number to write 3716 * 3717 * Not for syndrome calculating ECC controllers, which use a special oob layout. 
3718 */ 3719 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, 3720 int oob_required, int page) 3721 { 3722 struct mtd_info *mtd = nand_to_mtd(chip); 3723 int ret; 3724 3725 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize); 3726 if (ret) 3727 return ret; 3728 3729 if (oob_required) { 3730 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, 3731 false); 3732 if (ret) 3733 return ret; 3734 } 3735 3736 return nand_prog_page_end_op(chip); 3737 } 3738 EXPORT_SYMBOL(nand_write_page_raw); 3739 3740 /** 3741 * nand_monolithic_write_page_raw - Monolithic page write in raw mode 3742 * @chip: NAND chip info structure 3743 * @buf: data buffer to write 3744 * @oob_required: must write chip->oob_poi to OOB 3745 * @page: page number to write 3746 * 3747 * This is a raw page write, ie. without any error detection/correction. 3748 * Monolithic means we are requesting all the relevant data (main plus 3749 * eventually OOB) to be sent over the bus and effectively programmed 3750 * into the NAND chip arrays in a single operation. This is an 3751 * alternative to nand_write_page_raw(), which first sends the main 3752 * data, then eventually send the OOB data by latching more data 3753 * cycles on the NAND bus, and finally sends the program command to 3754 * synchronyze the NAND chip cache. 
3755 */ 3756 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf, 3757 int oob_required, int page) 3758 { 3759 struct mtd_info *mtd = nand_to_mtd(chip); 3760 unsigned int size = mtd->writesize; 3761 u8 *write_buf = (u8 *)buf; 3762 3763 if (oob_required) { 3764 size += mtd->oobsize; 3765 3766 if (buf != chip->data_buf) { 3767 write_buf = nand_get_data_buf(chip); 3768 memcpy(write_buf, buf, mtd->writesize); 3769 } 3770 } 3771 3772 return nand_prog_page_op(chip, page, 0, write_buf, size); 3773 } 3774 EXPORT_SYMBOL(nand_monolithic_write_page_raw); 3775 3776 /** 3777 * nand_write_page_raw_syndrome - [INTERN] raw page write function 3778 * @chip: nand chip info structure 3779 * @buf: data buffer 3780 * @oob_required: must write chip->oob_poi to OOB 3781 * @page: page number to write 3782 * 3783 * We need a special oob layout and handling even when ECC isn't checked. 3784 */ 3785 static int nand_write_page_raw_syndrome(struct nand_chip *chip, 3786 const uint8_t *buf, int oob_required, 3787 int page) 3788 { 3789 struct mtd_info *mtd = nand_to_mtd(chip); 3790 int eccsize = chip->ecc.size; 3791 int eccbytes = chip->ecc.bytes; 3792 uint8_t *oob = chip->oob_poi; 3793 int steps, size, ret; 3794 3795 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 3796 if (ret) 3797 return ret; 3798 3799 for (steps = chip->ecc.steps; steps > 0; steps--) { 3800 ret = nand_write_data_op(chip, buf, eccsize, false); 3801 if (ret) 3802 return ret; 3803 3804 buf += eccsize; 3805 3806 if (chip->ecc.prepad) { 3807 ret = nand_write_data_op(chip, oob, chip->ecc.prepad, 3808 false); 3809 if (ret) 3810 return ret; 3811 3812 oob += chip->ecc.prepad; 3813 } 3814 3815 ret = nand_write_data_op(chip, oob, eccbytes, false); 3816 if (ret) 3817 return ret; 3818 3819 oob += eccbytes; 3820 3821 if (chip->ecc.postpad) { 3822 ret = nand_write_data_op(chip, oob, chip->ecc.postpad, 3823 false); 3824 if (ret) 3825 return ret; 3826 3827 oob += chip->ecc.postpad; 3828 } 3829 } 3830 3831 size = 
mtd->oobsize - (oob - chip->oob_poi); 3832 if (size) { 3833 ret = nand_write_data_op(chip, oob, size, false); 3834 if (ret) 3835 return ret; 3836 } 3837 3838 return nand_prog_page_end_op(chip); 3839 } 3840 /** 3841 * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function 3842 * @chip: nand chip info structure 3843 * @buf: data buffer 3844 * @oob_required: must write chip->oob_poi to OOB 3845 * @page: page number to write 3846 */ 3847 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf, 3848 int oob_required, int page) 3849 { 3850 struct mtd_info *mtd = nand_to_mtd(chip); 3851 int i, eccsize = chip->ecc.size, ret; 3852 int eccbytes = chip->ecc.bytes; 3853 int eccsteps = chip->ecc.steps; 3854 uint8_t *ecc_calc = chip->ecc.calc_buf; 3855 const uint8_t *p = buf; 3856 3857 /* Software ECC calculation */ 3858 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 3859 chip->ecc.calculate(chip, p, &ecc_calc[i]); 3860 3861 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 3862 chip->ecc.total); 3863 if (ret) 3864 return ret; 3865 3866 return chip->ecc.write_page_raw(chip, buf, 1, page); 3867 } 3868 3869 /** 3870 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function 3871 * @chip: nand chip info structure 3872 * @buf: data buffer 3873 * @oob_required: must write chip->oob_poi to OOB 3874 * @page: page number to write 3875 */ 3876 static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf, 3877 int oob_required, int page) 3878 { 3879 struct mtd_info *mtd = nand_to_mtd(chip); 3880 int i, eccsize = chip->ecc.size, ret; 3881 int eccbytes = chip->ecc.bytes; 3882 int eccsteps = chip->ecc.steps; 3883 uint8_t *ecc_calc = chip->ecc.calc_buf; 3884 const uint8_t *p = buf; 3885 3886 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 3887 if (ret) 3888 return ret; 3889 3890 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 3891 chip->ecc.hwctl(chip, 
NAND_ECC_WRITE); 3892 3893 ret = nand_write_data_op(chip, p, eccsize, false); 3894 if (ret) 3895 return ret; 3896 3897 chip->ecc.calculate(chip, p, &ecc_calc[i]); 3898 } 3899 3900 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 3901 chip->ecc.total); 3902 if (ret) 3903 return ret; 3904 3905 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false); 3906 if (ret) 3907 return ret; 3908 3909 return nand_prog_page_end_op(chip); 3910 } 3911 3912 3913 /** 3914 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write 3915 * @chip: nand chip info structure 3916 * @offset: column address of subpage within the page 3917 * @data_len: data length 3918 * @buf: data buffer 3919 * @oob_required: must write chip->oob_poi to OOB 3920 * @page: page number to write 3921 */ 3922 static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset, 3923 uint32_t data_len, const uint8_t *buf, 3924 int oob_required, int page) 3925 { 3926 struct mtd_info *mtd = nand_to_mtd(chip); 3927 uint8_t *oob_buf = chip->oob_poi; 3928 uint8_t *ecc_calc = chip->ecc.calc_buf; 3929 int ecc_size = chip->ecc.size; 3930 int ecc_bytes = chip->ecc.bytes; 3931 int ecc_steps = chip->ecc.steps; 3932 uint32_t start_step = offset / ecc_size; 3933 uint32_t end_step = (offset + data_len - 1) / ecc_size; 3934 int oob_bytes = mtd->oobsize / ecc_steps; 3935 int step, ret; 3936 3937 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 3938 if (ret) 3939 return ret; 3940 3941 for (step = 0; step < ecc_steps; step++) { 3942 /* configure controller for WRITE access */ 3943 chip->ecc.hwctl(chip, NAND_ECC_WRITE); 3944 3945 /* write data (untouched subpages already masked by 0xFF) */ 3946 ret = nand_write_data_op(chip, buf, ecc_size, false); 3947 if (ret) 3948 return ret; 3949 3950 /* mask ECC of un-touched subpages by padding 0xFF */ 3951 if ((step < start_step) || (step > end_step)) 3952 memset(ecc_calc, 0xff, ecc_bytes); 3953 else 3954 chip->ecc.calculate(chip, buf, 
ecc_calc); 3955 3956 /* mask OOB of un-touched subpages by padding 0xFF */ 3957 /* if oob_required, preserve OOB metadata of written subpage */ 3958 if (!oob_required || (step < start_step) || (step > end_step)) 3959 memset(oob_buf, 0xff, oob_bytes); 3960 3961 buf += ecc_size; 3962 ecc_calc += ecc_bytes; 3963 oob_buf += oob_bytes; 3964 } 3965 3966 /* copy calculated ECC for whole page to chip->buffer->oob */ 3967 /* this include masked-value(0xFF) for unwritten subpages */ 3968 ecc_calc = chip->ecc.calc_buf; 3969 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, 3970 chip->ecc.total); 3971 if (ret) 3972 return ret; 3973 3974 /* write OOB buffer to NAND device */ 3975 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false); 3976 if (ret) 3977 return ret; 3978 3979 return nand_prog_page_end_op(chip); 3980 } 3981 3982 3983 /** 3984 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write 3985 * @chip: nand chip info structure 3986 * @buf: data buffer 3987 * @oob_required: must write chip->oob_poi to OOB 3988 * @page: page number to write 3989 * 3990 * The hw generator calculates the error syndrome automatically. Therefore we 3991 * need a special oob layout and handling. 
3992 */ 3993 static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf, 3994 int oob_required, int page) 3995 { 3996 struct mtd_info *mtd = nand_to_mtd(chip); 3997 int i, eccsize = chip->ecc.size; 3998 int eccbytes = chip->ecc.bytes; 3999 int eccsteps = chip->ecc.steps; 4000 const uint8_t *p = buf; 4001 uint8_t *oob = chip->oob_poi; 4002 int ret; 4003 4004 ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); 4005 if (ret) 4006 return ret; 4007 4008 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 4009 chip->ecc.hwctl(chip, NAND_ECC_WRITE); 4010 4011 ret = nand_write_data_op(chip, p, eccsize, false); 4012 if (ret) 4013 return ret; 4014 4015 if (chip->ecc.prepad) { 4016 ret = nand_write_data_op(chip, oob, chip->ecc.prepad, 4017 false); 4018 if (ret) 4019 return ret; 4020 4021 oob += chip->ecc.prepad; 4022 } 4023 4024 chip->ecc.calculate(chip, p, oob); 4025 4026 ret = nand_write_data_op(chip, oob, eccbytes, false); 4027 if (ret) 4028 return ret; 4029 4030 oob += eccbytes; 4031 4032 if (chip->ecc.postpad) { 4033 ret = nand_write_data_op(chip, oob, chip->ecc.postpad, 4034 false); 4035 if (ret) 4036 return ret; 4037 4038 oob += chip->ecc.postpad; 4039 } 4040 } 4041 4042 /* Calculate remaining oob bytes */ 4043 i = mtd->oobsize - (oob - chip->oob_poi); 4044 if (i) { 4045 ret = nand_write_data_op(chip, oob, i, false); 4046 if (ret) 4047 return ret; 4048 } 4049 4050 return nand_prog_page_end_op(chip); 4051 } 4052 4053 /** 4054 * nand_write_page - write one page 4055 * @chip: NAND chip descriptor 4056 * @offset: address offset within the page 4057 * @data_len: length of actual data to be written 4058 * @buf: the data to write 4059 * @oob_required: must write chip->oob_poi to OOB 4060 * @page: page number to write 4061 * @raw: use _raw version of write_page 4062 */ 4063 static int nand_write_page(struct nand_chip *chip, uint32_t offset, 4064 int data_len, const uint8_t *buf, int oob_required, 4065 int page, int raw) 4066 { 4067 struct 
mtd_info *mtd = nand_to_mtd(chip);
	int status, subpage;

	/*
	 * Subpage programming is only attempted when the chip allows it and
	 * the ECC controller implements ->write_subpage().
	 */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);
	else
		subpage = 0;

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(chip, buf, oob_required,
						  page);
	else if (subpage)
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
	else
		status = chip->ecc.write_page(chip, buf, oob_required, page);

	if (status < 0)
		return status;

	return 0;
}

/* True when @x is not aligned to the (sub)page write granularity */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Byte offset of @to inside its first page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			/* Pad untouched bytes with 0xff (NAND erased state) */
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages are always written from their start */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}

/**
 * panic_nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC. Used when performing writes in interrupt context, this
 * may for example be called by mtdoops when writing an oops while in panic.
 */
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(to >> chip->chip_shift);
	struct mtd_oob_ops ops;
	int ret;

	/* No locking here: we may be in panic context with no scheduler */
	nand_select_target(chip, chipnr);

	/* Wait for the device to get ready */
	panic_nand_wait(chip, 400);

	memset(&ops, 0, sizeof(ops));
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;
	ops.mode = MTD_OPS_PLACE_OOB;

	ret = nand_do_write_ops(chip, to, &ops);

	*retlen = ops.retlen;
	return ret;
}

/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	ops->retlen = 0;

	ret = nand_get_device(chip);
	if (ret)
		return ret;

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;

	default:
		/* Unknown mode: release the device and return 0 bytes written */
		goto out;
	}

	if (!ops->datbuf)
		ret = nand_do_write_oob(chip, to, ops);
	else
		ret = nand_do_write_ops(chip, to, ops);

out:
	nand_release_device(chip);
	return ret;
}

/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}

/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	/* Start address and length must both be eraseblock aligned */
	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				__func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* Convert the page address to a per-target eraseblock index */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	WARN_ON(nand_get_device(chip));
	/* Release it and go back */
	nand_release_device(chip);
}

/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int chipnr = (int)(offs >> chip->chip_shift);
	int ret;

	/* Select the NAND device */
	ret = nand_get_device(chip);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);

	return ret;
}

/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int ret;

	ret = nand_block_isbad(mtd, ofs);
	if (ret) {
		/* If it was bad already, return success and do nothing */
		if (ret > 0)
			return 0;
		return ret;
	}

	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}

/**
 * nand_suspend - [MTD Interface] Suspend the NAND flash
 * @mtd: MTD device structure
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_suspend(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	mutex_lock(&chip->lock);
	if (chip->suspend)
		ret = chip->suspend(chip);
	if (!ret)
		chip->suspended = 1;
	mutex_unlock(&chip->lock);

	return ret;
}

/**
 * nand_resume - [MTD Interface] Resume the NAND flash
 * @mtd: MTD device structure
 */
static void nand_resume(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	mutex_lock(&chip->lock);
	if (chip->suspended) {
		if (chip->resume)
			chip->resume(chip);
		chip->suspended = 0;
	} else {
		pr_err("%s called for a chip which is not in suspended state\n",
		       __func__);
	}
	mutex_unlock(&chip->lock);
}

/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}

/**
 * nand_lock - [MTD Interface] Lock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to lock (must be a multiple of block/page size)
 */
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->lock_area)
		return -ENOTSUPP;

	return chip->lock_area(chip, ofs, len);
}

/**
 * nand_unlock - [MTD Interface] Unlock the NAND flash
 * @mtd: MTD device structure
 * @ofs: offset byte address
 * @len: number of bytes to unlock (must be a multiple of block/page size)
 */
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->unlock_area)
		return -ENOTSUPP;

	return chip->unlock_area(chip, ofs, len);
}

/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	nand_legacy_set_defaults(chip);

	if (!chip->buf_align)
		chip->buf_align = 1;
}

/* Sanitize ONFI strings so we can safely print them */
void sanitize_string(uint8_t *s, size_t len)
{
	ssize_t i;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/* Remove trailing spaces */
	strim(s);
}

/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Check if an ID string is repeated within a given sequence of bytes at
 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
 * if the repetition has a period of @period; otherwise, returns zero.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int i, j;
	for (i = 0; i < period; i++)
		for (j = i + period; j < arrlen; j += period)
			if (id_data[i] != id_data[j])
				return 0;
	return 1;
}

/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string, according to known wraparound/trailing
 * zero patterns. If no pattern exists, returns the length of the array.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int last_nonzero, period;

	/* Find last non-zero byte */
	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
		if (id_data[last_nonzero])
			break;

	/* All zeros */
	if (last_nonzero < 0)
		return 0;

	/* Calculate wraparound period */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))
			break;

	/* There's a repeated pattern */
	if (period < arrlen)
		return period;

	/* There are trailing zeros */
	if (last_nonzero < arrlen - 1)
		return last_nonzero + 1;

	/* No pattern detected */
	return arrlen;
}

/* Extract the bits of per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
{
	int bits;

	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
	bits >>= NAND_CI_CELLTYPE_SHIFT;
	/* Field encodes bits-per-cell minus one (0 -> SLC, 1 -> MLC, ...) */
	return bits + 1;
}

/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);

/*
 * Old devices have chip data hardcoded in the device ID table. nand_decode_id
 * decodes a matching ID table entry and assigns the MTD size parameters for
 * the chip.
 */
static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;

	memorg = nanddev_get_memorg(&chip->base);

	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
	mtd->erasesize = type->erasesize;
	memorg->pagesize = type->pagesize;
	mtd->writesize = memorg->pagesize;
	/* Legacy small-page chips carry OOB as 1/32 of the page size */
	memorg->oobsize = memorg->pagesize / 32;
	mtd->oobsize = memorg->oobsize;

	/* All legacy ID NAND are small-page, SLC */
	memorg->bits_per_cell = 1;
}

/*
 * Set the bad block marker/indicator (BBM/BBI) patterns according to some
 * heuristic patterns using various detected parameters (e.g., manufacturer,
 * page size, cell-type information).
 */
static void nand_decode_bbm_options(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Set the bad block position */
	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
		chip->badblockpos = NAND_BBM_POS_LARGE;
	else
		chip->badblockpos = NAND_BBM_POS_SMALL;
}

/* Full-id entries carry a complete ID string instead of just a dev_id */
static inline bool is_full_id_nand(struct nand_flash_dev *type)
{
	return type->id_len;
}

static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
		chip->base.eccreq.step_size = NAND_ECC_STEP(type);
		chip->onfi_timing_mode_default =
			type->onfi_timing_mode_default;

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}

/*
 * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
 * compliant and does not have a full-id or legacy-id entry in the nand_ids
 * table.
 */
static void nand_manufacturer_detect(struct nand_chip *chip)
{
	/*
	 * Try manufacturer detection if available and use
	 * nand_decode_ext_id() otherwise.
	 */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->detect) {
		struct nand_memory_organization *memorg;

		memorg = nanddev_get_memorg(&chip->base);

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
		chip->manufacturer.desc->ops->detect(chip);
	} else {
		nand_decode_ext_id(chip);
	}
}

/*
 * Manufacturer initialization. This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific initialization code in
 * their ->init() hook.
 */
static int nand_manufacturer_init(struct nand_chip *chip)
{
	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
	    !chip->manufacturer.desc->ops->init)
		return 0;

	return chip->manufacturer.desc->ops->init(chip);
}

/*
 * Manufacturer cleanup.
This function is called for all NANDs including
 * ONFI and JEDEC compliant ones.
 * Manufacturer drivers should put all their specific cleanup code in their
 * ->cleanup() hook.
 */
static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
	/* Release manufacturer private data */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->cleanup)
		chip->manufacturer.desc->ops->cleanup(chip);
}

static const char *
nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
{
	return manufacturer ? manufacturer->name : "Unknown";
}

/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer *manufacturer;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer = nand_get_manufacturer(maf_id);
	chip->manufacturer.desc = manufacturer;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
		DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
				   memorg->pagesize *
				   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32 bits: handle targets of 4GiB and beyond */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}

static const char * const nand_ecc_modes[] = {
	[NAND_ECC_NONE]		= "none",
	[NAND_ECC_SOFT]		= "soft",
	[NAND_ECC_HW]		= "hw",
	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
	[NAND_ECC_ON_DIE]	= "on-die",
};

static int of_get_nand_ecc_mode(struct device_node *np)
{
	const char *pm;
	int err, i;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
		if (!strcasecmp(pm, nand_ecc_modes[i]))
			return i;

	/*
	 * For backward compatibility we support few obsoleted values that don't
	 * have their mappings into nand_ecc_modes_t anymore (they were merged
	 * with other enums).
	 */
	if (!strcasecmp(pm, "soft_bch"))
		return NAND_ECC_SOFT;

	return -ENODEV;
}

static const char * const nand_ecc_algos[] = {
	[NAND_ECC_HAMMING]	= "hamming",
	[NAND_ECC_BCH]		= "bch",
	[NAND_ECC_RS]		= "rs",
};

static int of_get_nand_ecc_algo(struct device_node *np)
{
	const char *pm;
	int err, i;

	err = of_property_read_string(np, "nand-ecc-algo", &pm);
	if (!err) {
		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
			if (!strcasecmp(pm, nand_ecc_algos[i]))
				return i;
		return -ENODEV;
	}

	/*
	 * For backward compatibility we also read "nand-ecc-mode" checking
	 * for some obsoleted values that were specifying ECC algorithm.
 */
	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err < 0)
		return err;

	if (!strcasecmp(pm, "soft"))
		return NAND_ECC_HAMMING;
	else if (!strcasecmp(pm, "soft_bch"))
		return NAND_ECC_BCH;

	return -ENODEV;
}

/* Returns the DT-provided step size, or a negative errno if absent */
static int of_get_nand_ecc_step_size(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
	return ret ? ret : val;
}

/* Returns the DT-provided strength, or a negative errno if absent */
static int of_get_nand_ecc_strength(struct device_node *np)
{
	int ret;
	u32 val;

	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
	return ret ? ret : val;
}

static int of_get_nand_bus_width(struct device_node *np)
{
	u32 val;

	/* Default to 8 bits when the property is missing */
	if (of_property_read_u32(np, "nand-bus-width", &val))
		return 8;

	switch (val) {
	case 8:
	case 16:
		return val;
	default:
		return -EIO;
	}
}

static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}

static int nand_dt_init(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	int ecc_mode, ecc_algo, ecc_strength, ecc_step;

	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	ecc_mode = of_get_nand_ecc_mode(dn);
	ecc_algo = of_get_nand_ecc_algo(dn);
	ecc_strength = of_get_nand_ecc_strength(dn);
	ecc_step = of_get_nand_ecc_step_size(dn);

	/* Only override the defaults for properties actually present */
	if (ecc_mode >= 0)
		chip->ecc.mode = ecc_mode;

	if (ecc_algo >= 0)
		chip->ecc.algo = ecc_algo;

	if (ecc_strength >= 0)
		chip->ecc.strength = ecc_strength;

	if (ecc_step > 0)
		chip->ecc.size = ecc_step;

	if (of_property_read_bool(dn, "nand-ecc-maximize"))
		chip->ecc.options |= NAND_ECC_MAXIMIZE;

	return 0;
}

/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);

	/* Enforce the right timings for reset/detection */
	onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);

	ret = nand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}

static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}

static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc =
&chip->ecc;

	/* Only meaningful when software ECC was requested */
	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* SW Hamming: 3 ECC bytes per 256-byte step, 1 bit correctable */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
		    ecc->options & NAND_ECC_MAXIMIZE) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			/*
			 * Each correctable bit costs fls(8 * ecc->size) ECC
			 * bits; use as many as fit into the per-step budget.
			 */
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}

/**
 * nand_check_ecc_caps - check the sanity of preset ECC settings
 * @chip: nand chip info structure
 * @caps: ECC caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * When ECC step size and strength are already set, check if they are supported
 * by the controller and the calculated ECC bytes fit within the chip's OOB.
 * On success, the calculated ECC bytes is set.
5384 */ 5385 static int 5386 nand_check_ecc_caps(struct nand_chip *chip, 5387 const struct nand_ecc_caps *caps, int oobavail) 5388 { 5389 struct mtd_info *mtd = nand_to_mtd(chip); 5390 const struct nand_ecc_step_info *stepinfo; 5391 int preset_step = chip->ecc.size; 5392 int preset_strength = chip->ecc.strength; 5393 int ecc_bytes, nsteps = mtd->writesize / preset_step; 5394 int i, j; 5395 5396 for (i = 0; i < caps->nstepinfos; i++) { 5397 stepinfo = &caps->stepinfos[i]; 5398 5399 if (stepinfo->stepsize != preset_step) 5400 continue; 5401 5402 for (j = 0; j < stepinfo->nstrengths; j++) { 5403 if (stepinfo->strengths[j] != preset_strength) 5404 continue; 5405 5406 ecc_bytes = caps->calc_ecc_bytes(preset_step, 5407 preset_strength); 5408 if (WARN_ON_ONCE(ecc_bytes < 0)) 5409 return ecc_bytes; 5410 5411 if (ecc_bytes * nsteps > oobavail) { 5412 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB", 5413 preset_step, preset_strength); 5414 return -ENOSPC; 5415 } 5416 5417 chip->ecc.bytes = ecc_bytes; 5418 5419 return 0; 5420 } 5421 } 5422 5423 pr_err("ECC (step, strength) = (%d, %d) not supported on this controller", 5424 preset_step, preset_strength); 5425 5426 return -ENOTSUPP; 5427 } 5428 5429 /** 5430 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes 5431 * @chip: nand chip info structure 5432 * @caps: ECC engine caps info structure 5433 * @oobavail: OOB size that the ECC engine can use 5434 * 5435 * If a chip's ECC requirement is provided, try to meet it with the least 5436 * number of ECC bytes (i.e. with the largest number of OOB-free bytes). 5437 * On success, the chosen ECC settings are set. 
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = chip->base.eccreq.step_size;
	int req_strength = chip->base.eccreq.strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			/* Steps must tile the page exactly */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			/* Must fit in OOB and still meet the per-page correction need */
			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			/* total correctable bits per page for this candidate */
			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	/* best_corr == 0 means no candidate fitted at all */
	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}

/**
 * nand_ecc_choose_conf - Set the ECC strength and ECC step size
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to following logic
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if it is supported by this controller.
 * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If available OOB size can't fit the chip
 *    requirement then fallback to the maximum ECC step size and ECC strength.
 *
 * On success, the chosen ECC settings are set.
 */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
		return -EINVAL;

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (chip->ecc.options & NAND_ECC_MAXIMIZE)
		return nand_maximize_ecc(chip, caps, oobavail);

	/* Match the chip requirement first, maximize as a fallback */
	if (!nand_match_ecc_req(chip, caps, oobavail))
		return 0;

	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);

/*
 * Check if the chip configuration meets the datasheet requirements.
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
static bool nand_ecc_strength_good(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int corr, ds_corr;

	if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
		/* Not enough information */
		return true;

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * ecc->strength) / ecc->size;
	ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
		  chip->base.eccreq.step_size;

	return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
}

/* nand_ops->erase hook: erase one block, with the right die selected */
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	unsigned int eb = nanddev_pos_to_row(nand, pos);
	int ret;

	/* Convert the page/row address into an eraseblock address */
	eb >>= nand->rowconv.eraseblock_addr_shift;

	nand_select_target(chip, pos->target);
	ret = nand_erase_op(chip, eb);
	nand_deselect_target(chip);

	return ret;
}

/* nand_ops->markbad hook: write a bad-block marker for the given position */
static int rawnand_markbad(struct nand_device *nand,
			   const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);

	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
}

/* nand_ops->isbad hook: check the bad-block marker of the given position */
static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	int
ret;

	nand_select_target(chip, pos->target);
	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
	nand_deselect_target(chip);

	return ret;
}

/* Generic NAND operations exposed to the generic NAND core (nanddev) */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};

/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer covering a full page plus its OOB */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explicitly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * (Software BCH installs its own layout in nand_set_ecc_soft_ops().)
	 */
	if (!mtd->ooblayout &&
	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->mode == NAND_ECC_NONE) {
				mtd_set_ooblayout(mtd,
						  &nand_ooblayout_lp_ops);
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */

	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;
		fallthrough;
	case NAND_ECC_HW:
		/* Use standard hwecc read page function?
 */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Subpage writes need the HW engine's hwctl/calculate hooks */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;
	case NAND_ECC_HW_SYNDROME:
		/*
		 * Reject the config unless the driver either supplied the
		 * low-level hooks or overrode both page accessors itself.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		/* Page smaller than the ECC step: fall back to SW Hamming */
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		ecc->algo = NAND_ECC_HAMMING;
		fallthrough;
	case NAND_ECC_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret) {
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		break;

	case NAND_ECC_ON_DIE:
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers for the calculate/correct hooks, one OOB each */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* propagate ecc info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}
	ecc->total = ecc->steps * ecc->bytes;
	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(chip))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			chip->base.eccreq.strength,
			chip->base.eccreq.step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Initialize the ->data_interface field. */
	ret = nand_init_data_interface(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_data_interface(chip, i);
		if (ret)
			goto err_nanddev_cleanup;
	}

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_nanddev_cleanup;

	return 0;


err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}

/* Give the controller driver a chance to allocate per-chip resources */
static int nand_attach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->attach_chip)
		return chip->controller->ops->attach_chip(chip);

	return 0;
}

/* Counterpart of nand_attach(): release controller per-chip resources */
static void nand_detach(struct nand_chip *chip)
{
	if (chip->controller->ops && chip->controller->ops->detach_chip)
		chip->controller->ops->detach_chip(chip);
}

/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for.
 * @ids: optional flash IDs table
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	/* Phase 1: identify the chip(s) */
	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	/* Let the controller allocate resources now that the chip is known */
	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	/* Phase 2: install defaults, init ECC, scan/build the BBT */
	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);

/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Software BCH keeps its state in ecc.priv; release it */
	if (chip->ecc.mode == NAND_ECC_SOFT &&
	    chip->ecc.algo == NAND_ECC_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	nanddev_cleanup(&chip->base);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
	    & NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}

EXPORT_SYMBOL_GPL(nand_cleanup);

/**
 * nand_release - [NAND Interface] Unregister the MTD device and free resources
 * held by the NAND device
 * @chip: NAND chip object
 */
void nand_release(struct nand_chip *chip)
{
	mtd_device_unregister(nand_to_mtd(chip));
	nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");